#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"

void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long data, text, lib, swap;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	swap = get_mm_counter(mm, MM_SWAPENTS);
	seq_printf(m,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n"
		"VmSwap:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		(total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
		swap << (PAGE_SHIFT-10));
}
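
/*
 * Illustrative note (not part of the original source): the
 * "<< (PAGE_SHIFT-10)" idiom above converts a page count straight to
 * kB.  With 4K pages PAGE_SHIFT is 12, so it reduces to "pages << 2",
 * i.e. pages * 4096 / 1024, without a multiply/divide pair.
 */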

unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	*shared = get_mm_counter(mm, MM_FILEPAGES);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
	return mm->total_vm;
}

static void pad_len_spaces(struct seq_file *m, int len)
{
	len = 25 + sizeof(void*) * 6 - len;
	if (len < 1)
		len = 1;
	seq_printf(m, "%*c", len, ' ');
}

static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	if (vma && vma != priv->tail_vma) {
		struct mm_struct *mm = vma->vm_mm;
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}

static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma, *tail_vma = NULL;
	loff_t l = *pos;

	/* Clear the per syscall fields in priv */
	priv->task = NULL;
	priv->tail_vma = NULL;

	/*
	 * We remember last_addr rather than next_addr to hit with
	 * mmap_cache most of the time. We have zero last_addr at
	 * the beginning and also after lseek. We will have -1 last_addr
	 * after the end of the vmas.
	 */

	if (last_addr == -1UL)
		return NULL;

	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return NULL;

	mm = mm_for_maps(priv->task);
	if (!mm)
		return NULL;
	down_read(&mm->mmap_sem);

	tail_vma = get_gate_vma(priv->task);
	priv->tail_vma = tail_vma;

	/* Start with last addr hint */
	vma = find_vma(mm, last_addr);
	if (last_addr && vma) {
		vma = vma->vm_next;
		goto out;
	}

	/*
	 * Check the vma index is within the range and do
	 * sequential scan until m_index.
	 */
	vma = NULL;
	if ((unsigned long)l < mm->map_count) {
		vma = mm->mmap;
		while (l-- && vma)
			vma = vma->vm_next;
		goto out;
	}

	if (l != mm->map_count)
		tail_vma = NULL; /* After gate vma */

out:
	if (vma)
		return vma;

	/* End of vmas has been reached */
	m->version = (tail_vma != NULL)? 0: -1UL;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return tail_vma;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct vm_area_struct *tail_vma = priv->tail_vma;

	(*pos)++;
	if (vma && (vma != tail_vma) && vma->vm_next)
		return vma->vm_next;
	vma_stop(priv, vma);
	return (vma != tail_vma)? tail_vma: NULL;
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;

	vma_stop(priv, vma);
	if (priv->task)
		put_task_struct(priv->task);
}

static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	struct proc_maps_private *priv;
	int ret = -ENOMEM;
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}

static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	int flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	unsigned long start;
	dev_t dev = 0;
	int len;

	if (file) {
		struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	/* We don't show the stack guard page in /proc/maps */
	start = vma->vm_start;
	if (vma->vm_flags & VM_GROWSDOWN)
		if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
			start += PAGE_SIZE;

	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
			start,
			vma->vm_end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			pgoff,
			MAJOR(dev), MINOR(dev), ino, &len);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		pad_len_spaces(m, len);
		seq_path(m, &file->f_path, "\n");
	} else {
		const char *name = arch_vma_name(vma);
		if (!name) {
			if (mm) {
				if (vma->vm_start <= mm->start_brk &&
						vma->vm_end >= mm->brk) {
					name = "[heap]";
				} else if (vma->vm_start <= mm->start_stack &&
					   vma->vm_end >= mm->start_stack) {
					name = "[stack]";
				}
			} else {
				name = "[vdso]";
			}
		}
		if (name) {
			pad_len_spaces(m, len);
			seq_puts(m, name);
		}
	}
	seq_putc(m, '\n');
}
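
/*
 * Illustrative sample of the output produced above (addresses made up;
 * field widths track sizeof(void *) on the running kernel):
 *
 *	08048000-08056000 r-xp 00000000 03:0c 64593     /bin/cat
 *	bffff000-c0000000 rw-p 00000000 00:00 0         [stack]
 *
 * i.e. start-end, permissions (r/w/x, s=shared/p=private), file offset,
 * major:minor device, inode, then the pathname or [heap]/[stack]/[vdso].
 */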

static int show_map(struct seq_file *m, void *v)
{
	struct vm_area_struct *vma = v;
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;

	show_map_vma(m, vma);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
	return 0;
}

static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};

static int maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

const struct file_operations proc_maps_operations = {
	.open		= maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

/*
 * Proportional Set Size (PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64bit
 * fixed-point pss counter to minimize division errors.  So (pss >>
 * PSS_SHIFT) would be the real byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 *	- 1M 3-user-pages add up to 8KB errors;
 *	- supports mapcount up to 2^24, or 16M;
 *	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12
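
/*
 * Worked example (illustrative, not from the original source): a 4K
 * page with mapcount 3 adds (4096 << 12) / 3 = 5592405 to pss, which
 * is ~1365.33 bytes in 20.12 fixed point.  The fractional part
 * survives accumulation; only the final "pss >> (10 + PSS_SHIFT)" in
 * show_smap() rounds down to whole kB.
 */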

#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
	struct vm_area_struct *vma;
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long anonymous;
	unsigned long swap;
	u64 pss;
};

static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;
	int mapcount;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (is_swap_pte(ptent)) {
			mss->swap += PAGE_SIZE;
			continue;
		}

		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		if (PageAnon(page))
			mss->anonymous += PAGE_SIZE;

		mss->resident += PAGE_SIZE;
		/* Accumulate the size in pages that have been accessed. */
		if (pte_young(ptent) || PageReferenced(page))
			mss->referenced += PAGE_SIZE;
		mapcount = page_mapcount(page);
		if (mapcount >= 2) {
			if (pte_dirty(ptent) || PageDirty(page))
				mss->shared_dirty += PAGE_SIZE;
			else
				mss->shared_clean += PAGE_SIZE;
			mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
		} else {
			if (pte_dirty(ptent) || PageDirty(page))
				mss->private_dirty += PAGE_SIZE;
			else
				mss->private_clean += PAGE_SIZE;
			mss->pss += (PAGE_SIZE << PSS_SHIFT);
		}
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static int show_smap(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;
	struct mm_walk smaps_walk = {
		.pmd_entry = smaps_pte_range,
		.mm = vma->vm_mm,
		.private = &mss,
	};

	memset(&mss, 0, sizeof mss);
	mss.vma = vma;
	/* mmap_sem is held in m_start */
	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
		walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);

	show_map_vma(m, vma);

	seq_printf(m,
		   "Size:           %8lu kB\n"
		   "Rss:            %8lu kB\n"
		   "Pss:            %8lu kB\n"
		   "Shared_Clean:   %8lu kB\n"
		   "Shared_Dirty:   %8lu kB\n"
		   "Private_Clean:  %8lu kB\n"
		   "Private_Dirty:  %8lu kB\n"
		   "Referenced:     %8lu kB\n"
		   "Anonymous:      %8lu kB\n"
		   "Swap:           %8lu kB\n"
		   "KernelPageSize: %8lu kB\n"
		   "MMUPageSize:    %8lu kB\n"
		   "Locked:         %8lu kB\n",
		   (vma->vm_end - vma->vm_start) >> 10,
		   mss.resident >> 10,
		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
		   mss.shared_clean  >> 10,
		   mss.shared_dirty  >> 10,
		   mss.private_clean >> 10,
		   mss.private_dirty >> 10,
		   mss.referenced >> 10,
		   mss.anonymous >> 10,
		   mss.swap >> 10,
		   vma_kernel_pagesize(vma) >> 10,
		   vma_mmu_pagesize(vma) >> 10,
		   (vma->vm_flags & VM_LOCKED) ?
			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
	return 0;
}

static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_smap
};

static int smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

const struct file_operations proc_smaps_operations = {
	.open		= smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;
		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

#define CLEAR_REFS_ALL 1
#define CLEAR_REFS_ANON 2
#define CLEAR_REFS_MAPPED 3

static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	long type;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	if (strict_strtol(strstrip(buffer), 10, &type))
		return -EINVAL;
	if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED)
		return -EINVAL;
	task = get_proc_task(file->f_path.dentry->d_inode);
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		struct mm_walk clear_refs_walk = {
			.pmd_entry = clear_refs_pte_range,
			.mm = mm,
		};
		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			clear_refs_walk.private = vma;
			if (is_vm_hugetlb_page(vma))
				continue;
			/*
			 * Writing 1 to /proc/pid/clear_refs affects all pages.
			 *
			 * Writing 2 to /proc/pid/clear_refs only affects
			 * anonymous pages.
			 *
			 * Writing 3 to /proc/pid/clear_refs only affects file
			 * mapped pages.
			 */
			if (type == CLEAR_REFS_ANON && vma->vm_file)
				continue;
			if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
				continue;
			walk_page_range(vma->vm_start, vma->vm_end,
					&clear_refs_walk);
		}
		flush_tlb_mm(mm);
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	put_task_struct(task);

	return count;
}

const struct file_operations proc_clear_refs_operations = {
	.write		= clear_refs_write,
	.llseek		= noop_llseek,
};
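
/*
 * Illustrative usage from userspace (not part of this file): the
 * CLEAR_REFS_* values map directly onto what is written, e.g.
 *
 *	echo 1 > /proc/$pid/clear_refs		(all pages)
 *	echo 2 > /proc/$pid/clear_refs		(anonymous pages only)
 *	echo 3 > /proc/$pid/clear_refs		(file-mapped pages only)
 *
 * A subsequent read of /proc/$pid/smaps then reports in "Referenced:"
 * only pages touched since the write, approximating the working set
 * over that interval.
 */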

struct pagemapread {
	int pos, len;
	u64 *buffer;
};

#define PM_ENTRY_BYTES      sizeof(u64)
#define PM_STATUS_BITS      3
#define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
#define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
#define PM_STATUS(nr)       (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
#define PM_PSHIFT_BITS      6
#define PM_PSHIFT_OFFSET    (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
#define PM_PSHIFT_MASK      (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
#define PM_PSHIFT(x)        (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
#define PM_PFRAME_MASK      ((1LL << PM_PSHIFT_OFFSET) - 1)
#define PM_PFRAME(x)        ((x) & PM_PFRAME_MASK)

#define PM_PRESENT          PM_STATUS(4LL)
#define PM_SWAP             PM_STATUS(2LL)
#define PM_NOT_PRESENT      PM_PSHIFT(PAGE_SHIFT)
#define PM_END_OF_BUFFER    1

static int add_to_pagemap(unsigned long addr, u64 pfn,
			  struct pagemapread *pm)
{
	pm->buffer[pm->pos++] = pfn;
	if (pm->pos >= pm->len)
		return PM_END_OF_BUFFER;
	return 0;
}

static int pagemap_pte_hole(unsigned long start, unsigned long end,
			    struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	unsigned long addr;
	int err = 0;
	for (addr = start; addr < end; addr += PAGE_SIZE) {
		err = add_to_pagemap(addr, PM_NOT_PRESENT, pm);
		if (err)
			break;
	}
	return err;
}

static u64 swap_pte_to_pagemap_entry(pte_t pte)
{
	swp_entry_t e = pte_to_swp_entry(pte);
	return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
}

static u64 pte_to_pagemap_entry(pte_t pte)
{
	u64 pme = 0;
	if (is_swap_pte(pte))
		pme = PM_PFRAME(swap_pte_to_pagemap_entry(pte))
			| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
	else if (pte_present(pte))
		pme = PM_PFRAME(pte_pfn(pte))
			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
	return pme;
}
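
/*
 * Example encoding (illustrative, not from the original source): with
 * MAX_SWAPFILES_SHIFT == 5, a page swapped out to device 1 at offset
 * 0x1234 packs into the PFN field as
 *
 *	(0x1234 << 5) | 1 = 0x24681
 *
 * and PM_SWAP is set in the top status bits, so userspace can recover
 * both the swap type (low 5 bits) and the offset.
 */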

static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct vm_area_struct *vma;
	struct pagemapread *pm = walk->private;
	pte_t *pte;
	int err = 0;

	/* find the first VMA at or above 'addr' */
	vma = find_vma(walk->mm, addr);
	for (; addr != end; addr += PAGE_SIZE) {
		u64 pfn = PM_NOT_PRESENT;

		/* check to see if we've left 'vma' behind
		 * and need a new, higher one */
		if (vma && (addr >= vma->vm_end))
			vma = find_vma(walk->mm, addr);

		/* check that 'vma' actually covers this address,
		 * and that it isn't a huge page vma */
		if (vma && (vma->vm_start <= addr) &&
		    !is_vm_hugetlb_page(vma)) {
			pte = pte_offset_map(pmd, addr);
			pfn = pte_to_pagemap_entry(*pte);
			/* unmap before userspace copy */
			pte_unmap(pte);
		}
		err = add_to_pagemap(addr, pfn, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}

#ifdef CONFIG_HUGETLB_PAGE
static u64 huge_pte_to_pagemap_entry(pte_t pte, int offset)
{
	u64 pme = 0;
	if (pte_present(pte))
		pme = PM_PFRAME(pte_pfn(pte) + offset)
			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
	return pme;
}

/* This function walks within one hugetlb entry in the single call */
static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	int err = 0;
	u64 pfn;

	for (; addr != end; addr += PAGE_SIZE) {
		int offset = (addr & ~hmask) >> PAGE_SHIFT;
		pfn = huge_pte_to_pagemap_entry(*pte, offset);
		err = add_to_pagemap(addr, pfn, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}
#endif /* HUGETLB_PAGE */

/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-54  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-54  swap offset if swapped
 * Bits 55-60 page shift (page size = 1<<page shift)
 * Bit  61    reserved for future use
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap.  Unmapped pages return a null PFN.  This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */
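
/*
 * Illustrative userspace decode of one entry (a sketch, not part of
 * this file; fd and vaddr are hypothetical, and 4K pages are assumed
 * per the layout documented above):
 *
 *	uint64_t ent;
 *	pread(fd, &ent, sizeof(ent), (vaddr >> 12) * sizeof(ent));
 *	present = ent >> 63;
 *	swapped = (ent >> 62) & 1;
 *	pfn     = ent & ((1ULL << 55) - 1);	(meaningful when present)
 *	stype   = ent & 0x1f;			(swap type, when swapped)
 */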

#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
#define PAGEMAP_WALK_MASK	(PMD_MASK)
static ssize_t pagemap_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
	struct mm_struct *mm;
	struct pagemapread pm;
	int ret = -ESRCH;
	struct mm_walk pagemap_walk = {};
	unsigned long src;
	unsigned long svpfn;
	unsigned long start_vaddr;
	unsigned long end_vaddr;
	int copied = 0;

	if (!task)
		goto out;

	ret = -EACCES;
	if (!ptrace_may_access(task, PTRACE_MODE_READ))
		goto out_task;

	ret = -EINVAL;
	/* file position must be aligned */
	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
		goto out_task;

	ret = 0;

	if (!count)
		goto out_task;

	mm = get_task_mm(task);
	if (!mm)
		goto out_task;

	pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
	pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
	ret = -ENOMEM;
	if (!pm.buffer)
		goto out_mm;

	pagemap_walk.pmd_entry = pagemap_pte_range;
	pagemap_walk.pte_hole = pagemap_pte_hole;
#ifdef CONFIG_HUGETLB_PAGE
	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
#endif
	pagemap_walk.mm = mm;
	pagemap_walk.private = &pm;

	src = *ppos;
	svpfn = src / PM_ENTRY_BYTES;
	start_vaddr = svpfn << PAGE_SHIFT;
	end_vaddr = TASK_SIZE_OF(task);

	/* watch out for wraparound */
	if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
		start_vaddr = end_vaddr;

	/*
	 * The odds are that this will stop walking way
	 * before end_vaddr, because the length of the
	 * user buffer is tracked in "pm", and the walk
	 * will stop when we hit the end of the buffer.
	 */
	ret = 0;
	while (count && (start_vaddr < end_vaddr)) {
		int len;
		unsigned long end;

		pm.pos = 0;
		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
		/* overflow ? */
		if (end < start_vaddr || end > end_vaddr)
			end = end_vaddr;
		down_read(&mm->mmap_sem);
		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
		up_read(&mm->mmap_sem);
		start_vaddr = end;

		len = min(count, PM_ENTRY_BYTES * pm.pos);
		if (copy_to_user(buf, pm.buffer, len)) {
			ret = -EFAULT;
			goto out_free;
		}
		copied += len;
		buf += len;
		count -= len;
	}
	*ppos += copied;
	if (!ret || ret == PM_END_OF_BUFFER)
		ret = copied;

out_free:
	kfree(pm.buffer);
out_mm:
	mmput(mm);
out_task:
	put_task_struct(task);
out:
	return ret;
}

const struct file_operations proc_pagemap_operations = {
	.llseek		= mem_lseek, /* borrow this */
	.read		= pagemap_read,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */

#ifdef CONFIG_NUMA
extern int show_numa_map(struct seq_file *m, void *v);

static const struct seq_operations proc_pid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_numa_map,
};

static int numa_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_numa_maps_op);
}

const struct file_operations proc_numa_maps_operations = {
	.open		= numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif