--- memory.c (1425075e7272faaa3629a1e2df679c0ba4cf55d3)
+++ memory.c (3ed3a4f0ddffece942bb2661924d87be4ce63cb7)
 /*
  * linux/mm/memory.c
  *
  * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
  */

 /*
  * demand-loading started 01.12.91 - seems it is high on the list of

--- 548 unchanged lines hidden ---

 			}
 			free_pgd_range(tlb, addr, vma->vm_end,
 				floor, next? next->vm_start: ceiling);
 		}
 		vma = next;
 	}
 }

-int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
-		pmd_t *pmd, unsigned long address)
+int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
 {
 	spinlock_t *ptl;
 	pgtable_t new = pte_alloc_one(mm, address);
 	if (!new)
 		return -ENOMEM;

 	/*
 	 * Ensure all pte setup (eg. pte page lock and page clearing) are

--- 971 unchanged lines hidden ---

  * vma cannot be a COW mapping.
  *
  * As this is called only for pages that do not currently exist, we
  * do not need to flush old virtual caches or the TLB.
  */
 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn)
 {
+	return vm_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
+}
+EXPORT_SYMBOL(vm_insert_pfn);
+
+/**
+ * vm_insert_pfn_prot - insert single pfn into user vma with specified pgprot
+ * @vma: user vma to map to
+ * @addr: target user address of this page
+ * @pfn: source kernel pfn
+ * @pgprot: pgprot flags for the inserted page
+ *
+ * This is exactly like vm_insert_pfn, except that it allows drivers
+ * to override pgprot on a per-page basis.
+ *
+ * This only makes sense for IO mappings, and it makes no sense for
+ * cow mappings. In general, using multiple vmas is preferable;
+ * vm_insert_pfn_prot should only be used if using multiple VMAs is
+ * impractical.
+ */
+int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
+			unsigned long pfn, pgprot_t pgprot)
+{
 	int ret;
-	pgprot_t pgprot = vma->vm_page_prot;
 	/*
 	 * Technically, architectures with pte_special can avoid all these
 	 * restrictions (same for remap_pfn_range). However we would like
 	 * consistency in testing and feature parity among all, so we should
 	 * try to keep these invariants in place for everybody.
 	 */
 	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
 	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==

--- 5 unchanged lines hidden ---

 		return -EFAULT;
 	if (track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV)))
 		return -EINVAL;

 	ret = insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot);

 	return ret;
 }
-EXPORT_SYMBOL(vm_insert_pfn);
+EXPORT_SYMBOL(vm_insert_pfn_prot);
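
A usage sketch (illustrative, not part of this diff): a driver that sets
VM_PFNMAP on its vma at mmap() time could call the new vm_insert_pfn_prot()
from its fault handler to map device pages write-combined on a per-page
basis. mydev_get_pfn() is a hypothetical helper; everything else follows
this kernel's fault-handler conventions.

static int mydev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	/* hypothetical lookup of the device pfn backing this file offset */
	unsigned long pfn = mydev_get_pfn(vma->vm_file, vmf->pgoff);
	pgprot_t prot = pgprot_writecombine(vma->vm_page_prot);
	int err;

	err = vm_insert_pfn_prot(vma, (unsigned long)vmf->virtual_address,
				 pfn, prot);
	/* -EBUSY means another thread already installed the pte */
	if (err && err != -EBUSY)
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;
}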

 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
 			pfn_t pfn)
 {
 	BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));

 	if (addr < vma->vm_start || addr >= vma->vm_end)
 		return -EFAULT;

--- 285 unchanged lines hidden ---

 int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
 			unsigned long size, pte_fn_t fn, void *data)
 {
 	pgd_t *pgd;
 	unsigned long next;
 	unsigned long end = addr + size;
 	int err;

-	BUG_ON(addr >= end);
+	if (WARN_ON(addr >= end))
+		return -EINVAL;
+
 	pgd = pgd_offset(mm, addr);
 	do {
 		next = pgd_addr_end(addr, end);
 		err = apply_to_pud_range(mm, pgd, addr, next, fn, data);
 		if (err)
 			break;
 	} while (pgd++, addr = next, addr != end);

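A sketch of the pte_fn_t contract (illustrative, not from this commit): fn is
invoked once for every pte slot in [addr, addr + size), and a non-zero return
aborts the walk; with the BUG_ON relaxed to WARN_ON above, an empty or
inverted range now fails with -EINVAL instead of panicking.

static int count_present_pte(pte_t *pte, pgtable_t token,
			     unsigned long addr, void *data)
{
	if (pte_present(*pte))
		++*(unsigned long *)data;
	return 0;	/* returning non-zero would stop the walk */
}

static unsigned long count_present(struct mm_struct *mm, unsigned long start,
				   unsigned long size)
{
	unsigned long n = 0;

	if (apply_to_page_range(mm, start, size, count_present_pte, &n))
		return 0;	/* bad range or callback error */
	return n;
}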

--- 1229 unchanged lines hidden ---

  * but allow concurrent faults).
  * The mmap_sem may have been released depending on flags and our
  * return value. See filemap_fault() and __lock_page_or_retry().
  */
 static int do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long address, pte_t *page_table, pmd_t *pmd,
 		unsigned int flags, pte_t orig_pte)
 {
-	pgoff_t pgoff = (((address & PAGE_MASK)
-			- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+	pgoff_t pgoff = linear_page_index(vma, address);

 	pte_unmap(page_table);
 	/* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
 	if (!vma->vm_ops->fault)
 		return VM_FAULT_SIGBUS;
 	if (!(flags & FAULT_FLAG_WRITE))
 		return do_read_fault(mm, vma, address, pmd, pgoff, flags,
 				orig_pte);
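For reference, linear_page_index() computes the same offset the removed
open-coded expression did (vma->vm_start is page-aligned, so masking the low
bits of address before the shift changes nothing), and it additionally
handles hugetlb vmas. Roughly, from include/linux/pagemap.h:

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}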

--- 257 unchanged lines hidden ---

 				huge_pmd_set_accessed(mm, vma, address, pmd,
 						      orig_pmd, dirty);
 				return 0;
 			}
 		}
 	}

 	/*
-	 * Use __pte_alloc instead of pte_alloc_map, because we can't
+	 * Use pte_alloc() instead of pte_alloc_map, because we can't
 	 * run pte_offset_map on the pmd, if a huge pmd could
 	 * materialize from under us from a different thread.
 	 */
-	if (unlikely(pmd_none(*pmd)) &&
-	    unlikely(__pte_alloc(mm, vma, pmd, address)))
+	if (unlikely(pte_alloc(mm, pmd, address)))
 		return VM_FAULT_OOM;
 	/*
 	 * If a huge pmd materialized under us just retry later. Use
 	 * pmd_trans_unstable() instead of pmd_trans_huge() to ensure the pmd
 	 * didn't become pmd_trans_huge under us and then back to pmd_none, as
 	 * a result of MADV_DONTNEED running immediately after a huge pmd fault
 	 * in a different thread of this mm, in turn leading to a misleading
 	 * pmd_trans_huge() retval. All we have to ensure is that it is a

--- 501 unchanged lines hidden ---
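For context, the pte_alloc() used in the hunk above is a helper this commit
introduces in include/linux/mm.h; it folds the pmd_none() check into the
allocation, roughly:

#define pte_alloc(mm, pmd, address)					\
	(unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd, address))

which is why the explicit pmd_none() test disappears from the caller.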