huge_memory.c at 84a73014d86fd660822a20c032625e3afe99ca58 (lines marked "-")
huge_memory.c at de60f5f10c58d4f34b68622442c0e04180367f3f (lines marked "+")
Unmarked lines are identical in both versions.
/*
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

--- 1293 unchanged lines hidden (view full) ---

		 * set_bit will be required on the pmd to set the
		 * young bit, instead of the current set_pmd_at.
		 */
		_pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
		if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
					  pmd, _pmd, 1))
			update_mmu_cache_pmd(vma, addr, pmd);
	}
-	if ((flags & FOLL_POPULATE) && (vma->vm_flags & VM_LOCKED)) {
+	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();
			if (page->mapping)
				mlock_vma_page(page);
			unlock_page(page);
		}
	}
	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;

--- 556 unchanged lines hidden (view full) ---
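The hunk above differs only in the name of the gup flag that requests mlocking while a range is being populated (FOLL_POPULATE in 84a73014, FOLL_MLOCK in de60f5f1); the guarded behaviour is the same in both versions: when a VM_LOCKED vma is populated, the huge page's head page is mlocked if it is still mapped. As a reading aid, here is a minimal userspace sketch of the situation that roughly exercises this path. The mmap()/madvise(MADV_HUGEPAGE)/mlock() calls are standard APIs; whether the mapping actually ends up backed by a huge page depends on alignment and kernel policy, so this only sets up the conditions.

/*
 * Illustrative userspace program (not part of huge_memory.c): create an
 * anonymous mapping, hint THP with MADV_HUGEPAGE, then mlock() it.
 * mlock() sets VM_LOCKED and populates the range, which is roughly the
 * path on which follow_trans_huge_pmd() runs with the mlock/populate
 * gup flag and calls mlock_vma_page() on the compound head page.
 */
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 2UL << 20;			/* one pmd-sized (2 MiB) range */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	madvise(p, len, MADV_HUGEPAGE);		/* ask for THP on this vma */

	if (mlock(p, len))			/* VM_LOCKED + populate */
		return 1;

	memset(p, 0, len);			/* touch the locked memory */
	munlock(p, len);
	munmap(p, len);
	return 0;
}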
		 * http://support.amd.com/us/Processor_TechDocs/41322.pdf,
		 * Erratum 383 on page 93. Intel should be safe but is
		 * also warns that it's only safe if the permission
		 * and cache attributes of the two entries loaded in
		 * the two TLB is identical (which should be the case
		 * here). But it is generally safer to never allow
		 * small and huge TLB entries for the same virtual
		 * address to be loaded simultaneously. So instead of
-		 * doing "pmd_populate(); flush_tlb_range();" we first
+		 * doing "pmd_populate(); flush_pmd_tlb_range();" we first
		 * mark the current pmd notpresent (atomically because
		 * here the pmd_trans_huge and pmd_trans_splitting
		 * must remain set at all times on the pmd until the
		 * split is complete for this pmd), then we flush the
		 * SMP TLB and finally we write the non-huge version
		 * of the pmd entry with pmd_populate.
		 */
		pmdp_invalidate(vma, address, pmd);

--- 309 unchanged lines hidden (view full) ---
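The only difference in this hunk is which flush helper the comment names (flush_tlb_range() vs. flush_pmd_tlb_range()); the ordering it describes is identical in both versions. Restated as a schematic kernel-style sketch: the function name below is invented and the snippet is not a compilable unit on its own, but the two calls and the three steps are the ones the comment and the surrounding (mostly hidden) code refer to, and the TLB flush sits inside the generic pmdp_invalidate() implementation.

/* Schematic restatement of the split ordering described above
 * (sketch only; the function name is made up for illustration). */
static void huge_pmd_split_ordering_sketch(struct vm_area_struct *vma,
					   struct mm_struct *mm,
					   unsigned long address,
					   pmd_t *pmd, pgtable_t pgtable)
{
	/*
	 * 1. Atomically mark the huge pmd not-present, so no CPU can
	 *    load a fresh huge TLB entry for this range from here on.
	 * 2. Flush the TLB on all CPUs (in the generic implementation
	 *    pmdp_invalidate() does this via flush_pmd_tlb_range()),
	 *    so any huge entry already cached is gone before a small
	 *    entry can be loaded for the same address (Erratum 383).
	 */
	pmdp_invalidate(vma, address, pmd);

	/* 3. Only now install the regular (non-huge) page table. */
	pmd_populate(mm, pmd, pgtable);
}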
{
	struct page *page;
	pte_t *_pte;
	int none_or_zero = 0;
	bool referenced = false, writable = false;
	for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
-		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
+		if (pte_none(pteval) || (pte_present(pteval) &&
+		    is_zero_pfn(pte_pfn(pteval)))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none)
				continue;
			else
				goto out;
		}
		if (!pte_present(pteval))
			goto out;

--- 871 unchanged lines hidden ---
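This last hunk is the one functional difference: the de60f5f1 side calls is_zero_pfn() only on a pte that is known to be present. The point is that pte_pfn() is only meaningful for a present pte; on a non-present entry such as a swap entry, the bits read back as a pfn can match the zero pfn by accident, so without the pte_present() guard such an entry could wrongly be counted as none_or_zero. A hypothetical helper, written here purely to spell out the "+" condition (neither the helper nor its name exists in either version of the file), would read:

/*
 * Hypothetical helper (illustration only): a pte counts as "none or
 * zero page" only if it is empty, or if it is present *and* maps the
 * shared zero page. pte_pfn() on a non-present entry (e.g. a swap
 * entry) is not a real page frame number, so comparing it against the
 * zero pfn without the pte_present() guard could match by accident.
 */
static inline bool pte_is_none_or_zero_page(pte_t pteval)
{
	return pte_none(pteval) ||
	       (pte_present(pteval) && is_zero_pfn(pte_pfn(pteval)));
}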