#include <linux/mm.h>
#include <linux/gfp.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#define PGALLOC_GFP	(GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)

#ifdef CONFIG_HIGHPTE
#define PGALLOC_USER_GFP __GFP_HIGHMEM
#else
#define PGALLOC_USER_GFP 0
#endif

gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(PGALLOC_GFP);
}

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

	pte = alloc_pages(__userpte_alloc_gfp, 0);
	if (!pte)
		return NULL;
	if (!pgtable_page_ctor(pte)) {
		__free_page(pte);
		return NULL;
	}
	return pte;
}

static int __init setup_userpte(char *arg)
{
	if (!arg)
		return -EINVAL;

	/*
	 * "userpte=nohigh" disables allocation of user pagetables in
	 * high memory.
	 */
	if (strcmp(arg, "nohigh") == 0)
		__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
	else
		return -EINVAL;
	return 0;
}
early_param("userpte", setup_userpte);

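/*
 * Illustration (not part of the original file): with CONFIG_HIGHPTE=y,
 * the default and "userpte=nohigh" masks work out to:
 *
 *	__userpte_alloc_gfp == PGALLOC_GFP | __GFP_HIGHMEM	(default)
 *	__userpte_alloc_gfp == PGALLOC_GFP			(userpte=nohigh)
 *
 * so pte_alloc_one() then behaves like a !CONFIG_HIGHPTE build and takes
 * user pagetable pages from lowmem only. A hypothetical check of the
 * effective mask could look like this:
 */
#if 0
static void __init example_report_userpte(void)
{
	if (__userpte_alloc_gfp & __GFP_HIGHMEM)
		pr_info("user PTE pages may be allocated from highmem\n");
	else
		pr_info("user PTE pages restricted to lowmem\n");
}
#endif
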
void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);
}

#if PAGETABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	struct page *page = virt_to_page(pmd);

	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
	/*
	 * NOTE! For PAE, any changes to the top page-directory-pointer-table
	 * entries need a full cr3 reload to flush.
	 */
#ifdef CONFIG_X86_PAE
	tlb->need_flush_all = 1;
#endif
	pgtable_pmd_page_dtor(page);
	tlb_remove_page(tlb, page);
}

#if PAGETABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pud));
}
#endif	/* PAGETABLE_LEVELS > 3 */
#endif	/* PAGETABLE_LEVELS > 2 */

static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}

#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)

static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{
	BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
	virt_to_page(pgd)->index = (pgoff_t)mm;
}

struct mm_struct *pgd_page_get_mm(struct page *page)
{
	return (struct mm_struct *)page->index;
}

static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
{
	/*
	 * If the pgd points to a shared pagetable level (either the
	 * ptes in non-PAE, or shared PMD in PAE), then just copy the
	 * references from swapper_pg_dir.
	 */
	if (PAGETABLE_LEVELS == 2 ||
	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
	    PAGETABLE_LEVELS == 4) {
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD) {
		pgd_set_mm(pgd, mm);
		pgd_list_add(pgd);
	}
}

static void pgd_dtor(pgd_t *pgd)
{
	if (SHARED_KERNEL_PMD)
		return;

	spin_lock(&pgd_lock);
	pgd_list_del(pgd);
	spin_unlock(&pgd_lock);
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- nyc
 */

#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update. Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/*
	 * Note: almost everything apart from _PAGE_PRESENT is
	 * reserved at the pmd (PDPT) level.
	 */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	flush_tlb_mm(mm);
}
#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS	0

#endif	/* CONFIG_X86_PAE */

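/*
 * Sketch (illustrative, not part of the original file): on PAE the CPU
 * snapshots the four PDPT entries when cr3 is written, so a bare
 * set_pud() on a PDPT slot is not guaranteed to be observed until the
 * next cr3 load. That is why pud_populate() above ends with
 * flush_tlb_mm(), which reloads cr3 when @mm is currently loaded. A
 * hypothetical open-coded equivalent of the update step:
 */
#if 0
static void example_pdpt_set(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	/* almost all attribute bits are reserved at this level */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));
	flush_tlb_mm(mm);	/* force the cr3 reload the SDM requires */
}
#endif
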
static void free_pmds(struct mm_struct *mm, pmd_t *pmds[])
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++)
		if (pmds[i]) {
			pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
			free_page((unsigned long)pmds[i]);
			mm_dec_nr_pmds(mm);
		}
}

static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
{
	int i;
	bool failed = false;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
		if (!pmd)
			failed = true;
		if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
			free_page((unsigned long)pmd);
			pmd = NULL;
			failed = true;
		}
		if (pmd)
			mm_inc_nr_pmds(mm);
		pmds[i] = pmd;
	}

	if (failed) {
		free_pmds(mm, pmds);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
			mm_dec_nr_pmds(mm);
		}
	}
}

static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
	pud_t *pud;
	int i;

	if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
		return;

	pud = pud_offset(pgd, 0);

	for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
		pmd_t *pmd = pmds[i];

		if (i >= KERNEL_PGD_BOUNDARY)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}
}

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;
	pmd_t *pmds[PREALLOCATED_PMDS];

	pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);

	if (pgd == NULL)
		goto out;

	mm->pgd = pgd;

	if (preallocate_pmds(mm, pmds) != 0)
		goto out_free_pgd;

	if (paravirt_pgd_alloc(mm) != 0)
		goto out_free_pmds;

	/*
	 * Make sure that pre-populating the pmds is atomic with
	 * respect to anything walking the pgd_list, so that they
	 * never see a partially populated pgd.
	 */
	spin_lock(&pgd_lock);

	pgd_ctor(mm, pgd);
	pgd_prepopulate_pmd(mm, pgd, pmds);

	spin_unlock(&pgd_lock);

	return pgd;

out_free_pmds:
	free_pmds(mm, pmds);
out_free_pgd:
	free_page((unsigned long)pgd);
out:
	return NULL;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	paravirt_pgd_free(mm, pgd);
	free_page((unsigned long)pgd);
}

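/*
 * Usage sketch (illustrative, not part of this file): the core fork
 * path pairs these two entry points; kernel/fork.c's mm_alloc_pgd()
 * and mm_free_pgd() amount to the following:
 */
#if 0
static int example_mm_alloc_pgd(struct mm_struct *mm)
{
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static void example_mm_free_pgd(struct mm_struct *mm)
{
	pgd_free(mm, mm->pgd);
}
#endif
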
364 */ 365 } 366 367 return changed; 368 } 369 #endif 370 371 int ptep_test_and_clear_young(struct vm_area_struct *vma, 372 unsigned long addr, pte_t *ptep) 373 { 374 int ret = 0; 375 376 if (pte_young(*ptep)) 377 ret = test_and_clear_bit(_PAGE_BIT_ACCESSED, 378 (unsigned long *) &ptep->pte); 379 380 if (ret) 381 pte_update(vma->vm_mm, addr, ptep); 382 383 return ret; 384 } 385 386 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 387 int pmdp_test_and_clear_young(struct vm_area_struct *vma, 388 unsigned long addr, pmd_t *pmdp) 389 { 390 int ret = 0; 391 392 if (pmd_young(*pmdp)) 393 ret = test_and_clear_bit(_PAGE_BIT_ACCESSED, 394 (unsigned long *)pmdp); 395 396 if (ret) 397 pmd_update(vma->vm_mm, addr, pmdp); 398 399 return ret; 400 } 401 #endif 402 403 int ptep_clear_flush_young(struct vm_area_struct *vma, 404 unsigned long address, pte_t *ptep) 405 { 406 /* 407 * On x86 CPUs, clearing the accessed bit without a TLB flush 408 * doesn't cause data corruption. [ It could cause incorrect 409 * page aging and the (mistaken) reclaim of hot pages, but the 410 * chance of that should be relatively low. ] 411 * 412 * So as a performance optimization don't flush the TLB when 413 * clearing the accessed bit, it will eventually be flushed by 414 * a context switch or a VM operation anyway. [ In the rare 415 * event of it not getting flushed for a long time the delay 416 * shouldn't really matter because there's no real memory 417 * pressure for swapout to react to. ] 418 */ 419 return ptep_test_and_clear_young(vma, address, ptep); 420 } 421 422 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 423 int pmdp_clear_flush_young(struct vm_area_struct *vma, 424 unsigned long address, pmd_t *pmdp) 425 { 426 int young; 427 428 VM_BUG_ON(address & ~HPAGE_PMD_MASK); 429 430 young = pmdp_test_and_clear_young(vma, address, pmdp); 431 if (young) 432 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); 433 434 return young; 435 } 436 437 void pmdp_splitting_flush(struct vm_area_struct *vma, 438 unsigned long address, pmd_t *pmdp) 439 { 440 int set; 441 VM_BUG_ON(address & ~HPAGE_PMD_MASK); 442 set = !test_and_set_bit(_PAGE_BIT_SPLITTING, 443 (unsigned long *)pmdp); 444 if (set) { 445 pmd_update(vma->vm_mm, address, pmdp); 446 /* need tlb flush only to serialize against gup-fast */ 447 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); 448 } 449 } 450 #endif 451 452 /** 453 * reserve_top_address - reserves a hole in the top of kernel address space 454 * @reserve - size of hole to reserve 455 * 456 * Can be used to relocate the fixmap area and poke a hole in the top 457 * of kernel address space to make room for a hypervisor. 458 */ 459 void __init reserve_top_address(unsigned long reserve) 460 { 461 #ifdef CONFIG_X86_32 462 BUG_ON(fixmaps_set > 0); 463 __FIXADDR_TOP = round_down(-reserve, 1 << PMD_SHIFT) - PAGE_SIZE; 464 printk(KERN_INFO "Reserving virtual address space above 0x%08lx (rounded to 0x%08lx)\n", 465 -reserve, __FIXADDR_TOP + PAGE_SIZE); 466 #endif 467 } 468 469 int fixmaps_set; 470 471 void __native_set_fixmap(enum fixed_addresses idx, pte_t pte) 472 { 473 unsigned long address = __fix_to_virt(idx); 474 475 if (idx >= __end_of_fixed_addresses) { 476 BUG(); 477 return; 478 } 479 set_pte_vaddr(address, pte); 480 fixmaps_set++; 481 } 482 483 void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys, 484 pgprot_t flags) 485 { 486 __native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags)); 487 } 488