#include <linux/mm.h>
#include <linux/gfp.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO

#ifdef CONFIG_HIGHPTE
#define PGALLOC_USER_GFP __GFP_HIGHMEM
#else
#define PGALLOC_USER_GFP 0
#endif

gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
        return (pte_t *)__get_free_page(PGALLOC_GFP);
}

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *pte;

        pte = alloc_pages(__userpte_alloc_gfp, 0);
        if (pte)
                pgtable_page_ctor(pte);
        return pte;
}

static int __init setup_userpte(char *arg)
{
        if (!arg)
                return -EINVAL;

        /*
         * "userpte=nohigh" disables allocation of user pagetables in
         * high memory.
         */
        if (strcmp(arg, "nohigh") == 0)
                __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
        else
                return -EINVAL;
        return 0;
}
early_param("userpte", setup_userpte);

void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
        pgtable_page_dtor(pte);
        paravirt_release_pte(page_to_pfn(pte));
        tlb_remove_page(tlb, pte);
}

#if PAGETABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
        paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
        tlb_remove_page(tlb, virt_to_page(pmd));
}

#if PAGETABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
        paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
        tlb_remove_page(tlb, virt_to_page(pud));
}
#endif  /* PAGETABLE_LEVELS > 3 */
#endif  /* PAGETABLE_LEVELS > 2 */

static inline void pgd_list_add(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);

        list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);

        list_del(&page->lru);
}

#define UNSHARED_PTRS_PER_PGD \
        (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)

static void pgd_ctor(pgd_t *pgd)
{
        /* If the pgd points to a shared pagetable level (either the
           ptes in non-PAE, or shared PMD in PAE), then just copy the
           references from swapper_pg_dir. */
        if (PAGETABLE_LEVELS == 2 ||
            (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
            PAGETABLE_LEVELS == 4) {
                clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
                                swapper_pg_dir + KERNEL_PGD_BOUNDARY,
                                KERNEL_PGD_PTRS);
                paravirt_alloc_pmd_clone(__pa(pgd) >> PAGE_SHIFT,
                                         __pa(swapper_pg_dir) >> PAGE_SHIFT,
                                         KERNEL_PGD_BOUNDARY,
                                         KERNEL_PGD_PTRS);
        }

        /* list required to sync kernel mapping updates */
        if (!SHARED_KERNEL_PMD)
                pgd_list_add(pgd);
}

static void pgd_dtor(pgd_t *pgd)
{
        unsigned long flags; /* can be called from interrupt context */

        if (SHARED_KERNEL_PMD)
                return;

        spin_lock_irqsave(&pgd_lock, flags);
        pgd_list_del(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
}
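/*
 * Illustrative sketch (editorial addition, not part of the upstream file):
 * the point of pgd_list_add()/pgd_list_del() above is that code elsewhere
 * (for example the vmalloc fault-sync path in the x86 fault handling code)
 * can walk pgd_list under pgd_lock and propagate a new kernel mapping into
 * every cached pgd, roughly along the lines of
 *
 *      spin_lock_irqsave(&pgd_lock, flags);
 *      list_for_each_entry(page, &pgd_list, lru)
 *              sync_one_kernel_entry(page_address(page), address);
 *      spin_unlock_irqrestore(&pgd_lock, flags);
 *
 * where sync_one_kernel_entry() is a hypothetical helper used only for
 * illustration. See the locking comment below for why this list exists.
 */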
/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- wli
 */

#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update. Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS       UNSHARED_PTRS_PER_PGD

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
        paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

        /* Note: almost everything apart from _PAGE_PRESENT is
           reserved at the pmd (PDPT) level. */
        set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

        /*
         * According to Intel App note "TLBs, Paging-Structure Caches,
         * and Their Invalidation", April 2007, document 317080-001,
         * section 8.1: in PAE mode we explicitly have to flush the
         * TLB via cr3 if the top-level pgd is changed...
         */
        if (mm == current->active_mm)
                write_cr3(read_cr3());
}
#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS       0

#endif  /* CONFIG_X86_PAE */

static void free_pmds(pmd_t *pmds[])
{
        int i;

        for (i = 0; i < PREALLOCATED_PMDS; i++)
                if (pmds[i])
                        free_page((unsigned long)pmds[i]);
}

static int preallocate_pmds(pmd_t *pmds[])
{
        int i;
        bool failed = false;

        for (i = 0; i < PREALLOCATED_PMDS; i++) {
                pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
                if (pmd == NULL)
                        failed = true;
                pmds[i] = pmd;
        }

        if (failed) {
                free_pmds(pmds);
                return -ENOMEM;
        }

        return 0;
}
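/*
 * Summary of the PAE pre-population flow implemented in this file (added
 * for orientation): pgd_alloc() below first reserves PREALLOCATED_PMDS pmd
 * pages via preallocate_pmds(), then, under pgd_lock, runs pgd_ctor() and
 * pgd_prepopulate_pmd(), which installs each pmd with pud_populate() above.
 * pgd_free() undoes the whole thing through pgd_mop_up_pmds() below.
 */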
/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
        int i;

        for (i = 0; i < PREALLOCATED_PMDS; i++) {
                pgd_t pgd = pgdp[i];

                if (pgd_val(pgd) != 0) {
                        pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

                        pgdp[i] = native_make_pgd(0);

                        paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
                        pmd_free(mm, pmd);
                }
        }
}

static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
        pud_t *pud;
        unsigned long addr;
        int i;

        if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
                return;

        pud = pud_offset(pgd, 0);

        for (addr = i = 0; i < PREALLOCATED_PMDS;
             i++, pud++, addr += PUD_SIZE) {
                pmd_t *pmd = pmds[i];

                if (i >= KERNEL_PGD_BOUNDARY)
                        memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
                               sizeof(pmd_t) * PTRS_PER_PMD);

                pud_populate(mm, pud, pmd);
        }
}

pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd;
        pmd_t *pmds[PREALLOCATED_PMDS];
        unsigned long flags;

        pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);

        if (pgd == NULL)
                goto out;

        mm->pgd = pgd;

        if (preallocate_pmds(pmds) != 0)
                goto out_free_pgd;

        if (paravirt_pgd_alloc(mm) != 0)
                goto out_free_pmds;

        /*
         * Make sure that pre-populating the pmds is atomic with
         * respect to anything walking the pgd_list, so that they
         * never see a partially populated pgd.
         */
        spin_lock_irqsave(&pgd_lock, flags);

        pgd_ctor(pgd);
        pgd_prepopulate_pmd(mm, pgd, pmds);

        spin_unlock_irqrestore(&pgd_lock, flags);

        return pgd;

out_free_pmds:
        free_pmds(pmds);
out_free_pgd:
        free_page((unsigned long)pgd);
out:
        return NULL;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        pgd_mop_up_pmds(mm, pgd);
        pgd_dtor(pgd);
        paravirt_pgd_free(mm, pgd);
        free_page((unsigned long)pgd);
}

int ptep_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pte_t *ptep,
                          pte_t entry, int dirty)
{
        int changed = !pte_same(*ptep, entry);

        if (changed && dirty) {
                *ptep = entry;
                pte_update_defer(vma->vm_mm, address, ptep);
                flush_tlb_page(vma, address);
        }

        return changed;
}

int ptep_test_and_clear_young(struct vm_area_struct *vma,
                              unsigned long addr, pte_t *ptep)
{
        int ret = 0;

        if (pte_young(*ptep))
                ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
                                         (unsigned long *) &ptep->pte);

        if (ret)
                pte_update(vma->vm_mm, addr, ptep);

        return ret;
}

int ptep_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pte_t *ptep)
{
        int young;

        young = ptep_test_and_clear_young(vma, address, ptep);
        if (young)
                flush_tlb_page(vma, address);

        return young;
}
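/*
 * Note on the accessed-bit helpers above (editorial addition): the CPU can
 * set _PAGE_BIT_ACCESSED in a pte concurrently with software clearing it,
 * which is why ptep_test_and_clear_young() uses an atomic
 * test_and_clear_bit() on the pte word rather than a plain
 * read-modify-write. ptep_clear_flush_young() additionally flushes the TLB
 * entry so that a later access actually re-marks the page young instead of
 * being satisfied from a stale cached translation.
 */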
/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve: size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_X86_32
        BUG_ON(fixmaps_set > 0);
        printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
               (int)-reserve);
        __FIXADDR_TOP = -reserve - PAGE_SIZE;
#endif
}

int fixmaps_set;

void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        set_pte_vaddr(address, pte);
        fixmaps_set++;
}

void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
                       pgprot_t flags)
{
        __native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}
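/*
 * Usage note (illustrative, not part of the upstream file): callers do not
 * normally invoke native_set_fixmap() directly; they go through the
 * set_fixmap()/set_fixmap_nocache() helpers from <asm/fixmap.h>, which on
 * native hardware end up here, e.g. something along the lines of
 *
 *      set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
 *
 * to map device registers at a fixed virtual address. The example call
 * site and argument are an assumption for illustration only.
 */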