#include <linux/mm.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
	if (pte)
		pgtable_page_ctor(pte);
	return pte;
}

void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);
}

#if PAGETABLE_LEVELS > 2
void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pmd));
}

#if PAGETABLE_LEVELS > 3
void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pud));
}
#endif	/* PAGETABLE_LEVELS > 3 */
#endif	/* PAGETABLE_LEVELS > 2 */

static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}

#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)

static void pgd_ctor(void *p)
{
	pgd_t *pgd = p;

	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (PAGETABLE_LEVELS == 2 ||
	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
	    PAGETABLE_LEVELS == 4) {
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				KERNEL_PGD_PTRS);
		paravirt_alloc_pmd_clone(__pa(pgd) >> PAGE_SHIFT,
					 __pa(swapper_pg_dir) >> PAGE_SHIFT,
					 KERNEL_PGD_BOUNDARY,
					 KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD)
		pgd_list_add(pgd);
}

static void pgd_dtor(void *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	if (SHARED_KERNEL_PMD)
		return;

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- wli
 */
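/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * file): roughly how a changed kernel pgd entry could be pushed into
 * every cached pgd on pgd_list, in the spirit of the comment above.
 * Only pgd_list/pgd_lock and clone_pgd_range() from this file are
 * assumed; the function name is made up for the example.
 */
static void __maybe_unused sync_one_kernel_pgd(unsigned long address)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);
	list_for_each_entry(page, &pgd_list, lru) {
		pgd_t *pgd = (pgd_t *)page_address(page);

		/* Re-copy the affected entry from the reference pagetable. */
		clone_pgd_range(pgd + pgd_index(address),
				swapper_pg_dir + pgd_index(address), 1);
	}
	spin_unlock_irqrestore(&pgd_lock, flags);
}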
#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update. Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/* Note: almost everything apart from _PAGE_PRESENT is
	   reserved at the pmd (PDPT) level. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	if (mm == current->active_mm)
		write_cr3(read_cr3());
}
#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS	0

#endif	/* CONFIG_X86_PAE */

static void free_pmds(pmd_t *pmds[])
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++)
		if (pmds[i])
			free_page((unsigned long)pmds[i]);
}

static int preallocate_pmds(pmd_t *pmds[])
{
	int i;
	bool failed = false;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
		if (pmd == NULL)
			failed = true;
		pmds[i] = pmd;
	}

	if (failed) {
		free_pmds(pmds);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
		}
	}
}

static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
	pud_t *pud;
	unsigned long addr;
	int i;

	pud = pud_offset(pgd, 0);

	for (addr = i = 0; i < PREALLOCATED_PMDS;
	     i++, pud++, addr += PUD_SIZE) {
		pmd_t *pmd = pmds[i];

		if (i >= KERNEL_PGD_BOUNDARY)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}
}

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;
	pmd_t *pmds[PREALLOCATED_PMDS];
	unsigned long flags;

	pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

	if (pgd == NULL)
		goto out;

	mm->pgd = pgd;

	if (preallocate_pmds(pmds) != 0)
		goto out_free_pgd;

	if (paravirt_pgd_alloc(mm) != 0)
		goto out_free_pmds;

	/*
	 * Make sure that pre-populating the pmds is atomic with
	 * respect to anything walking the pgd_list, so that they
	 * never see a partially populated pgd.
	 */
	spin_lock_irqsave(&pgd_lock, flags);

	pgd_ctor(pgd);
	pgd_prepopulate_pmd(mm, pgd, pmds);

	spin_unlock_irqrestore(&pgd_lock, flags);

	return pgd;

out_free_pmds:
	free_pmds(pmds);
out_free_pgd:
	free_page((unsigned long)pgd);
out:
	return NULL;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	paravirt_pgd_free(mm, pgd);
	free_page((unsigned long)pgd);
}
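/*
 * Caller-side sketch (hypothetical function; in reality the callers
 * live in kernel/fork.c): pgd_alloc() stores the new pgd in mm->pgd
 * itself, so the assignment below is redundant but harmless, and
 * pgd_free() must be passed the same mm the pgd was allocated for.
 */
static int __maybe_unused pgd_lifetime_sketch(struct mm_struct *mm)
{
	mm->pgd = pgd_alloc(mm);
	if (!mm->pgd)
		return -ENOMEM;

	/* ... the mm is used here ... */

	pgd_free(mm, mm->pgd);
	mm->pgd = NULL;
	return 0;
}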
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed && dirty) {
		*ptep = entry;
		pte_update_defer(vma->vm_mm, address, ptep);
		flush_tlb_page(vma, address);
	}

	return changed;
}

int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
	int ret = 0;

	if (pte_young(*ptep))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *) &ptep->pte);

	if (ret)
		pte_update(vma->vm_mm, addr, ptep);

	return ret;
}

int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;

	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);

	return young;
}

int fixmaps_set;

void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_vaddr(address, pte);
	fixmaps_set++;
}

void native_set_fixmap(enum fixed_addresses idx, unsigned long phys,
		       pgprot_t flags)
{
	__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}
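/*
 * Usage sketch (hypothetical call site): callers normally reach this
 * through the set_fixmap()/set_fixmap_nocache() wrappers in fixmap.h
 * and recover the mapping's virtual address with fix_to_virt().
 * FIX_EXAMPLE stands in for a real enum fixed_addresses slot.
 */
static void __maybe_unused fixmap_usage_sketch(unsigned long phys)
{
	set_fixmap(FIX_EXAMPLE, phys);	/* maps with PAGE_KERNEL */
	pr_debug("fixmap mapped at %#lx\n", fix_to_virt(FIX_EXAMPLE));
}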