xref: /openbmc/linux/arch/x86/mm/pgtable.c (revision 976fa9a3)
#include <linux/mm.h>
#include <linux/gfp.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/mtrr.h>

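/*
 * Baseline GFP flags for page-table allocations: zeroed pages, retrying
 * hard on failure (__GFP_REPEAT), and excluded from kmemcheck tracking
 * (__GFP_NOTRACK) since page-table pages are walked by hardware, which
 * kmemcheck cannot instrument.
 */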
#define PGALLOC_GFP	(GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)

#ifdef CONFIG_HIGHPTE
#define PGALLOC_USER_GFP __GFP_HIGHMEM
#else
#define PGALLOC_USER_GFP 0
#endif

gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(PGALLOC_GFP);
}

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

	pte = alloc_pages(__userpte_alloc_gfp, 0);
	if (!pte)
		return NULL;
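	/*
	 * pgtable_page_ctor() initialises the split page-table lock and
	 * accounts the page as a page-table page; it can fail when the
	 * ptlock has to be allocated separately.
	 */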
	if (!pgtable_page_ctor(pte)) {
		__free_page(pte);
		return NULL;
	}
	return pte;
}

static int __init setup_userpte(char *arg)
{
	if (!arg)
		return -EINVAL;

	/*
	 * "userpte=nohigh" disables allocation of user pagetables in
	 * high memory.
	 */
	if (strcmp(arg, "nohigh") == 0)
		__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
	else
		return -EINVAL;
	return 0;
}
early_param("userpte", setup_userpte);

void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);
}

#if CONFIG_PGTABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	struct page *page = virt_to_page(pmd);
	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
	/*
	 * NOTE! For PAE, any changes to the top page-directory-pointer-table
	 * entries need a full cr3 reload to flush.
	 */
#ifdef CONFIG_X86_PAE
	tlb->need_flush_all = 1;
#endif
	pgtable_pmd_page_dtor(page);
	tlb_remove_page(tlb, page);
}

#if CONFIG_PGTABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pud));
}
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

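/*
 * pgd pages are chained on pgd_list through page->lru; this is safe
 * because page-table pages are never placed on the LRU lists.
 */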
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}

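/*
 * Number of pgd entries private to this pgd: with a shared kernel pmd
 * only the user range below KERNEL_PGD_BOUNDARY is unshared; otherwise
 * every entry is.
 */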
#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)


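/*
 * Stash the owning mm in page->index of the pgd's struct page; the
 * index field is otherwise unused for page-table pages, and the
 * BUILD_BUG_ON guards against the pointer not fitting.
 */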
static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{
	BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
	virt_to_page(pgd)->index = (pgoff_t)mm;
}

struct mm_struct *pgd_page_get_mm(struct page *page)
{
	return (struct mm_struct *)page->index;
}

static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
{
	/*
	 * If the pgd points to a shared pagetable level (either the
	 * ptes in non-PAE, or shared PMD in PAE), then just copy the
	 * references from swapper_pg_dir.
	 */
	if (CONFIG_PGTABLE_LEVELS == 2 ||
	    (CONFIG_PGTABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
	    CONFIG_PGTABLE_LEVELS == 4) {
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD) {
		pgd_set_mm(pgd, mm);
		pgd_list_add(pgd);
	}
}

static void pgd_dtor(pgd_t *pgd)
{
	if (SHARED_KERNEL_PMD)
		return;

	spin_lock(&pgd_lock);
	pgd_list_del(pgd);
	spin_unlock(&pgd_lock);
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- nyc
 */

#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update.  Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/*
	 * Note: almost everything apart from _PAGE_PRESENT is
	 * reserved at the pmd (PDPT) level.
	 */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	flush_tlb_mm(mm);
}
#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS	0

#endif	/* CONFIG_X86_PAE */

static void free_pmds(struct mm_struct *mm, pmd_t *pmds[])
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++)
		if (pmds[i]) {
			pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
			free_page((unsigned long)pmds[i]);
			mm_dec_nr_pmds(mm);
		}
}

static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
{
	int i;
	bool failed = false;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
		if (!pmd)
			failed = true;
		if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
			free_page((unsigned long)pmd);
			pmd = NULL;
			failed = true;
		}
		if (pmd)
			mm_inc_nr_pmds(mm);
		pmds[i] = pmd;
	}

	if (failed) {
		free_pmds(mm, pmds);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
			mm_dec_nr_pmds(mm);
		}
	}
}

static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
	pud_t *pud;
	int i;

	if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
		return;

	pud = pud_offset(pgd, 0);

	for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
		pmd_t *pmd = pmds[i];

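		/*
		 * Entries at or above KERNEL_PGD_BOUNDARY map kernel
		 * space: seed the new pmd with the kernel's entries
		 * from swapper_pg_dir before hooking it in.
		 */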
		if (i >= KERNEL_PGD_BOUNDARY)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}
}

/*
 * Xen paravirt expects the pgd table to occupy a whole page, and the
 * 64-bit kernel makes the same assumption.
 *
 * A kernel with PAE paging that is not running as a Xen domain,
 * however, only needs 32 bytes for its pgd instead of one page.
 */
#ifdef CONFIG_X86_PAE

#include <linux/slab.h>

#define PGD_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
#define PGD_ALIGN	32
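/*
 * For PAE, PGD_SIZE is 4 entries * 8 bytes = 32 bytes; the 32-byte
 * alignment matches the architectural requirement that the PDPT
 * referenced by cr3 be 32-byte aligned.
 */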

static struct kmem_cache *pgd_cache;

static int __init pgd_cache_init(void)
{
	/*
	 * When a PAE kernel is running as a Xen domain, it does not use
	 * a shared kernel pmd, and that requires a whole page for the pgd.
	 */
	if (!SHARED_KERNEL_PMD)
		return 0;

	/*
	 * When a PAE kernel is not running as a Xen domain, it uses a
	 * shared kernel pmd, which does not need a whole page for the
	 * pgd: 32 bytes suffice. Create a 32-byte slab at boot time for
	 * pgd table allocations.
	 */
	pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN,
				      SLAB_PANIC, NULL);
	if (!pgd_cache)
		return -ENOMEM;

	return 0;
}
core_initcall(pgd_cache_init);

static inline pgd_t *_pgd_alloc(void)
{
	/*
	 * Without SHARED_KERNEL_PMD the PAE kernel is running as a Xen
	 * domain, so allocate a whole page for the pgd.
	 */
	if (!SHARED_KERNEL_PMD)
		return (pgd_t *)__get_free_page(PGALLOC_GFP);

	/*
	 * Otherwise the PAE kernel is not running as a Xen domain, and
	 * we can allocate the pgd from the 32-byte slab to save memory.
	 */
	return kmem_cache_alloc(pgd_cache, PGALLOC_GFP);
}

static inline void _pgd_free(pgd_t *pgd)
{
	if (!SHARED_KERNEL_PMD)
		free_page((unsigned long)pgd);
	else
		kmem_cache_free(pgd_cache, pgd);
}
#else
static inline pgd_t *_pgd_alloc(void)
{
	return (pgd_t *)__get_free_page(PGALLOC_GFP);
}

static inline void _pgd_free(pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}
#endif /* CONFIG_X86_PAE */

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;
	pmd_t *pmds[PREALLOCATED_PMDS];

	pgd = _pgd_alloc();

	if (pgd == NULL)
		goto out;

	mm->pgd = pgd;

	if (preallocate_pmds(mm, pmds) != 0)
		goto out_free_pgd;

	if (paravirt_pgd_alloc(mm) != 0)
		goto out_free_pmds;

	/*
	 * Make sure that pre-populating the pmds is atomic with
	 * respect to anything walking the pgd_list, so that they
	 * never see a partially populated pgd.
	 */
	spin_lock(&pgd_lock);

	pgd_ctor(mm, pgd);
	pgd_prepopulate_pmd(mm, pgd, pmds);

	spin_unlock(&pgd_lock);

	return pgd;

out_free_pmds:
	free_pmds(mm, pmds);
out_free_pgd:
	_pgd_free(pgd);
out:
	return NULL;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	paravirt_pgd_free(mm, pgd);
	_pgd_free(pgd);
}

/*
 * Used to set accessed or dirty bits in the page table entries
 * on other architectures. On x86, the accessed and dirty bits
 * are tracked by hardware. However, do_wp_page calls this function
 * to also make the pte writeable at the same time the dirty bit is
 * set. In that case we do actually need to write the PTE.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed && dirty) {
		*ptep = entry;
		pte_update_defer(vma->vm_mm, address, ptep);
	}

	return changed;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	if (changed && dirty) {
		*pmdp = entry;
		pmd_update_defer(vma->vm_mm, address, pmdp);
		/*
		 * We had a write-protection fault here and changed the pmd
		 * to be more permissive. No need to flush the TLB for that,
		 * #PF is architecturally guaranteed to do that and in the
		 * worst-case we'll generate a spurious fault.
		 */
	}

	return changed;
}
#endif

int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
	int ret = 0;

	if (pte_young(*ptep))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *) &ptep->pte);

	if (ret)
		pte_update(vma->vm_mm, addr, ptep);

	return ret;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pmd_t *pmdp)
{
	int ret = 0;

	if (pmd_young(*pmdp))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *)pmdp);

	if (ret)
		pmd_update(vma->vm_mm, addr, pmdp);

	return ret;
}
#endif

int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	/*
	 * On x86 CPUs, clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	return young;
}

void pmdp_splitting_flush(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp)
{
	int set;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	set = !test_and_set_bit(_PAGE_BIT_SPLITTING,
				(unsigned long *)pmdp);
	if (set) {
		pmd_update(vma->vm_mm, address, pmdp);
		/* need tlb flush only to serialize against gup-fast */
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
}
#endif

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve: size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_X86_32
	BUG_ON(fixmaps_set > 0);
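	/*
	 * Round the new fixmap top down to a PMD boundary and leave one
	 * page of slack below the reserved hole.
	 */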
	__FIXADDR_TOP = round_down(-reserve, 1 << PMD_SHIFT) - PAGE_SIZE;
	printk(KERN_INFO "Reserving virtual address space above 0x%08lx (rounded to 0x%08lx)\n",
	       -reserve, __FIXADDR_TOP + PAGE_SIZE);
#endif
}

int fixmaps_set;

void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_vaddr(address, pte);
	fixmaps_set++;
}

void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
		       pgprot_t flags)
{
	__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
	u8 mtrr;

	/*
	 * Do not use a huge page when the range is covered by a non-WB
	 * MTRR type.
	 */
	mtrr = mtrr_type_lookup(addr, addr + PUD_SIZE);
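	/* 0xFF means the lookup failed, e.g. because MTRRs are disabled. */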
	if ((mtrr != MTRR_TYPE_WRBACK) && (mtrr != 0xFF))
		return 0;

	prot = pgprot_4k_2_large(prot);

	set_pte((pte_t *)pud, pfn_pte((u64)addr >> PAGE_SHIFT,
				      __pgprot(pgprot_val(prot) | _PAGE_PSE)));

	return 1;
}

int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
	u8 mtrr;

	/*
	 * Do not use a huge page when the range is covered by a non-WB
	 * MTRR type.
	 */
	mtrr = mtrr_type_lookup(addr, addr + PMD_SIZE);
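	/* 0xFF means the lookup failed, e.g. because MTRRs are disabled. */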
	if ((mtrr != MTRR_TYPE_WRBACK) && (mtrr != 0xFF))
		return 0;

	prot = pgprot_4k_2_large(prot);

	set_pte((pte_t *)pmd, pfn_pte((u64)addr >> PAGE_SHIFT,
				      __pgprot(pgprot_val(prot) | _PAGE_PSE)));

	return 1;
}

int pud_clear_huge(pud_t *pud)
{
	if (pud_large(*pud)) {
		pud_clear(pud);
		return 1;
	}

	return 0;
}

int pmd_clear_huge(pmd_t *pmd)
{
	if (pmd_large(*pmd)) {
		pmd_clear(pmd);
		return 1;
	}

	return 0;
}
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */