#ifndef _ASM_X86_PGALLOC_H
#define _ASM_X86_PGALLOC_H

#include <linux/threads.h>
#include <linux/mm.h>		/* for struct page */
#include <linux/pagemap.h>

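/*
 * Paravirt hooks: with CONFIG_PARAVIRT these let the hypervisor (e.g.
 * Xen) track which pages become page tables so it can pin and
 * write-protect them; without it they compile away to the empty stubs
 * below.  __paravirt_pgd_alloc() is defined unconditionally so the
 * paravirt code can also use it as the default pgd_alloc op.
 */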
static inline int  __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; }

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define paravirt_pgd_alloc(mm)	__paravirt_pgd_alloc(mm)
static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd) {}
static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)	{}
static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)	{}
static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
					    unsigned long start, unsigned long count) {}
static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)	{}
static inline void paravirt_release_pte(unsigned long pfn) {}
static inline void paravirt_release_pmd(unsigned long pfn) {}
static inline void paravirt_release_pud(unsigned long pfn) {}
#endif

/*
 * Flags to use when allocating a user page table page.
 */
extern gfp_t __userpte_alloc_gfp;
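/* Set in arch/x86/mm/pgtable.c; "userpte=nohigh" drops __GFP_HIGHMEM. */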

/*
 * Allocate and free page tables.
 */
extern pgd_t *pgd_alloc(struct mm_struct *);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);

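/*
 * pte_alloc_one_kernel() backs kernel mappings and returns a kernel
 * virtual address; pte_alloc_one() backs user mappings and returns the
 * struct page (pgtable_t), which generic code such as __pte_alloc() in
 * mm/memory.c installs with pmd_populate().
 */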
extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);

/* Should really implement gc for free page table pages. This could be
   done with a reference count in struct page. */

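/*
 * pte_free_kernel() pairs with pte_alloc_one_kernel(): the pte table is
 * one page-aligned page from the page allocator, which the BUG_ON
 * checks before freeing it.
 */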
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
	free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
	pgtable_page_dtor(pte);
	__free_page(pte);
}

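/*
 * The __*_free_tlb() helpers queue the page-table page on the
 * mmu_gather so it is only freed once the TLB has been flushed; the
 * address argument is unused on x86.
 */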
extern void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte);

static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
				  unsigned long address)
{
	___pte_free_tlb(tlb, pte);
}

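/*
 * Install a pte page into a pmd entry.  The _kernel variant has a
 * kernel virtual address, so __pa() works directly; the user variant
 * goes via the struct page's pfn because with CONFIG_HIGHPTE the pte
 * page may have no kernel mapping at all.  _PAGE_TABLE supplies the
 * present/rw/user/accessed/dirty bits of an intermediate entry.
 */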
static inline void pmd_populate_kernel(struct mm_struct *mm,
				       pmd_t *pmd, pte_t *pte)
{
	paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
	set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				struct page *pte)
{
	unsigned long pfn = page_to_pfn(pte);

	paravirt_alloc_pte(mm, pfn);
	set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
}

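/* pmd_pgtable() returns the struct page of the pte table that @pmd maps. */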
#define pmd_pgtable(pmd) pmd_page(pmd)

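/*
 * Three or more paging levels (32-bit PAE or 64-bit): the pmd is a real
 * level with its own allocator.  With 2-level paging the pmd is folded
 * into the pgd and none of this is needed.
 */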
#if PAGETABLE_LEVELS > 2
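/*
 * pgtable_pmd_page_ctor() initializes the split page-table lock for the
 * pmd page and can fail if the lock allocation fails, in which case the
 * page must be handed straight back.
 */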
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	struct page *page;
	page = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0);
	if (!page)
		return NULL;
	if (!pgtable_pmd_page_ctor(page)) {
		__free_pages(page, 0);
		return NULL;
	}
	return (pmd_t *)page_address(page);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
	pgtable_pmd_page_dtor(virt_to_page(pmd));
	free_page((unsigned long)pmd);
}

extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				  unsigned long address)
{
	___pmd_free_tlb(tlb, pmd);
}

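/*
 * On PAE the "pud" entries are the four PDPTEs, which the CPU may cache
 * internally, so updating them needs extra care; the PAE version of
 * pud_populate() therefore lives out of line in arch/x86/mm/pgtable.c.
 */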
#ifdef CONFIG_X86_PAE
extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
#else	/* !CONFIG_X86_PAE */
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
	set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
}
#endif	/* CONFIG_X86_PAE */

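/*
 * Four paging levels (64-bit): the pud is a real level too, allocated
 * and freed just like the pmd above, with pgd entries pointing at pud
 * pages.
 */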
#if PAGETABLE_LEVELS > 3
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
	paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
	set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
	free_page((unsigned long)pud);
}

extern void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);

static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				  unsigned long address)
{
	___pud_free_tlb(tlb, pud);
}

#endif	/* PAGETABLE_LEVELS > 3 */
#endif	/* PAGETABLE_LEVELS > 2 */

#endif /* _ASM_X86_PGALLOC_H */