xref: /openbmc/linux/arch/parisc/include/asm/pgalloc.h (revision 151f4e2b)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/fixmap.h>

#include <asm/cache.h>

/* Allocate the top-level pgd (page directory).
 *
 * Here (for 64-bit kernels) we implement a hybrid L2/L3 scheme: we
 * allocate the first pmd adjacent to the pgd.  This means that we can
 * subtract a constant offset to get to it.  The pmd and pgd sizes are
 * arranged so that a single pmd covers 4GB (giving a full 64-bit
 * process access to 8TB), so our lookups are effectively L2 for the
 * first 4GB of the kernel (i.e. for all ILP32 processes and all of
 * the kernel for machines with under 4GB of memory). */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL,
					       PGD_ALLOC_ORDER);
	pgd_t *actual_pgd = pgd;

	if (likely(pgd != NULL)) {
		memset(pgd, 0, PAGE_SIZE << PGD_ALLOC_ORDER);
#if CONFIG_PGTABLE_LEVELS == 3
		actual_pgd += PTRS_PER_PGD;
		/* Populate the first pmd with the allocated memory.  We mark
		 * it with PxD_FLAG_ATTACHED as a signal to the system that
		 * this pmd entry may not be cleared. */
		__pgd_val_set(*actual_pgd, (PxD_FLAG_PRESENT |
					    PxD_FLAG_VALID |
					    PxD_FLAG_ATTACHED)
			+ (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT));
		/* The first pmd entry is also marked with PxD_FLAG_ATTACHED
		 * as a signal that this pmd may not be freed. */
		__pgd_val_set(*pgd, PxD_FLAG_ATTACHED);
#endif
		/* Only initialize the per-pgd lock once the allocation has
		 * succeeded; computing pgd_spinlock() on a NULL pgd would
		 * produce a bogus pointer. */
		spin_lock_init(pgd_spinlock(actual_pgd));
	}
	return actual_pgd;
}
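
/*
 * A sketch of the layout produced above (illustrative comment only): with
 * CONFIG_PGTABLE_LEVELS == 3, the single allocation holds both tables,
 *
 *	pgd                   <- base of the allocation; doubles as the
 *	                         permanently attached first pmd
 *	pgd + PTRS_PER_PGD    <- the pgd proper, which is what is returned
 *
 * so any holder of the returned pointer can reach the attached pmd by
 * subtracting the constant PTRS_PER_PGD, exactly as pgd_free() does.
 */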

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#if CONFIG_PGTABLE_LEVELS == 3
	pgd -= PTRS_PER_PGD;
#endif
	free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
}

#if CONFIG_PGTABLE_LEVELS == 3

/* Three-level page table support for pmds */

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
	__pgd_val_set(*pgd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID) +
			(__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
}
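
/*
 * Illustrative note (inferred from the helpers in this file): a PxD
 * entry keeps its PxD_FLAG_* bits in the low bits and stores the
 * physical address of the next-level table shifted right by
 * PxD_VALUE_SHIFT, which is why every populate helper here combines
 * the flags with (__u32)(__pa(table) >> PxD_VALUE_SHIFT).
 */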

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL, PMD_ORDER);

	if (pmd)
		memset(pmd, 0, PAGE_SIZE << PMD_ORDER);
	return pmd;
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) {
		/*
		 * This is the permanent pmd attached to the pgd;
		 * cannot free it.
		 * Increment the counter to compensate for the decrement
		 * done by generic mm code.
		 */
		mm_inc_nr_pmds(mm);
		return;
	}
	free_pages((unsigned long)pmd, PMD_ORDER);
}
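
/*
 * Lifecycle note (follows from pgd_alloc() above): the attached pmd is
 * part of the pgd's PGD_ALLOC_ORDER allocation, so its memory is only
 * returned when pgd_free() releases the whole block, never here.
 */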

#else

/* Two-level page table support for pmds */

/*
 * Allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so it has no extra memory associated with it.
 */

#define pmd_alloc_one(mm, addr)		({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, x)			do { } while (0)
#define pgd_populate(mm, pmd, pte)	BUG()

#endif

static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
#if CONFIG_PGTABLE_LEVELS == 3
	/* preserve the gateway marker if this is the beginning of
	 * the permanent pmd */
	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
		__pmd_val_set(*pmd, (PxD_FLAG_PRESENT |
				     PxD_FLAG_VALID |
				     PxD_FLAG_ATTACHED)
			+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
	else
#endif
		__pmd_val_set(*pmd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID)
			+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
}

#define pmd_populate(mm, pmd, pte_page) \
	pmd_populate_kernel(mm, pmd, page_address(pte_page))
#define pmd_pgtable(pmd) pmd_page(pmd)

static inline pgtable_t
pte_alloc_one(struct mm_struct *mm)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	return page;
}
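
/*
 * Note on the ctor call above: pgtable_page_ctor() sets up the split
 * page-table lock and page accounting for the new pte page; it pairs
 * with the pgtable_page_dtor() call in pte_free() below.
 */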

static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm)
{
	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
	pgtable_page_dtor(pte);
	pte_free_kernel(mm, page_address(pte));
}

#define check_pgt_cache()	do { } while (0)

#endif /* _ASM_PGALLOC_H */