xref: /openbmc/linux/arch/s390/include/asm/pgalloc.h (revision 8ee90c5c)
1 /*
2  *  S390 version
3  *    Copyright IBM Corp. 1999, 2000
4  *    Author(s): Hartmut Penner (hp@de.ibm.com)
5  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
6  *
7  *  Derived from "include/asm-i386/pgalloc.h"
8  *    Copyright (C) 1994  Linus Torvalds
9  */
10 
11 #ifndef _S390_PGALLOC_H
12 #define _S390_PGALLOC_H
13 
14 #include <linux/threads.h>
15 #include <linux/gfp.h>
16 #include <linux/mm.h>
17 
18 #define CRST_ALLOC_ORDER 2
19 
20 unsigned long *crst_table_alloc(struct mm_struct *);
21 void crst_table_free(struct mm_struct *, unsigned long *);
22 
23 unsigned long *page_table_alloc(struct mm_struct *);
24 struct page *page_table_alloc_pgste(struct mm_struct *mm);
25 void page_table_free(struct mm_struct *, unsigned long *);
26 void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
27 void page_table_free_pgste(struct page *page);
28 extern int page_table_allocate_pgste;
29 
/*
 * Fill @n bytes at @s with the 8-byte pattern @val.
 *
 * Works in 256-byte strides: the compiler stores the first long of each
 * stride, then a single destructive-overlap "mvc" (dest = src + 8)
 * propagates those 8 bytes byte-by-byte through the remaining 248 bytes
 * of the stride.
 * NOTE(review): assumes @n is a multiple of 256 — true for the CRST and
 * page table sizes used by the callers; confirm for any new caller.
 */
static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
{
	/* Dummy aggregate so the asm constraint covers all 256 bytes. */
	struct addrtype { char _[256]; };
	int i;

	for (i = 0; i < n; i += 256) {
		*s = val;
		asm volatile(
			"mvc	8(248,%[s]),0(%[s])\n"
			: "+m" (*(struct addrtype *) s)
			: [s] "a" (s));
		s += 256 / sizeof(long);
	}
}
44 
/* Initialize a full region/segment table with the empty entry @entry. */
static inline void crst_table_init(unsigned long *crst, unsigned long entry)
{
	clear_table(crst, entry, _CRST_TABLE_SIZE);
}
49 
50 static inline unsigned long pgd_entry_type(struct mm_struct *mm)
51 {
52 	if (mm->context.asce_limit <= _REGION3_SIZE)
53 		return _SEGMENT_ENTRY_EMPTY;
54 	if (mm->context.asce_limit <= _REGION2_SIZE)
55 		return _REGION3_ENTRY_EMPTY;
56 	if (mm->context.asce_limit <= _REGION1_SIZE)
57 		return _REGION2_ENTRY_EMPTY;
58 	return _REGION1_ENTRY_EMPTY;
59 }
60 
61 int crst_table_upgrade(struct mm_struct *mm, unsigned long limit);
62 void crst_table_downgrade(struct mm_struct *);
63 
64 static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address)
65 {
66 	unsigned long *table = crst_table_alloc(mm);
67 
68 	if (table)
69 		crst_table_init(table, _REGION2_ENTRY_EMPTY);
70 	return (p4d_t *) table;
71 }
72 #define p4d_free(mm, p4d) crst_table_free(mm, (unsigned long *) p4d)
73 
74 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
75 {
76 	unsigned long *table = crst_table_alloc(mm);
77 	if (table)
78 		crst_table_init(table, _REGION3_ENTRY_EMPTY);
79 	return (pud_t *) table;
80 }
81 #define pud_free(mm, pud) crst_table_free(mm, (unsigned long *) pud)
82 
83 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
84 {
85 	unsigned long *table = crst_table_alloc(mm);
86 
87 	if (!table)
88 		return NULL;
89 	crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
90 	if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
91 		crst_table_free(mm, table);
92 		return NULL;
93 	}
94 	return (pmd_t *) table;
95 }
96 
97 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
98 {
99 	pgtable_pmd_page_dtor(virt_to_page(pmd));
100 	crst_table_free(mm, (unsigned long *) pmd);
101 }
102 
103 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
104 {
105 	pgd_val(*pgd) = _REGION1_ENTRY | __pa(p4d);
106 }
107 
108 static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
109 {
110 	p4d_val(*p4d) = _REGION2_ENTRY | __pa(pud);
111 }
112 
113 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
114 {
115 	pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
116 }
117 
118 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
119 {
120 	unsigned long *table = crst_table_alloc(mm);
121 
122 	if (!table)
123 		return NULL;
124 	if (mm->context.asce_limit == _REGION3_SIZE) {
125 		/* Forking a compat process with 2 page table levels */
126 		if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
127 			crst_table_free(mm, table);
128 			return NULL;
129 		}
130 	}
131 	return (pgd_t *) table;
132 }
133 
134 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
135 {
136 	if (mm->context.asce_limit == _REGION3_SIZE)
137 		pgtable_pmd_page_dtor(virt_to_page(pgd));
138 	crst_table_free(mm, (unsigned long *) pgd);
139 }
140 
141 static inline void pmd_populate(struct mm_struct *mm,
142 				pmd_t *pmd, pgtable_t pte)
143 {
144 	pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
145 }
146 
/* Kernel mappings use the same segment entry format as user mappings. */
#define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)

/* Extract the page table origin from a segment table entry value. */
#define pmd_pgtable(pmd) \
	(pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)
151 
/*
 * page table entry allocation/free routines.
 * The vmaddr argument is ignored; the kernel and user variants are
 * identical and both go through page_table_alloc()/page_table_free().
 */
#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm))

#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)
160 
161 extern void rcu_table_freelist_finish(void);
162 
163 void vmem_map_init(void);
164 void *vmem_crst_alloc(unsigned long val);
165 pte_t *vmem_pte_alloc(void);
166 
167 #endif /* _S390_PGALLOC_H */
168