/*
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/mm.h>
#include <linux/sysctl.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PGSTE

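/*
 * The "vm/allocate_pgste" sysctl selects full 4K page tables with PGSTEs
 * for new mm contexts. PGSTEs are needed to back KVM guest mappings, so
 * the switch is typically enabled before virtual machines are started.
 */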
static int page_table_allocate_pgste_min = 0;
static int page_table_allocate_pgste_max = 1;
int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= proc_dointvec,
		.extra1		= &page_table_allocate_pgste_min,
		.extra2		= &page_table_allocate_pgste_max,
	},
	{ }
};

static struct ctl_table page_table_sysctl_dir[] = {
	{
		.procname	= "vm",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= page_table_sysctl,
	},
	{ }
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);

#endif /* CONFIG_PGSTE */

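/*
 * Region and segment tables (crst) have 2048 eight byte entries, i.e.
 * 16K, hence the order-2 page allocations below.
 */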
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return NULL;
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, 2);
}

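/*
 * Run on every CPU via on_each_cpu() from crst_table_upgrade(): CPUs
 * that are currently running a task of the upgraded mm reload the user
 * ASCE, and every CPU flushes its local TLB.
 */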
static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	if (current->active_mm == mm) {
		clear_user_asce();
		set_user_asce(mm);
	}
	__tlb_flush_local();
}

int crst_table_upgrade(struct mm_struct *mm)
{
	unsigned long *table, *pgd;

	/* upgrade should only happen from 3 to 4 levels */
	BUG_ON(mm->context.asce_limit != (1UL << 42));

	table = crst_table_alloc(mm);
	if (!table)
		return -ENOMEM;

	spin_lock_bh(&mm->page_table_lock);
	pgd = (unsigned long *) mm->pgd;
	crst_table_init(table, _REGION2_ENTRY_EMPTY);
	pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
	mm->pgd = (pgd_t *) table;
	mm->context.asce_limit = 1UL << 53;
	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			   _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
	spin_unlock_bh(&mm->page_table_lock);

	on_each_cpu(__crst_table_upgrade, mm, 0);
	return 0;
}

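/*
 * Drop a 31-bit compat process back to a two level (segment table only)
 * address space and free the now unused region-third table.
 */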
void crst_table_downgrade(struct mm_struct *mm)
{
	pgd_t *pgd;

	/* downgrade should only happen from 3 to 2 levels (compat only) */
	BUG_ON(mm->context.asce_limit != (1UL << 42));

	if (current->active_mm == mm) {
		clear_user_asce();
		__tlb_flush_mm(mm);
	}

	pgd = mm->pgd;
	mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
	mm->context.asce_limit = 1UL << 31;
	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
	crst_table_free(mm, (unsigned long *) pgd);

	if (current->active_mm == mm)
		set_user_asce(mm);
}

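/*
 * Toggle bits in an atomic_t with a cmpxchg loop and return the new
 * value; used to track the state of the two 2K page table fragments
 * of a page in page->_mapcount.
 */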
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

#ifdef CONFIG_PGSTE

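/*
 * Allocate a full 4K page table with PGSTEs: the first 2K hold the page
 * table entries, the second 2K hold the corresponding PGSTEs.
 */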
struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct page *page;
	unsigned long *table;

	page = alloc_page(GFP_KERNEL);
	if (page) {
		table = (unsigned long *) page_to_phys(page);
		clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
		clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
	}
	return page;
}

void page_table_free_pgste(struct page *page)
{
	__free_page(page);
}

#endif /* CONFIG_PGSTE */

/*
 * page table entry allocation/free routines.
 */
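/*
 * A 4K page can hold two 2K page table fragments. Their allocation
 * state is tracked in page->_mapcount:
 *
 *   bit 0: first 2K fragment in use
 *   bit 1: second 2K fragment in use
 *   bit 4: first 2K fragment queued for RCU freeing
 *   bit 5: second 2K fragment queued for RCU freeing
 *
 * Pages with at least one free fragment are kept on
 * mm->context.pgtable_list.
 */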
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	unsigned long *table;
	struct page *page;
	unsigned int mask, bit;

	/* Try to get a fragment of a 4K page as a 2K page table */
	if (!mm_alloc_pgste(mm)) {
		table = NULL;
		spin_lock_bh(&mm->context.pgtable_lock);
		if (!list_empty(&mm->context.pgtable_list)) {
			page = list_first_entry(&mm->context.pgtable_list,
						struct page, lru);
			mask = atomic_read(&page->_mapcount);
			mask = (mask | (mask >> 4)) & 3;
			if (mask != 3) {
				table = (unsigned long *) page_to_phys(page);
				bit = mask & 1;		/* =1 -> second 2K */
				if (bit)
					table += PTRS_PER_PTE;
				atomic_xor_bits(&page->_mapcount, 1U << bit);
				list_del(&page->lru);
			}
		}
		spin_unlock_bh(&mm->context.pgtable_lock);
		if (table)
			return table;
	}
	/* Allocate a fresh page */
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	/* Initialize page table */
	table = (unsigned long *) page_to_phys(page);
	if (mm_alloc_pgste(mm)) {
		/* Return 4K page table with PGSTEs */
		atomic_set(&page->_mapcount, 3);
		clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
		clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
	} else {
		/* Return the first 2K fragment of the page */
		atomic_set(&page->_mapcount, 1);
		clear_table(table, _PAGE_INVALID, PAGE_SIZE);
		spin_lock_bh(&mm->context.pgtable_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
		spin_unlock_bh(&mm->context.pgtable_lock);
	}
	return table;
}

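/*
 * Free a page table directly, without RCU deferral: release the 2K
 * fragment and give the page back to the allocator once both fragments
 * are unused and no RCU free is pending.
 */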
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (!mm_alloc_pgste(mm)) {
		/* Free 2K page table fragment of a 4K page */
		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
		spin_lock_bh(&mm->context.pgtable_lock);
		mask = atomic_xor_bits(&page->_mapcount, 1U << bit);
		if (mask & 3)
			list_add(&page->lru, &mm->context.pgtable_list);
		else
			list_del(&page->lru);
		spin_unlock_bh(&mm->context.pgtable_lock);
		if (mask != 0)
			return;
	}

	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	__free_page(page);
}

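/*
 * Queue a page table for freeing after an RCU grace period. The kind of
 * table is encoded in the low bits of the pointer handed to
 * tlb_remove_table(): 1 = first 2K fragment, 2 = second 2K fragment,
 * 3 = full 4K page table with PGSTEs. __tlb_remove_table() decodes it.
 */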
void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
			 unsigned long vmaddr)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (mm_alloc_pgste(mm)) {
		gmap_unlink(mm, table, vmaddr);
		table = (unsigned long *) (__pa(table) | 3);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
	spin_lock_bh(&mm->context.pgtable_lock);
	mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit);
	if (mask & 3)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	else
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.pgtable_lock);
	table = (unsigned long *) (__pa(table) | (1U << bit));
	tlb_remove_table(tlb, table);
}

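/*
 * Final release of a table queued with tlb_remove_table(); the low two
 * bits of the pointer say whether this is a crst table, a 2K fragment
 * or a 4K page table with PGSTEs.
 */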
static void __tlb_remove_table(void *_table)
{
	unsigned int mask = (unsigned long) _table & 3;
	void *table = (void *)((unsigned long) _table ^ mask);
	struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);

	switch (mask) {
	case 0:		/* pmd or pud */
		free_pages((unsigned long) table, 2);
		break;
	case 1:		/* lower 2K of a 4K page table */
	case 2:		/* higher 2K of a 4K page table */
		if (atomic_xor_bits(&page->_mapcount, mask << 4) != 0)
			break;
		/* fallthrough */
	case 3:		/* 4K page table with pgstes */
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
		break;
	}
}

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

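/*
 * Hand the current batch of tables to RCU; they are freed by
 * tlb_remove_table_rcu() after a grace period.
 */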
void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

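/*
 * Defer freeing of a page table until after an RCU grace period, so that
 * lockless page table walkers do not see it disappear underneath them.
 * If no batch page can be allocated, fall back to the IPI based
 * synchronization in tlb_remove_table_one().
 */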
void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	tlb->mm->context.flush_mm = 1;
	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			__tlb_flush_mm_lazy(tlb->mm);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_flush_mmu(tlb);
}
353