/* xref: /openbmc/linux/arch/s390/mm/pgalloc.c (revision b24413180f5600bcb3bb70fbed5cf186b60864bd) */
// SPDX-License-Identifier: GPL-2.0
/*
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/mm.h>
#include <linux/sysctl.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PGSTE

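/*
 * The "allocate_pgste" sysctl under /proc/sys/vm toggles
 * page_table_allocate_pgste. When set, page_table_alloc() hands out
 * full 4K page tables with PGSTEs (page status table extensions, as
 * needed to run KVM guests) instead of 2K fragments.
 */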
static int page_table_allocate_pgste_min = 0;
static int page_table_allocate_pgste_max = 1;
int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		/* proc_dointvec ignores extra1/extra2, so use the minmax
		 * variant to actually enforce the 0..1 bounds */
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &page_table_allocate_pgste_min,
		.extra2		= &page_table_allocate_pgste_max,
	},
	{ }
};

static struct ctl_table page_table_sysctl_dir[] = {
	{
		.procname	= "vm",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= page_table_sysctl,
	},
	{ }
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);

#endif /* CONFIG_PGSTE */

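/*
 * Region and segment (CRST) tables occupy four pages (order 2), i.e.
 * 2048 entries of 8 bytes each. page_to_phys() can be returned as a
 * virtual address here because kernel virtual and physical addresses
 * coincide for this mapping.
 */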
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return NULL;
	arch_set_page_dat(page, 2);
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, 2);
}

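/*
 * Runs on each CPU: tasks currently attached to "mm" load the new
 * ASCE, and every CPU flushes its local TLB.
 */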
static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	if (current->active_mm == mm) {
		clear_user_asce();
		set_user_asce(mm);
	}
	__tlb_flush_local();
}

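/*
 * Grow the user address space one level per loop iteration: allocate a
 * new top-level region table, point it at the old top level, and switch
 * the ASCE to the wider region type. The final 5-level step sets
 * asce_limit to -PAGE_SIZE, the highest page-aligned address.
 */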
int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
	unsigned long *table, *pgd;
	int rc, notify;

	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
	VM_BUG_ON(mm->context.asce_limit < _REGION2_SIZE);
	if (end >= TASK_SIZE_MAX)
		return -ENOMEM;
	rc = 0;
	notify = 0;
	while (mm->context.asce_limit < end) {
		table = crst_table_alloc(mm);
		if (!table) {
			rc = -ENOMEM;
			break;
		}
		spin_lock_bh(&mm->page_table_lock);
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit == _REGION2_SIZE) {
			crst_table_init(table, _REGION2_ENTRY_EMPTY);
			p4d_populate(mm, (p4d_t *) table, (pud_t *) pgd);
			mm->pgd = (pgd_t *) table;
			mm->context.asce_limit = _REGION1_SIZE;
			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
		} else {
			crst_table_init(table, _REGION1_ENTRY_EMPTY);
			pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd);
			mm->pgd = (pgd_t *) table;
			mm->context.asce_limit = -PAGE_SIZE;
			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
		}
		notify = 1;
		spin_unlock_bh(&mm->page_table_lock);
	}
	if (notify)
		on_each_cpu(__crst_table_upgrade, mm, 0);
	return rc;
}

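/*
 * Shrink a 64-bit address space back to a 2-level (segment table only)
 * ASCE, used for 31-bit compat tasks.
 */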
void crst_table_downgrade(struct mm_struct *mm)
{
	pgd_t *pgd;

	/* downgrade should only happen from 3 to 2 levels (compat only) */
	VM_BUG_ON(mm->context.asce_limit != _REGION2_SIZE);

	if (current->active_mm == mm) {
		clear_user_asce();
		__tlb_flush_mm(mm);
	}

	pgd = mm->pgd;
	mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
	mm->context.asce_limit = _REGION3_SIZE;
	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
	crst_table_free(mm, (unsigned long *) pgd);

	if (current->active_mm == mm)
		set_user_asce(mm);
}

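/*
 * Atomically toggle the given bits in *v and return the new value.
 * Used for the fragment-allocation bitmask kept in page->_mapcount.
 */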
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

#ifdef CONFIG_PGSTE

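/*
 * Allocate a full 4K page table: the lower 2K hold the PTEs (set to
 * _PAGE_INVALID), the upper 2K hold the zeroed PGSTEs.
 */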
struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct page *page;
	unsigned long *table;

	page = alloc_page(GFP_KERNEL);
	if (page) {
		table = (unsigned long *) page_to_phys(page);
		clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
		clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
	}
	return page;
}

void page_table_free_pgste(struct page *page)
{
	__free_page(page);
}

#endif /* CONFIG_PGSTE */

/*
 * page table entry allocation/free routines.
 */
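/*
 * A 4K page normally carries two independent 2K page tables. Their state
 * is tracked in page->_mapcount: bits 0-1 mark which halves are in use,
 * bits 4-5 mark halves waiting for an RCU-delayed free. Pages with a
 * free half are kept on mm->context.pgtable_list. A page table with
 * PGSTEs uses the whole page and sets both low bits (value 3).
 */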
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	unsigned long *table;
	struct page *page;
	unsigned int mask, bit;

	/* Try to get a fragment of a 4K page as a 2K page table */
	if (!mm_alloc_pgste(mm)) {
		table = NULL;
		spin_lock_bh(&mm->context.lock);
		if (!list_empty(&mm->context.pgtable_list)) {
			page = list_first_entry(&mm->context.pgtable_list,
						struct page, lru);
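			/*
			 * Fold the pending-free bits (high nibble) into the
			 * in-use bits: a half that still awaits its RCU
			 * grace period must not be handed out again.
			 */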
			mask = atomic_read(&page->_mapcount);
			mask = (mask | (mask >> 4)) & 3;
			if (mask != 3) {
				table = (unsigned long *) page_to_phys(page);
				bit = mask & 1;		/* =1 -> second 2K */
				if (bit)
					table += PTRS_PER_PTE;
				atomic_xor_bits(&page->_mapcount, 1U << bit);
				list_del(&page->lru);
			}
		}
		spin_unlock_bh(&mm->context.lock);
		if (table)
			return table;
	}
	/* Allocate a fresh page */
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	arch_set_page_dat(page, 0);
	/* Initialize page table */
	table = (unsigned long *) page_to_phys(page);
	if (mm_alloc_pgste(mm)) {
		/* Return 4K page table with PGSTEs */
		atomic_set(&page->_mapcount, 3);
		clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
		clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
	} else {
		/* Return the first 2K fragment of the page */
		atomic_set(&page->_mapcount, 1);
		clear_table(table, _PAGE_INVALID, PAGE_SIZE);
		spin_lock_bh(&mm->context.lock);
		list_add(&page->lru, &mm->context.pgtable_list);
		spin_unlock_bh(&mm->context.lock);
	}
	return table;
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (!mm_alloc_pgste(mm)) {
		/* Free 2K page table fragment of a 4K page */
		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
		spin_lock_bh(&mm->context.lock);
		mask = atomic_xor_bits(&page->_mapcount, 1U << bit);
		if (mask & 3)
			list_add(&page->lru, &mm->context.pgtable_list);
		else
			list_del(&page->lru);
		spin_unlock_bh(&mm->context.lock);
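		/*
		 * The 4K page can only be released once both halves are
		 * unused and no half is pending a delayed free.
		 */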
		if (mask != 0)
			return;
	}

	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	__free_page(page);
}

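/*
 * Defer the free until after an RCU grace period. The pointer passed to
 * tlb_remove_table() is tagged in its low bits so __tlb_remove_table()
 * knows what to release: 0 for an order-2 CRST table, 1 or 2 for the
 * first or second 2K half, 3 for a 4K page table with PGSTEs.
 */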
void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
			 unsigned long vmaddr)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (mm_alloc_pgste(mm)) {
		gmap_unlink(mm, table, vmaddr);
		table = (unsigned long *) (__pa(table) | 3);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
	spin_lock_bh(&mm->context.lock);
	mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit);
	if (mask & 3)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	else
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.lock);
	table = (unsigned long *) (__pa(table) | (1U << bit));
	tlb_remove_table(tlb, table);
}

static void __tlb_remove_table(void *_table)
{
	unsigned int mask = (unsigned long) _table & 3;
	void *table = (void *)((unsigned long) _table ^ mask);
	struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);

	switch (mask) {
	case 0:		/* pmd, pud, or p4d */
		free_pages((unsigned long) table, 2);
		break;
	case 1:		/* lower 2K of a 4K page table */
	case 2:		/* higher 2K of a 4K page table */
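		/* clear this half's pending bit; free only when fully idle */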
		if (atomic_xor_bits(&page->_mapcount, mask << 4) != 0)
			break;
		/* fallthrough */
	case 3:		/* 4K page table with pgstes */
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
		break;
	}
}

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

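/* Hand the current batch to RCU; the tables are freed after a grace period. */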
void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

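/*
 * Queue a table for delayed freeing. If no batch page can be allocated
 * (GFP_NOWAIT), fall back to flushing the mm and synchronizing with all
 * CPUs via IPI before freeing the table directly.
 */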
void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	tlb->mm->context.flush_mm = 1;
	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			__tlb_flush_mm_lazy(tlb->mm);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_flush_mmu(tlb);
}