xref: /openbmc/linux/arch/s390/mm/pgalloc.c (revision 160b8e75)
// SPDX-License-Identifier: GPL-2.0
/*
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/mm.h>
#include <linux/sysctl.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PGSTE

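/*
 * The "allocate_pgste" sysctl (/proc/sys/vm/allocate_pgste) selects whether
 * new mms get full 4K page tables with PGSTEs, which KVM needs for guest
 * storage keys, instead of the default 2K fragments. proc_dointvec_minmax
 * enforces the 0..1 bounds below (plain proc_dointvec would ignore
 * extra1/extra2).
 */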
static int page_table_allocate_pgste_min = 0;
static int page_table_allocate_pgste_max = 1;
int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &page_table_allocate_pgste_min,
		.extra2		= &page_table_allocate_pgste_max,
	},
	{ }
};

static struct ctl_table page_table_sysctl_dir[] = {
	{
		.procname	= "vm",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= page_table_sysctl,
	},
	{ }
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);

#endif /* CONFIG_PGSTE */

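/*
 * CRST (region/segment) tables are order-2 allocations: four pages holding
 * 2048 entries of 8 bytes each. The returned origin is what gets plugged
 * into higher-level table entries and, for the top level, into the ASCE.
 */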
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return NULL;
	arch_set_page_dat(page, 2);
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, 2);
}

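/*
 * Run on each CPU after an upgrade: a CPU currently executing in the
 * upgraded mm must reload the user ASCE to pick up the new top-level
 * table, then flush its local TLB of translations built from the old
 * hierarchy.
 */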
static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	if (current->active_mm == mm)
		set_user_asce(mm);
	__tlb_flush_local();
}

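/*
 * Grow the table hierarchy until the address space covers "end". Each
 * loop iteration stacks one new top-level table on top of the current
 * pgd: 4 TB (3 levels) -> 8 PB (4 levels) -> 16 EB (5 levels). Called,
 * for example, from the mmap code when a requested mapping does not fit
 * below the current asce_limit.
 */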
int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
	unsigned long *table, *pgd;
	int rc, notify;

	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
	VM_BUG_ON(mm->context.asce_limit < _REGION2_SIZE);
	rc = 0;
	notify = 0;
	while (mm->context.asce_limit < end) {
		table = crst_table_alloc(mm);
		if (!table) {
			rc = -ENOMEM;
			break;
		}
		spin_lock_bh(&mm->page_table_lock);
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit == _REGION2_SIZE) {
			crst_table_init(table, _REGION2_ENTRY_EMPTY);
			p4d_populate(mm, (p4d_t *) table, (pud_t *) pgd);
			mm->pgd = (pgd_t *) table;
			mm->context.asce_limit = _REGION1_SIZE;
			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
		} else {
			crst_table_init(table, _REGION1_ENTRY_EMPTY);
			pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd);
			mm->pgd = (pgd_t *) table;
			mm->context.asce_limit = -PAGE_SIZE;
			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
		}
		notify = 1;
		spin_unlock_bh(&mm->page_table_lock);
	}
	if (notify)
		on_each_cpu(__crst_table_upgrade, mm, 0);
	return rc;
}

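/*
 * Shrink from 3 levels (4 TB) back to 2 levels (2 GB); only used when a
 * task switches to a 31-bit compat address space. The old top-level table
 * is freed once the ASCE pointing to it has been replaced and flushed.
 */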
void crst_table_downgrade(struct mm_struct *mm)
{
	pgd_t *pgd;

	/* downgrade should only happen from 3 to 2 levels (compat only) */
	VM_BUG_ON(mm->context.asce_limit != _REGION2_SIZE);

	if (current->active_mm == mm) {
		clear_user_asce();
		__tlb_flush_mm(mm);
	}

	pgd = mm->pgd;
	mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
	mm->context.asce_limit = _REGION3_SIZE;
	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
	crst_table_free(mm, (unsigned long *) pgd);

	if (current->active_mm == mm)
		set_user_asce(mm);
}

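/*
 * Atomically toggle "bits" in *v and return the new value. A worked
 * example for the fragment tracking below: old = 0x01 (lower 2K
 * allocated), bits = 0x11 (retire the lower 2K) -> new = 0x10 (lower 2K
 * pending free, nothing allocated).
 */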
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

#ifdef CONFIG_PGSTE

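/*
 * A PGSTE page table occupies a full 4K page: the first 2K hold the PTEs
 * (initialized to _PAGE_INVALID), the second 2K hold the matching PGSTEs
 * (zeroed), one per PTE.
 */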
struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct page *page;
	u64 *table;

	page = alloc_page(GFP_KERNEL);
	if (page) {
		table = (u64 *)page_to_phys(page);
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	}
	return page;
}

void page_table_free_pgste(struct page *page)
{
	__free_page(page);
}

#endif /* CONFIG_PGSTE */

/*
 * page table entry allocation/free routines.
 */
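/*
 * s390 page tables are 2K (256 entries of 8 bytes), so two of them share
 * one 4K page. The page's _mapcount tracks the state of both halves:
 *
 *	bit 0/1: lower/upper 2K fragment is allocated
 *	bit 4/5: lower/upper 2K fragment is pending an RCU grace period
 *
 * Pages with a free fragment are kept on mm->context.pgtable_list. A 4K
 * PGSTE table sets both low bits (value 3) and is never split.
 */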
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	unsigned long *table;
	struct page *page;
	unsigned int mask, bit;

	/* Try to get a fragment of a 4K page as a 2K page table */
	if (!mm_alloc_pgste(mm)) {
		table = NULL;
		spin_lock_bh(&mm->context.lock);
		if (!list_empty(&mm->context.pgtable_list)) {
			page = list_first_entry(&mm->context.pgtable_list,
						struct page, lru);
			mask = atomic_read(&page->_mapcount);
			mask = (mask | (mask >> 4)) & 3;
			if (mask != 3) {
				table = (unsigned long *) page_to_phys(page);
				bit = mask & 1;		/* =1 -> second 2K */
				if (bit)
					table += PTRS_PER_PTE;
				atomic_xor_bits(&page->_mapcount, 1U << bit);
				list_del(&page->lru);
			}
		}
		spin_unlock_bh(&mm->context.lock);
		if (table)
			return table;
	}
	/* Allocate a fresh page */
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	arch_set_page_dat(page, 0);
	/* Initialize page table */
	table = (unsigned long *) page_to_phys(page);
	if (mm_alloc_pgste(mm)) {
		/* Return 4K page table with PGSTEs */
		atomic_set(&page->_mapcount, 3);
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	} else {
		/* Return the first 2K fragment of the page */
		atomic_set(&page->_mapcount, 1);
		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
		spin_lock_bh(&mm->context.lock);
		list_add(&page->lru, &mm->context.pgtable_list);
		spin_unlock_bh(&mm->context.lock);
	}
	return table;
}

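/*
 * Release one 2K fragment (or a full PGSTE table). A minimal usage
 * sketch, mirroring what callers such as the pte_alloc wrappers in
 * asm/pgalloc.h are expected to do:
 *
 *	unsigned long *table = page_table_alloc(mm);
 *	if (!table)
 *		return -ENOMEM;
 *	...
 *	page_table_free(mm, table);
 *
 * The backing page goes back to the buddy allocator only once both
 * fragments are free (mask == 0).
 */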
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (!mm_alloc_pgste(mm)) {
		/* Free 2K page table fragment of a 4K page */
		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
		spin_lock_bh(&mm->context.lock);
		mask = atomic_xor_bits(&page->_mapcount, 1U << bit);
		if (mask & 3)
			list_add(&page->lru, &mm->context.pgtable_list);
		else
			list_del(&page->lru);
		spin_unlock_bh(&mm->context.lock);
		if (mask != 0)
			return;
	}

	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	__free_page(page);
}

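/*
 * Deferred variant for munmap/exit paths: the fragment is only marked
 * pending here (XOR with 0x11 << bit clears "allocated" and sets
 * "pending"), and the table pointer handed to tlb_remove_table() is
 * tagged in its low bits so __tlb_remove_table() knows which half to
 * retire after the grace period.
 */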
void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
			 unsigned long vmaddr)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (mm_alloc_pgste(mm)) {
		gmap_unlink(mm, table, vmaddr);
		table = (unsigned long *) (__pa(table) | 3);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
	spin_lock_bh(&mm->context.lock);
	mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit);
	if (mask & 3)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	else
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.lock);
	table = (unsigned long *) (__pa(table) | (1U << bit));
	tlb_remove_table(tlb, table);
}

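/*
 * Final free after all CPUs are known to have dropped their references.
 * The low two bits of the tagged pointer say what is being freed:
 * 0 - an order-2 CRST table, 1/2 - the lower/upper 2K fragment of a page
 * table, 3 - a full 4K page table with PGSTEs.
 */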
static void __tlb_remove_table(void *_table)
{
	unsigned int mask = (unsigned long) _table & 3;
	void *table = (void *)((unsigned long) _table ^ mask);
	struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);

	switch (mask) {
	case 0:		/* pmd, pud, or p4d */
		free_pages((unsigned long) table, 2);
		break;
	case 1:		/* lower 2K of a 4K page table */
	case 2:		/* higher 2K of a 4K page table */
		if (atomic_xor_bits(&page->_mapcount, mask << 4) != 0)
			break;
		/* fallthrough */
	case 3:		/* 4K page table with pgstes */
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
		break;
	}
}

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

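/*
 * Hand the current batch to RCU; tlb_remove_table_rcu() frees every table
 * in it once a grace period has elapsed.
 */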
void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

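/*
 * Queue a table for deferred freeing. If no batch page can be allocated
 * the slow path synchronizes with all CPUs by IPI and frees the table
 * immediately.
 */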
void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	tlb->mm->context.flush_mm = 1;
	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			__tlb_flush_mm_lazy(tlb->mm);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_flush_mmu(tlb);
}
369