xref: /openbmc/linux/arch/s390/mm/pgalloc.c (revision b35565bb)
/*
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/mm.h>
#include <linux/sysctl.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PGSTE

static int page_table_allocate_pgste_min = 0;
static int page_table_allocate_pgste_max = 1;
int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= proc_dointvec,
		.extra1		= &page_table_allocate_pgste_min,
		.extra2		= &page_table_allocate_pgste_max,
	},
	{ }
};

static struct ctl_table page_table_sysctl_dir[] = {
	{
		.procname	= "vm",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= page_table_sysctl,
	},
	{ }
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);
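
/*
 * The table above surfaces a single tunable, /proc/sys/vm/allocate_pgste.
 * When set, new page tables are allocated as full 4K pages with PGSTEs
 * attached, which an mm needs before it can back KVM guests. A typical
 * (illustrative, not mandated by this file) way to flip it from user
 * space before starting a guest would be:
 *
 *	echo 1 > /proc/sys/vm/allocate_pgste
 *
 * The min/max bounds above clamp writes to 0 or 1.
 */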

#endif /* CONFIG_PGSTE */

unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return NULL;
	arch_set_page_dat(page, 2);
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, 2);
}
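
/*
 * A note on the pair above: region and segment (crst) tables on s390
 * are 16K, hence the order-2 allocation, and hold 2048 eight-byte
 * entries. Returning page_to_phys() as a pointer relies on the kernel's
 * one-to-one mapping, under which the physical address of these tables
 * is directly usable as a kernel virtual address.
 */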

static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	if (current->active_mm == mm) {
		clear_user_asce();
		set_user_asce(mm);
	}
	__tlb_flush_local();
}

int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
	unsigned long *table, *pgd;
	int rc, notify;

	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
	VM_BUG_ON(mm->context.asce_limit < _REGION2_SIZE);
	if (end >= TASK_SIZE_MAX)
		return -ENOMEM;
	rc = 0;
	notify = 0;
	while (mm->context.asce_limit < end) {
		table = crst_table_alloc(mm);
		if (!table) {
			rc = -ENOMEM;
			break;
		}
		spin_lock_bh(&mm->page_table_lock);
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit == _REGION2_SIZE) {
			/* 3 -> 4 levels: put a region-second table on top */
			crst_table_init(table, _REGION2_ENTRY_EMPTY);
			p4d_populate(mm, (p4d_t *) table, (pud_t *) pgd);
			mm->pgd = (pgd_t *) table;
			mm->context.asce_limit = _REGION1_SIZE;
			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
		} else {
			/* 4 -> 5 levels: put a region-first table on top */
			crst_table_init(table, _REGION1_ENTRY_EMPTY);
			pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd);
			mm->pgd = (pgd_t *) table;
			/* -PAGE_SIZE == TASK_SIZE_MAX, the full address space */
			mm->context.asce_limit = -PAGE_SIZE;
			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
		}
		notify = 1;
		spin_unlock_bh(&mm->page_table_lock);
	}
	if (notify)
		on_each_cpu(__crst_table_upgrade, mm, 0);
	return rc;
}
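
/*
 * Illustrative call sequence, assuming names only from this file: the
 * s390 mmap path invokes crst_table_upgrade() when a mapping request
 * does not fit below the current asce_limit, conceptually:
 *
 *	if (addr + len > mm->context.asce_limit &&
 *	    addr + len <= TASK_SIZE)
 *		rc = crst_table_upgrade(mm, addr + len);
 *
 * Each loop iteration above adds exactly one translation level, so a
 * 3-to-5 level upgrade allocates two crst tables and rewrites the asce
 * twice before the final on_each_cpu() broadcast makes every CPU
 * running this mm reload it.
 */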

void crst_table_downgrade(struct mm_struct *mm)
{
	pgd_t *pgd;

	/* downgrade should only happen from 3 to 2 levels (compat only) */
	VM_BUG_ON(mm->context.asce_limit != _REGION2_SIZE);

	if (current->active_mm == mm) {
		clear_user_asce();
		__tlb_flush_mm(mm);
	}

	pgd = mm->pgd;
	mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
	mm->context.asce_limit = _REGION3_SIZE;
	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
	crst_table_free(mm, (unsigned long *) pgd);

	if (current->active_mm == mm)
		set_user_asce(mm);
}
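
/*
 * Note the ordering in crst_table_downgrade(): the asce is detached and
 * the TLB flushed *before* mm->pgd is switched to the inner segment
 * table, so no CPU can still translate through the old region-third
 * table while it is freed. The caller is the 31-bit compat exec path,
 * where a 2-level (segment table) address space covers the 2G limit.
 */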

static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}
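
/*
 * The pte-fragment code below reuses page->_mapcount as a small bitmap:
 * bits 0-1 track which 2K halves of the page are allocated, and bits 4-5
 * mark halves that sit in a pending RCU free. atomic_xor_bits() toggles
 * those bits and returns the new value; for example, starting from
 * _mapcount == 1 (lower half in use), atomic_xor_bits(&page->_mapcount, 2)
 * yields 3, meaning both halves are now allocated.
 */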

#ifdef CONFIG_PGSTE

struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct page *page;
	unsigned long *table;

	page = alloc_page(GFP_KERNEL);
	if (page) {
		table = (unsigned long *) page_to_phys(page);
		clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
		clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
	}
	return page;
}

void page_table_free_pgste(struct page *page)
{
	__free_page(page);
}

#endif /* CONFIG_PGSTE */
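
/*
 * Layout note: a PGSTE page table occupies a full 4K page. The lower 2K
 * hold the 256 ptes (initialized to _PAGE_INVALID), and the upper 2K hold
 * the matching page status table entries (PGSTEs, zero-initialized) that
 * KVM uses for guest storage keys and related per-page state.
 */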

/*
 * page table entry allocation/free routines.
 */
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	unsigned long *table;
	struct page *page;
	unsigned int mask, bit;

	/* Try to get a fragment of a 4K page as a 2K page table */
	if (!mm_alloc_pgste(mm)) {
		table = NULL;
		spin_lock_bh(&mm->context.lock);
		if (!list_empty(&mm->context.pgtable_list)) {
			page = list_first_entry(&mm->context.pgtable_list,
						struct page, lru);
			mask = atomic_read(&page->_mapcount);
			/*
			 * Fold the pending-free bits into the alloc bits;
			 * mask == 3 means neither 2K half is available.
			 */
			mask = (mask | (mask >> 4)) & 3;
			if (mask != 3) {
				table = (unsigned long *) page_to_phys(page);
				bit = mask & 1;		/* =1 -> second 2K */
				if (bit)
					table += PTRS_PER_PTE;
				atomic_xor_bits(&page->_mapcount, 1U << bit);
				list_del(&page->lru);
			}
		}
		spin_unlock_bh(&mm->context.lock);
		if (table)
			return table;
	}
	/* Allocate a fresh page */
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	arch_set_page_dat(page, 0);
	/* Initialize page table */
	table = (unsigned long *) page_to_phys(page);
	if (mm_alloc_pgste(mm)) {
		/* Return 4K page table with PGSTEs */
		atomic_set(&page->_mapcount, 3);
		clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
		clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
	} else {
		/* Return the first 2K fragment of the page */
		atomic_set(&page->_mapcount, 1);
		clear_table(table, _PAGE_INVALID, PAGE_SIZE);
		spin_lock_bh(&mm->context.lock);
		list_add(&page->lru, &mm->context.pgtable_list);
		spin_unlock_bh(&mm->context.lock);
	}
	return table;
}
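
/*
 * A minimal sketch of the fragment lifecycle, assuming a non-PGSTE mm
 * and using only names from this file:
 *
 *	unsigned long *t1 = page_table_alloc(mm); // fresh page, _mapcount = 1,
 *						  // page on pgtable_list
 *	unsigned long *t2 = page_table_alloc(mm); // second 2K of the same
 *						  // page, _mapcount = 3,
 *						  // page off pgtable_list
 *	page_table_free(mm, t1);		  // _mapcount = 2, page back
 *						  // on pgtable_list
 *	page_table_free(mm, t2);		  // _mapcount = 0, page freed
 */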

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (!mm_alloc_pgste(mm)) {
		/* Free 2K page table fragment of a 4K page */
		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
		spin_lock_bh(&mm->context.lock);
		mask = atomic_xor_bits(&page->_mapcount, 1U << bit);
		if (mask & 3)
			list_add(&page->lru, &mm->context.pgtable_list);
		else
			list_del(&page->lru);
		spin_unlock_bh(&mm->context.lock);
		if (mask != 0)
			return;
	}

	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	__free_page(page);
}

void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
			 unsigned long vmaddr)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (mm_alloc_pgste(mm)) {
		gmap_unlink(mm, table, vmaddr);
		/* Tag the pointer with 3: full 4K table with PGSTEs */
		table = (unsigned long *) (__pa(table) | 3);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
	spin_lock_bh(&mm->context.lock);
	/* Clear the alloc bit and set the pending-free bit for this half */
	mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit);
	if (mask & 3)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	else
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.lock);
	/* Tag the pointer with 1 or 2: which 2K half to release later */
	table = (unsigned long *) (__pa(table) | (1U << bit));
	tlb_remove_table(tlb, table);
}

static void __tlb_remove_table(void *_table)
{
	unsigned int mask = (unsigned long) _table & 3;
	void *table = (void *)((unsigned long) _table ^ mask);
	struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);

	switch (mask) {
	case 0:		/* pmd, pud, or p4d */
		free_pages((unsigned long) table, 2);
		break;
	case 1:		/* lower 2K of a 4K page table */
	case 2:		/* upper 2K of a 4K page table */
		if (atomic_xor_bits(&page->_mapcount, mask << 4) != 0)
			break;
		/* fallthrough */
	case 3:		/* 4K page table with pgstes */
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
		break;
	}
}
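
/*
 * The low two bits of each batched pointer encode what to free, matching
 * the tags applied in page_table_free_rcu():
 *
 *	00 - order-2 crst table (pmd/pud/p4d level)
 *	01 - lower 2K pte fragment
 *	10 - upper 2K pte fragment
 *	11 - full 4K page table with PGSTEs
 *
 * For the fragment cases, xor-ing mask << 4 clears this half's pending
 * bit; the page itself is freed only once the result is zero, i.e. no
 * half is still allocated or pending.
 */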

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	tlb->mm->context.flush_mm = 1;
	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			__tlb_flush_mm_lazy(tlb->mm);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_flush_mmu(tlb);
}
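
/*
 * Taken together, the three functions above implement the deferred-free
 * path: tlb_remove_table() queues tagged pointers into a one-page batch,
 * tlb_table_flush() hands a full (or final) batch to RCU, and
 * tlb_remove_table_rcu() walks it after a grace period. If the batch
 * page cannot be allocated, tlb_remove_table_one() falls back to an IPI
 * broadcast so that no concurrent lockless walker can still see the
 * table before it is freed.
 */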
372