xref: /openbmc/linux/arch/s390/mm/pgtable.c (revision 179dd8c0)
1 /*
2  *    Copyright IBM Corp. 2007, 2011
3  *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
4  */
5 
6 #include <linux/sched.h>
7 #include <linux/kernel.h>
8 #include <linux/errno.h>
9 #include <linux/gfp.h>
10 #include <linux/mm.h>
11 #include <linux/swap.h>
12 #include <linux/smp.h>
13 #include <linux/highmem.h>
14 #include <linux/pagemap.h>
15 #include <linux/spinlock.h>
16 #include <linux/module.h>
17 #include <linux/quicklist.h>
18 #include <linux/rcupdate.h>
19 #include <linux/slab.h>
20 #include <linux/swapops.h>
21 #include <linux/sysctl.h>
22 #include <linux/ksm.h>
23 #include <linux/mman.h>
24 
25 #include <asm/pgtable.h>
26 #include <asm/pgalloc.h>
27 #include <asm/tlb.h>
28 #include <asm/tlbflush.h>
29 #include <asm/mmu_context.h>
30 
31 #define ALLOC_ORDER	2
32 #define FRAG_MASK	0x03
33 
34 int HPAGE_SHIFT;
35 
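/*
 * Allocate a 4-page (order-2) crst table for a region or segment table
 * level and return its address, or NULL if no memory is available. The
 * table is released again with crst_table_free().
 */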
36 unsigned long *crst_table_alloc(struct mm_struct *mm)
37 {
38 	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
39 
40 	if (!page)
41 		return NULL;
42 	return (unsigned long *) page_to_phys(page);
43 }
44 
45 void crst_table_free(struct mm_struct *mm, unsigned long *table)
46 {
47 	free_pages((unsigned long) table, ALLOC_ORDER);
48 }
49 
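/*
 * IPI callback for crst_table_upgrade(): if the upgraded mm is active on
 * this CPU, reload the user ASCE and flush the local TLB.
 */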
50 static void __crst_table_upgrade(void *arg)
51 {
52 	struct mm_struct *mm = arg;
53 
54 	if (current->active_mm == mm) {
55 		clear_user_asce();
56 		set_user_asce(mm);
57 	}
58 	__tlb_flush_local();
59 }
60 
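/*
 * Extend the address space of @mm to at least @limit by putting new region
 * tables on top of the existing hierarchy (2 GB -> 4 TB -> 8 PB).
 * Returns 0 on success or -ENOMEM if a new crst table cannot be allocated.
 */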
61 int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
62 {
63 	unsigned long *table, *pgd;
64 	unsigned long entry;
65 	int flush;
66 
67 	BUG_ON(limit > (1UL << 53));
68 	flush = 0;
69 repeat:
70 	table = crst_table_alloc(mm);
71 	if (!table)
72 		return -ENOMEM;
73 	spin_lock_bh(&mm->page_table_lock);
74 	if (mm->context.asce_limit < limit) {
75 		pgd = (unsigned long *) mm->pgd;
76 		if (mm->context.asce_limit <= (1UL << 31)) {
77 			entry = _REGION3_ENTRY_EMPTY;
78 			mm->context.asce_limit = 1UL << 42;
79 			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
80 						_ASCE_USER_BITS |
81 						_ASCE_TYPE_REGION3;
82 		} else {
83 			entry = _REGION2_ENTRY_EMPTY;
84 			mm->context.asce_limit = 1UL << 53;
85 			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
86 						_ASCE_USER_BITS |
87 						_ASCE_TYPE_REGION2;
88 		}
89 		crst_table_init(table, entry);
90 		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
91 		mm->pgd = (pgd_t *) table;
92 		mm->task_size = mm->context.asce_limit;
93 		table = NULL;
94 		flush = 1;
95 	}
96 	spin_unlock_bh(&mm->page_table_lock);
97 	if (table)
98 		crst_table_free(mm, table);
99 	if (mm->context.asce_limit < limit)
100 		goto repeat;
101 	if (flush)
102 		on_each_cpu(__crst_table_upgrade, mm, 0);
103 	return 0;
104 }
105 
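/*
 * Shrink the address space of @mm back down to @limit by removing the
 * topmost region tables again; the counterpart of crst_table_upgrade().
 */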
106 void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
107 {
108 	pgd_t *pgd;
109 
110 	if (current->active_mm == mm) {
111 		clear_user_asce();
112 		__tlb_flush_mm(mm);
113 	}
114 	while (mm->context.asce_limit > limit) {
115 		pgd = mm->pgd;
116 		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
117 		case _REGION_ENTRY_TYPE_R2:
118 			mm->context.asce_limit = 1UL << 42;
119 			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
120 						_ASCE_USER_BITS |
121 						_ASCE_TYPE_REGION3;
122 			break;
123 		case _REGION_ENTRY_TYPE_R3:
124 			mm->context.asce_limit = 1UL << 31;
125 			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
126 						_ASCE_USER_BITS |
127 						_ASCE_TYPE_SEGMENT;
128 			break;
129 		default:
130 			BUG();
131 		}
132 		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
133 		mm->task_size = mm->context.asce_limit;
134 		crst_table_free(mm, (unsigned long *) pgd);
135 	}
136 	if (current->active_mm == mm)
137 		set_user_asce(mm);
138 }
139 
140 #ifdef CONFIG_PGSTE
141 
142 /**
143  * gmap_alloc - allocate a guest address space
144  * @mm: pointer to the parent mm_struct
145  * @limit: maximum size of the gmap address space
146  *
147  * Returns a guest address space structure.
148  */
149 struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit)
150 {
151 	struct gmap *gmap;
152 	struct page *page;
153 	unsigned long *table;
154 	unsigned long etype, atype;
155 
156 	if (limit < (1UL << 31)) {
157 		limit = (1UL << 31) - 1;
158 		atype = _ASCE_TYPE_SEGMENT;
159 		etype = _SEGMENT_ENTRY_EMPTY;
160 	} else if (limit < (1UL << 42)) {
161 		limit = (1UL << 42) - 1;
162 		atype = _ASCE_TYPE_REGION3;
163 		etype = _REGION3_ENTRY_EMPTY;
164 	} else if (limit < (1UL << 53)) {
165 		limit = (1UL << 53) - 1;
166 		atype = _ASCE_TYPE_REGION2;
167 		etype = _REGION2_ENTRY_EMPTY;
168 	} else {
169 		limit = -1UL;
170 		atype = _ASCE_TYPE_REGION1;
171 		etype = _REGION1_ENTRY_EMPTY;
172 	}
173 	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
174 	if (!gmap)
175 		goto out;
176 	INIT_LIST_HEAD(&gmap->crst_list);
177 	INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
178 	INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
179 	spin_lock_init(&gmap->guest_table_lock);
180 	gmap->mm = mm;
181 	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
182 	if (!page)
183 		goto out_free;
184 	page->index = 0;
185 	list_add(&page->lru, &gmap->crst_list);
186 	table = (unsigned long *) page_to_phys(page);
187 	crst_table_init(table, etype);
188 	gmap->table = table;
189 	gmap->asce = atype | _ASCE_TABLE_LENGTH |
190 		_ASCE_USER_BITS | __pa(table);
191 	gmap->asce_end = limit;
192 	down_write(&mm->mmap_sem);
193 	list_add(&gmap->list, &mm->context.gmap_list);
194 	up_write(&mm->mmap_sem);
195 	return gmap;
196 
197 out_free:
198 	kfree(gmap);
199 out:
200 	return NULL;
201 }
202 EXPORT_SYMBOL_GPL(gmap_alloc);
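
/*
 * Illustrative life cycle of a gmap as a KVM-like user might drive it;
 * purely a sketch, the variable names are made up and all error handling
 * is omitted:
 *
 *	struct gmap *g = gmap_alloc(current->mm, 1UL << 44);
 *	gmap_map_segment(g, userspace_addr, guest_phys_addr, size);
 *	gmap_enable(g);
 *	...
 *	gmap_fault(g, gaddr, FAULT_FLAG_WRITE);
 *	...
 *	gmap_disable(g);
 *	gmap_free(g);
 */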
203 
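/*
 * Flush the TLB entries of a guest address space, either by IDTE on the
 * gmap ASCE or, if IDTE is not available, with a global TLB flush.
 */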
204 static void gmap_flush_tlb(struct gmap *gmap)
205 {
206 	if (MACHINE_HAS_IDTE)
207 		__tlb_flush_asce(gmap->mm, gmap->asce);
208 	else
209 		__tlb_flush_global();
210 }
211 
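/*
 * Delete all entries of a gmap radix tree, 16 indices at a time, so that
 * the tree itself can go away; used by gmap_free() for both lookup trees.
 */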
212 static void gmap_radix_tree_free(struct radix_tree_root *root)
213 {
214 	struct radix_tree_iter iter;
215 	unsigned long indices[16];
216 	unsigned long index;
217 	void **slot;
218 	int i, nr;
219 
220 	/* A radix tree is freed by deleting all of its entries */
221 	index = 0;
222 	do {
223 		nr = 0;
224 		radix_tree_for_each_slot(slot, root, &iter, index) {
225 			indices[nr] = iter.index;
226 			if (++nr == 16)
227 				break;
228 		}
229 		for (i = 0; i < nr; i++) {
230 			index = indices[i];
231 			radix_tree_delete(root, index);
232 		}
233 	} while (nr > 0);
234 }
235 
236 /**
237  * gmap_free - free a guest address space
238  * @gmap: pointer to the guest address space structure
239  */
240 void gmap_free(struct gmap *gmap)
241 {
242 	struct page *page, *next;
243 
244 	/* Flush tlb. */
245 	if (MACHINE_HAS_IDTE)
246 		__tlb_flush_asce(gmap->mm, gmap->asce);
247 	else
248 		__tlb_flush_global();
249 
250 	/* Free all segment & region tables. */
251 	list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
252 		__free_pages(page, ALLOC_ORDER);
253 	gmap_radix_tree_free(&gmap->guest_to_host);
254 	gmap_radix_tree_free(&gmap->host_to_guest);
255 	down_write(&gmap->mm->mmap_sem);
256 	list_del(&gmap->list);
257 	up_write(&gmap->mm->mmap_sem);
258 	kfree(gmap);
259 }
260 EXPORT_SYMBOL_GPL(gmap_free);
261 
262 /**
263  * gmap_enable - switch primary space to the guest address space
264  * @gmap: pointer to the guest address space structure
265  */
266 void gmap_enable(struct gmap *gmap)
267 {
268 	S390_lowcore.gmap = (unsigned long) gmap;
269 }
270 EXPORT_SYMBOL_GPL(gmap_enable);
271 
272 /**
273  * gmap_disable - switch back to the standard primary address space
274  * @gmap: pointer to the guest address space structure
275  */
276 void gmap_disable(struct gmap *gmap)
277 {
278 	S390_lowcore.gmap = 0UL;
279 }
280 EXPORT_SYMBOL_GPL(gmap_disable);
281 
282 /*
283  * gmap_alloc_table is assumed to be called with mmap_sem held
284  */
285 static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
286 			    unsigned long init, unsigned long gaddr)
287 {
288 	struct page *page;
289 	unsigned long *new;
290 
291 	/* since we don't free the gmap table until gmap_free we can unlock */
292 	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
293 	if (!page)
294 		return -ENOMEM;
295 	new = (unsigned long *) page_to_phys(page);
296 	crst_table_init(new, init);
297 	spin_lock(&gmap->mm->page_table_lock);
298 	if (*table & _REGION_ENTRY_INVALID) {
299 		list_add(&page->lru, &gmap->crst_list);
300 		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
301 			(*table & _REGION_ENTRY_TYPE_MASK);
302 		page->index = gaddr;
303 		page = NULL;
304 	}
305 	spin_unlock(&gmap->mm->page_table_lock);
306 	if (page)
307 		__free_pages(page, ALLOC_ORDER);
308 	return 0;
309 }
310 
311 /**
312  * __gmap_segment_gaddr - find virtual address from segment pointer
313  * @entry: pointer to a segment table entry in the guest address space
314  *
315  * Returns the virtual address in the guest address space for the segment
316  */
317 static unsigned long __gmap_segment_gaddr(unsigned long *entry)
318 {
319 	struct page *page;
320 	unsigned long offset, mask;
321 
322 	offset = (unsigned long) entry / sizeof(unsigned long);
323 	offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
324 	mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
325 	page = virt_to_page((void *)((unsigned long) entry & mask));
326 	return page->index + offset;
327 }
328 
329 /**
330  * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
331  * @gmap: pointer to the guest address space structure
332  * @vmaddr: address in the host process address space
333  *
334  * Returns 1 if a TLB flush is required
335  */
336 static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
337 {
338 	unsigned long *entry;
339 	int flush = 0;
340 
341 	spin_lock(&gmap->guest_table_lock);
342 	entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
343 	if (entry) {
344 		flush = (*entry != _SEGMENT_ENTRY_INVALID);
345 		*entry = _SEGMENT_ENTRY_INVALID;
346 	}
347 	spin_unlock(&gmap->guest_table_lock);
348 	return flush;
349 }
350 
351 /**
352  * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
353  * @gmap: pointer to the guest address space structure
354  * @gaddr: address in the guest address space
355  *
356  * Returns 1 if a TLB flush is required
357  */
358 static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
359 {
360 	unsigned long vmaddr;
361 
362 	vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
363 						   gaddr >> PMD_SHIFT);
364 	return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
365 }
366 
367 /**
368  * gmap_unmap_segment - unmap segment from the guest address space
369  * @gmap: pointer to the guest address space structure
370  * @to: address in the guest address space
371  * @len: length of the memory area to unmap
372  *
373  * Returns 0 if the unmap succeeded, -EINVAL if not.
374  */
375 int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
376 {
377 	unsigned long off;
378 	int flush;
379 
380 	if ((to | len) & (PMD_SIZE - 1))
381 		return -EINVAL;
382 	if (len == 0 || to + len < to)
383 		return -EINVAL;
384 
385 	flush = 0;
386 	down_write(&gmap->mm->mmap_sem);
387 	for (off = 0; off < len; off += PMD_SIZE)
388 		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
389 	up_write(&gmap->mm->mmap_sem);
390 	if (flush)
391 		gmap_flush_tlb(gmap);
392 	return 0;
393 }
394 EXPORT_SYMBOL_GPL(gmap_unmap_segment);
395 
396 /**
397  * gmap_map_segment - map a segment to the guest address space
398  * @gmap: pointer to the guest address space structure
399  * @from: source address in the parent address space
400  * @to: target address in the guest address space
401  * @len: length of the memory area to map
402  *
403  * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
404  */
405 int gmap_map_segment(struct gmap *gmap, unsigned long from,
406 		     unsigned long to, unsigned long len)
407 {
408 	unsigned long off;
409 	int flush;
410 
411 	if ((from | to | len) & (PMD_SIZE - 1))
412 		return -EINVAL;
413 	if (len == 0 || from + len < from || to + len < to ||
414 	    from + len > TASK_MAX_SIZE || to + len > gmap->asce_end)
415 		return -EINVAL;
416 
417 	flush = 0;
418 	down_write(&gmap->mm->mmap_sem);
419 	for (off = 0; off < len; off += PMD_SIZE) {
420 		/* Remove old translation */
421 		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
422 		/* Store new translation */
423 		if (radix_tree_insert(&gmap->guest_to_host,
424 				      (to + off) >> PMD_SHIFT,
425 				      (void *) from + off))
426 			break;
427 	}
428 	up_write(&gmap->mm->mmap_sem);
429 	if (flush)
430 		gmap_flush_tlb(gmap);
431 	if (off >= len)
432 		return 0;
433 	gmap_unmap_segment(gmap, to, len);
434 	return -ENOMEM;
435 }
436 EXPORT_SYMBOL_GPL(gmap_map_segment);
437 
438 /**
439  * __gmap_translate - translate a guest address to a user space address
440  * @gmap: pointer to guest mapping meta data structure
441  * @gaddr: guest address
442  *
443  * Returns user space address which corresponds to the guest address or
444  * -EFAULT if no such mapping exists.
445  * This function does not establish potentially missing page table entries.
446  * The mmap_sem of the mm that belongs to the address space must be held
447  * when this function gets called.
448  */
449 unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
450 {
451 	unsigned long vmaddr;
452 
453 	vmaddr = (unsigned long)
454 		radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
455 	return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
456 }
457 EXPORT_SYMBOL_GPL(__gmap_translate);
458 
459 /**
460  * gmap_translate - translate a guest address to a user space address
461  * @gmap: pointer to guest mapping meta data structure
462  * @gaddr: guest address
463  *
464  * Returns user space address which corresponds to the guest address or
465  * -EFAULT if no such mapping exists.
466  * This function does not establish potentially missing page table entries.
467  */
468 unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
469 {
470 	unsigned long rc;
471 
472 	down_read(&gmap->mm->mmap_sem);
473 	rc = __gmap_translate(gmap, gaddr);
474 	up_read(&gmap->mm->mmap_sem);
475 	return rc;
476 }
477 EXPORT_SYMBOL_GPL(gmap_translate);
478 
479 /**
480  * gmap_unlink - disconnect a page table from the gmap shadow tables
481  * @mm: pointer to the parent mm_struct
482  * @table: pointer to the host page table
483  * @vmaddr: vm address associated with the host page table
484  */
485 static void gmap_unlink(struct mm_struct *mm, unsigned long *table,
486 			unsigned long vmaddr)
487 {
488 	struct gmap *gmap;
489 	int flush;
490 
491 	list_for_each_entry(gmap, &mm->context.gmap_list, list) {
492 		flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
493 		if (flush)
494 			gmap_flush_tlb(gmap);
495 	}
496 }
497 
498 /**
499  * __gmap_link - set up shadow page tables to connect a host to a guest address
500  * @gmap: pointer to guest mapping meta data structure
501  * @gaddr: guest address
502  * @vmaddr: vm address
503  *
504  * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
505  * if the vm address is already mapped to a different guest segment.
506  * The mmap_sem of the mm that belongs to the address space must be held
507  * when this function gets called.
508  */
509 int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
510 {
511 	struct mm_struct *mm;
512 	unsigned long *table;
513 	spinlock_t *ptl;
514 	pgd_t *pgd;
515 	pud_t *pud;
516 	pmd_t *pmd;
517 	int rc;
518 
519 	/* Create higher level tables in the gmap page table */
520 	table = gmap->table;
521 	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
522 		table += (gaddr >> 53) & 0x7ff;
523 		if ((*table & _REGION_ENTRY_INVALID) &&
524 		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
525 				     gaddr & 0xffe0000000000000UL))
526 			return -ENOMEM;
527 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
528 	}
529 	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
530 		table += (gaddr >> 42) & 0x7ff;
531 		if ((*table & _REGION_ENTRY_INVALID) &&
532 		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
533 				     gaddr & 0xfffffc0000000000UL))
534 			return -ENOMEM;
535 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
536 	}
537 	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
538 		table += (gaddr >> 31) & 0x7ff;
539 		if ((*table & _REGION_ENTRY_INVALID) &&
540 		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
541 				     gaddr & 0xffffffff80000000UL))
542 			return -ENOMEM;
543 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
544 	}
545 	table += (gaddr >> 20) & 0x7ff;
546 	/* Walk the parent mm page table */
547 	mm = gmap->mm;
548 	pgd = pgd_offset(mm, vmaddr);
549 	VM_BUG_ON(pgd_none(*pgd));
550 	pud = pud_offset(pgd, vmaddr);
551 	VM_BUG_ON(pud_none(*pud));
552 	pmd = pmd_offset(pud, vmaddr);
553 	VM_BUG_ON(pmd_none(*pmd));
554 	/* large pmds cannot yet be handled */
555 	if (pmd_large(*pmd))
556 		return -EFAULT;
557 	/* Link gmap segment table entry location to page table. */
558 	rc = radix_tree_preload(GFP_KERNEL);
559 	if (rc)
560 		return rc;
561 	ptl = pmd_lock(mm, pmd);
562 	spin_lock(&gmap->guest_table_lock);
563 	if (*table == _SEGMENT_ENTRY_INVALID) {
564 		rc = radix_tree_insert(&gmap->host_to_guest,
565 				       vmaddr >> PMD_SHIFT, table);
566 		if (!rc)
567 			*table = pmd_val(*pmd);
568 	} else
569 		rc = 0;
570 	spin_unlock(&gmap->guest_table_lock);
571 	spin_unlock(ptl);
572 	radix_tree_preload_end();
573 	return rc;
574 }
575 
576 /**
577  * gmap_fault - resolve a fault on a guest address
578  * @gmap: pointer to guest mapping meta data structure
579  * @gaddr: guest address
580  * @fault_flags: flags to pass down to handle_mm_fault()
581  *
582  * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
583  * if the vm address is already mapped to a different guest segment.
584  */
585 int gmap_fault(struct gmap *gmap, unsigned long gaddr,
586 	       unsigned int fault_flags)
587 {
588 	unsigned long vmaddr;
589 	int rc;
590 
591 	down_read(&gmap->mm->mmap_sem);
592 	vmaddr = __gmap_translate(gmap, gaddr);
593 	if (IS_ERR_VALUE(vmaddr)) {
594 		rc = vmaddr;
595 		goto out_up;
596 	}
597 	if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags)) {
598 		rc = -EFAULT;
599 		goto out_up;
600 	}
601 	rc = __gmap_link(gmap, gaddr, vmaddr);
602 out_up:
603 	up_read(&gmap->mm->mmap_sem);
604 	return rc;
605 }
606 EXPORT_SYMBOL_GPL(gmap_fault);
607 
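/*
 * Release the swap or migration entry of a zapped pte and adjust the
 * mm counters accordingly; helper for __gmap_zap() below.
 */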
608 static void gmap_zap_swap_entry(swp_entry_t entry, struct mm_struct *mm)
609 {
610 	if (!non_swap_entry(entry))
611 		dec_mm_counter(mm, MM_SWAPENTS);
612 	else if (is_migration_entry(entry)) {
613 		struct page *page = migration_entry_to_page(entry);
614 
615 		if (PageAnon(page))
616 			dec_mm_counter(mm, MM_ANONPAGES);
617 		else
618 			dec_mm_counter(mm, MM_FILEPAGES);
619 	}
620 	free_swap_and_cache(entry);
621 }
622 
623 /*
624  * __gmap_zap is assumed to be called with mmap_sem held
625  */
626 void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
627 {
628 	unsigned long vmaddr, ptev, pgstev;
629 	pte_t *ptep, pte;
630 	spinlock_t *ptl;
631 	pgste_t pgste;
632 
633 	/* Find the vm address for the guest address */
634 	vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
635 						   gaddr >> PMD_SHIFT);
636 	if (!vmaddr)
637 		return;
638 	vmaddr |= gaddr & ~PMD_MASK;
639 	/* Get pointer to the page table entry */
640 	ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
641 	if (unlikely(!ptep))
642 		return;
643 	pte = *ptep;
644 	if (!pte_swap(pte))
645 		goto out_pte;
646 	/* Zap unused and logically-zero pages */
647 	pgste = pgste_get_lock(ptep);
648 	pgstev = pgste_val(pgste);
649 	ptev = pte_val(pte);
650 	if (((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED) ||
651 	    ((pgstev & _PGSTE_GPS_ZERO) && (ptev & _PAGE_INVALID))) {
652 		gmap_zap_swap_entry(pte_to_swp_entry(pte), gmap->mm);
653 		pte_clear(gmap->mm, vmaddr, ptep);
654 	}
655 	pgste_set_unlock(ptep, pgste);
656 out_pte:
657 	pte_unmap_unlock(ptep, ptl);
658 }
659 EXPORT_SYMBOL_GPL(__gmap_zap);
660 
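/**
 * gmap_discard - zap the host mappings for a range of guest addresses
 * @gmap: pointer to the guest address space structure
 * @from: first guest address of the range
 * @to: guest address after the last page of the range
 *
 * Walks the guest-to-host mapping segment-wise and zaps the matching
 * page ranges in the parent mm.
 */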
661 void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
662 {
663 	unsigned long gaddr, vmaddr, size;
664 	struct vm_area_struct *vma;
665 
666 	down_read(&gmap->mm->mmap_sem);
667 	for (gaddr = from; gaddr < to;
668 	     gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
669 		/* Find the vm address for the guest address */
670 		vmaddr = (unsigned long)
671 			radix_tree_lookup(&gmap->guest_to_host,
672 					  gaddr >> PMD_SHIFT);
673 		if (!vmaddr)
674 			continue;
675 		vmaddr |= gaddr & ~PMD_MASK;
676 		/* Find vma in the parent mm */
677 		vma = find_vma(gmap->mm, vmaddr);
678 		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
679 		zap_page_range(vma, vmaddr, size, NULL);
680 	}
681 	up_read(&gmap->mm->mmap_sem);
682 }
683 EXPORT_SYMBOL_GPL(gmap_discard);
684 
685 static LIST_HEAD(gmap_notifier_list);
686 static DEFINE_SPINLOCK(gmap_notifier_lock);
687 
688 /**
689  * gmap_register_ipte_notifier - register a pte invalidation callback
690  * @nb: pointer to the gmap notifier block
691  */
692 void gmap_register_ipte_notifier(struct gmap_notifier *nb)
693 {
694 	spin_lock(&gmap_notifier_lock);
695 	list_add(&nb->list, &gmap_notifier_list);
696 	spin_unlock(&gmap_notifier_lock);
697 }
698 EXPORT_SYMBOL_GPL(gmap_register_ipte_notifier);
699 
700 /**
701  * gmap_unregister_ipte_notifier - remove a pte invalidation callback
702  * @nb: pointer to the gmap notifier block
703  */
704 void gmap_unregister_ipte_notifier(struct gmap_notifier *nb)
705 {
706 	spin_lock(&gmap_notifier_lock);
707 	list_del_init(&nb->list);
708 	spin_unlock(&gmap_notifier_lock);
709 }
710 EXPORT_SYMBOL_GPL(gmap_unregister_ipte_notifier);
711 
712 /**
713  * gmap_ipte_notify - mark a range of ptes for invalidation notification
714  * @gmap: pointer to guest mapping meta data structure
715  * @gaddr: virtual address in the guest address space
716  * @len: size of area
717  *
718  * Returns 0 if for each page in the given range a gmap mapping exists and
719  * the invalidation notification could be set. If the gmap mapping is missing
720  * for one or more pages -EFAULT is returned. If no memory could be allocated
721  * -ENOMEM is returned. This function establishes missing page table entries.
722  */
723 int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len)
724 {
725 	unsigned long addr;
726 	spinlock_t *ptl;
727 	pte_t *ptep, entry;
728 	pgste_t pgste;
729 	int rc = 0;
730 
731 	if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK))
732 		return -EINVAL;
733 	down_read(&gmap->mm->mmap_sem);
734 	while (len) {
735 		/* Convert gmap address and connect the page tables */
736 		addr = __gmap_translate(gmap, gaddr);
737 		if (IS_ERR_VALUE(addr)) {
738 			rc = addr;
739 			break;
740 		}
741 		/* Get the page mapped */
742 		if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE)) {
743 			rc = -EFAULT;
744 			break;
745 		}
746 		rc = __gmap_link(gmap, gaddr, addr);
747 		if (rc)
748 			break;
749 		/* Walk the process page table, lock and get pte pointer */
750 		ptep = get_locked_pte(gmap->mm, addr, &ptl);
751 		VM_BUG_ON(!ptep);
752 		/* Set notification bit in the pgste of the pte */
753 		entry = *ptep;
754 		if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
755 			pgste = pgste_get_lock(ptep);
756 			pgste_val(pgste) |= PGSTE_IN_BIT;
757 			pgste_set_unlock(ptep, pgste);
758 			gaddr += PAGE_SIZE;
759 			len -= PAGE_SIZE;
760 		}
761 		pte_unmap_unlock(ptep, ptl);
762 	}
763 	up_read(&gmap->mm->mmap_sem);
764 	return rc;
765 }
766 EXPORT_SYMBOL_GPL(gmap_ipte_notify);
767 
768 /**
769  * gmap_do_ipte_notify - call all invalidation callbacks for a specific pte.
770  * @mm: pointer to the process mm_struct
771  * @vmaddr: virtual address in the process address space
772  * @pte: pointer to the page table entry
773  *
774  * This function is assumed to be called with the page table lock held
775  * for the pte to notify.
776  */
777 void gmap_do_ipte_notify(struct mm_struct *mm, unsigned long vmaddr, pte_t *pte)
778 {
779 	unsigned long offset, gaddr;
780 	unsigned long *table;
781 	struct gmap_notifier *nb;
782 	struct gmap *gmap;
783 
784 	offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
785 	offset = offset * (4096 / sizeof(pte_t));
786 	spin_lock(&gmap_notifier_lock);
787 	list_for_each_entry(gmap, &mm->context.gmap_list, list) {
788 		table = radix_tree_lookup(&gmap->host_to_guest,
789 					  vmaddr >> PMD_SHIFT);
790 		if (!table)
791 			continue;
792 		gaddr = __gmap_segment_gaddr(table) + offset;
793 		list_for_each_entry(nb, &gmap_notifier_list, list)
794 			nb->notifier_call(gmap, gaddr);
795 	}
796 	spin_unlock(&gmap_notifier_lock);
797 }
798 EXPORT_SYMBOL_GPL(gmap_do_ipte_notify);
799 
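/*
 * Page tables with pgstes occupy a full 4K page and keep page->_mapcount
 * at zero; normal page tables are allocated as fragments of a 4K page,
 * with the fragment state tracked in the low bits of page->_mapcount.
 */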
800 static inline int page_table_with_pgste(struct page *page)
801 {
802 	return atomic_read(&page->_mapcount) == 0;
803 }
804 
805 static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm)
806 {
807 	struct page *page;
808 	unsigned long *table;
809 
810 	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
811 	if (!page)
812 		return NULL;
813 	if (!pgtable_page_ctor(page)) {
814 		__free_page(page);
815 		return NULL;
816 	}
817 	atomic_set(&page->_mapcount, 0);
818 	table = (unsigned long *) page_to_phys(page);
819 	clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
820 	clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
821 	return table;
822 }
823 
824 static inline void page_table_free_pgste(unsigned long *table)
825 {
826 	struct page *page;
827 
828 	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
829 	pgtable_page_dtor(page);
830 	atomic_set(&page->_mapcount, -1);
831 	__free_page(page);
832 }
833 
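/**
 * set_guest_storage_key - set the guest view of the storage key of a page
 * @mm: pointer to the process mm_struct
 * @addr: virtual address in the process address space
 * @key: new storage key value (ACC, FP, reference and change bits)
 * @nq: non-quiescing flag for setting the real storage key
 *
 * Stores the key bits in the pgste and, if the page is present, in the
 * real storage key as well. Returns 0 on success or -EFAULT if the page
 * cannot be mapped in.
 */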
834 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
835 			  unsigned long key, bool nq)
836 {
837 	spinlock_t *ptl;
838 	pgste_t old, new;
839 	pte_t *ptep;
840 
841 	down_read(&mm->mmap_sem);
842 retry:
843 	ptep = get_locked_pte(mm, addr, &ptl);
844 	if (unlikely(!ptep)) {
845 		up_read(&mm->mmap_sem);
846 		return -EFAULT;
847 	}
848 	if (!(pte_val(*ptep) & _PAGE_INVALID) &&
849 	     (pte_val(*ptep) & _PAGE_PROTECT)) {
850 		pte_unmap_unlock(ptep, ptl);
851 		if (fixup_user_fault(current, mm, addr, FAULT_FLAG_WRITE)) {
852 			up_read(&mm->mmap_sem);
853 			return -EFAULT;
854 		}
855 		goto retry;
856 	}
857 
858 	new = old = pgste_get_lock(ptep);
859 	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
860 			    PGSTE_ACC_BITS | PGSTE_FP_BIT);
861 	pgste_val(new) |= (key & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
862 	pgste_val(new) |= (key & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
863 	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
864 		unsigned long address, bits, skey;
865 
866 		address = pte_val(*ptep) & PAGE_MASK;
867 		skey = (unsigned long) page_get_storage_key(address);
868 		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
869 		skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
870 		/* Set storage key ACC and FP */
871 		page_set_storage_key(address, skey, !nq);
872 		/* Merge host changed & referenced into pgste  */
873 		pgste_val(new) |= bits << 52;
874 	}
875 	/* changing the guest storage key is considered a change of the page */
876 	if ((pgste_val(new) ^ pgste_val(old)) &
877 	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
878 		pgste_val(new) |= PGSTE_UC_BIT;
879 
880 	pgste_set_unlock(ptep, new);
881 	pte_unmap_unlock(ptep, ptl);
882 	up_read(&mm->mmap_sem);
883 	return 0;
884 }
885 EXPORT_SYMBOL(set_guest_storage_key);
886 
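/**
 * get_guest_storage_key - read the guest view of the storage key of a page
 * @mm: pointer to the process mm_struct
 * @addr: virtual address in the process address space
 *
 * For invalid ptes the key is taken from the pgste, otherwise from the
 * real storage key merged with the guest reference and change bits kept
 * in the pgste. Returns the key or -EFAULT if no pte can be found.
 */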
887 unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr)
888 {
889 	spinlock_t *ptl;
890 	pgste_t pgste;
891 	pte_t *ptep;
892 	uint64_t physaddr;
893 	unsigned long key = 0;
894 
895 	down_read(&mm->mmap_sem);
896 	ptep = get_locked_pte(mm, addr, &ptl);
897 	if (unlikely(!ptep)) {
898 		up_read(&mm->mmap_sem);
899 		return -EFAULT;
900 	}
901 	pgste = pgste_get_lock(ptep);
902 
903 	if (pte_val(*ptep) & _PAGE_INVALID) {
904 		key |= (pgste_val(pgste) & PGSTE_ACC_BITS) >> 56;
905 		key |= (pgste_val(pgste) & PGSTE_FP_BIT) >> 56;
906 		key |= (pgste_val(pgste) & PGSTE_GR_BIT) >> 48;
907 		key |= (pgste_val(pgste) & PGSTE_GC_BIT) >> 48;
908 	} else {
909 		physaddr = pte_val(*ptep) & PAGE_MASK;
910 		key = page_get_storage_key(physaddr);
911 
912 		/* Reflect guest's logical view, not physical */
913 		if (pgste_val(pgste) & PGSTE_GR_BIT)
914 			key |= _PAGE_REFERENCED;
915 		if (pgste_val(pgste) & PGSTE_GC_BIT)
916 			key |= _PAGE_CHANGED;
917 	}
918 
919 	pgste_set_unlock(ptep, pgste);
920 	pte_unmap_unlock(ptep, ptl);
921 	up_read(&mm->mmap_sem);
922 	return key;
923 }
924 EXPORT_SYMBOL(get_guest_storage_key);
925 
926 static int page_table_allocate_pgste_min = 0;
927 static int page_table_allocate_pgste_max = 1;
928 int page_table_allocate_pgste = 0;
929 EXPORT_SYMBOL(page_table_allocate_pgste);
930 
931 static struct ctl_table page_table_sysctl[] = {
932 	{
933 		.procname	= "allocate_pgste",
934 		.data		= &page_table_allocate_pgste,
935 		.maxlen		= sizeof(int),
936 		.mode		= S_IRUGO | S_IWUSR,
937 		.proc_handler	= proc_dointvec_minmax,
938 		.extra1		= &page_table_allocate_pgste_min,
939 		.extra2		= &page_table_allocate_pgste_max,
940 	},
941 	{ }
942 };
943 
944 static struct ctl_table page_table_sysctl_dir[] = {
945 	{
946 		.procname	= "vm",
947 		.maxlen		= 0,
948 		.mode		= 0555,
949 		.child		= page_table_sysctl,
950 	},
951 	{ }
952 };
953 
954 static int __init page_table_register_sysctl(void)
955 {
956 	return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
957 }
958 __initcall(page_table_register_sysctl);
959 
960 #else /* CONFIG_PGSTE */
961 
962 static inline int page_table_with_pgste(struct page *page)
963 {
964 	return 0;
965 }
966 
967 static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm)
968 {
969 	return NULL;
970 }
971 
972 static inline void page_table_free_pgste(unsigned long *table)
973 {
974 }
975 
976 static inline void gmap_unlink(struct mm_struct *mm, unsigned long *table,
977 			unsigned long vmaddr)
978 {
979 }
980 
981 #endif /* CONFIG_PGSTE */
982 
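/*
 * Atomically xor @bits into *@v and return the new value; used to update
 * the page table fragment bits kept in page->_mapcount.
 */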
983 static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
984 {
985 	unsigned int old, new;
986 
987 	do {
988 		old = atomic_read(v);
989 		new = old ^ bits;
990 	} while (atomic_cmpxchg(v, old, new) != old);
991 	return new;
992 }
993 
994 /*
995  * page table entry allocation/free routines.
996  */
997 unsigned long *page_table_alloc(struct mm_struct *mm)
998 {
999 	unsigned long *uninitialized_var(table);
1000 	struct page *uninitialized_var(page);
1001 	unsigned int mask, bit;
1002 
1003 	if (mm_alloc_pgste(mm))
1004 		return page_table_alloc_pgste(mm);
1005 	/* Allocate fragments of a 4K page as 1K/2K page table */
1006 	spin_lock_bh(&mm->context.list_lock);
1007 	mask = FRAG_MASK;
1008 	if (!list_empty(&mm->context.pgtable_list)) {
1009 		page = list_first_entry(&mm->context.pgtable_list,
1010 					struct page, lru);
1011 		table = (unsigned long *) page_to_phys(page);
1012 		mask = atomic_read(&page->_mapcount);
1013 		mask = mask | (mask >> 4);
1014 	}
1015 	if ((mask & FRAG_MASK) == FRAG_MASK) {
1016 		spin_unlock_bh(&mm->context.list_lock);
1017 		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
1018 		if (!page)
1019 			return NULL;
1020 		if (!pgtable_page_ctor(page)) {
1021 			__free_page(page);
1022 			return NULL;
1023 		}
1024 		atomic_set(&page->_mapcount, 1);
1025 		table = (unsigned long *) page_to_phys(page);
1026 		clear_table(table, _PAGE_INVALID, PAGE_SIZE);
1027 		spin_lock_bh(&mm->context.list_lock);
1028 		list_add(&page->lru, &mm->context.pgtable_list);
1029 	} else {
1030 		for (bit = 1; mask & bit; bit <<= 1)
1031 			table += PTRS_PER_PTE;
1032 		mask = atomic_xor_bits(&page->_mapcount, bit);
1033 		if ((mask & FRAG_MASK) == FRAG_MASK)
1034 			list_del(&page->lru);
1035 	}
1036 	spin_unlock_bh(&mm->context.list_lock);
1037 	return table;
1038 }
1039 
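/*
 * Immediately free a page table allocated with page_table_alloc(). Page
 * tables with pgstes are freed as a whole page; for fragments the 4K page
 * is only released once all of its fragment bits have been cleared.
 */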
1040 void page_table_free(struct mm_struct *mm, unsigned long *table)
1041 {
1042 	struct page *page;
1043 	unsigned int bit, mask;
1044 
1045 	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
1046 	if (page_table_with_pgste(page))
1047 		return page_table_free_pgste(table);
1048 	/* Free 1K/2K page table fragment of a 4K page */
1049 	bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
1050 	spin_lock_bh(&mm->context.list_lock);
1051 	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
1052 		list_del(&page->lru);
1053 	mask = atomic_xor_bits(&page->_mapcount, bit);
1054 	if (mask & FRAG_MASK)
1055 		list_add(&page->lru, &mm->context.pgtable_list);
1056 	spin_unlock_bh(&mm->context.list_lock);
1057 	if (mask == 0) {
1058 		pgtable_page_dtor(page);
1059 		atomic_set(&page->_mapcount, -1);
1060 		__free_page(page);
1061 	}
1062 }
1063 
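/*
 * RCU-delayed freeing of page tables: page_table_free_rcu() encodes the
 * fragment bits (or FRAG_MASK for pgste tables) into the low bits of the
 * table pointer and queues it in the mmu_gather batch; after the grace
 * period __tlb_remove_table() decodes the bits again and the helper below
 * releases the memory.
 */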
1064 static void __page_table_free_rcu(void *table, unsigned bit)
1065 {
1066 	struct page *page;
1067 
1068 	if (bit == FRAG_MASK)
1069 		return page_table_free_pgste(table);
1070 	/* Free 1K/2K page table fragment of a 4K page */
1071 	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
1072 	if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
1073 		pgtable_page_dtor(page);
1074 		atomic_set(&page->_mapcount, -1);
1075 		__free_page(page);
1076 	}
1077 }
1078 
1079 void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
1080 			 unsigned long vmaddr)
1081 {
1082 	struct mm_struct *mm;
1083 	struct page *page;
1084 	unsigned int bit, mask;
1085 
1086 	mm = tlb->mm;
1087 	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
1088 	if (page_table_with_pgste(page)) {
1089 		gmap_unlink(mm, table, vmaddr);
1090 		table = (unsigned long *) (__pa(table) | FRAG_MASK);
1091 		tlb_remove_table(tlb, table);
1092 		return;
1093 	}
1094 	bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
1095 	spin_lock_bh(&mm->context.list_lock);
1096 	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
1097 		list_del(&page->lru);
1098 	mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
1099 	if (mask & FRAG_MASK)
1100 		list_add_tail(&page->lru, &mm->context.pgtable_list);
1101 	spin_unlock_bh(&mm->context.list_lock);
1102 	table = (unsigned long *) (__pa(table) | (bit << 4));
1103 	tlb_remove_table(tlb, table);
1104 }
1105 
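/*
 * Decode the type bits from an encoded table pointer and free either a
 * page table (fragment or pgste) or, if no type bits are set, a crst table.
 */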
1106 static void __tlb_remove_table(void *_table)
1107 {
1108 	const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
1109 	void *table = (void *)((unsigned long) _table & ~mask);
1110 	unsigned type = (unsigned long) _table & mask;
1111 
1112 	if (type)
1113 		__page_table_free_rcu(table, type);
1114 	else
1115 		free_pages((unsigned long) table, ALLOC_ORDER);
1116 }
1117 
1118 static void tlb_remove_table_smp_sync(void *arg)
1119 {
1120 	/* Simply deliver the interrupt */
1121 }
1122 
1123 static void tlb_remove_table_one(void *table)
1124 {
1125 	/*
1126 	 * This isn't an RCU grace period and hence the page-tables cannot be
1127 	 * assumed to be actually RCU-freed.
1128 	 *
1129 	 * It is however sufficient for software page-table walkers that rely
1130 	 * on IRQ disabling. See the comment near struct mmu_table_batch.
1131 	 */
1132 	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
1133 	__tlb_remove_table(table);
1134 }
1135 
1136 static void tlb_remove_table_rcu(struct rcu_head *head)
1137 {
1138 	struct mmu_table_batch *batch;
1139 	int i;
1140 
1141 	batch = container_of(head, struct mmu_table_batch, rcu);
1142 
1143 	for (i = 0; i < batch->nr; i++)
1144 		__tlb_remove_table(batch->tables[i]);
1145 
1146 	free_page((unsigned long)batch);
1147 }
1148 
1149 void tlb_table_flush(struct mmu_gather *tlb)
1150 {
1151 	struct mmu_table_batch **batch = &tlb->batch;
1152 
1153 	if (*batch) {
1154 		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
1155 		*batch = NULL;
1156 	}
1157 }
1158 
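/*
 * Queue a page table for RCU freeing. If no batch page can be allocated
 * the table is freed synchronously after an IPI to all CPUs, see
 * tlb_remove_table_one() above.
 */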
1159 void tlb_remove_table(struct mmu_gather *tlb, void *table)
1160 {
1161 	struct mmu_table_batch **batch = &tlb->batch;
1162 
1163 	tlb->mm->context.flush_mm = 1;
1164 	if (*batch == NULL) {
1165 		*batch = (struct mmu_table_batch *)
1166 			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
1167 		if (*batch == NULL) {
1168 			__tlb_flush_mm_lazy(tlb->mm);
1169 			tlb_remove_table_one(table);
1170 			return;
1171 		}
1172 		(*batch)->nr = 0;
1173 	}
1174 	(*batch)->tables[(*batch)->nr++] = table;
1175 	if ((*batch)->nr == MAX_TABLE_BATCH)
1176 		tlb_flush_mmu(tlb);
1177 }
1178 
1179 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
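/*
 * Split all THP mappings of an mm with FOLL_SPLIT and disable THP for all
 * future mappings; called from s390_enable_sie(), which needs regular 4K
 * page tables with pgstes.
 */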
1180 static inline void thp_split_vma(struct vm_area_struct *vma)
1181 {
1182 	unsigned long addr;
1183 
1184 	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE)
1185 		follow_page(vma, addr, FOLL_SPLIT);
1186 }
1187 
1188 static inline void thp_split_mm(struct mm_struct *mm)
1189 {
1190 	struct vm_area_struct *vma;
1191 
1192 	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
1193 		thp_split_vma(vma);
1194 		vma->vm_flags &= ~VM_HUGEPAGE;
1195 		vma->vm_flags |= VM_NOHUGEPAGE;
1196 	}
1197 	mm->def_flags |= VM_NOHUGEPAGE;
1198 }
1199 #else
1200 static inline void thp_split_mm(struct mm_struct *mm)
1201 {
1202 }
1203 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1204 
1205 /*
1206  * Switch on pgstes for the current userspace process (for KVM)
1207  */
1208 int s390_enable_sie(void)
1209 {
1210 	struct mm_struct *mm = current->mm;
1211 
1212 	/* Do we have pgstes? If yes, we are done */
1213 	if (mm_has_pgste(mm))
1214 		return 0;
1215 	/* Fail if the page tables are 2K */
1216 	if (!mm_alloc_pgste(mm))
1217 		return -EINVAL;
1218 	down_write(&mm->mmap_sem);
1219 	mm->context.has_pgste = 1;
1220 	/* split thp mappings and disable thp for future mappings */
1221 	thp_split_mm(mm);
1222 	up_write(&mm->mmap_sem);
1223 	return 0;
1224 }
1225 EXPORT_SYMBOL_GPL(s390_enable_sie);
1226 
1227 /*
1228  * Enable storage key handling from now on and initialize the storage
1229  * keys with the default key.
1230  */
1231 static int __s390_enable_skey(pte_t *pte, unsigned long addr,
1232 			      unsigned long next, struct mm_walk *walk)
1233 {
1234 	unsigned long ptev;
1235 	pgste_t pgste;
1236 
1237 	pgste = pgste_get_lock(pte);
1238 	/*
1239 	 * Remove all zero page mappings; once a policy that forbids
1240 	 * zero page mappings is in place, subsequent faults on these
1241 	 * pages will be resolved with fresh anonymous pages.
1242 	 */
1243 	if (is_zero_pfn(pte_pfn(*pte))) {
1244 		ptep_flush_direct(walk->mm, addr, pte);
1245 		pte_val(*pte) = _PAGE_INVALID;
1246 	}
1247 	/* Clear storage key */
1248 	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
1249 			      PGSTE_GR_BIT | PGSTE_GC_BIT);
1250 	ptev = pte_val(*pte);
1251 	if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
1252 		page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
1253 	pgste_set_unlock(pte, pgste);
1254 	return 0;
1255 }
1256 
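/*
 * Switch the mm over to storage key mode: unmerge all KSM pages, then
 * walk the whole address space with __s390_enable_skey() above to drop
 * zero page mappings and reset the storage keys to their default.
 */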
1257 int s390_enable_skey(void)
1258 {
1259 	struct mm_walk walk = { .pte_entry = __s390_enable_skey };
1260 	struct mm_struct *mm = current->mm;
1261 	struct vm_area_struct *vma;
1262 	int rc = 0;
1263 
1264 	down_write(&mm->mmap_sem);
1265 	if (mm_use_skey(mm))
1266 		goto out_up;
1267 
1268 	mm->context.use_skey = 1;
1269 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
1270 		if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
1271 				MADV_UNMERGEABLE, &vma->vm_flags)) {
1272 			mm->context.use_skey = 0;
1273 			rc = -ENOMEM;
1274 			goto out_up;
1275 		}
1276 	}
1277 	mm->def_flags &= ~VM_MERGEABLE;
1278 
1279 	walk.mm = mm;
1280 	walk_page_range(0, TASK_SIZE, &walk);
1281 
1282 out_up:
1283 	up_write(&mm->mmap_sem);
1284 	return rc;
1285 }
1286 EXPORT_SYMBOL_GPL(s390_enable_skey);
1287 
1288 /*
1289  * Reset CMMA state, make all pages stable again.
1290  */
1291 static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
1292 			     unsigned long next, struct mm_walk *walk)
1293 {
1294 	pgste_t pgste;
1295 
1296 	pgste = pgste_get_lock(pte);
1297 	pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
1298 	pgste_set_unlock(pte, pgste);
1299 	return 0;
1300 }
1301 
1302 void s390_reset_cmma(struct mm_struct *mm)
1303 {
1304 	struct mm_walk walk = { .pte_entry = __s390_reset_cmma };
1305 
1306 	down_write(&mm->mmap_sem);
1307 	walk.mm = mm;
1308 	walk_page_range(0, TASK_SIZE, &walk);
1309 	up_write(&mm->mmap_sem);
1310 }
1311 EXPORT_SYMBOL_GPL(s390_reset_cmma);
1312 
1313 /*
1314  * Test and clear the dirty state of a guest page
1315  */
1316 bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *gmap)
1317 {
1318 	pte_t *pte;
1319 	spinlock_t *ptl;
1320 	bool dirty = false;
1321 
1322 	pte = get_locked_pte(gmap->mm, address, &ptl);
1323 	if (unlikely(!pte))
1324 		return false;
1325 
1326 	if (ptep_test_and_clear_user_dirty(gmap->mm, address, pte))
1327 		dirty = true;
1328 
1329 	spin_unlock(ptl);
1330 	return dirty;
1331 }
1332 EXPORT_SYMBOL_GPL(gmap_test_and_clear_dirty);
1333 
1334 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1335 int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address,
1336 			   pmd_t *pmdp)
1337 {
1338 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1339 	/* No need to flush the TLB; on s390 the reference bits are
1340 	 * kept in the storage key and never in the TLB */
1341 	return pmdp_test_and_clear_young(vma, address, pmdp);
1342 }
1343 
1344 int pmdp_set_access_flags(struct vm_area_struct *vma,
1345 			  unsigned long address, pmd_t *pmdp,
1346 			  pmd_t entry, int dirty)
1347 {
1348 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1349 
1350 	entry = pmd_mkyoung(entry);
1351 	if (dirty)
1352 		entry = pmd_mkdirty(entry);
1353 	if (pmd_same(*pmdp, entry))
1354 		return 0;
1355 	pmdp_invalidate(vma, address, pmdp);
1356 	set_pmd_at(vma->vm_mm, address, pmdp, entry);
1357 	return 1;
1358 }
1359 
1360 static void pmdp_splitting_flush_sync(void *arg)
1361 {
1362 	/* Simply deliver the interrupt */
1363 }
1364 
1365 void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
1366 			  pmd_t *pmdp)
1367 {
1368 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1369 	if (!test_and_set_bit(_SEGMENT_ENTRY_SPLIT_BIT,
1370 			      (unsigned long *) pmdp)) {
1371 		/* need to serialize against gup-fast (IRQ disabled) */
1372 		smp_call_function(pmdp_splitting_flush_sync, NULL, 1);
1373 	}
1374 }
1375 
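/*
 * s390 implementation of the generic pgtable deposit/withdraw hooks used
 * by THP: pre-allocated page tables are kept on a list anchored at
 * pmd_huge_pte() and handed back by the withdraw function below.
 */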
1376 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
1377 				pgtable_t pgtable)
1378 {
1379 	struct list_head *lh = (struct list_head *) pgtable;
1380 
1381 	assert_spin_locked(pmd_lockptr(mm, pmdp));
1382 
1383 	/* FIFO */
1384 	if (!pmd_huge_pte(mm, pmdp))
1385 		INIT_LIST_HEAD(lh);
1386 	else
1387 		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
1388 	pmd_huge_pte(mm, pmdp) = pgtable;
1389 }
1390 
1391 pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
1392 {
1393 	struct list_head *lh;
1394 	pgtable_t pgtable;
1395 	pte_t *ptep;
1396 
1397 	assert_spin_locked(pmd_lockptr(mm, pmdp));
1398 
1399 	/* FIFO */
1400 	pgtable = pmd_huge_pte(mm, pmdp);
1401 	lh = (struct list_head *) pgtable;
1402 	if (list_empty(lh))
1403 		pmd_huge_pte(mm, pmdp) = NULL;
1404 	else {
1405 		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
1406 		list_del(lh);
1407 	}
1408 	ptep = (pte_t *) pgtable;
1409 	pte_val(*ptep) = _PAGE_INVALID;
1410 	ptep++;
1411 	pte_val(*ptep) = _PAGE_INVALID;
1412 	return pgtable;
1413 }
1414 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1415