/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

/*
 * The Linux memory management assumes a three-level page table setup. For
 * s390 31 bit we "fold" the mid level into the top-level page table, so
 * that we physically have the same two-level page table as the s390 mmu
 * expects in 31 bit mode. For s390 64 bit we use three of the five levels
 * the hardware provides (region first and region second tables are not
 * used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)     do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
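
/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * empty_zero_page is the base of a block of zero pages and
 * zero_page_mask picks the one matching the cache color of @vaddr,
 * which is exactly what the ZERO_PAGE() macro above computes.
 */
static inline struct page *example_colored_zero_page(unsigned long vaddr)
{
	return ZERO_PAGE(vaddr);	/* zero page for this cache color */
}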

/* TODO: s390 cannot support io_remap_pfn_range... */
#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#define PMD_SHIFT	20
#define PUD_SHIFT	31
#define PGDIR_SHIFT	42

#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * for S390 segment-table entries are combined to one PGD
 * that leads to 1024 pte per pgd
 */
#define PTRS_PER_PTE	256
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#define PTRS_PER_PGD	2048

#define FIRST_USER_ADDRESS  0UL

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

#ifndef __ASSEMBLY__
/*
 * The vmalloc and module area will always be on the topmost area of the kernel
 * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc and modules.
 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
 * modules will reside. That makes sure that inter module branches always
 * happen without trampolines and in addition the placement within a 2GB frame
 * is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)

static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}

/*
 * A 31 bit page table entry of S390 has the following format:
 *  |   PFRA          |    |  OS  |
 * 0                   0IP0
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 31 bit segment table entry of S390 has the following format:
 *  |   P-table origin      |  |PTL
 * 0                         IC
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * PTL Page-Table-Length:    Page-table length ((PTL+1)*16 entries -> up to 256)
 *
 * The 31 bit segment table origin of S390 has the following format:
 *
 *  |S-table origin   |     | STL |
 * X                   **GPS
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:       Segment is not private (PoP 3-30)
 * S Storage-Alteration:
 * STL Segment-Table-Length:  Segment-table length ((STL+1)*16 entries -> up to 2048)
 *
 * A 64 bit page table entry of S390 has the following format:
 * |			 PFRA			      |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit: Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

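/*
 * Illustrative sketch of the storage key layout described above. The
 * helper and its name are made up for illustration; the real accessors
 * are page_get_storage_key()/page_set_storage_key() from asm/page.h.
 * Bit numbers follow the big-endian convention used in the comment, so
 * ACC occupies the four most significant bits of the key byte.
 */
static inline void example_decode_storage_key(unsigned char skey,
					      unsigned int *acc, int *fp,
					      int *ref, int *chg)
{
	*acc = (skey >> 4) & 0xf;	/* ACC: bits 0-3, access key */
	*fp  = (skey >> 3) & 1;		/* F: fetch protection bit */
	*ref = (skey >> 2) & 1;		/* R: referenced bit */
	*chg = (skey >> 1) & 1;		/* C: changed bit */
}
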
/* Hardware bits in the page table entry */
#define _PAGE_PROTECT	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_TYPE	0x002		/* SW pte type bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */
#define __HAVE_ARCH_PTE_SPECIAL

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG)

/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the page
 * table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte:
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR...wrdytp
 * empty			.10...000000
 * swap				.10...xxxx10
 * file				.11...xxxxx0
 * prot-none, clean, old	.11...000001
 * prot-none, clean, young	.11...000101
 * prot-none, dirty, old	.10...001001
 * prot-none, dirty, young	.10...001101
 * read-only, clean, old	.11...010001
 * read-only, clean, young	.01...010101
 * read-only, dirty, old	.11...011001
 * read-only, dirty, young	.01...011101
 * read-write, clean, old	.11...110001
 * read-write, clean, young	.01...110101
 * read-write, dirty, old	.10...111001
 * read-write, dirty, young	.00...111101
 *
 * pte_present is true for the bit pattern .xx...xxxxx1, (pte & 0x001) == 0x001
 * pte_none    is true for the bit pattern .10...xxxx00, (pte & 0x603) == 0x400
 * pte_swap    is true for the bit pattern .10...xxxx10, (pte & 0x603) == 0x402
 */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL/* segment table origin		    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit	    */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_LARGE	0x400	/* RTTE-format control, large page  */
#define _REGION3_ENTRY_RO	0x200	/* page protection bit		    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS	0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address	    */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* segment table origin		    */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_SPLIT	0x0800	/* THP splitting bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_READ	0x0002	/* SW segment read bit */
#define _SEGMENT_ENTRY_WRITE	0x0001	/* SW segment write bit */

/*
 * Segment table entry encoding (R = read-only, I = invalid, y = young bit):
 *				dy..R...I...wr
 * prot-none, clean, old	00..1...1...00
 * prot-none, clean, young	01..1...1...00
 * prot-none, dirty, old	10..1...1...00
 * prot-none, dirty, young	11..1...1...00
 * read-only, clean, old	00..1...1...01
 * read-only, clean, young	01..1...0...01
 * read-only, dirty, old	10..1...1...01
 * read-only, dirty, young	11..1...0...01
 * read-write, clean, old	00..1...1...11
 * read-write, clean, young	01..1...0...11
 * read-write, dirty, old	10..0...1...11
 * read-write, dirty, young	11..0...0...11
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 */
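
/*
 * Example reading of the table above: "read-write, dirty, young" has
 * both the protection and the invalid bit clear, which is why
 * pmd_mkdirty() and pmd_mkyoung() further down clear
 * _SEGMENT_ENTRY_PROTECT resp. _SEGMENT_ENTRY_INVALID only if the
 * write resp. read software bit is set as well.
 */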

#define _SEGMENT_ENTRY_SPLIT_BIT 11	/* THP splitting bit number */

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO		0x0000000080000000UL
#define _PGSTE_GPS_USAGE_MASK	0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE 0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED 0x0000000001000000UL

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID)
#define PAGE_READ	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_WRITE	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT)

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
         /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_READ
#define __P010	PAGE_READ
#define __P011	PAGE_READ
#define __P100	PAGE_READ
#define __P101	PAGE_READ
#define __P110	PAGE_READ
#define __P111	PAGE_READ

#define __S000	PAGE_NONE
#define __S001	PAGE_READ
#define __S010	PAGE_WRITE
#define __S011	PAGE_WRITE
#define __S100	PAGE_READ
#define __S101	PAGE_READ
#define __S110	PAGE_WRITE
#define __S111	PAGE_WRITE
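
/*
 * Example (an illustration, not upstream text): a private writable
 * mapping (__P011) resolves to PAGE_READ because private pages start
 * out read-only for copy-on-write; only the shared variants __S01x
 * and __S11x map straight to PAGE_WRITE.
 */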

/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_READ	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_WRITE	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

/*
 * In the case that a guest uses storage keys,
 * faults should no longer be backed by zero pages
 */
#define mm_forbids_zeropage mm_use_skey
static inline int mm_use_skey(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.use_skey)
		return 1;
#endif
	return 0;
}

/*
 * pgd/pmd/pte query functions
 */
static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_INVALID;
}

static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}

static inline int pmd_bad(pmd_t pmd)
{
	if (pmd_large(pmd))
		return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	int dirty = 1;
	if (pmd_large(pmd))
		dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
	return dirty;
}

static inline int pmd_young(pmd_t pmd)
{
	int young = 1;
	if (pmd_large(pmd))
		young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
	return young;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x603) == 0x402 */
	return (pte_val(pte) & (_PAGE_INVALID | _PAGE_PROTECT |
				_PAGE_TYPE | _PAGE_PRESENT))
		== (_PAGE_INVALID | _PAGE_TYPE);
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	preempt_disable();
	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear PCL bit in old */
		"	oihh	%1,0x0080\n"	/* set PCL bit in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
#endif
	return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear PCL bit */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
		: "cc", "memory");
	preempt_enable();
#endif
}
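
/*
 * Illustrative sketch of the locking protocol above (hypothetical
 * helper, not part of this header): updates to a pte that has an
 * attached page status table entry are bracketed by pgste_get_lock()
 * and pgste_set_unlock(), exactly as set_pte_at() below does it.
 */
static inline void example_locked_pte_update(pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	pgste = pgste_get_lock(ptep);	/* set PCL bit, disable preemption */
	*ptep = entry;			/* modify the pte under the lock */
	pgste_set_unlock(ptep, pgste);	/* store pgste, clear PCL bit */
}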

static inline pgste_t pgste_get(pte_t *ptep)
{
	unsigned long pgste = 0;
#ifdef CONFIG_PGSTE
	pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
#endif
	return __pgste(pgste);
}

static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
#endif
}

static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste,
				       struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits, skey;

	if (!mm_use_skey(mm) || pte_val(*ptep) & _PAGE_INVALID)
		return pgste;
	address = pte_val(*ptep) & PAGE_MASK;
	skey = (unsigned long) page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
#endif
	return pgste;
}

static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
				 struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long nkey;

	if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID)
		return;
	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
	address = pte_val(entry) & PAGE_MASK;
	/*
	 * Set page access key and fetch protection bit from pgste.
	 * The guest C/R information is still in the PGSTE, set real
	 * key C/R to 0.
	 */
	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	page_set_storage_key(address, nkey, 0);
#endif
}

static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
{
	if ((pte_val(entry) & _PAGE_PRESENT) &&
	    (pte_val(entry) & _PAGE_WRITE) &&
	    !(pte_val(entry) & _PAGE_INVALID)) {
		if (!MACHINE_HAS_ESOP) {
			/*
			 * Without enhanced suppression-on-protection force
			 * the dirty bit on for all writable ptes.
			 */
			pte_val(entry) |= _PAGE_DIRTY;
			pte_val(entry) &= ~_PAGE_PROTECT;
		}
		if (!(pte_val(entry) & _PAGE_PROTECT))
			/* This pte allows write access, set user-dirty */
			pgste_val(pgste) |= PGSTE_UC_BIT;
	}
	*ptep = entry;
	return pgste;
}

/**
 * struct gmap - guest address space
 * @crst_list: list of all crst tables used in the guest address space
 * @mm: pointer to the parent mm_struct
 * @guest_to_host: radix tree with guest to host address translation
 * @host_to_guest: radix tree with pointer to segment table entries
 * @guest_table_lock: spinlock to protect all entries in the guest page table
 * @table: pointer to the page directory
 * @asce: address space control element for gmap page table
 * @pfault_enabled: defines if pfaults are applicable for the guest
 */
struct gmap {
	struct list_head list;
	struct list_head crst_list;
	struct mm_struct *mm;
	struct radix_tree_root guest_to_host;
	struct radix_tree_root host_to_guest;
	spinlock_t guest_table_lock;
	unsigned long *table;
	unsigned long asce;
	unsigned long asce_end;
	void *private;
	bool pfault_enabled;
};

/**
 * struct gmap_notifier - notify function block for page invalidation
 * @notifier_call: address of callback function
 */
struct gmap_notifier {
	struct list_head list;
	void (*notifier_call)(struct gmap *gmap, unsigned long gaddr);
};

struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit);
void gmap_free(struct gmap *gmap);
void gmap_enable(struct gmap *gmap);
void gmap_disable(struct gmap *gmap);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
unsigned long __gmap_translate(struct gmap *, unsigned long gaddr);
unsigned long gmap_translate(struct gmap *, unsigned long gaddr);
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr);
int gmap_fault(struct gmap *, unsigned long gaddr, unsigned int fault_flags);
void gmap_discard(struct gmap *, unsigned long from, unsigned long to);
void __gmap_zap(struct gmap *, unsigned long gaddr);
bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *);

void gmap_register_ipte_notifier(struct gmap_notifier *);
void gmap_unregister_ipte_notifier(struct gmap_notifier *);
int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len);
void gmap_do_ipte_notify(struct mm_struct *, unsigned long addr, pte_t *);
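
/*
 * Illustrative sketch of the gmap API above (hypothetical function,
 * error handling shortened, the 2 GB limit is made up): create a guest
 * address space, map one host segment at guest address 0 and tear
 * everything down again. Addresses and length must be 1 MB aligned.
 */
static inline int example_gmap_usage(struct mm_struct *mm,
				     unsigned long host_addr)
{
	struct gmap *gmap;
	int rc;

	gmap = gmap_alloc(mm, (1UL << 31) - 1);		/* guest limit */
	if (!gmap)
		return -ENOMEM;
	rc = gmap_map_segment(gmap, host_addr, 0, PMD_SIZE);
	if (!rc)
		rc = gmap_unmap_segment(gmap, 0, PMD_SIZE);
	gmap_free(gmap);
	return rc;
}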

static inline pgste_t pgste_ipte_notify(struct mm_struct *mm,
					unsigned long addr,
					pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	if (pgste_val(pgste) & PGSTE_IN_BIT) {
		pgste_val(pgste) &= ~PGSTE_IN_BIT;
		gmap_do_ipte_notify(mm, addr, ptep);
	}
#endif
	return pgste;
}

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
		pgste_set_key(ptep, pgste, entry, mm);
		pgste = pgste_set_pte(ptep, pgste, entry);
		pgste_set_unlock(ptep, pgste);
	} else {
		*ptep = entry;
	}
}

/*
 * query functions pte_write/pte_dirty/pte_young only work if
807  */
808 static inline int pte_write(pte_t pte)
809 {
810 	return (pte_val(pte) & _PAGE_WRITE) != 0;
811 }
812 
813 static inline int pte_dirty(pte_t pte)
814 {
815 	return (pte_val(pte) & _PAGE_DIRTY) != 0;
816 }
817 
818 static inline int pte_young(pte_t pte)
819 {
820 	return (pte_val(pte) & _PAGE_YOUNG) != 0;
821 }
822 
823 #define __HAVE_ARCH_PTE_UNUSED
824 static inline int pte_unused(pte_t pte)
825 {
826 	return pte_val(pte) & _PAGE_UNUSED;
827 }
828 
829 /*
830  * pgd/pmd/pte modification functions
831  */
832 
833 static inline void pgd_clear(pgd_t *pgd)
834 {
835 	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
836 		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
837 }
838 
839 static inline void pud_clear(pud_t *pud)
840 {
841 	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
842 		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
843 }
844 
845 static inline void pmd_clear(pmd_t *pmdp)
846 {
847 	pmd_val(*pmdp) = _SEGMENT_ENTRY_INVALID;
848 }
849 
850 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
851 {
852 	pte_val(*ptep) = _PAGE_INVALID;
853 }
854 
855 /*
856  * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_READ and PAGE_WRITE has the
	 * invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte_val(pte) &= ~_PAGE_INVALID;
	/*
	 * newprot for PAGE_READ and PAGE_WRITE has the page protection
	 * bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_YOUNG;
	pte_val(pte) |= _PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_YOUNG;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) &= ~_PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_LARGE;
	return pte;
}
#endif

static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidation + global TLB flush for the pte */
	asm volatile(
		"	ipte	%2,%3"
		: "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
}

static inline void __ptep_ipte_local(unsigned long address, pte_t *ptep)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidation + local TLB flush for the pte */
	asm volatile(
		"	.insn rrf,0xb2210000,%2,%3,0,1"
		: "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
}

static inline void __ptep_ipte_range(unsigned long address, int nr, pte_t *ptep)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidate a range of ptes + global TLB flush of the ptes */
	do {
		asm volatile(
			"	.insn rrf,0xb2210000,%2,%0,%1,0"
			: "+a" (address), "+a" (nr) : "a" (pto) : "memory");
	} while (nr != 255);
}

static inline void ptep_flush_direct(struct mm_struct *mm,
				     unsigned long address, pte_t *ptep)
{
	int active, count;

	if (pte_val(*ptep) & _PAGE_INVALID)
		return;
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__ptep_ipte_local(address, ptep);
	else
		__ptep_ipte(address, ptep);
	atomic_sub(0x10000, &mm->context.attach_count);
}

static inline void ptep_flush_lazy(struct mm_struct *mm,
				   unsigned long address, pte_t *ptep)
{
	int active, count;

	if (pte_val(*ptep) & _PAGE_INVALID)
		return;
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if ((count & 0xffff) <= active) {
		pte_val(*ptep) |= _PAGE_INVALID;
		mm->context.flush_mm = 1;
	} else
		__ptep_ipte(address, ptep);
	atomic_sub(0x10000, &mm->context.attach_count);
}

/*
 * Get (and clear) the user dirty bit for a pte.
 */
static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
						 unsigned long addr,
						 pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;
	int dirty;

	if (!mm_has_pgste(mm))
		return 0;
	pgste = pgste_get_lock(ptep);
	dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
	pgste_val(pgste) &= ~PGSTE_UC_BIT;
	pte = *ptep;
	if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
		pgste = pgste_ipte_notify(mm, addr, ptep, pgste);
		__ptep_ipte(addr, ptep);
		if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
			pte_val(pte) |= _PAGE_PROTECT;
		else
			pte_val(pte) |= _PAGE_INVALID;
		*ptep = pte;
	}
	pgste_set_unlock(ptep, pgste);
	return dirty;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte, oldpte;
	int young;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(vma->vm_mm, addr, ptep, pgste);
	}

	oldpte = pte = *ptep;
	ptep_flush_direct(vma->vm_mm, addr, ptep);
	young = pte_young(pte);
	pte = pte_mkold(pte);

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_update_all(&oldpte, pgste, vma->vm_mm);
		pgste = pgste_set_pte(ptep, pgste, pte);
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = pte;

	return young;
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(mm, address, ptep, pgste);
	}

	pte = *ptep;
	ptep_flush_lazy(mm, address, ptep);
	pte_val(*ptep) = _PAGE_INVALID;

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste, mm);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}
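
/*
 * Illustrative sketch of the common code sequence described above
 * (a simplified stand-in for functions like change_pte_range(), not
 * the actual mm code): step 1 already flushed the TLB on s390, so the
 * flush_tlb_range() in step 3 can be a nop.
 */
static inline void example_change_pte(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep,
				      pgprot_t newprot)
{
	pte_t pte;

	pte = ptep_get_and_clear(mm, addr, ptep);	/* 1) clear + flush */
	pte = pte_modify(pte, newprot);
	set_pte_at(mm, addr, ptep, pte);		/* 2) install new pte */
	/* 3) flush_tlb_range() - a nop on s390 */
}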

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste_ipte_notify(mm, address, ptep, pgste);
	}

	pte = *ptep;
	ptep_flush_lazy(mm, address, ptep);

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste, mm);
		pgste_set(ptep, pgste);
	}
	return pte;
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep, pte_t pte)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get(ptep);
		pgste_set_key(ptep, pgste, pte, mm);
		pgste = pgste_set_pte(ptep, pgste, pte);
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = pte;
}

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
	}

	pte = *ptep;
	ptep_flush_direct(vma->vm_mm, address, ptep);
	pte_val(*ptep) = _PAGE_INVALID;

	if (mm_has_pgste(vma->vm_mm)) {
		if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
		    _PGSTE_GPS_USAGE_UNUSED)
			pte_val(pte) |= _PAGE_UNUSED;
		pgste = pgste_update_all(&pte, pgste, vma->vm_mm);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address,
					    pte_t *ptep, int full)
{
	pgste_t pgste;
	pte_t pte;

	if (!full && mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(mm, address, ptep, pgste);
	}

	pte = *ptep;
	if (!full)
		ptep_flush_lazy(mm, address, ptep);
	pte_val(*ptep) = _PAGE_INVALID;

	if (!full && mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste, mm);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte = *ptep;

	if (pte_write(pte)) {
		if (mm_has_pgste(mm)) {
			pgste = pgste_get_lock(ptep);
			pgste = pgste_ipte_notify(mm, address, ptep, pgste);
		}

		ptep_flush_lazy(mm, address, ptep);
		pte = pte_wrprotect(pte);

		if (mm_has_pgste(mm)) {
			pgste = pgste_set_pte(ptep, pgste, pte);
			pgste_set_unlock(ptep, pgste);
		} else
			*ptep = pte;
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	pgste_t pgste;

	if (pte_same(*ptep, entry))
		return 0;
	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
	}

	ptep_flush_direct(vma->vm_mm, address, ptep);

	if (mm_has_pgste(vma->vm_mm)) {
		pgste_set_key(ptep, pgste, entry, vma->vm_mm);
		pgste = pgste_set_pte(ptep, pgste, entry);
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = entry;
	return 1;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	pud_t *pud = (pud_t *) pgd;
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) pgd_deref(*pgd);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd, address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_READ, or PAGE_WRITE (see __Pxxx / __Sxxx)
	 * Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_READ))
		return pgprot_val(SEGMENT_READ);
	return pgprot_val(SEGMENT_WRITE);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
	if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
		return pmd;
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
			_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
			_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SPLIT;
		pmd_val(pmd) |= massage_pgprot_pmd(newprot);
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
			pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
			pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
		return pmd;
	}
	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;
	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */

static inline void __pmdp_csp(pmd_t *pmdp)
{
	register unsigned long reg2 asm("2") = pmd_val(*pmdp);
	register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
					       _SEGMENT_ENTRY_INVALID;
	register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;

	asm volatile(
		"	csp %1,%3"
		: "=m" (*pmdp)
		: "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
}

static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
	asm volatile(
		"	.insn	rrf,0xb98e0000,%2,%3,0,0"
		: "=m" (*pmdp)
		: "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
		: "cc" );
}

static inline void __pmdp_idte_local(unsigned long address, pmd_t *pmdp)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
	asm volatile(
		"	.insn	rrf,0xb98e0000,%2,%3,0,1"
		: "=m" (*pmdp)
		: "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
		: "cc" );
}

static inline void pmdp_flush_direct(struct mm_struct *mm,
				     unsigned long address, pmd_t *pmdp)
{
	int active, count;

	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
		return;
	if (!MACHINE_HAS_IDTE) {
		__pmdp_csp(pmdp);
		return;
	}
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__pmdp_idte_local(address, pmdp);
	else
		__pmdp_idte(address, pmdp);
	atomic_sub(0x10000, &mm->context.attach_count);
}

static inline void pmdp_flush_lazy(struct mm_struct *mm,
				   unsigned long address, pmd_t *pmdp)
{
	int active, count;

	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
		return;
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if ((count & 0xffff) <= active) {
		pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
		mm->context.flush_mm = 1;
	} else if (MACHINE_HAS_IDTE)
		__pmdp_idte(address, pmdp);
	else
		__pmdp_csp(pmdp);
	atomic_sub(0x10000, &mm->context.attach_count);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

static inline int pmd_trans_splitting(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) &&
		(pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	*pmdp = entry;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd;

	pmd = *pmdp;
	pmdp_flush_direct(vma->vm_mm, address, pmdp);
	*pmdp = pmd_mkold(pmd);
	return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmdp_flush_direct(mm, address, pmdp);
	pmd_clear(pmdp);
	return pmd;
}

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address,
					    pmd_t *pmdp, int full)
{
	pmd_t pmd = *pmdp;

	if (!full)
		pmdp_flush_lazy(mm, address, pmdp);
	pmd_clear(pmdp);
	return pmd;
}

#define __HAVE_ARCH_PMDP_CLEAR_FLUSH
static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pmd_t *pmdp)
{
	return pmdp_get_and_clear(vma->vm_mm, address, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline void pmdp_invalidate(struct vm_area_struct *vma,
				   unsigned long address, pmd_t *pmdp)
{
	pmdp_flush_direct(vma->vm_mm, address, pmdp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd)) {
		pmdp_flush_direct(mm, address, pmdp);
		set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd));
	}
}

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_HPAGE ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * 31 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 0, 20 and 23 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bits 21, 22, 30 and 31 are used to indicate the page type.
 * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402
 * This leaves the bits 1-19 and bits 24-29 to store type and offset.
 * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
 * plus 24 for the offset.
 * 0|     offset        |0110|o|type |00|
 * 0 0000000001111111111 2222 2 22222 33
 * 0 1234567890123456789 0123 4 56789 01
 *
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bits 53, 54, 62 and 63 are used to indicate the page type.
 * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402
 * This leaves the bits 0-51 and bits 56-61 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
 * plus 56 for the offset.
 * |                      offset                        |0110|o|type |00|
 *  0000000000111111111122222222223333333333444444444455 5555 5 55566 66
 *  0123456789012345678901234567890123456789012345678901 2345 6 78901 23
 */

#define __SWP_OFFSET_MASK (~0UL >> 11)

static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;
	offset &= __SWP_OFFSET_MASK;
	pte_val(pte) = _PAGE_INVALID | _PAGE_TYPE | ((type & 0x1f) << 2) |
		((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
	return pte;
}

#define __swp_type(entry)	(((entry).val >> 2) & 0x1f)
#define __swp_offset(entry)	(((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
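
/*
 * Worked example of the 64 bit encoding above (numbers made up):
 * type 3 and offset 0x1234 give
 *   pte = 0x400 | 0x002 | (3 << 2) | ((0x1234 & 1) << 7) |
 *         ((0x1234 & ~1UL) << 11) = 0x91a40e
 * and __swp_type()/__swp_offset() recover 3 resp. 0x1234 from it.
 */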

#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/* s390 has a private copy of get unmapped area to deal with cache synonyms */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/*
 * No page table caches to initialise
 */
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */