/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[];
extern void paging_init(void);

enum {
	PG_DIRECT_MAP_4K = 0,
	PG_DIRECT_MAP_1M,
	PG_DIRECT_MAP_2G,
	PG_DIRECT_MAP_MAX
};

extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

static inline void update_page_count(int level, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[level]);
}
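
/*
 * Usage sketch (illustrative only, not part of this header's API): when a
 * 1M segment of the direct mapping is split into 4K pages, the counters
 * reported via arch_report_meminfo() would be adjusted like this. The
 * helper name is hypothetical.
 */
static inline void example_account_1m_split(void)
{
	update_page_count(PG_DIRECT_MAP_1M, -1);	/* one 1M mapping removed */
	update_page_count(PG_DIRECT_MAP_4K, 256);	/* 256 4K mappings added */
}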

struct seq_file;
void arch_report_meminfo(struct seq_file *m);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)     do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
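
/*
 * Illustrative sketch of what ZERO_PAGE() computes: zero_page_mask selects
 * address bits that pick one of several pre-zeroed pages, so the returned
 * zero page shares the cache color of the user address it backs. The
 * helper below is hypothetical and just spells out the macro.
 */
static inline struct page *example_colored_zero_page(unsigned long uaddr)
{
	unsigned long color = uaddr & zero_page_mask;	/* color-selecting bits */

	return virt_to_page((void *)(empty_zero_page + color));
}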

/* TODO: s390 cannot support io_remap_pfn_range... */

#define FIRST_USER_ADDRESS  0UL

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define p4d_ERROR(e) \
	printk("%s:%d: bad p4d %p.\n", __FILE__, __LINE__, (void *) p4d_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. We reserve 128GB (64 bit) for vmalloc and modules.
 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
 * modules will reside. That makes sure that inter module branches always
 * happen without trampolines and in addition the placement within a 2GB frame
 * is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)

static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}

/*
 * A 64 bit page table entry of S390 has the following format:
 * |			 PFRA			      |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit: Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */
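
/*
 * Illustrative helpers (hypothetical, not part of this header's API)
 * decoding the storage-key byte documented above; bit 0 is the most
 * significant bit of the byte:
 */
static inline unsigned char example_skey_acc(unsigned char skey)
{
	return skey >> 4;		/* ACC: bits 0-3, the access key */
}

static inline int example_skey_fetch_prot(unsigned char skey)
{
	return (skey >> 3) & 1;		/* F: bit 4, fetch protection */
}

static inline int example_skey_referenced(unsigned char skey)
{
	return (skey >> 2) & 1;		/* R: bit 5, referenced bit */
}

static inline int example_skey_changed(unsigned char skey)
{
	return (skey >> 1) & 1;		/* C: bit 6, changed bit */
}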

/* Hardware bits in the page table entry */
#define _PAGE_NOEXEC	0x100		/* HW no-execute bit  */
#define _PAGE_PROTECT	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)

/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the page
 * table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte
 * (. unassigned bit, x don't care, t swap type):
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR.uswrdy.p
 * empty			.10.00000000
 * swap				.11..ttttt.0
 * prot-none, clean, old	.11.xx0000.1
 * prot-none, clean, young	.11.xx0001.1
 * prot-none, dirty, old	.11.xx0010.1
 * prot-none, dirty, young	.11.xx0011.1
 * read-only, clean, old	.11.xx0100.1
 * read-only, clean, young	.01.xx0101.1
 * read-only, dirty, old	.11.xx0110.1
 * read-only, dirty, young	.01.xx0111.1
 * read-write, clean, old	.11.xx1100.1
 * read-write, clean, young	.01.xx1101.1
 * read-write, dirty, old	.10.xx1110.1
 * read-write, dirty, young	.00.xx1111.1
 * HW-bits: R read-only, I invalid
 * SW-bits: p present, y young, d dirty, r read, w write, s special,
 *	    u unused, l large
 *
 * pte_none    is true for the bit pattern .10.00000000, pte == 0x400
 * pte_swap    is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200
 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
 */
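
/*
 * Illustrative check (hypothetical helper) of the patterns quoted above:
 * a swap entry keeps _PAGE_PROTECT set with _PAGE_PRESENT clear, which is
 * exactly the (pte & 0x201) == 0x200 test.
 */
static inline int example_is_swap_pattern(unsigned long pteval)
{
	return (pteval & (_PAGE_PROTECT | _PAGE_PRESENT)) == _PAGE_PROTECT;
}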

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL/* region/segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit	    */
#define _REGION_ENTRY_NOEXEC	0x100	/* region no-execute bit	    */
#define _REGION_ENTRY_OFFSET	0xc0	/* region table offset		    */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address	     */
#define _REGION3_ENTRY_DIRTY	0x2000	/* SW region dirty bit */
#define _REGION3_ENTRY_YOUNG	0x1000	/* SW region young bit */
#define _REGION3_ENTRY_LARGE	0x0400	/* RTTE-format control, large page  */
#define _REGION3_ENTRY_READ	0x0002	/* SW region read bit */
#define _REGION3_ENTRY_WRITE	0x0001	/* SW region write bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
#else
#define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
#endif

#define _REGION_ENTRY_BITS	 0xfffffffffffff22fUL
#define _REGION_ENTRY_BITS_LARGE 0xffffffff8000fe2fUL

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS			0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_BITS_LARGE		0xfffffffffff0ff33UL
#define _SEGMENT_ENTRY_HARDWARE_BITS		0xfffffffffffffe30UL
#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE	0xfffffffffff00730UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address	    */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* page table origin		    */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* segment protection bit	    */
#define _SEGMENT_ENTRY_NOEXEC	0x100	/* segment no-execute bit	    */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_WRITE	0x0002	/* SW segment write bit */
#define _SEGMENT_ENTRY_READ	0x0001	/* SW segment read bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif

#define _CRST_ENTRIES	2048	/* number of region/segment table entries */
#define _PAGE_ENTRIES	256	/* number of page table entries	*/

#define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8)
#define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8)

#define _REGION1_SHIFT	53
#define _REGION2_SHIFT	42
#define _REGION3_SHIFT	31
#define _SEGMENT_SHIFT	20

#define _REGION1_INDEX	(0x7ffUL << _REGION1_SHIFT)
#define _REGION2_INDEX	(0x7ffUL << _REGION2_SHIFT)
#define _REGION3_INDEX	(0x7ffUL << _REGION3_SHIFT)
#define _SEGMENT_INDEX	(0x7ffUL << _SEGMENT_SHIFT)
#define _PAGE_INDEX	(0xffUL << _PAGE_SHIFT)

#define _REGION1_SIZE	(1UL << _REGION1_SHIFT)
#define _REGION2_SIZE	(1UL << _REGION2_SHIFT)
#define _REGION3_SIZE	(1UL << _REGION3_SHIFT)
#define _SEGMENT_SIZE	(1UL << _SEGMENT_SHIFT)

#define _REGION1_MASK	(~(_REGION1_SIZE - 1))
#define _REGION2_MASK	(~(_REGION2_SIZE - 1))
#define _REGION3_MASK	(~(_REGION3_SIZE - 1))
#define _SEGMENT_MASK	(~(_SEGMENT_SIZE - 1))
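
/*
 * Worked out, the shifts above yield the following granularities:
 * a segment maps 1UL << 20 = 1 MB, a region-3 entry 1UL << 31 = 2 GB,
 * a region-2 entry 1UL << 42 = 4 TB, and a region-1 entry
 * 1UL << 53 = 8 PB of virtual address space.
 */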

#define PMD_SHIFT	_SEGMENT_SHIFT
#define PUD_SHIFT	_REGION3_SHIFT
#define P4D_SHIFT	_REGION2_SHIFT
#define PGDIR_SHIFT	_REGION1_SHIFT

#define PMD_SIZE	_SEGMENT_SIZE
#define PUD_SIZE	_REGION3_SIZE
#define P4D_SIZE	_REGION2_SIZE
#define PGDIR_SIZE	_REGION1_SIZE

#define PMD_MASK	_SEGMENT_MASK
#define PUD_MASK	_REGION3_MASK
#define P4D_MASK	_REGION2_MASK
#define PGDIR_MASK	_REGION1_MASK

#define PTRS_PER_PTE	_PAGE_ENTRIES
#define PTRS_PER_PMD	_CRST_ENTRIES
#define PTRS_PER_PUD	_CRST_ENTRIES
#define PTRS_PER_P4D	_CRST_ENTRIES
#define PTRS_PER_PGD	_CRST_ENTRIES

#define MAX_PTRS_PER_P4D	PTRS_PER_P4D

/*
 * Segment table and region3 table entry encoding
 * (R = read-only, I = invalid, y = young bit):
 *				dy..R...I...wr
 * prot-none, clean, old	00..1...1...00
 * prot-none, clean, young	01..1...1...00
 * prot-none, dirty, old	10..1...1...00
 * prot-none, dirty, young	11..1...1...00
 * read-only, clean, old	00..1...1...01
 * read-only, clean, young	01..1...0...01
 * read-only, dirty, old	10..1...1...01
 * read-only, dirty, young	11..1...0...01
 * read-write, clean, old	00..1...1...11
 * read-write, clean, young	01..1...0...11
 * read-write, dirty, old	10..0...1...11
 * read-write, dirty, young	11..0...0...11
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 * HW-bits: R read-only, I invalid
 * SW-bits: y young, d dirty, r read, w write
 */
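
/*
 * Illustrative example (hypothetical helper): the "read-write, dirty,
 * young" row of the table above corresponds to both HW bits clear and
 * all four SW bits set.
 */
static inline unsigned long example_segment_rw_dirty_young(void)
{
	return _SEGMENT_ENTRY_READ | _SEGMENT_ENTRY_WRITE |
	       _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG;
}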

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */
#define PGSTE_VSIE_BIT	0x0000200000000000UL	/* ref'd in a shadow table */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO			0x0000000080000000UL
#define _PGSTE_GPS_NODAT		0x0000000040000000UL
#define _PGSTE_GPS_USAGE_MASK		0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE		0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED		0x0000000001000000UL
#define _PGSTE_GPS_USAGE_POT_VOLATILE	0x0000000002000000UL
#define _PGSTE_GPS_USAGE_VOLATILE	_PGSTE_GPS_USAGE_MASK

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)
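
/*
 * Sketch (an assumption, based on how the mm context asce is built
 * elsewhere in arch/s390, not on anything in this header): a user asce
 * for a 3-level (region-third) table would combine the table origin with
 * the user bits, the table type and the table length.
 */
static inline unsigned long example_mk_user_asce3(unsigned long table_origin)
{
	return table_origin | _ASCE_USER_BITS |
	       _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
}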

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RO		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RX		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RW		__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT | _PAGE_NOEXEC)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				  _PAGE_YOUNG |	_PAGE_DIRTY)

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
         /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RX
#define __P101	PAGE_RX
#define __P110	PAGE_RX
#define __P111	PAGE_RX

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RX
#define __S101	PAGE_RX
#define __S110	PAGE_RWX
#define __S111	PAGE_RWX

/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_RO	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RX	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_RW	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RWX	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)
#define SEGMENT_KERNEL	__pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_DIRTY | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_EXEC __pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_DIRTY)

/*
 * Region3 entry (large page) protection definitions.
 */

#define REGION3_KERNEL	__pgprot(_REGION_ENTRY_TYPE_R3 | \
				 _REGION3_ENTRY_LARGE |	 \
				 _REGION3_ENTRY_READ |	 \
				 _REGION3_ENTRY_WRITE |	 \
				 _REGION3_ENTRY_YOUNG |	 \
				 _REGION3_ENTRY_DIRTY | \
				 _REGION_ENTRY_NOEXEC)
#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
				   _REGION3_ENTRY_LARGE |  \
				   _REGION3_ENTRY_READ |   \
				   _REGION3_ENTRY_YOUNG |  \
				   _REGION_ENTRY_PROTECT | \
				   _REGION_ENTRY_NOEXEC)

static inline bool mm_p4d_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION1_SIZE;
}
#define mm_p4d_folded(mm) mm_p4d_folded(mm)

static inline bool mm_pud_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION2_SIZE;
}
#define mm_pud_folded(mm) mm_pud_folded(mm)

static inline bool mm_pmd_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION3_SIZE;
}
#define mm_pmd_folded(mm) mm_pmd_folded(mm)

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.alloc_pgste))
		return 1;
#endif
	return 0;
}

/*
 * In the case that a guest uses storage keys, faults should no longer
 * be backed by zero pages.
 */
#define mm_forbids_zeropage mm_has_pgste
static inline int mm_uses_skeys(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.uses_skeys)
		return 1;
#endif
	return 0;
}

static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	csp	%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	.insn	rre,0xb98a0000,%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

#define CRDTE_DTT_PAGE		0x00UL
#define CRDTE_DTT_SEGMENT	0x10UL
#define CRDTE_DTT_REGION3	0x14UL
#define CRDTE_DTT_REGION2	0x18UL
#define CRDTE_DTT_REGION1	0x1cUL

static inline void crdte(unsigned long old, unsigned long new,
			 unsigned long table, unsigned long dtt,
			 unsigned long address, unsigned long asce)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	register unsigned long reg4 asm("4") = table | dtt;
	register unsigned long reg5 asm("5") = address;

	asm volatile(".insn rrf,0xb98f0000,%0,%2,%4,0"
		     : "+d" (reg2)
		     : "d" (reg3), "d" (reg4), "d" (reg5), "a" (asce)
		     : "memory", "cc");
}

/*
 * pgd/p4d/pud/pmd/pte query functions
 */
static inline int pgd_folded(pgd_t pgd)
{
	return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
}

static inline int pgd_present(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT;
}

static inline int p4d_folded(p4d_t p4d)
{
	return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
}

static inline int p4d_present(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 1;
	return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int p4d_none(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 0;
	return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
}

static inline int pud_folded(pud_t pud)
{
	return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
}

static inline int pud_present(pud_t pud)
{
	if (pud_folded(pud))
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if (pud_folded(pud))
		return 0;
	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}

static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline unsigned long pud_pfn(pud_t pud)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	if (pud_large(pud))
		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
	return (pud_val(pud) & origin_mask) >> PAGE_SHIFT;
}

static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline int pmd_bad(pmd_t pmd)
{
	if (pmd_large(pmd))
		return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

static inline int pud_bad(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return pmd_bad(__pmd(pud_val(pud)));
	if (pud_large(pud))
		return (pud_val(pud) & ~_REGION_ENTRY_BITS_LARGE) != 0;
	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int p4d_bad(p4d_t p4d)
{
	if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return pud_bad(__pud(p4d_val(p4d)));
	return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}
746 
747 #define pmd_write pmd_write
748 static inline int pmd_write(pmd_t pmd)
749 {
750 	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
751 }
752 
753 static inline int pmd_dirty(pmd_t pmd)
754 {
755 	int dirty = 1;
756 	if (pmd_large(pmd))
757 		dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
758 	return dirty;
759 }
760 
761 static inline int pmd_young(pmd_t pmd)
762 {
763 	int young = 1;
764 	if (pmd_large(pmd))
765 		young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
766 	return young;
767 }
768 
769 static inline int pte_present(pte_t pte)
770 {
771 	/* Bit pattern: (pte & 0x001) == 0x001 */
772 	return (pte_val(pte) & _PAGE_PRESENT) != 0;
773 }
774 
775 static inline int pte_none(pte_t pte)
776 {
777 	/* Bit pattern: pte == 0x400 */
778 	return pte_val(pte) == _PAGE_INVALID;
779 }
780 
781 static inline int pte_swap(pte_t pte)
782 {
783 	/* Bit pattern: (pte & 0x201) == 0x200 */
784 	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
785 		== _PAGE_PROTECT;
786 }
787 
788 static inline int pte_special(pte_t pte)
789 {
790 	return (pte_val(pte) & _PAGE_SPECIAL);
791 }
792 
793 #define __HAVE_ARCH_PTE_SAME
794 static inline int pte_same(pte_t a, pte_t b)
795 {
796 	return pte_val(a) == pte_val(b);
797 }
798 
799 #ifdef CONFIG_NUMA_BALANCING
800 static inline int pte_protnone(pte_t pte)
801 {
802 	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
803 }
804 
805 static inline int pmd_protnone(pmd_t pmd)
806 {
807 	/* pmd_large(pmd) implies pmd_present(pmd) */
808 	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
809 }
810 #endif
811 
812 static inline int pte_soft_dirty(pte_t pte)
813 {
814 	return pte_val(pte) & _PAGE_SOFT_DIRTY;
815 }
816 #define pte_swp_soft_dirty pte_soft_dirty
817 
818 static inline pte_t pte_mksoft_dirty(pte_t pte)
819 {
820 	pte_val(pte) |= _PAGE_SOFT_DIRTY;
821 	return pte;
822 }
823 #define pte_swp_mksoft_dirty pte_mksoft_dirty
824 
825 static inline pte_t pte_clear_soft_dirty(pte_t pte)
826 {
827 	pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
828 	return pte;
829 }
830 #define pte_swp_clear_soft_dirty pte_clear_soft_dirty
831 
832 static inline int pmd_soft_dirty(pmd_t pmd)
833 {
834 	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
835 }
836 
837 static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
838 {
839 	pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
840 	return pmd;
841 }
842 
843 static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
844 {
845 	pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
846 	return pmd;
847 }

/*
 * Query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}

/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		pgd_val(*pgd) = _REGION1_ENTRY_EMPTY;
}

static inline void p4d_clear(p4d_t *p4d)
{
	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		p4d_val(*p4d) = _REGION2_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
	 * has the invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte_val(pte) &= ~_PAGE_INVALID;
	/*
	 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
	 * protection bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_YOUNG;
	pte_val(pte) |= _PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_YOUNG;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) &= ~_PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_LARGE;
	return pte;
}
#endif

#define IPTE_GLOBAL	0
#define IPTE_LOCAL	1

#define IPTE_NODAT	0x400
#define IPTE_GUEST_ASCE	0x800

static inline void __ptep_ipte(unsigned long address, pte_t *ptep,
			       unsigned long opt, unsigned long asce,
			       int local)
{
	unsigned long pto = (unsigned long) ptep;

	if (__builtin_constant_p(opt) && opt == 0) {
		/* Invalidation + TLB flush for the pte */
		asm volatile(
			"	.insn	rrf,0xb2210000,%[r1],%[r2],0,%[m4]"
			: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
			  [m4] "i" (local));
		return;
	}

	/* Invalidate ptes with options + TLB flush of the ptes */
	opt = opt | (asce & _ASCE_ORIGIN);
	asm volatile(
		"	.insn	rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
		: [r2] "+a" (address), [r3] "+a" (opt)
		: [r1] "a" (pto), [m4] "i" (local) : "memory");
}

static inline void __ptep_ipte_range(unsigned long address, int nr,
				     pte_t *ptep, int local)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidate a range of ptes + TLB flush of the ptes */
	do {
		asm volatile(
			"	.insn rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
			: [r2] "+a" (address), [r3] "+a" (nr)
			: [r1] "a" (pto), [m4] "i" (local) : "memory");
	} while (nr != 255);
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);
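
/*
 * Illustrative sketch of the three-step sequence described above
 * (hypothetical helper; step 2 is a simplified set_pte_at without
 * pgste handling):
 */
static inline void example_change_active_pte(struct mm_struct *mm,
					     unsigned long addr, pte_t *ptep,
					     pte_t entry)
{
	/* 1) get_and_clear: invalidates the pte and flushes the TLB */
	ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
	/* 2) set_pte_at: install the new pte (simplified) */
	*ptep = entry;
	/* 3) flush_tlb_range() would be a nop on s390 */
}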

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return pte_young(pte);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
			     pte_t *, pte_t, pte_t);

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep)
{
	return ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	if (full) {
		pte_t pte = *ptep;
		*ptep = __pte(_PAGE_INVALID);
		return pte;
	}
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_write(pte))
		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (pte_same(*ptep, entry))
		return 0;
	ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
	return 1;
}

/*
 * Additional functions to handle KVM guest page tables
 */
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry);
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void ptep_notify(struct mm_struct *mm, unsigned long addr,
		 pte_t *ptep, unsigned long bits);
int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
		    pte_t *ptep, int prot, unsigned long bit);
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset);
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte);
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);

bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address,
			    pte_t *ptep);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq);
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc);
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key);

int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
				unsigned long bits, unsigned long value);
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
			unsigned long *oldpte, unsigned long *oldpgste);
void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	if (!MACHINE_HAS_NX)
		pte_val(entry) &= ~_PAGE_NOEXEC;
	if (pte_present(entry))
		pte_val(entry) &= ~_PAGE_UNUSED;
	if (mm_has_pgste(mm))
		ptep_set_pte_at(mm, addr, ptep, entry);
	else
		*ptep = entry;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}
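
/*
 * Usage sketch (hypothetical helper) tying the conversion functions to
 * set_pte_at(): build a pte for a page and install it.
 */
static inline void example_install_ro_pte(struct mm_struct *mm,
					  unsigned long addr, pte_t *ptep,
					  struct page *page)
{
	pte_t pte = mk_pte(page, PAGE_RO);	/* read-only, no-exec mapping */

	set_pte_at(mm, addr, ptep, pte);	/* honors pgste if the mm has one */
}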

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
#define pgd_offset_raw(pgd, addr) ((pgd) + pgd_index(addr))
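
/*
 * Illustrative example (hypothetical helper): how a virtual address is
 * decomposed by the index macros above, mirroring the table walk
 * implemented below.
 */
static inline void example_decompose(unsigned long addr, unsigned long *idx)
{
	idx[0] = pgd_index(addr);	/* bits 53-63: region-1 index */
	idx[1] = p4d_index(addr);	/* bits 42-52: region-2 index */
	idx[2] = pud_index(addr);	/* bits 31-41: region-3 index */
	idx[3] = pmd_index(addr);	/* bits 20-30: segment index */
	idx[4] = pte_index(addr);	/* bits 12-19: page index */
}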

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define p4d_deref(pud) (p4d_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	p4d_t *p4d = (p4d_t *) pgd;

	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		p4d = (p4d_t *) pgd_deref(*pgd);
	return p4d + p4d_index(address);
}

static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	pud_t *pud = (pud_t *) p4d;

	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) p4d_deref(*p4d);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;

	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
#define pud_page(pud) pfn_to_page(pud_pfn(pud))
#define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
#define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))

/* Find an entry in the lowest level page table. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd, address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
	if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
		return pmd;
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY |
				_SEGMENT_ENTRY_SOFT_DIRTY;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	pud_val(pud) &= ~_REGION3_ENTRY_WRITE;
	pud_val(pud) |= _REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	pud_val(pud) |= _REGION3_ENTRY_WRITE;
	if (pud_large(pud) && !(pud_val(pud) & _REGION3_ENTRY_DIRTY))
		return pud;
	pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkclean(pud_t pud)
{
	if (pud_large(pud)) {
		pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
		pud_val(pud) |= _REGION_ENTRY_PROTECT;
	}
	return pud;
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	if (pud_large(pud)) {
		pud_val(pud) |= _REGION3_ENTRY_DIRTY |
				_REGION3_ENTRY_SOFT_DIRTY;
		if (pud_val(pud) & _REGION3_ENTRY_WRITE)
			pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	}
	return pud;
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
	 * (see __Pxxx / __Sxxx). Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
		return pgprot_val(SEGMENT_RO);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
		return pgprot_val(SEGMENT_RX);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
		return pgprot_val(SEGMENT_RW);
	return pgprot_val(SEGMENT_RWX);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
			_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
			_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
		pmd_val(pmd) |= massage_pgprot_pmd(newprot);
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
			pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
			pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
		return pmd;
	}
	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;
	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */

static inline void __pmdp_csp(pmd_t *pmdp)
{
	csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
	    pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
}

#define IDTE_GLOBAL	0
#define IDTE_LOCAL	1

#define IDTE_PTOA	0x0800
#define IDTE_NODAT	0x1000
#define IDTE_GUEST_ASCE	0x2000

static inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
			       unsigned long opt, unsigned long asce,
			       int local)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(addr) * sizeof(pmd_t);
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
			  [m4] "i" (local)
			: "cc" );
	} else {
		/* flush with guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

static inline void __pudp_idte(unsigned long addr, pud_t *pudp,
			       unsigned long opt, unsigned long asce,
			       int local)
{
	unsigned long r3o;

	r3o = (unsigned long) pudp - pud_index(addr) * sizeof(pud_t);
	r3o |= _ASCE_TYPE_REGION3;
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)),
			  [m4] "i" (local)
			: "cc");
	} else {
		/* flush with guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);

	entry = pmd_mkyoung(entry);
	if (dirty)
		entry = pmd_mkdirty(entry);
	if (pmd_val(*pmdp) == pmd_val(entry))
		return 0;
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
	return 1;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
	return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);
	return pmdp_test_and_clear_young(vma, addr, pmdp);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	if (!MACHINE_HAS_NX)
		pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC;
	*pmdp = entry;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
						 unsigned long addr,
						 pmd_t *pmdp, int full)
{
	if (full) {
		pmd_t pmd = *pmdp;
		*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
		return pmd;
	}
	return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
				   unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);

	return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd))
		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_EDAT1 ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bits 54 and 63 are used to indicate the page type.
 * A swap pte is indicated by the bit pattern (pte & 0x201) == 0x200.
 * This leaves the bits 0-51 and bits 56-62 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51
 * for the offset.
 * |			  offset			|01100|type |00|
 * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
 * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
 */

#define __SWP_OFFSET_MASK	((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_TYPE_MASK		((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT	2

static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
	pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
	pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
	return pte;
}

static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
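
/*
 * Illustrative round trip (hypothetical helper): encoding a (type,
 * offset) pair and reading it back returns the original values.
 */
static inline int example_swp_roundtrip(void)
{
	swp_entry_t entry = __swp_entry(3, 0x1234);

	return __swp_type(entry) == 3 && __swp_offset(entry) == 0x1234;
}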

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/* s390 has a private copy of get_unmapped_area to deal with cache synonyms */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/*
 * No page table caches to initialise
 */
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */