1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  *  S390 version
4  *    Copyright IBM Corp. 1999, 2000
5  *    Author(s): Hartmut Penner (hp@de.ibm.com)
6  *               Ulrich Weigand (weigand@de.ibm.com)
7  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
8  *
9  *  Derived from "include/asm-i386/pgtable.h"
10  */
11 
12 #ifndef _ASM_S390_PGTABLE_H
13 #define _ASM_S390_PGTABLE_H
14 
15 #include <linux/sched.h>
16 #include <linux/mm_types.h>
17 #include <linux/page-flags.h>
18 #include <linux/radix-tree.h>
19 #include <linux/atomic.h>
20 #include <asm/sections.h>
21 #include <asm/bug.h>
22 #include <asm/page.h>
23 #include <asm/uv.h>
24 
25 extern pgd_t swapper_pg_dir[];
26 extern pgd_t invalid_pg_dir[];
27 extern void paging_init(void);
28 extern unsigned long s390_invalid_asce;
29 
30 enum {
31 	PG_DIRECT_MAP_4K = 0,
32 	PG_DIRECT_MAP_1M,
33 	PG_DIRECT_MAP_2G,
34 	PG_DIRECT_MAP_MAX
35 };
36 
37 extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];
38 
39 static inline void update_page_count(int level, long count)
40 {
41 	if (IS_ENABLED(CONFIG_PROC_FS))
42 		atomic_long_add(count, &direct_pages_count[level]);
43 }
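
/*
 * A minimal usage sketch (the helper name and call sites are hypothetical):
 * callers that map or unmap pieces of the kernel direct mapping are
 * expected to adjust the counter that matches the mapping granularity.
 */
static inline void example_account_direct_map(void)
{
	update_page_count(PG_DIRECT_MAP_1M, 1);		/* mapped one 1M segment */
	update_page_count(PG_DIRECT_MAP_4K, -256);	/* unmapped 256 4K pages */
}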
44 
45 struct seq_file;
46 void arch_report_meminfo(struct seq_file *m);
47 
48 /*
49  * The S390 doesn't have any external MMU info: the kernel page
50  * tables contain all the necessary information.
51  */
52 #define update_mmu_cache(vma, address, ptep)     do { } while (0)
53 #define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)
54 
55 /*
56  * ZERO_PAGE is a global shared page that is always zero; used
57  * for zero-mapped memory areas etc..
58  */
59 
60 extern unsigned long empty_zero_page;
61 extern unsigned long zero_page_mask;
62 
63 #define ZERO_PAGE(vaddr) \
64 	(virt_to_page((void *)(empty_zero_page + \
65 	 (((unsigned long)(vaddr)) & zero_page_mask))))
66 #define __HAVE_COLOR_ZERO_PAGE
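
/*
 * Sketch of what ZERO_PAGE() computes, assuming zero_page_mask covers the
 * low address bits that distinguish the replicated zero pages (cache
 * coloring): the faulting address selects one of the copies. The helper
 * name is hypothetical.
 */
static inline struct page *example_zero_page(unsigned long vaddr)
{
	unsigned long base = empty_zero_page;		/* first zero page */
	unsigned long color = vaddr & zero_page_mask;	/* selects the copy */

	return virt_to_page((void *)(base + color));
}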
67 
68 /* TODO: s390 cannot support io_remap_pfn_range... */
69 
70 #define pte_ERROR(e) \
71 	pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
72 #define pmd_ERROR(e) \
73 	pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
74 #define pud_ERROR(e) \
75 	pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
76 #define p4d_ERROR(e) \
77 	pr_err("%s:%d: bad p4d %016lx.\n", __FILE__, __LINE__, p4d_val(e))
78 #define pgd_ERROR(e) \
79 	pr_err("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
80 
81 /*
82  * The vmalloc and module areas always occupy the topmost area of the
83  * kernel mapping. 512GB are reserved for vmalloc by default.
84  * At the top of the vmalloc area a 2GB area is reserved where modules
85  * will reside. That makes sure that inter-module branches always
86  * happen without trampolines and, in addition, the placement within a
87  * 2GB frame is branch prediction unit friendly.
88  */
89 extern unsigned long __bootdata_preserved(VMALLOC_START);
90 extern unsigned long __bootdata_preserved(VMALLOC_END);
91 #define VMALLOC_DEFAULT_SIZE	((512UL << 30) - MODULES_LEN)
92 extern struct page *__bootdata_preserved(vmemmap);
93 extern unsigned long __bootdata_preserved(vmemmap_size);
94 
95 #define VMEM_MAX_PHYS ((unsigned long) vmemmap)
96 
97 extern unsigned long __bootdata_preserved(MODULES_VADDR);
98 extern unsigned long __bootdata_preserved(MODULES_END);
99 #define MODULES_VADDR	MODULES_VADDR
100 #define MODULES_END	MODULES_END
101 #define MODULES_LEN	(1UL << 31)
102 
103 static inline int is_module_addr(void *addr)
104 {
105 	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
106 	if (addr < (void *)MODULES_VADDR)
107 		return 0;
108 	if (addr > (void *)MODULES_END)
109 		return 0;
110 	return 1;
111 }
112 
113 /*
114  * A 64 bit page table entry of S390 has the following format:
115  * |			 PFRA			      |0IPC|  OS  |
116  * 0000000000111111111122222222223333333333444444444455555555556666
117  * 0123456789012345678901234567890123456789012345678901234567890123
118  *
119  * I Page-Invalid Bit:    Page is not available for address-translation
120  * P Page-Protection Bit: Store access not possible for page
121  * C Change-bit override: HW is not required to set change bit
122  *
123  * A 64 bit segment table entry of S390 has the following format:
124  * |        P-table origin                              |      TT
125  * 0000000000111111111122222222223333333333444444444455555555556666
126  * 0123456789012345678901234567890123456789012345678901234567890123
127  *
128  * I Segment-Invalid Bit:    Segment is not available for address-translation
129  * C Common-Segment Bit:     Segment is not private (PoP 3-30)
130  * P Page-Protection Bit: Store access not possible for page
131  * TT Type 00
132  *
133  * A 64 bit region table entry of S390 has the following format:
134  * |        S-table origin                             |   TF  TTTL
135  * 0000000000111111111122222222223333333333444444444455555555556666
136  * 0123456789012345678901234567890123456789012345678901234567890123
137  *
138  * I Segment-Invalid Bit:    Segment is not available for address-translation
139  * TT Type 01
140  * TF Table offset
141  * TL Table length
142  *
143  * The 64 bit region table origin of S390 has the following format:
144  * |      region table origin                          |       DTTL
145  * 0000000000111111111122222222223333333333444444444455555555556666
146  * 0123456789012345678901234567890123456789012345678901234567890123
147  *
148  * X Space-Switch event:
149  * G Segment-Invalid Bit:
150  * P Private-Space Bit:
151  * S Storage-Alteration:
152  * R Real space
153  * TL Table-Length:
154  *
155  * A storage key has the following format:
156  * | ACC |F|R|C|0|
157  *  0   3 4 5 6 7
158  * ACC: access key
159  * F  : fetch protection bit
160  * R  : referenced bit
161  * C  : changed bit
162  */
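
/*
 * A worked example for the storage key layout above (sketch only, the
 * helper name is illustrative): extract the ACC field from a key byte;
 * the remaining fields sit at F=0x08, R=0x04 and C=0x02.
 */
static inline unsigned char example_skey_acc(unsigned char key)
{
	return (key & 0xf0) >> 4;	/* ACC: access-control bits 0-3 */
}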
163 
164 /* Hardware bits in the page table entry */
165 #define _PAGE_NOEXEC	0x100		/* HW no-execute bit  */
166 #define _PAGE_PROTECT	0x200		/* HW read-only bit  */
167 #define _PAGE_INVALID	0x400		/* HW invalid bit    */
168 #define _PAGE_LARGE	0x800		/* Bit to mark a large pte */
169 
170 /* Software bits in the page table entry */
171 #define _PAGE_PRESENT	0x001		/* SW pte present bit */
172 #define _PAGE_YOUNG	0x004		/* SW pte young bit */
173 #define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
174 #define _PAGE_READ	0x010		/* SW pte read bit */
175 #define _PAGE_WRITE	0x020		/* SW pte write bit */
176 #define _PAGE_SPECIAL	0x040		/* SW associated with special page */
177 #define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */
178 
179 #ifdef CONFIG_MEM_SOFT_DIRTY
180 #define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
181 #else
182 #define _PAGE_SOFT_DIRTY 0x000
183 #endif
184 
185 #define _PAGE_SW_BITS	0xffUL		/* All SW bits */
186 
187 #define _PAGE_SWP_EXCLUSIVE _PAGE_LARGE	/* SW pte exclusive swap bit */
188 
189 /* Set of bits not changed in pte_modify */
190 #define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
191 				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)
192 
193 /*
194  * Mask of bits that must not be changed with RDP. Allow only _PAGE_PROTECT
195  * HW bit and all SW bits.
196  */
197 #define _PAGE_RDP_MASK		~(_PAGE_PROTECT | _PAGE_SW_BITS)
198 
199 /*
200  * handle_pte_fault uses pte_present and pte_none to find out the pte type
201  * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
202  * distinguish present from not-present ptes. It is changed only with the page
203  * table lock held.
204  *
205  * The following table gives the different possible bit combinations for
206  * the pte hardware and software bits in the last 12 bits of a pte
207  * (. unassigned bit, x don't care, t swap type):
208  *
209  *				842100000000
210  *				000084210000
211  *				000000008421
212  *				.IR.uswrdy.p
213  * empty			.10.00000000
214  * swap				.11..ttttt.0
215  * prot-none, clean, old	.11.xx0000.1
216  * prot-none, clean, young	.11.xx0001.1
217  * prot-none, dirty, old	.11.xx0010.1
218  * prot-none, dirty, young	.11.xx0011.1
219  * read-only, clean, old	.11.xx0100.1
220  * read-only, clean, young	.01.xx0101.1
221  * read-only, dirty, old	.11.xx0110.1
222  * read-only, dirty, young	.01.xx0111.1
223  * read-write, clean, old	.11.xx1100.1
224  * read-write, clean, young	.01.xx1101.1
225  * read-write, dirty, old	.10.xx1110.1
226  * read-write, dirty, young	.00.xx1111.1
227  * HW-bits: R read-only, I invalid
228  * SW-bits: p present, y young, d dirty, r read, w write, s special,
229  *	    u unused, l large
230  *
231  * pte_none    is true for the bit pattern .10.00000000, pte == 0x400
232  * pte_swap    is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200
233  * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
234  */
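
/*
 * A worked example of the table above (sketch; the helper is illustrative
 * and not used anywhere): classify a pte value with exactly the tests
 * spelled out in the comment.
 */
static inline const char *example_classify_pte(pte_t pte)
{
	if (pte_val(pte) == _PAGE_INVALID)	/* .10.00000000 */
		return "none";
	if ((pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT)) == _PAGE_PROTECT)
		return "swap";			/* .11..ttttt.0 */
	if (pte_val(pte) & _PAGE_PRESENT)	/* .xx.xxxxxx.1 */
		return "present";
	return "unexpected";
}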
235 
236 /* Bits in the segment/region table address-space-control-element */
237 #define _ASCE_ORIGIN		~0xfffUL/* region/segment table origin	    */
238 #define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
239 #define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
240 #define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
241 #define _ASCE_REAL_SPACE	0x20	/* real space control		    */
242 #define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
243 #define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
244 #define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
245 #define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
246 #define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
247 #define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */
248 
249 /* Bits in the region table entry */
250 #define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
251 #define _REGION_ENTRY_PROTECT	0x200	/* region protection bit	    */
252 #define _REGION_ENTRY_NOEXEC	0x100	/* region no-execute bit	    */
253 #define _REGION_ENTRY_OFFSET	0xc0	/* region table offset		    */
254 #define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry	    */
255 #define _REGION_ENTRY_TYPE_MASK	0x0c	/* region table type mask	    */
256 #define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
257 #define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
258 #define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
259 #define _REGION_ENTRY_LENGTH	0x03	/* region/segment table length	    */
260 
261 #define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
262 #define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
263 #define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
264 #define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
265 #define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
266 #define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)
267 
268 #define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address	     */
269 #define _REGION3_ENTRY_DIRTY	0x2000	/* SW region dirty bit */
270 #define _REGION3_ENTRY_YOUNG	0x1000	/* SW region young bit */
271 #define _REGION3_ENTRY_LARGE	0x0400	/* RTTE-format control, large page  */
272 #define _REGION3_ENTRY_READ	0x0002	/* SW region read bit */
273 #define _REGION3_ENTRY_WRITE	0x0001	/* SW region write bit */
274 
275 #ifdef CONFIG_MEM_SOFT_DIRTY
276 #define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
277 #else
278 #define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
279 #endif
280 
281 #define _REGION_ENTRY_BITS	 0xfffffffffffff22fUL
282 
283 /* Bits in the segment table entry */
284 #define _SEGMENT_ENTRY_BITS			0xfffffffffffffe33UL
285 #define _SEGMENT_ENTRY_HARDWARE_BITS		0xfffffffffffffe30UL
286 #define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE	0xfffffffffff00730UL
287 #define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address	    */
288 #define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* page table origin		    */
289 #define _SEGMENT_ENTRY_PROTECT	0x200	/* segment protection bit	    */
290 #define _SEGMENT_ENTRY_NOEXEC	0x100	/* segment no-execute bit	    */
291 #define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry	    */
292 #define _SEGMENT_ENTRY_TYPE_MASK 0x0c	/* segment table type mask	    */
293 
294 #define _SEGMENT_ENTRY		(0)
295 #define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)
296 
297 #define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
298 #define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
299 #define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
300 #define _SEGMENT_ENTRY_WRITE	0x0002	/* SW segment write bit */
301 #define _SEGMENT_ENTRY_READ	0x0001	/* SW segment read bit */
302 
303 #ifdef CONFIG_MEM_SOFT_DIRTY
304 #define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
305 #else
306 #define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
307 #endif
308 
309 #define _CRST_ENTRIES	2048	/* number of region/segment table entries */
310 #define _PAGE_ENTRIES	256	/* number of page table entries	*/
311 
312 #define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8)
313 #define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8)
314 
315 #define _REGION1_SHIFT	53
316 #define _REGION2_SHIFT	42
317 #define _REGION3_SHIFT	31
318 #define _SEGMENT_SHIFT	20
319 
320 #define _REGION1_INDEX	(0x7ffUL << _REGION1_SHIFT)
321 #define _REGION2_INDEX	(0x7ffUL << _REGION2_SHIFT)
322 #define _REGION3_INDEX	(0x7ffUL << _REGION3_SHIFT)
323 #define _SEGMENT_INDEX	(0x7ffUL << _SEGMENT_SHIFT)
324 #define _PAGE_INDEX	(0xffUL  << _PAGE_SHIFT)
325 
326 #define _REGION1_SIZE	(1UL << _REGION1_SHIFT)
327 #define _REGION2_SIZE	(1UL << _REGION2_SHIFT)
328 #define _REGION3_SIZE	(1UL << _REGION3_SHIFT)
329 #define _SEGMENT_SIZE	(1UL << _SEGMENT_SHIFT)
330 
331 #define _REGION1_MASK	(~(_REGION1_SIZE - 1))
332 #define _REGION2_MASK	(~(_REGION2_SIZE - 1))
333 #define _REGION3_MASK	(~(_REGION3_SIZE - 1))
334 #define _SEGMENT_MASK	(~(_SEGMENT_SIZE - 1))
335 
336 #define PMD_SHIFT	_SEGMENT_SHIFT
337 #define PUD_SHIFT	_REGION3_SHIFT
338 #define P4D_SHIFT	_REGION2_SHIFT
339 #define PGDIR_SHIFT	_REGION1_SHIFT
340 
341 #define PMD_SIZE	_SEGMENT_SIZE
342 #define PUD_SIZE	_REGION3_SIZE
343 #define P4D_SIZE	_REGION2_SIZE
344 #define PGDIR_SIZE	_REGION1_SIZE
345 
346 #define PMD_MASK	_SEGMENT_MASK
347 #define PUD_MASK	_REGION3_MASK
348 #define P4D_MASK	_REGION2_MASK
349 #define PGDIR_MASK	_REGION1_MASK
350 
351 #define PTRS_PER_PTE	_PAGE_ENTRIES
352 #define PTRS_PER_PMD	_CRST_ENTRIES
353 #define PTRS_PER_PUD	_CRST_ENTRIES
354 #define PTRS_PER_P4D	_CRST_ENTRIES
355 #define PTRS_PER_PGD	_CRST_ENTRIES
356 
357 /*
358  * Segment table and region3 table entry encoding
359  * (R = read-only, I = invalid, y = young bit):
360  *				dy..R...I...wr
361  * prot-none, clean, old	00..1...1...00
362  * prot-none, clean, young	01..1...1...00
363  * prot-none, dirty, old	10..1...1...00
364  * prot-none, dirty, young	11..1...1...00
365  * read-only, clean, old	00..1...1...01
366  * read-only, clean, young	01..1...0...01
367  * read-only, dirty, old	10..1...1...01
368  * read-only, dirty, young	11..1...0...01
369  * read-write, clean, old	00..1...1...11
370  * read-write, clean, young	01..1...0...11
371  * read-write, dirty, old	10..0...1...11
372  * read-write, dirty, young	11..0...0...11
373  * The segment table origin is used to distinguish empty (origin==0) from
374  * read-write, old segment table entries (origin!=0)
375  * HW-bits: R read-only, I invalid
376  * SW-bits: y young, d dirty, r read, w write
377  */
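
/*
 * Worked example for the encoding above (sketch only): the "read-write,
 * dirty, young" row corresponds to a segment entry with only the SW bits
 * set and both HW bits (protect, invalid) clear, so the hardware can read
 * and store into the segment without faulting:
 *
 *	pmd_t pmd = __pmd(_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
 *			  _SEGMENT_ENTRY_WRITE | _SEGMENT_ENTRY_READ);
 */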
378 
379 /* Page status table bits for virtualization */
380 #define PGSTE_ACC_BITS	0xf000000000000000UL
381 #define PGSTE_FP_BIT	0x0800000000000000UL
382 #define PGSTE_PCL_BIT	0x0080000000000000UL
383 #define PGSTE_HR_BIT	0x0040000000000000UL
384 #define PGSTE_HC_BIT	0x0020000000000000UL
385 #define PGSTE_GR_BIT	0x0004000000000000UL
386 #define PGSTE_GC_BIT	0x0002000000000000UL
387 #define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
388 #define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */
389 #define PGSTE_VSIE_BIT	0x0000200000000000UL	/* ref'd in a shadow table */
390 
391 /* Guest Page State used for virtualization */
392 #define _PGSTE_GPS_ZERO			0x0000000080000000UL
393 #define _PGSTE_GPS_NODAT		0x0000000040000000UL
394 #define _PGSTE_GPS_USAGE_MASK		0x0000000003000000UL
395 #define _PGSTE_GPS_USAGE_STABLE		0x0000000000000000UL
396 #define _PGSTE_GPS_USAGE_UNUSED		0x0000000001000000UL
397 #define _PGSTE_GPS_USAGE_POT_VOLATILE	0x0000000002000000UL
398 #define _PGSTE_GPS_USAGE_VOLATILE	_PGSTE_GPS_USAGE_MASK
399 
400 /*
401  * A user page table pointer has the space-switch-event bit, the
402  * private-space-control bit and the storage-alteration-event-control
403  * bit set. A kernel page table pointer doesn't need them.
404  */
405 #define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
406 				 _ASCE_ALT_EVENT)
407 
408 /*
409  * Page protection definitions.
410  */
411 #define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
412 #define PAGE_RO		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
413 				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
414 #define PAGE_RX		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
415 				 _PAGE_INVALID | _PAGE_PROTECT)
416 #define PAGE_RW		__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
417 				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
418 #define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
419 				 _PAGE_INVALID | _PAGE_PROTECT)
420 
421 #define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
422 				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
423 #define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
424 				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
425 #define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
426 				 _PAGE_PROTECT | _PAGE_NOEXEC)
427 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
428 				  _PAGE_YOUNG |	_PAGE_DIRTY)
429 
430 /*
431  * On s390 the page table entry has an invalid bit and a read-only bit.
432  * Read permission implies execute permission and write permission
433  * implies read permission.
434  */
436 
437 /*
438  * Segment entry (large page) protection definitions.
439  */
440 #define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
441 				 _SEGMENT_ENTRY_PROTECT)
442 #define SEGMENT_RO	__pgprot(_SEGMENT_ENTRY_PROTECT | \
443 				 _SEGMENT_ENTRY_READ | \
444 				 _SEGMENT_ENTRY_NOEXEC)
445 #define SEGMENT_RX	__pgprot(_SEGMENT_ENTRY_PROTECT | \
446 				 _SEGMENT_ENTRY_READ)
447 #define SEGMENT_RW	__pgprot(_SEGMENT_ENTRY_READ | \
448 				 _SEGMENT_ENTRY_WRITE | \
449 				 _SEGMENT_ENTRY_NOEXEC)
450 #define SEGMENT_RWX	__pgprot(_SEGMENT_ENTRY_READ | \
451 				 _SEGMENT_ENTRY_WRITE)
452 #define SEGMENT_KERNEL	__pgprot(_SEGMENT_ENTRY |	\
453 				 _SEGMENT_ENTRY_LARGE |	\
454 				 _SEGMENT_ENTRY_READ |	\
455 				 _SEGMENT_ENTRY_WRITE | \
456 				 _SEGMENT_ENTRY_YOUNG | \
457 				 _SEGMENT_ENTRY_DIRTY | \
458 				 _SEGMENT_ENTRY_NOEXEC)
459 #define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY |	\
460 				 _SEGMENT_ENTRY_LARGE |	\
461 				 _SEGMENT_ENTRY_READ |	\
462 				 _SEGMENT_ENTRY_YOUNG |	\
463 				 _SEGMENT_ENTRY_PROTECT | \
464 				 _SEGMENT_ENTRY_NOEXEC)
465 #define SEGMENT_KERNEL_EXEC __pgprot(_SEGMENT_ENTRY |	\
466 				 _SEGMENT_ENTRY_LARGE |	\
467 				 _SEGMENT_ENTRY_READ |	\
468 				 _SEGMENT_ENTRY_WRITE | \
469 				 _SEGMENT_ENTRY_YOUNG |	\
470 				 _SEGMENT_ENTRY_DIRTY)
471 
472 /*
473  * Region3 entry (large page) protection definitions.
474  */
475 
476 #define REGION3_KERNEL	__pgprot(_REGION_ENTRY_TYPE_R3 | \
477 				 _REGION3_ENTRY_LARGE |	 \
478 				 _REGION3_ENTRY_READ |	 \
479 				 _REGION3_ENTRY_WRITE |	 \
480 				 _REGION3_ENTRY_YOUNG |	 \
481 				 _REGION3_ENTRY_DIRTY | \
482 				 _REGION_ENTRY_NOEXEC)
483 #define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
484 				   _REGION3_ENTRY_LARGE |  \
485 				   _REGION3_ENTRY_READ |   \
486 				   _REGION3_ENTRY_YOUNG |  \
487 				   _REGION_ENTRY_PROTECT | \
488 				   _REGION_ENTRY_NOEXEC)
489 #define REGION3_KERNEL_EXEC __pgprot(_REGION_ENTRY_TYPE_R3 | \
490 				 _REGION3_ENTRY_LARGE |	 \
491 				 _REGION3_ENTRY_READ |	 \
492 				 _REGION3_ENTRY_WRITE |	 \
493 				 _REGION3_ENTRY_YOUNG |	 \
494 				 _REGION3_ENTRY_DIRTY)
495 
496 static inline bool mm_p4d_folded(struct mm_struct *mm)
497 {
498 	return mm->context.asce_limit <= _REGION1_SIZE;
499 }
500 #define mm_p4d_folded(mm) mm_p4d_folded(mm)
501 
502 static inline bool mm_pud_folded(struct mm_struct *mm)
503 {
504 	return mm->context.asce_limit <= _REGION2_SIZE;
505 }
506 #define mm_pud_folded(mm) mm_pud_folded(mm)
507 
508 static inline bool mm_pmd_folded(struct mm_struct *mm)
509 {
510 	return mm->context.asce_limit <= _REGION3_SIZE;
511 }
512 #define mm_pmd_folded(mm) mm_pmd_folded(mm)
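
/*
 * Folding sketch, assuming an mm with the default 3-level layout
 * (asce_limit == _REGION2_SIZE, i.e. 4TB): the p4d and pud levels are
 * folded and only the segment and page table levels are real tables.
 *
 *	mm_p4d_folded(mm)	-> 1	(4TB <= _REGION1_SIZE)
 *	mm_pud_folded(mm)	-> 1	(4TB <= _REGION2_SIZE)
 *	mm_pmd_folded(mm)	-> 0	(4TB >  _REGION3_SIZE)
 */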
513 
514 static inline int mm_has_pgste(struct mm_struct *mm)
515 {
516 #ifdef CONFIG_PGSTE
517 	if (unlikely(mm->context.has_pgste))
518 		return 1;
519 #endif
520 	return 0;
521 }
522 
523 static inline int mm_is_protected(struct mm_struct *mm)
524 {
525 #ifdef CONFIG_PGSTE
526 	if (unlikely(atomic_read(&mm->context.protected_count)))
527 		return 1;
528 #endif
529 	return 0;
530 }
531 
532 static inline int mm_alloc_pgste(struct mm_struct *mm)
533 {
534 #ifdef CONFIG_PGSTE
535 	if (unlikely(mm->context.alloc_pgste))
536 		return 1;
537 #endif
538 	return 0;
539 }
540 
541 static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
542 {
543 	return __pte(pte_val(pte) & ~pgprot_val(prot));
544 }
545 
546 static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
547 {
548 	return __pte(pte_val(pte) | pgprot_val(prot));
549 }
550 
551 static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
552 {
553 	return __pmd(pmd_val(pmd) & ~pgprot_val(prot));
554 }
555 
556 static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
557 {
558 	return __pmd(pmd_val(pmd) | pgprot_val(prot));
559 }
560 
561 static inline pud_t clear_pud_bit(pud_t pud, pgprot_t prot)
562 {
563 	return __pud(pud_val(pud) & ~pgprot_val(prot));
564 }
565 
566 static inline pud_t set_pud_bit(pud_t pud, pgprot_t prot)
567 {
568 	return __pud(pud_val(pud) | pgprot_val(prot));
569 }
570 
571 /*
572  * If a guest uses storage keys, faults should no longer be
573  * backed by zero pages.
574  */
575 #define mm_forbids_zeropage mm_has_pgste
576 static inline int mm_uses_skeys(struct mm_struct *mm)
577 {
578 #ifdef CONFIG_PGSTE
579 	if (mm->context.uses_skeys)
580 		return 1;
581 #endif
582 	return 0;
583 }
584 
585 static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
586 {
587 	union register_pair r1 = { .even = old, .odd = new, };
588 	unsigned long address = (unsigned long)ptr | 1;
589 
590 	asm volatile(
591 		"	csp	%[r1],%[address]"
592 		: [r1] "+&d" (r1.pair), "+m" (*ptr)
593 		: [address] "d" (address)
594 		: "cc");
595 }
596 
597 static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
598 {
599 	union register_pair r1 = { .even = old, .odd = new, };
600 	unsigned long address = (unsigned long)ptr | 1;
601 
602 	asm volatile(
603 		"	cspg	%[r1],%[address]"
604 		: [r1] "+&d" (r1.pair), "+m" (*ptr)
605 		: [address] "d" (address)
606 		: "cc");
607 }
608 
609 #define CRDTE_DTT_PAGE		0x00UL
610 #define CRDTE_DTT_SEGMENT	0x10UL
611 #define CRDTE_DTT_REGION3	0x14UL
612 #define CRDTE_DTT_REGION2	0x18UL
613 #define CRDTE_DTT_REGION1	0x1cUL
614 
615 static inline void crdte(unsigned long old, unsigned long new,
616 			 unsigned long *table, unsigned long dtt,
617 			 unsigned long address, unsigned long asce)
618 {
619 	union register_pair r1 = { .even = old, .odd = new, };
620 	union register_pair r2 = { .even = __pa(table) | dtt, .odd = address, };
621 
622 	asm volatile(".insn rrf,0xb98f0000,%[r1],%[r2],%[asce],0"
623 		     : [r1] "+&d" (r1.pair)
624 		     : [r2] "d" (r2.pair), [asce] "a" (asce)
625 		     : "memory", "cc");
626 }
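
/*
 * A hypothetical crdte() use (sketch, modeled on how the mm code uses the
 * instruction when the facility is installed): atomically exchange a
 * segment table entry and flush its TLB entries. Note that crdte expects
 * the table origin, not the address of the entry itself.
 */
static inline void example_crdte_pmd(pmd_t *pmdp, pmd_t new,
				     unsigned long addr, unsigned long asce)
{
	unsigned long *sto;

	sto = (unsigned long *)((unsigned long)pmdp &
				~(PTRS_PER_PMD * sizeof(pmd_t) - 1));
	crdte(pmd_val(*pmdp), pmd_val(new), sto, CRDTE_DTT_SEGMENT, addr, asce);
}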
627 
628 /*
629  * pgd/p4d/pud/pmd/pte query functions
630  */
631 static inline int pgd_folded(pgd_t pgd)
632 {
633 	return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
634 }
635 
636 static inline int pgd_present(pgd_t pgd)
637 {
638 	if (pgd_folded(pgd))
639 		return 1;
640 	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
641 }
642 
643 static inline int pgd_none(pgd_t pgd)
644 {
645 	if (pgd_folded(pgd))
646 		return 0;
647 	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
648 }
649 
650 static inline int pgd_bad(pgd_t pgd)
651 {
652 	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1)
653 		return 0;
654 	return (pgd_val(pgd) & ~_REGION_ENTRY_BITS) != 0;
655 }
656 
657 static inline unsigned long pgd_pfn(pgd_t pgd)
658 {
659 	unsigned long origin_mask;
660 
661 	origin_mask = _REGION_ENTRY_ORIGIN;
662 	return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT;
663 }
664 
665 static inline int p4d_folded(p4d_t p4d)
666 {
667 	return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
668 }
669 
670 static inline int p4d_present(p4d_t p4d)
671 {
672 	if (p4d_folded(p4d))
673 		return 1;
674 	return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
675 }
676 
677 static inline int p4d_none(p4d_t p4d)
678 {
679 	if (p4d_folded(p4d))
680 		return 0;
681 	return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
682 }
683 
684 static inline unsigned long p4d_pfn(p4d_t p4d)
685 {
686 	unsigned long origin_mask;
687 
688 	origin_mask = _REGION_ENTRY_ORIGIN;
689 	return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
690 }
691 
692 static inline int pud_folded(pud_t pud)
693 {
694 	return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
695 }
696 
697 static inline int pud_present(pud_t pud)
698 {
699 	if (pud_folded(pud))
700 		return 1;
701 	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
702 }
703 
704 static inline int pud_none(pud_t pud)
705 {
706 	if (pud_folded(pud))
707 		return 0;
708 	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
709 }
710 
711 #define pud_leaf	pud_large
712 static inline int pud_large(pud_t pud)
713 {
714 	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
715 		return 0;
716 	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
717 }
718 
719 #define pmd_leaf	pmd_large
720 static inline int pmd_large(pmd_t pmd)
721 {
722 	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
723 }
724 
725 static inline int pmd_bad(pmd_t pmd)
726 {
727 	if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_large(pmd))
728 		return 1;
729 	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
730 }
731 
732 static inline int pud_bad(pud_t pud)
733 {
734 	unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;
735 
736 	if (type > _REGION_ENTRY_TYPE_R3 || pud_large(pud))
737 		return 1;
738 	if (type < _REGION_ENTRY_TYPE_R3)
739 		return 0;
740 	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
741 }
742 
743 static inline int p4d_bad(p4d_t p4d)
744 {
745 	unsigned long type = p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK;
746 
747 	if (type > _REGION_ENTRY_TYPE_R2)
748 		return 1;
749 	if (type < _REGION_ENTRY_TYPE_R2)
750 		return 0;
751 	return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
752 }
753 
754 static inline int pmd_present(pmd_t pmd)
755 {
756 	return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
757 }
758 
759 static inline int pmd_none(pmd_t pmd)
760 {
761 	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
762 }
763 
764 #define pmd_write pmd_write
765 static inline int pmd_write(pmd_t pmd)
766 {
767 	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
768 }
769 
770 #define pud_write pud_write
771 static inline int pud_write(pud_t pud)
772 {
773 	return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
774 }
775 
776 static inline int pmd_dirty(pmd_t pmd)
777 {
778 	return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
779 }
780 
781 #define pmd_young pmd_young
782 static inline int pmd_young(pmd_t pmd)
783 {
784 	return (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
785 }
786 
787 static inline int pte_present(pte_t pte)
788 {
789 	/* Bit pattern: (pte & 0x001) == 0x001 */
790 	return (pte_val(pte) & _PAGE_PRESENT) != 0;
791 }
792 
793 static inline int pte_none(pte_t pte)
794 {
795 	/* Bit pattern: pte == 0x400 */
796 	return pte_val(pte) == _PAGE_INVALID;
797 }
798 
799 static inline int pte_swap(pte_t pte)
800 {
801 	/* Bit pattern: (pte & 0x201) == 0x200 */
802 	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
803 		== _PAGE_PROTECT;
804 }
805 
806 static inline int pte_special(pte_t pte)
807 {
808 	return (pte_val(pte) & _PAGE_SPECIAL);
809 }
810 
811 #define __HAVE_ARCH_PTE_SAME
812 static inline int pte_same(pte_t a, pte_t b)
813 {
814 	return pte_val(a) == pte_val(b);
815 }
816 
817 #ifdef CONFIG_NUMA_BALANCING
818 static inline int pte_protnone(pte_t pte)
819 {
820 	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
821 }
822 
823 static inline int pmd_protnone(pmd_t pmd)
824 {
825 	/* pmd_large(pmd) implies pmd_present(pmd) */
826 	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
827 }
828 #endif
829 
830 #define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
831 static inline int pte_swp_exclusive(pte_t pte)
832 {
833 	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
834 }
835 
836 static inline pte_t pte_swp_mkexclusive(pte_t pte)
837 {
838 	return set_pte_bit(pte, __pgprot(_PAGE_SWP_EXCLUSIVE));
839 }
840 
841 static inline pte_t pte_swp_clear_exclusive(pte_t pte)
842 {
843 	return clear_pte_bit(pte, __pgprot(_PAGE_SWP_EXCLUSIVE));
844 }
845 
846 static inline int pte_soft_dirty(pte_t pte)
847 {
848 	return pte_val(pte) & _PAGE_SOFT_DIRTY;
849 }
850 #define pte_swp_soft_dirty pte_soft_dirty
851 
852 static inline pte_t pte_mksoft_dirty(pte_t pte)
853 {
854 	return set_pte_bit(pte, __pgprot(_PAGE_SOFT_DIRTY));
855 }
856 #define pte_swp_mksoft_dirty pte_mksoft_dirty
857 
858 static inline pte_t pte_clear_soft_dirty(pte_t pte)
859 {
860 	return clear_pte_bit(pte, __pgprot(_PAGE_SOFT_DIRTY));
861 }
862 #define pte_swp_clear_soft_dirty pte_clear_soft_dirty
863 
864 static inline int pmd_soft_dirty(pmd_t pmd)
865 {
866 	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
867 }
868 
869 static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
870 {
871 	return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_SOFT_DIRTY));
872 }
873 
874 static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
875 {
876 	return clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_SOFT_DIRTY));
877 }
878 
879 /*
880  * The query functions pte_write/pte_dirty/pte_young only work if
881  * pte_present() is true. Undefined behaviour if not.
882  */
883 static inline int pte_write(pte_t pte)
884 {
885 	return (pte_val(pte) & _PAGE_WRITE) != 0;
886 }
887 
888 static inline int pte_dirty(pte_t pte)
889 {
890 	return (pte_val(pte) & _PAGE_DIRTY) != 0;
891 }
892 
893 static inline int pte_young(pte_t pte)
894 {
895 	return (pte_val(pte) & _PAGE_YOUNG) != 0;
896 }
897 
898 #define __HAVE_ARCH_PTE_UNUSED
899 static inline int pte_unused(pte_t pte)
900 {
901 	return pte_val(pte) & _PAGE_UNUSED;
902 }
903 
904 /*
905  * Extract the pgprot value from the given pte while at the same time making it
906  * usable for kernel address space mappings where fault driven dirty and
907  * young/old accounting is not supported, i.e. _PAGE_PROTECT and _PAGE_INVALID
908  * must not be set.
909  */
910 static inline pgprot_t pte_pgprot(pte_t pte)
911 {
912 	unsigned long pte_flags = pte_val(pte) & _PAGE_CHG_MASK;
913 
914 	if (pte_write(pte))
915 		pte_flags |= pgprot_val(PAGE_KERNEL);
916 	else
917 		pte_flags |= pgprot_val(PAGE_KERNEL_RO);
918 	pte_flags |= pte_val(pte) & mio_wb_bit_mask;
919 
920 	return __pgprot(pte_flags);
921 }
922 
923 /*
924  * pgd/pmd/pte modification functions
925  */
926 
927 static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
928 {
929 	WRITE_ONCE(*pgdp, pgd);
930 }
931 
932 static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
933 {
934 	WRITE_ONCE(*p4dp, p4d);
935 }
936 
937 static inline void set_pud(pud_t *pudp, pud_t pud)
938 {
939 	WRITE_ONCE(*pudp, pud);
940 }
941 
942 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
943 {
944 	WRITE_ONCE(*pmdp, pmd);
945 }
946 
947 static inline void set_pte(pte_t *ptep, pte_t pte)
948 {
949 	WRITE_ONCE(*ptep, pte);
950 }
951 
952 static inline void pgd_clear(pgd_t *pgd)
953 {
954 	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
955 		set_pgd(pgd, __pgd(_REGION1_ENTRY_EMPTY));
956 }
957 
958 static inline void p4d_clear(p4d_t *p4d)
959 {
960 	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
961 		set_p4d(p4d, __p4d(_REGION2_ENTRY_EMPTY));
962 }
963 
964 static inline void pud_clear(pud_t *pud)
965 {
966 	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
967 		set_pud(pud, __pud(_REGION3_ENTRY_EMPTY));
968 }
969 
970 static inline void pmd_clear(pmd_t *pmdp)
971 {
972 	set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
973 }
974 
975 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
976 {
977 	set_pte(ptep, __pte(_PAGE_INVALID));
978 }
979 
980 /*
981  * The following pte modification functions only work if
982  * pte_present() is true. Undefined behaviour if not.
983  */
984 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
985 {
986 	pte = clear_pte_bit(pte, __pgprot(~_PAGE_CHG_MASK));
987 	pte = set_pte_bit(pte, newprot);
988 	/*
989 	 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
990 	 * has the invalid bit set, clear it again for readable, young pages
991 	 */
992 	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
993 		pte = clear_pte_bit(pte, __pgprot(_PAGE_INVALID));
994 	/*
995 	 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
996 	 * protection bit set, clear it again for writable, dirty pages
997 	 */
998 	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
999 		pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
1000 	return pte;
1001 }
1002 
1003 static inline pte_t pte_wrprotect(pte_t pte)
1004 {
1005 	pte = clear_pte_bit(pte, __pgprot(_PAGE_WRITE));
1006 	return set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
1007 }
1008 
1009 static inline pte_t pte_mkwrite(pte_t pte)
1010 {
1011 	pte = set_pte_bit(pte, __pgprot(_PAGE_WRITE));
1012 	if (pte_val(pte) & _PAGE_DIRTY)
1013 		pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
1014 	return pte;
1015 }
1016 
1017 static inline pte_t pte_mkclean(pte_t pte)
1018 {
1019 	pte = clear_pte_bit(pte, __pgprot(_PAGE_DIRTY));
1020 	return set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
1021 }
1022 
1023 static inline pte_t pte_mkdirty(pte_t pte)
1024 {
1025 	pte = set_pte_bit(pte, __pgprot(_PAGE_DIRTY | _PAGE_SOFT_DIRTY));
1026 	if (pte_val(pte) & _PAGE_WRITE)
1027 		pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
1028 	return pte;
1029 }
1030 
1031 static inline pte_t pte_mkold(pte_t pte)
1032 {
1033 	pte = clear_pte_bit(pte, __pgprot(_PAGE_YOUNG));
1034 	return set_pte_bit(pte, __pgprot(_PAGE_INVALID));
1035 }
1036 
1037 static inline pte_t pte_mkyoung(pte_t pte)
1038 {
1039 	pte = set_pte_bit(pte, __pgprot(_PAGE_YOUNG));
1040 	if (pte_val(pte) & _PAGE_READ)
1041 		pte = clear_pte_bit(pte, __pgprot(_PAGE_INVALID));
1042 	return pte;
1043 }
1044 
1045 static inline pte_t pte_mkspecial(pte_t pte)
1046 {
1047 	return set_pte_bit(pte, __pgprot(_PAGE_SPECIAL));
1048 }
1049 
1050 #ifdef CONFIG_HUGETLB_PAGE
1051 static inline pte_t pte_mkhuge(pte_t pte)
1052 {
1053 	return set_pte_bit(pte, __pgprot(_PAGE_LARGE));
1054 }
1055 #endif
1056 
1057 #define IPTE_GLOBAL	0
1058 #define IPTE_LOCAL	1
1059 
1060 #define IPTE_NODAT	0x400
1061 #define IPTE_GUEST_ASCE	0x800
1062 
1063 static __always_inline void __ptep_rdp(unsigned long addr, pte_t *ptep,
1064 				       unsigned long opt, unsigned long asce,
1065 				       int local)
1066 {
1067 	unsigned long pto;
1068 
1069 	pto = __pa(ptep) & ~(PTRS_PER_PTE * sizeof(pte_t) - 1);
1070 	asm volatile(".insn rrf,0xb98b0000,%[r1],%[r2],%[asce],%[m4]"
1071 		     : "+m" (*ptep)
1072 		     : [r1] "a" (pto), [r2] "a" ((addr & PAGE_MASK) | opt),
1073 		       [asce] "a" (asce), [m4] "i" (local));
1074 }
1075 
1076 static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
1077 					unsigned long opt, unsigned long asce,
1078 					int local)
1079 {
1080 	unsigned long pto = __pa(ptep);
1081 
1082 	if (__builtin_constant_p(opt) && opt == 0) {
1083 		/* Invalidation + TLB flush for the pte */
1084 		asm volatile(
1085 			"	ipte	%[r1],%[r2],0,%[m4]"
1086 			: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
1087 			  [m4] "i" (local));
1088 		return;
1089 	}
1090 
1091 	/* Invalidate ptes with options + TLB flush of the ptes */
1092 	opt = opt | (asce & _ASCE_ORIGIN);
1093 	asm volatile(
1094 		"	ipte	%[r1],%[r2],%[r3],%[m4]"
1095 		: [r2] "+a" (address), [r3] "+a" (opt)
1096 		: [r1] "a" (pto), [m4] "i" (local) : "memory");
1097 }
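
/*
 * Usage sketch (hypothetical helper): invalidate a single pte and flush it
 * from the TLBs of all CPUs, as the pte exchange paths are expected to do
 * when no IPTE options are needed.
 */
static inline void example_ipte_global(unsigned long addr, pte_t *ptep)
{
	__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
}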
1098 
1099 static __always_inline void __ptep_ipte_range(unsigned long address, int nr,
1100 					      pte_t *ptep, int local)
1101 {
1102 	unsigned long pto = __pa(ptep);
1103 
1104 	/* Invalidate a range of ptes + TLB flush of the ptes */
1105 	do {
1106 		asm volatile(
1107 			"	ipte %[r1],%[r2],%[r3],%[m4]"
1108 			: [r2] "+a" (address), [r3] "+a" (nr)
1109 			: [r1] "a" (pto), [m4] "i" (local) : "memory");
1110 	} while (nr != 255);
1111 }
1112 
1113 /*
1114  * Note that ptep_get_and_clear and ptep_clear_flush both clear the
1115  * TLB for the unmapped pte. The reason is that
1116  * ptep_get_and_clear is used in common code (e.g. change_pte_range)
1117  * to modify an active pte. The sequence is
1118  *   1) ptep_get_and_clear
1119  *   2) set_pte_at
1120  *   3) flush_tlb_range
1121  * On s390 the tlb needs to get flushed with the modification of the pte
1122  * if the pte is active. The only way this can be implemented is to
1123  * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
1124  * is a nop.
1125  */
1126 pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
1127 pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);
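
/*
 * Sketch of the common-code sequence described above (simplified, assuming
 * a single pte; the names are those of the generic mm interfaces):
 *
 *	pte = ptep_get_and_clear(mm, addr, ptep);	// flushes the TLB on s390
 *	set_pte_at(mm, addr, ptep, pte_modify(pte, newprot));
 *	flush_tlb_range(vma, start, end);		// nop on s390
 */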
1128 
1129 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
1130 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
1131 					    unsigned long addr, pte_t *ptep)
1132 {
1133 	pte_t pte = *ptep;
1134 
1135 	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
1136 	return pte_young(pte);
1137 }
1138 
1139 #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
1140 static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
1141 					 unsigned long address, pte_t *ptep)
1142 {
1143 	return ptep_test_and_clear_young(vma, address, ptep);
1144 }
1145 
1146 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
1147 static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
1148 				       unsigned long addr, pte_t *ptep)
1149 {
1150 	pte_t res;
1151 
1152 	res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
1153 	/* At this point the reference through the mapping is still present */
1154 	if (mm_is_protected(mm) && pte_present(res))
1155 		uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
1156 	return res;
1157 }
1158 
1159 #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
1160 pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
1161 void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
1162 			     pte_t *, pte_t, pte_t);
1163 
1164 #define __HAVE_ARCH_PTEP_CLEAR_FLUSH
1165 static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
1166 				     unsigned long addr, pte_t *ptep)
1167 {
1168 	pte_t res;
1169 
1170 	res = ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
1171 	/* At this point the reference through the mapping is still present */
1172 	if (mm_is_protected(vma->vm_mm) && pte_present(res))
1173 		uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
1174 	return res;
1175 }
1176 
1177 /*
1178  * The batched pte unmap code uses ptep_get_and_clear_full to clear the
1179  * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
1180  * tlbs of an mm if it can guarantee that the ptes of the mm_struct
1181  * cannot be accessed while the batched unmap is running. In this case
1182  * full==1 and a simple pte_clear is enough. See tlb.h.
1183  */
1184 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
1185 static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
1186 					    unsigned long addr,
1187 					    pte_t *ptep, int full)
1188 {
1189 	pte_t res;
1190 
1191 	if (full) {
1192 		res = *ptep;
1193 		set_pte(ptep, __pte(_PAGE_INVALID));
1194 	} else {
1195 		res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
1196 	}
1197 	/* Nothing to do */
1198 	if (!mm_is_protected(mm) || !pte_present(res))
1199 		return res;
1200 	/*
1201 	 * At this point the reference through the mapping is still present.
1202 	 * The notifier should have destroyed all protected vCPUs at this
1203 	 * point, so the destroy should be successful.
1204 	 */
1205 	if (full && !uv_destroy_owned_page(pte_val(res) & PAGE_MASK))
1206 		return res;
1207 	/*
1208 	 * If something went wrong and the page could not be destroyed, or
1209 	 * if this is not a mm teardown, the slower export is used as
1210 	 * fallback instead.
1211 	 */
1212 	uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
1213 	return res;
1214 }
1215 
1216 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
1217 static inline void ptep_set_wrprotect(struct mm_struct *mm,
1218 				      unsigned long addr, pte_t *ptep)
1219 {
1220 	pte_t pte = *ptep;
1221 
1222 	if (pte_write(pte))
1223 		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
1224 }
1225 
1226 /*
1227  * Check if PTEs only differ in the _PAGE_PROTECT HW bit; SW PTE bits are
1228  * also allowed to differ in the comparison. Those might change e.g.
1229  * because of dirty and young tracking.
1230  */
1231 static inline int pte_allow_rdp(pte_t old, pte_t new)
1232 {
1233 	/*
1234 	 * Only allow changes from RO to RW
1235 	 */
1236 	if (!(pte_val(old) & _PAGE_PROTECT) || pte_val(new) & _PAGE_PROTECT)
1237 		return 0;
1238 
1239 	return (pte_val(old) & _PAGE_RDP_MASK) == (pte_val(new) & _PAGE_RDP_MASK);
1240 }
1241 
1242 static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
1243 						unsigned long address)
1244 {
1245 	/*
1246 	 * RDP might not have propagated the PTE protection reset to all CPUs,
1247 	 * so there could be spurious TLB protection faults.
1248 	 * NOTE: This will also be called when a racing pagetable update on
1249 	 * another thread already installed the correct PTE. Both cases cannot
1250 	 * really be distinguished.
1251 	 * Therefore, only do the local TLB flush when RDP can be used, to avoid
1252 	 * unnecessary overhead.
1253 	 */
1254 	if (MACHINE_HAS_RDP)
1255 		asm volatile("ptlb" : : : "memory");
1256 }
1257 #define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault
1258 
1259 void ptep_reset_dat_prot(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
1260 			 pte_t new);
1261 
1262 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
1263 static inline int ptep_set_access_flags(struct vm_area_struct *vma,
1264 					unsigned long addr, pte_t *ptep,
1265 					pte_t entry, int dirty)
1266 {
1267 	if (pte_same(*ptep, entry))
1268 		return 0;
1269 	if (MACHINE_HAS_RDP && !mm_has_pgste(vma->vm_mm) && pte_allow_rdp(*ptep, entry))
1270 		ptep_reset_dat_prot(vma->vm_mm, addr, ptep, entry);
1271 	else
1272 		ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
1273 	return 1;
1274 }
1275 
1276 /*
1277  * Additional functions to handle KVM guest page tables
1278  */
1279 void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
1280 		     pte_t *ptep, pte_t entry);
1281 void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
1282 void ptep_notify(struct mm_struct *mm, unsigned long addr,
1283 		 pte_t *ptep, unsigned long bits);
1284 int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
1285 		    pte_t *ptep, int prot, unsigned long bit);
1286 void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
1287 		     pte_t *ptep, int reset);
1288 void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
1289 int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
1290 		    pte_t *sptep, pte_t *tptep, pte_t pte);
1291 void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);
1292 
1293 bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address,
1294 			    pte_t *ptep);
1295 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
1296 			  unsigned char key, bool nq);
1297 int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
1298 			       unsigned char key, unsigned char *oldkey,
1299 			       bool nq, bool mr, bool mc);
1300 int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
1301 int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
1302 			  unsigned char *key);
1303 
1304 int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
1305 				unsigned long bits, unsigned long value);
1306 int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
1307 int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
1308 			unsigned long *oldpte, unsigned long *oldpgste);
1309 void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr);
1310 void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
1311 void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
1312 void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);
1313 
1314 #define pgprot_writecombine	pgprot_writecombine
1315 pgprot_t pgprot_writecombine(pgprot_t prot);
1316 
1317 #define pgprot_writethrough	pgprot_writethrough
1318 pgprot_t pgprot_writethrough(pgprot_t prot);
1319 
1320 /*
1321  * Certain architectures need to do special things when PTEs
1322  * within a page table are directly modified.  Thus, the following
1323  * hook is made available.
1324  */
1325 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
1326 			      pte_t *ptep, pte_t entry)
1327 {
1328 	if (pte_present(entry))
1329 		entry = clear_pte_bit(entry, __pgprot(_PAGE_UNUSED));
1330 	if (mm_has_pgste(mm))
1331 		ptep_set_pte_at(mm, addr, ptep, entry);
1332 	else
1333 		set_pte(ptep, entry);
1334 }
1335 
1336 /*
1337  * Conversion functions: convert a page and protection to a page entry,
1338  * and a page entry and page directory to the page they refer to.
1339  */
1340 static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
1341 {
1342 	pte_t __pte;
1343 
1344 	__pte = __pte(physpage | pgprot_val(pgprot));
1345 	if (!MACHINE_HAS_NX)
1346 		__pte = clear_pte_bit(__pte, __pgprot(_PAGE_NOEXEC));
1347 	return pte_mkyoung(__pte);
1348 }
1349 
1350 static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
1351 {
1352 	unsigned long physpage = page_to_phys(page);
1353 	pte_t __pte = mk_pte_phys(physpage, pgprot);
1354 
1355 	if (pte_write(__pte) && PageDirty(page))
1356 		__pte = pte_mkdirty(__pte);
1357 	return __pte;
1358 }
1359 
1360 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
1361 #define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
1362 #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
1363 #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
1364 
1365 #define p4d_deref(p4d) ((unsigned long)__va(p4d_val(p4d) & _REGION_ENTRY_ORIGIN))
1366 #define pgd_deref(pgd) ((unsigned long)__va(pgd_val(pgd) & _REGION_ENTRY_ORIGIN))
1367 
1368 static inline unsigned long pmd_deref(pmd_t pmd)
1369 {
1370 	unsigned long origin_mask;
1371 
1372 	origin_mask = _SEGMENT_ENTRY_ORIGIN;
1373 	if (pmd_large(pmd))
1374 		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
1375 	return (unsigned long)__va(pmd_val(pmd) & origin_mask);
1376 }
1377 
1378 static inline unsigned long pmd_pfn(pmd_t pmd)
1379 {
1380 	return __pa(pmd_deref(pmd)) >> PAGE_SHIFT;
1381 }
1382 
1383 static inline unsigned long pud_deref(pud_t pud)
1384 {
1385 	unsigned long origin_mask;
1386 
1387 	origin_mask = _REGION_ENTRY_ORIGIN;
1388 	if (pud_large(pud))
1389 		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
1390 	return (unsigned long)__va(pud_val(pud) & origin_mask);
1391 }
1392 
1393 static inline unsigned long pud_pfn(pud_t pud)
1394 {
1395 	return __pa(pud_deref(pud)) >> PAGE_SHIFT;
1396 }
1397 
1398 /*
1399  * The pgd_offset function *always* adds the index for the top-level
1400  * region/segment table. This is done to get a sequence like the
1401  * following to work:
1402  *	pgdp = pgd_offset(current->mm, addr);
1403  *	pgd = READ_ONCE(*pgdp);
1404  *	p4dp = p4d_offset(&pgd, addr);
1405  *	...
1406  * The subsequent p4d_offset, pud_offset and pmd_offset functions
1407  * only add an index if they dereferenced the pointer.
1408  */
1409 static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
1410 {
1411 	unsigned long rste;
1412 	unsigned int shift;
1413 
1414 	/* Get the first entry of the top level table */
1415 	rste = pgd_val(*pgd);
1416 	/* Pick up the shift from the table type of the first entry */
1417 	shift = ((rste & _REGION_ENTRY_TYPE_MASK) >> 2) * 11 + 20;
1418 	return pgd + ((address >> shift) & (PTRS_PER_PGD - 1));
1419 }
1420 
1421 #define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)
1422 
1423 static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address)
1424 {
1425 	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
1426 		return (p4d_t *) pgd_deref(pgd) + p4d_index(address);
1427 	return (p4d_t *) pgdp;
1428 }
1429 #define p4d_offset_lockless p4d_offset_lockless
1430 
1431 static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address)
1432 {
1433 	return p4d_offset_lockless(pgdp, *pgdp, address);
1434 }
1435 
1436 static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address)
1437 {
1438 	if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
1439 		return (pud_t *) p4d_deref(p4d) + pud_index(address);
1440 	return (pud_t *) p4dp;
1441 }
1442 #define pud_offset_lockless pud_offset_lockless
1443 
1444 static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address)
1445 {
1446 	return pud_offset_lockless(p4dp, *p4dp, address);
1447 }
1448 #define pud_offset pud_offset
1449 
1450 static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address)
1451 {
1452 	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
1453 		return (pmd_t *) pud_deref(pud) + pmd_index(address);
1454 	return (pmd_t *) pudp;
1455 }
1456 #define pmd_offset_lockless pmd_offset_lockless
1457 
1458 static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address)
1459 {
1460 	return pmd_offset_lockless(pudp, *pudp, address);
1461 }
1462 #define pmd_offset pmd_offset
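
/*
 * A minimal software walk (sketch only, without the READ_ONCE() snapshots
 * and pXd_none/pXd_bad checks real users need) showing how the offset
 * helpers above chain together down to the pte:
 */
static inline pte_t *example_walk_to_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgdp = pgd_offset(mm, addr);
	p4d_t *p4dp = p4d_offset(pgdp, addr);
	pud_t *pudp = pud_offset(p4dp, addr);
	pmd_t *pmdp = pmd_offset(pudp, addr);

	return (pte_t *)pmd_deref(*pmdp) +
	       ((addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}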
1463 
1464 static inline unsigned long pmd_page_vaddr(pmd_t pmd)
1465 {
1466 	return (unsigned long) pmd_deref(pmd);
1467 }
1468 
1469 static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
1470 {
1471 	return end <= current->mm->context.asce_limit;
1472 }
1473 #define gup_fast_permitted gup_fast_permitted
1474 
1475 #define pfn_pte(pfn, pgprot)	mk_pte_phys(((pfn) << PAGE_SHIFT), (pgprot))
1476 #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
1477 #define pte_page(x) pfn_to_page(pte_pfn(x))
1478 
1479 #define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
1480 #define pud_page(pud) pfn_to_page(pud_pfn(pud))
1481 #define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
1482 #define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))
1483 
1484 static inline pmd_t pmd_wrprotect(pmd_t pmd)
1485 {
1486 	pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_WRITE));
1487 	return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
1488 }
1489 
1490 static inline pmd_t pmd_mkwrite(pmd_t pmd)
1491 {
1492 	pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_WRITE));
1493 	if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)
1494 		pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
1495 	return pmd;
1496 }
1497 
1498 static inline pmd_t pmd_mkclean(pmd_t pmd)
1499 {
1500 	pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_DIRTY));
1501 	return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
1502 }
1503 
1504 static inline pmd_t pmd_mkdirty(pmd_t pmd)
1505 {
1506 	pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY));
1507 	if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
1508 		pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
1509 	return pmd;
1510 }
1511 
1512 static inline pud_t pud_wrprotect(pud_t pud)
1513 {
1514 	pud = clear_pud_bit(pud, __pgprot(_REGION3_ENTRY_WRITE));
1515 	return set_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
1516 }
1517 
1518 static inline pud_t pud_mkwrite(pud_t pud)
1519 {
1520 	pud = set_pud_bit(pud, __pgprot(_REGION3_ENTRY_WRITE));
1521 	if (pud_val(pud) & _REGION3_ENTRY_DIRTY)
1522 		pud = clear_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
1523 	return pud;
1524 }
1525 
1526 static inline pud_t pud_mkclean(pud_t pud)
1527 {
1528 	pud = clear_pud_bit(pud, __pgprot(_REGION3_ENTRY_DIRTY));
1529 	return set_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
1530 }
1531 
1532 static inline pud_t pud_mkdirty(pud_t pud)
1533 {
1534 	pud = set_pud_bit(pud, __pgprot(_REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY));
1535 	if (pud_val(pud) & _REGION3_ENTRY_WRITE)
1536 		pud = clear_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
1537 	return pud;
1538 }
1539 
1540 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
1541 static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
1542 {
1543 	/*
1544 	 * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
1545 	 * (see __Pxxx / __Sxxx). Convert to segment table entry format.
1546 	 */
1547 	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
1548 		return pgprot_val(SEGMENT_NONE);
1549 	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
1550 		return pgprot_val(SEGMENT_RO);
1551 	if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
1552 		return pgprot_val(SEGMENT_RX);
1553 	if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
1554 		return pgprot_val(SEGMENT_RW);
1555 	return pgprot_val(SEGMENT_RWX);
1556 }
1557 
1558 static inline pmd_t pmd_mkyoung(pmd_t pmd)
1559 {
1560 	pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
1561 	if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
1562 		pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
1563 	return pmd;
1564 }
1565 
1566 static inline pmd_t pmd_mkold(pmd_t pmd)
1567 {
1568 	pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
1569 	return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
1570 }
1571 
1572 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
1573 {
1574 	unsigned long mask;
1575 
1576 	mask  = _SEGMENT_ENTRY_ORIGIN_LARGE;
1577 	mask |= _SEGMENT_ENTRY_DIRTY;
1578 	mask |= _SEGMENT_ENTRY_YOUNG;
1579 	mask |=	_SEGMENT_ENTRY_LARGE;
1580 	mask |= _SEGMENT_ENTRY_SOFT_DIRTY;
1581 	pmd = __pmd(pmd_val(pmd) & mask);
1582 	pmd = set_pmd_bit(pmd, __pgprot(massage_pgprot_pmd(newprot)));
1583 	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
1584 		pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
1585 	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
1586 		pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
1587 	return pmd;
1588 }
1589 
1590 static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
1591 {
1592 	return __pmd(physpage + massage_pgprot_pmd(pgprot));
1593 }
1594 
1595 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
1596 
1597 static inline void __pmdp_csp(pmd_t *pmdp)
1598 {
1599 	csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
1600 	    pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
1601 }
1602 
1603 #define IDTE_GLOBAL	0
1604 #define IDTE_LOCAL	1
1605 
1606 #define IDTE_PTOA	0x0800
1607 #define IDTE_NODAT	0x1000
1608 #define IDTE_GUEST_ASCE	0x2000
1609 
1610 static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
1611 					unsigned long opt, unsigned long asce,
1612 					int local)
1613 {
1614 	unsigned long sto;
1615 
1616 	sto = __pa(pmdp) - pmd_index(addr) * sizeof(pmd_t);
1617 	if (__builtin_constant_p(opt) && opt == 0) {
1618 		/* flush without guest asce */
1619 		asm volatile(
1620 			"	idte	%[r1],0,%[r2],%[m4]"
1621 			: "+m" (*pmdp)
1622 			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
1623 			  [m4] "i" (local)
1624 			: "cc" );
1625 	} else {
1626 		/* flush with guest asce */
1627 		asm volatile(
1628 			"	idte	%[r1],%[r3],%[r2],%[m4]"
1629 			: "+m" (*pmdp)
1630 			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
1631 			  [r3] "a" (asce), [m4] "i" (local)
1632 			: "cc" );
1633 	}
1634 }
1635 
1636 static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp,
1637 					unsigned long opt, unsigned long asce,
1638 					int local)
1639 {
1640 	unsigned long r3o;
1641 
1642 	r3o = __pa(pudp) - pud_index(addr) * sizeof(pud_t);
1643 	r3o |= _ASCE_TYPE_REGION3;
1644 	if (__builtin_constant_p(opt) && opt == 0) {
1645 		/* flush without guest asce */
1646 		asm volatile(
1647 			"	idte	%[r1],0,%[r2],%[m4]"
1648 			: "+m" (*pudp)
1649 			: [r1] "a" (r3o), [r2] "a" (addr & PUD_MASK),
1650 			  [m4] "i" (local)
1651 			: "cc");
1652 	} else {
1653 		/* flush with guest asce */
1654 		asm volatile(
1655 			"	idte	%[r1],%[r3],%[r2],%[m4]"
1656 			: "+m" (*pudp)
1657 			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
1658 			  [r3] "a" (asce), [m4] "i" (local)
1659 			: "cc");
1660 	}
1661 }
1662 
1663 pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
1664 pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
1665 pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);
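/*
 * Exchange a pmd/pud and flush the TLB (implemented in
 * arch/s390/mm/pgtable.c). The _direct variants flush immediately,
 * while the _lazy variant may defer the flush if the mm is not
 * attached to other CPUs.
 */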
1666 
1667 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1668 
1669 #define __HAVE_ARCH_PGTABLE_DEPOSIT
1670 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
1671 				pgtable_t pgtable);
1672 
1673 #define __HAVE_ARCH_PGTABLE_WITHDRAW
1674 pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
1675 
1676 #define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
1677 static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
1678 					unsigned long addr, pmd_t *pmdp,
1679 					pmd_t entry, int dirty)
1680 {
1681 	VM_BUG_ON(addr & ~HPAGE_MASK);
1682 
1683 	entry = pmd_mkyoung(entry);
1684 	if (dirty)
1685 		entry = pmd_mkdirty(entry);
1686 	if (pmd_val(*pmdp) == pmd_val(entry))
1687 		return 0;
1688 	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
1689 	return 1;
1690 }
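/*
 * The entry is only replaced (with an immediate flush through
 * pmdp_xchg_direct()) if the new young/dirty state differs from what
 * is already installed; for a spurious fault nothing is done and 0
 * is returned.
 */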
1691 
1692 #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
1693 static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
1694 					    unsigned long addr, pmd_t *pmdp)
1695 {
1696 	pmd_t pmd = *pmdp;
1697 
1698 	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
1699 	return pmd_young(pmd);
1700 }
1701 
1702 #define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
1703 static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
1704 					 unsigned long addr, pmd_t *pmdp)
1705 {
1706 	VM_BUG_ON(addr & ~HPAGE_MASK);
1707 	return pmdp_test_and_clear_young(vma, addr, pmdp);
1708 }
1709 
1710 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
1711 			      pmd_t *pmdp, pmd_t entry)
1712 {
1713 	if (!MACHINE_HAS_NX)
1714 		entry = clear_pmd_bit(entry, __pgprot(_SEGMENT_ENTRY_NOEXEC));
1715 	set_pmd(pmdp, entry);
1716 }
1717 
1718 static inline pmd_t pmd_mkhuge(pmd_t pmd)
1719 {
1720 	pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_LARGE));
1721 	pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
1722 	return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
1723 }
1724 
1725 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
1726 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
1727 					    unsigned long addr, pmd_t *pmdp)
1728 {
1729 	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
1730 }
1731 
1732 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
1733 static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
1734 						 unsigned long addr,
1735 						 pmd_t *pmdp, int full)
1736 {
1737 	if (full) {
1738 		pmd_t pmd = *pmdp;
1739 		set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
1740 		return pmd;
1741 	}
1742 	return pmdp_xchg_lazy(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
1743 }
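/*
 * With full set the caller is tearing down the complete address
 * space, so the entry can be cleared without an immediate TLB flush;
 * otherwise the lazy exchange variant is used.
 */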
1744 
1745 #define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
1746 static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
1747 					  unsigned long addr, pmd_t *pmdp)
1748 {
1749 	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
1750 }
1751 
1752 #define __HAVE_ARCH_PMDP_INVALIDATE
1753 static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
1754 				    unsigned long addr, pmd_t *pmdp)
1755 {
1756 	pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
1757 
1758 	return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
1759 }
1760 
1761 #define __HAVE_ARCH_PMDP_SET_WRPROTECT
1762 static inline void pmdp_set_wrprotect(struct mm_struct *mm,
1763 				      unsigned long addr, pmd_t *pmdp)
1764 {
1765 	pmd_t pmd = *pmdp;
1766 
1767 	if (pmd_write(pmd))
1768 		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
1769 }
1770 
1771 static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
1772 					unsigned long address,
1773 					pmd_t *pmdp)
1774 {
1775 	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
1776 }
1777 #define pmdp_collapse_flush pmdp_collapse_flush
1778 
1779 #define pfn_pmd(pfn, pgprot)	mk_pmd_phys(((pfn) << PAGE_SHIFT), (pgprot))
1780 #define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))
1781 
1782 static inline int pmd_trans_huge(pmd_t pmd)
1783 {
1784 	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
1785 }
1786 
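/*
 * Transparent huge pages require EDAT1 (enhanced DAT), which
 * provides hardware 1 MB pages at the segment table level.
 */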
1787 #define has_transparent_hugepage has_transparent_hugepage
1788 static inline int has_transparent_hugepage(void)
1789 {
1790 	return MACHINE_HAS_EDAT1 ? 1 : 0;
1791 }
1792 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1793 
1794 /*
1795  * 64 bit swap entry format:
1796  * A page-table entry has some bits we have to treat in a special way.
1797  * Bits 54 and 63 are used to indicate the page type (present vs. swap);
1798  * bit 53 marks the pte as invalid. A swap pte is indicated by the bit
1799  * pattern (pte & 0x201) == 0x200, i.e. bit 54 set and bit 63 clear:
1800  * |			  offset			|E11XX|type |S0|
1801  * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
1802  * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
1803  *
1804  * Bits 0-51 store the offset.
1805  * Bit 52 (E) is used to remember PG_anon_exclusive.
1806  * Bits 55 and 56 (X) are unused.
1807  * Bits 57-61 store the swap type.
1808  * Bit 62 (S) is used for softdirty tracking.
1809  */
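/*
 * Worked example with made-up values (assuming _PAGE_INVALID and
 * _PAGE_PROTECT are bits 53 and 54, i.e. 0x400 and 0x200):
 *
 *	mk_swap_pte(3, 0x1234)
 *		= 0x600 | (0x1234UL << 12) | (3UL << 2)
 *		= 0x000000000123460c
 *
 * from which __swp_type() and __swp_offset() recover 3 and 0x1234.
 */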
1810 
1811 #define __SWP_OFFSET_MASK	((1UL << 52) - 1)
1812 #define __SWP_OFFSET_SHIFT	12
1813 #define __SWP_TYPE_MASK		((1UL << 5) - 1)
1814 #define __SWP_TYPE_SHIFT	2
1815 
1816 static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
1817 {
1818 	unsigned long pteval;
1819 
1820 	pteval = _PAGE_INVALID | _PAGE_PROTECT;
1821 	pteval |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
1822 	pteval |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
1823 	return __pte(pteval);
1824 }
1825 
1826 static inline unsigned long __swp_type(swp_entry_t entry)
1827 {
1828 	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
1829 }
1830 
1831 static inline unsigned long __swp_offset(swp_entry_t entry)
1832 {
1833 	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
1834 }
1835 
1836 static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
1837 {
1838 	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
1839 }
1840 
1841 #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
1842 #define __swp_entry_to_pte(x)	((pte_t) { (x).val })
1843 
1844 extern int vmem_add_mapping(unsigned long start, unsigned long size);
1845 extern void vmem_remove_mapping(unsigned long start, unsigned long size);
1846 extern int __vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot, bool alloc);
1847 extern int vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot);
1848 extern void vmem_unmap_4k_page(unsigned long addr);
1849 extern pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc);
1850 extern int s390_enable_sie(void);
1851 extern int s390_enable_skey(void);
1852 extern void s390_reset_cmma(struct mm_struct *mm);
1853 
1854 /* s390 has a private copy of get unmapped area to deal with cache synonyms */
1855 #define HAVE_ARCH_UNMAPPED_AREA
1856 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
1857 
1858 #define pmd_pgtable(pmd) \
1859 	((pgtable_t)__va(pmd_val(pmd) & -sizeof(pte_t) * PTRS_PER_PTE))
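/*
 * sizeof(pte_t) * PTRS_PER_PTE is the size of a page table (2 KB on
 * s390), so the negated value masks off the low 11 bits and leaves
 * the page-table origin stored in the segment table entry.
 */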
1860 
1861 #endif /* _ASM_S390_PGTABLE_H */
1862