xref: /openbmc/linux/arch/s390/include/asm/pgtable.h (revision 4ee57308)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  *  S390 version
4  *    Copyright IBM Corp. 1999, 2000
5  *    Author(s): Hartmut Penner (hp@de.ibm.com)
6  *               Ulrich Weigand (weigand@de.ibm.com)
7  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
8  *
9  *  Derived from "include/asm-i386/pgtable.h"
10  */
11 
12 #ifndef _ASM_S390_PGTABLE_H
13 #define _ASM_S390_PGTABLE_H
14 
15 #include <linux/sched.h>
16 #include <linux/mm_types.h>
17 #include <linux/page-flags.h>
18 #include <linux/radix-tree.h>
19 #include <linux/atomic.h>
20 #include <asm/bug.h>
21 #include <asm/page.h>
22 #include <asm/uv.h>
23 
24 extern pgd_t swapper_pg_dir[];
25 extern void paging_init(void);
26 
27 enum {
28 	PG_DIRECT_MAP_4K = 0,
29 	PG_DIRECT_MAP_1M,
30 	PG_DIRECT_MAP_2G,
31 	PG_DIRECT_MAP_MAX
32 };
33 
34 extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];
35 
36 static inline void update_page_count(int level, long count)
37 {
38 	if (IS_ENABLED(CONFIG_PROC_FS))
39 		atomic_long_add(count, &direct_pages_count[level]);
40 }
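
/*
 * Illustrative example (hypothetical caller, not part of the original
 * header): splitting one 1M direct mapping into 4K mappings would
 * adjust the counters like this (a 1M segment covers 256 4K pages):
 *
 *	update_page_count(PG_DIRECT_MAP_1M, -1);
 *	update_page_count(PG_DIRECT_MAP_4K, 256);
 */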
41 
42 struct seq_file;
43 void arch_report_meminfo(struct seq_file *m);
44 
45 /*
46  * The S390 doesn't have any external MMU info: the kernel page
47  * tables contain all the necessary information.
48  */
49 #define update_mmu_cache(vma, address, ptep)     do { } while (0)
50 #define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)
51 
52 /*
53  * ZERO_PAGE is a global shared page that is always zero; used
54  * for zero-mapped memory areas etc..
55  */
56 
57 extern unsigned long empty_zero_page;
58 extern unsigned long zero_page_mask;
59 
60 #define ZERO_PAGE(vaddr) \
61 	(virt_to_page((void *)(empty_zero_page + \
62 	 (((unsigned long)(vaddr)) & zero_page_mask))))
63 #define __HAVE_COLOR_ZERO_PAGE
64 
65 /* TODO: s390 cannot support io_remap_pfn_range... */
66 
67 #define FIRST_USER_ADDRESS  0UL
68 
69 #define pte_ERROR(e) \
70 	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
71 #define pmd_ERROR(e) \
72 	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
73 #define pud_ERROR(e) \
74 	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
75 #define p4d_ERROR(e) \
76 	printk("%s:%d: bad p4d %p.\n", __FILE__, __LINE__, (void *) p4d_val(e))
77 #define pgd_ERROR(e) \
78 	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
79 
80 /*
81  * The vmalloc and module area will always be on the topmost area of the
82  * kernel mapping. We reserve 128GB (64bit) for vmalloc and modules.
83  * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
84  * modules will reside. That makes sure that inter-module branches always
85  * happen without trampolines and that, in addition, the placement within a
86  * 2GB frame is branch prediction unit friendly.
87  */
88 extern unsigned long VMALLOC_START;
89 extern unsigned long VMALLOC_END;
90 #define VMALLOC_DEFAULT_SIZE	((128UL << 30) - MODULES_LEN)
91 extern struct page *vmemmap;
92 extern unsigned long vmemmap_size;
93 
94 #define VMEM_MAX_PHYS ((unsigned long) vmemmap)
95 
96 extern unsigned long MODULES_VADDR;
97 extern unsigned long MODULES_END;
98 #define MODULES_VADDR	MODULES_VADDR
99 #define MODULES_END	MODULES_END
100 #define MODULES_LEN	(1UL << 31)
101 
102 static inline int is_module_addr(void *addr)
103 {
104 	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
105 	if (addr < (void *)MODULES_VADDR)
106 		return 0;
107 	if (addr > (void *)MODULES_END)
108 		return 0;
109 	return 1;
110 }
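
/*
 * Example: the BUILD_BUG_ON above pins MODULES_LEN to at most 2GB, so
 * any two addresses for which is_module_addr() returns true can reach
 * each other with a relative branch; see the comment on the
 * vmalloc/module area layout above.
 */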
111 
112 /*
113  * A 64 bit page table entry of S390 has the following format:
114  * |			 PFRA			      |0IPC|  OS  |
115  * 0000000000111111111122222222223333333333444444444455555555556666
116  * 0123456789012345678901234567890123456789012345678901234567890123
117  *
118  * I Page-Invalid Bit:    Page is not available for address-translation
119  * P Page-Protection Bit: Store access not possible for page
120  * C Change-bit override: HW is not required to set change bit
121  *
122  * A 64 bit segment table entry of S390 has the following format:
123  * |        P-table origin                              |      TT
124  * 0000000000111111111122222222223333333333444444444455555555556666
125  * 0123456789012345678901234567890123456789012345678901234567890123
126  *
127  * I Segment-Invalid Bit:    Segment is not available for address-translation
128  * C Common-Segment Bit:     Segment is not private (PoP 3-30)
129  * P Page-Protection Bit: Store access not possible for page
130  * TT Type 00
131  *
132  * A 64 bit region table entry of S390 has the following format:
133  * |        S-table origin                             |   TF  TTTL
134  * 0000000000111111111122222222223333333333444444444455555555556666
135  * 0123456789012345678901234567890123456789012345678901234567890123
136  *
137  * I Segment-Invalid Bit:    Segment is not available for address-translation
138  * TT Type 01
139  * TF
140  * TL Table length
141  *
142  * The 64 bit region table origin of S390 has the following format:
143  * |      region table origin                          |       DTTL
144  * 0000000000111111111122222222223333333333444444444455555555556666
145  * 0123456789012345678901234567890123456789012345678901234567890123
146  *
147  * X Space-Switch event:
148  * G Segment-Invalid Bit:
149  * P Private-Space Bit:
150  * S Storage-Alteration:
151  * R Real space
152  * TL Table-Length:
153  *
154  * A storage key has the following format:
155  * | ACC |F|R|C|0|
156  *  0   3 4 5 6 7
157  * ACC: access key
158  * F  : fetch protection bit
159  * R  : referenced bit
160  * C  : changed bit
161  */
162 
163 /* Hardware bits in the page table entry */
164 #define _PAGE_NOEXEC	0x100		/* HW no-execute bit  */
165 #define _PAGE_PROTECT	0x200		/* HW read-only bit  */
166 #define _PAGE_INVALID	0x400		/* HW invalid bit    */
167 #define _PAGE_LARGE	0x800		/* Bit to mark a large pte */
168 
169 /* Software bits in the page table entry */
170 #define _PAGE_PRESENT	0x001		/* SW pte present bit */
171 #define _PAGE_YOUNG	0x004		/* SW pte young bit */
172 #define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
173 #define _PAGE_READ	0x010		/* SW pte read bit */
174 #define _PAGE_WRITE	0x020		/* SW pte write bit */
175 #define _PAGE_SPECIAL	0x040		/* SW associated with special page */
176 #define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */
177 
178 #ifdef CONFIG_MEM_SOFT_DIRTY
179 #define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
180 #else
181 #define _PAGE_SOFT_DIRTY 0x000
182 #endif
183 
184 /* Set of bits not changed in pte_modify */
185 #define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
186 				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)
187 
188 /*
189  * handle_pte_fault uses pte_present and pte_none to find out the pte type
190  * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
191  * distinguish present from not-present ptes. It is changed only with the page
192  * table lock held.
193  *
194  * The following table gives the different possible bit combinations for
195  * the pte hardware and software bits in the last 12 bits of a pte
196  * (. unassigned bit, x don't care, t swap type):
197  *
198  *				842100000000
199  *				000084210000
200  *				000000008421
201  *				.IR.uswrdy.p
202  * empty			.10.00000000
203  * swap				.11..ttttt.0
204  * prot-none, clean, old	.11.xx0000.1
205  * prot-none, clean, young	.11.xx0001.1
206  * prot-none, dirty, old	.11.xx0010.1
207  * prot-none, dirty, young	.11.xx0011.1
208  * read-only, clean, old	.11.xx0100.1
209  * read-only, clean, young	.01.xx0101.1
210  * read-only, dirty, old	.11.xx0110.1
211  * read-only, dirty, young	.01.xx0111.1
212  * read-write, clean, old	.11.xx1100.1
213  * read-write, clean, young	.01.xx1101.1
214  * read-write, dirty, old	.10.xx1110.1
215  * read-write, dirty, young	.00.xx1111.1
216  * HW-bits: R read-only, I invalid
217  * SW-bits: p present, y young, d dirty, r read, w write, s special,
218  *	    u unused, l large
219  *
220  * pte_none    is true for the bit pattern .10.00000000, pte == 0x400
221  * pte_swap    is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200
222  * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
223  */
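
/*
 * Minimal sketch (hypothetical helper, for illustration only): the
 * three pte classes from the summary above, tested on a raw pte value
 * with the bit definitions from this header. The real predicates
 * pte_none/pte_swap/pte_present are defined further down.
 */
static inline int __pte_raw_class(unsigned long pteval)
{
	if (pteval == _PAGE_INVALID)	/* empty: pte == 0x400 */
		return 0;
	if ((pteval & (_PAGE_PROTECT | _PAGE_PRESENT)) == _PAGE_PROTECT)
		return 1;		/* swap: (pte & 0x201) == 0x200 */
	if (pteval & _PAGE_PRESENT)	/* present: (pte & 0x001) == 0x001 */
		return 2;
	return -1;
}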
224 
225 /* Bits in the segment/region table address-space-control-element */
226 #define _ASCE_ORIGIN		~0xfffUL /* region/segment table origin	   */
227 #define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
228 #define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
229 #define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
230 #define _ASCE_REAL_SPACE	0x20	/* real space control		    */
231 #define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
232 #define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
233 #define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
234 #define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
235 #define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
236 #define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */
237 
238 /* Bits in the region table entry */
239 #define _REGION_ENTRY_ORIGIN	~0xfffUL /* region/segment table origin	   */
240 #define _REGION_ENTRY_PROTECT	0x200	/* region protection bit	    */
241 #define _REGION_ENTRY_NOEXEC	0x100	/* region no-execute bit	    */
242 #define _REGION_ENTRY_OFFSET	0xc0	/* region table offset		    */
243 #define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry	    */
244 #define _REGION_ENTRY_TYPE_MASK	0x0c	/* region table type mask	    */
245 #define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
246 #define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
247 #define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
248 #define _REGION_ENTRY_LENGTH	0x03	/* region table length		    */
249 
250 #define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
251 #define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
252 #define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
253 #define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
254 #define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
255 #define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)
256 
257 #define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address	     */
258 #define _REGION3_ENTRY_DIRTY	0x2000	/* SW region dirty bit */
259 #define _REGION3_ENTRY_YOUNG	0x1000	/* SW region young bit */
260 #define _REGION3_ENTRY_LARGE	0x0400	/* RTTE-format control, large page  */
261 #define _REGION3_ENTRY_READ	0x0002	/* SW region read bit */
262 #define _REGION3_ENTRY_WRITE	0x0001	/* SW region write bit */
263 
264 #ifdef CONFIG_MEM_SOFT_DIRTY
265 #define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
266 #else
267 #define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
268 #endif
269 
270 #define _REGION_ENTRY_BITS	 0xfffffffffffff22fUL
271 
272 /* Bits in the segment table entry */
273 #define _SEGMENT_ENTRY_BITS			0xfffffffffffffe33UL
274 #define _SEGMENT_ENTRY_HARDWARE_BITS		0xfffffffffffffe30UL
275 #define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE	0xfffffffffff00730UL
276 #define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address	    */
277 #define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL /* page table origin		   */
278 #define _SEGMENT_ENTRY_PROTECT	0x200	/* segment protection bit	    */
279 #define _SEGMENT_ENTRY_NOEXEC	0x100	/* segment no-execute bit	    */
280 #define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry	    */
281 #define _SEGMENT_ENTRY_TYPE_MASK 0x0c	/* segment table type mask	    */
282 
283 #define _SEGMENT_ENTRY		(0)
284 #define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)
285 
286 #define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
287 #define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
288 #define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
289 #define _SEGMENT_ENTRY_WRITE	0x0002	/* SW segment write bit */
290 #define _SEGMENT_ENTRY_READ	0x0001	/* SW segment read bit */
291 
292 #ifdef CONFIG_MEM_SOFT_DIRTY
293 #define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
294 #else
295 #define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
296 #endif
297 
298 #define _CRST_ENTRIES	2048	/* number of region/segment table entries */
299 #define _PAGE_ENTRIES	256	/* number of page table entries	*/
300 
301 #define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8)
302 #define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8)
303 
304 #define _REGION1_SHIFT	53
305 #define _REGION2_SHIFT	42
306 #define _REGION3_SHIFT	31
307 #define _SEGMENT_SHIFT	20
308 
309 #define _REGION1_INDEX	(0x7ffUL << _REGION1_SHIFT)
310 #define _REGION2_INDEX	(0x7ffUL << _REGION2_SHIFT)
311 #define _REGION3_INDEX	(0x7ffUL << _REGION3_SHIFT)
312 #define _SEGMENT_INDEX	(0x7ffUL << _SEGMENT_SHIFT)
313 #define _PAGE_INDEX	(0xffUL  << _PAGE_SHIFT)
314 
315 #define _REGION1_SIZE	(1UL << _REGION1_SHIFT)
316 #define _REGION2_SIZE	(1UL << _REGION2_SHIFT)
317 #define _REGION3_SIZE	(1UL << _REGION3_SHIFT)
318 #define _SEGMENT_SIZE	(1UL << _SEGMENT_SHIFT)
319 
320 #define _REGION1_MASK	(~(_REGION1_SIZE - 1))
321 #define _REGION2_MASK	(~(_REGION2_SIZE - 1))
322 #define _REGION3_MASK	(~(_REGION3_SIZE - 1))
323 #define _SEGMENT_MASK	(~(_SEGMENT_SIZE - 1))
324 
325 #define PMD_SHIFT	_SEGMENT_SHIFT
326 #define PUD_SHIFT	_REGION3_SHIFT
327 #define P4D_SHIFT	_REGION2_SHIFT
328 #define PGDIR_SHIFT	_REGION1_SHIFT
329 
330 #define PMD_SIZE	_SEGMENT_SIZE
331 #define PUD_SIZE	_REGION3_SIZE
332 #define P4D_SIZE	_REGION2_SIZE
333 #define PGDIR_SIZE	_REGION1_SIZE
334 
335 #define PMD_MASK	_SEGMENT_MASK
336 #define PUD_MASK	_REGION3_MASK
337 #define P4D_MASK	_REGION2_MASK
338 #define PGDIR_MASK	_REGION1_MASK
339 
340 #define PTRS_PER_PTE	_PAGE_ENTRIES
341 #define PTRS_PER_PMD	_CRST_ENTRIES
342 #define PTRS_PER_PUD	_CRST_ENTRIES
343 #define PTRS_PER_P4D	_CRST_ENTRIES
344 #define PTRS_PER_PGD	_CRST_ENTRIES
345 
346 #define MAX_PTRS_PER_P4D	PTRS_PER_P4D
347 
348 /*
349  * Segment table and region3 table entry encoding
350  * (R = read-only, I = invalid, y = young bit):
351  *				dy..R...I...wr
352  * prot-none, clean, old	00..1...1...00
353  * prot-none, clean, young	01..1...1...00
354  * prot-none, dirty, old	10..1...1...00
355  * prot-none, dirty, young	11..1...1...00
356  * read-only, clean, old	00..1...1...01
357  * read-only, clean, young	01..1...0...01
358  * read-only, dirty, old	10..1...1...01
359  * read-only, dirty, young	11..1...0...01
360  * read-write, clean, old	00..1...1...11
361  * read-write, clean, young	01..1...0...11
362  * read-write, dirty, old	10..0...1...11
363  * read-write, dirty, young	11..0...0...11
364  * The segment table origin is used to distinguish empty (origin==0) from
365  * read-write, old segment table entries (origin!=0)
366  * HW-bits: R read-only, I invalid
367  * SW-bits: y young, d dirty, r read, w write
368  */
369 
370 /* Page status table bits for virtualization */
371 #define PGSTE_ACC_BITS	0xf000000000000000UL
372 #define PGSTE_FP_BIT	0x0800000000000000UL
373 #define PGSTE_PCL_BIT	0x0080000000000000UL
374 #define PGSTE_HR_BIT	0x0040000000000000UL
375 #define PGSTE_HC_BIT	0x0020000000000000UL
376 #define PGSTE_GR_BIT	0x0004000000000000UL
377 #define PGSTE_GC_BIT	0x0002000000000000UL
378 #define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
379 #define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */
380 #define PGSTE_VSIE_BIT	0x0000200000000000UL	/* ref'd in a shadow table */
381 
382 /* Guest Page State used for virtualization */
383 #define _PGSTE_GPS_ZERO			0x0000000080000000UL
384 #define _PGSTE_GPS_NODAT		0x0000000040000000UL
385 #define _PGSTE_GPS_USAGE_MASK		0x0000000003000000UL
386 #define _PGSTE_GPS_USAGE_STABLE		0x0000000000000000UL
387 #define _PGSTE_GPS_USAGE_UNUSED		0x0000000001000000UL
388 #define _PGSTE_GPS_USAGE_POT_VOLATILE	0x0000000002000000UL
389 #define _PGSTE_GPS_USAGE_VOLATILE	_PGSTE_GPS_USAGE_MASK
390 
391 /*
392  * A user page table pointer has the space-switch-event bit, the
393  * private-space-control bit and the storage-alteration-event-control
394  * bit set. A kernel page table pointer doesn't need them.
395  */
396 #define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
397 				 _ASCE_ALT_EVENT)
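
/*
 * Illustrative sketch (hypothetical): a user ASCE for a 3-level page
 * table rooted at pgd would be composed from the bits above roughly as
 *
 *	asce = __pa(pgd) | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH |
 *	       _ASCE_USER_BITS;
 */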
398 
399 /*
400  * Page protection definitions.
401  */
402 #define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
403 #define PAGE_RO		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
404 				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
405 #define PAGE_RX		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
406 				 _PAGE_INVALID | _PAGE_PROTECT)
407 #define PAGE_RW		__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
408 				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
409 #define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
410 				 _PAGE_INVALID | _PAGE_PROTECT)
411 
412 #define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
413 				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
414 #define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
415 				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
416 #define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
417 				 _PAGE_PROTECT | _PAGE_NOEXEC)
418 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
419 				  _PAGE_YOUNG |	_PAGE_DIRTY)
420 
421 /*
422  * On s390 the page table entry has an invalid bit and a read-only bit.
423  * Read permission implies execute permission and write permission
424  * implies read permission.
425  */
426          /*xwr*/
427 #define __P000	PAGE_NONE
428 #define __P001	PAGE_RO
429 #define __P010	PAGE_RO
430 #define __P011	PAGE_RO
431 #define __P100	PAGE_RX
432 #define __P101	PAGE_RX
433 #define __P110	PAGE_RX
434 #define __P111	PAGE_RX
435 
436 #define __S000	PAGE_NONE
437 #define __S001	PAGE_RO
438 #define __S010	PAGE_RW
439 #define __S011	PAGE_RW
440 #define __S100	PAGE_RX
441 #define __S101	PAGE_RX
442 #define __S110	PAGE_RWX
443 #define __S111	PAGE_RWX
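
/*
 * Example: a private PROT_WRITE mapping (__P010) degrades to PAGE_RO
 * because private writes are resolved by copy-on-write, while a shared
 * PROT_WRITE mapping (__S010) maps to PAGE_RW directly.
 */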
444 
445 /*
446  * Segment entry (large page) protection definitions.
447  */
448 #define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
449 				 _SEGMENT_ENTRY_PROTECT)
450 #define SEGMENT_RO	__pgprot(_SEGMENT_ENTRY_PROTECT | \
451 				 _SEGMENT_ENTRY_READ | \
452 				 _SEGMENT_ENTRY_NOEXEC)
453 #define SEGMENT_RX	__pgprot(_SEGMENT_ENTRY_PROTECT | \
454 				 _SEGMENT_ENTRY_READ)
455 #define SEGMENT_RW	__pgprot(_SEGMENT_ENTRY_READ | \
456 				 _SEGMENT_ENTRY_WRITE | \
457 				 _SEGMENT_ENTRY_NOEXEC)
458 #define SEGMENT_RWX	__pgprot(_SEGMENT_ENTRY_READ | \
459 				 _SEGMENT_ENTRY_WRITE)
460 #define SEGMENT_KERNEL	__pgprot(_SEGMENT_ENTRY |	\
461 				 _SEGMENT_ENTRY_LARGE |	\
462 				 _SEGMENT_ENTRY_READ |	\
463 				 _SEGMENT_ENTRY_WRITE | \
464 				 _SEGMENT_ENTRY_YOUNG | \
465 				 _SEGMENT_ENTRY_DIRTY | \
466 				 _SEGMENT_ENTRY_NOEXEC)
467 #define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY |	\
468 				 _SEGMENT_ENTRY_LARGE |	\
469 				 _SEGMENT_ENTRY_READ |	\
470 				 _SEGMENT_ENTRY_YOUNG |	\
471 				 _SEGMENT_ENTRY_PROTECT | \
472 				 _SEGMENT_ENTRY_NOEXEC)
473 #define SEGMENT_KERNEL_EXEC __pgprot(_SEGMENT_ENTRY |	\
474 				 _SEGMENT_ENTRY_LARGE |	\
475 				 _SEGMENT_ENTRY_READ |	\
476 				 _SEGMENT_ENTRY_WRITE | \
477 				 _SEGMENT_ENTRY_YOUNG |	\
478 				 _SEGMENT_ENTRY_DIRTY)
479 
480 /*
481  * Region3 entry (large page) protection definitions.
482  */
483 
484 #define REGION3_KERNEL	__pgprot(_REGION_ENTRY_TYPE_R3 | \
485 				 _REGION3_ENTRY_LARGE |	 \
486 				 _REGION3_ENTRY_READ |	 \
487 				 _REGION3_ENTRY_WRITE |	 \
488 				 _REGION3_ENTRY_YOUNG |	 \
489 				 _REGION3_ENTRY_DIRTY | \
490 				 _REGION_ENTRY_NOEXEC)
491 #define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
492 				   _REGION3_ENTRY_LARGE |  \
493 				   _REGION3_ENTRY_READ |   \
494 				   _REGION3_ENTRY_YOUNG |  \
495 				   _REGION_ENTRY_PROTECT | \
496 				   _REGION_ENTRY_NOEXEC)
497 
498 static inline bool mm_p4d_folded(struct mm_struct *mm)
499 {
500 	return mm->context.asce_limit <= _REGION1_SIZE;
501 }
502 #define mm_p4d_folded(mm) mm_p4d_folded(mm)
503 
504 static inline bool mm_pud_folded(struct mm_struct *mm)
505 {
506 	return mm->context.asce_limit <= _REGION2_SIZE;
507 }
508 #define mm_pud_folded(mm) mm_pud_folded(mm)
509 
510 static inline bool mm_pmd_folded(struct mm_struct *mm)
511 {
512 	return mm->context.asce_limit <= _REGION3_SIZE;
513 }
514 #define mm_pmd_folded(mm) mm_pmd_folded(mm)
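
/*
 * Example: an mm running with a region-second table (asce_limit ==
 * _REGION2_SIZE, i.e. a 4TB address space) has p4d and pud folded but
 * not pmd; an upgrade to a region-first table unfolds pud.
 */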
515 
516 static inline int mm_has_pgste(struct mm_struct *mm)
517 {
518 #ifdef CONFIG_PGSTE
519 	if (unlikely(mm->context.has_pgste))
520 		return 1;
521 #endif
522 	return 0;
523 }
524 
525 static inline int mm_is_protected(struct mm_struct *mm)
526 {
527 #ifdef CONFIG_PGSTE
528 	if (unlikely(atomic_read(&mm->context.is_protected)))
529 		return 1;
530 #endif
531 	return 0;
532 }
533 
534 static inline int mm_alloc_pgste(struct mm_struct *mm)
535 {
536 #ifdef CONFIG_PGSTE
537 	if (unlikely(mm->context.alloc_pgste))
538 		return 1;
539 #endif
540 	return 0;
541 }
542 
543 /*
544  * In the case that a guest uses storage keys,
545  * faults should no longer be backed by zero pages.
546  */
547 #define mm_forbids_zeropage mm_has_pgste
548 static inline int mm_uses_skeys(struct mm_struct *mm)
549 {
550 #ifdef CONFIG_PGSTE
551 	if (mm->context.uses_skeys)
552 		return 1;
553 #endif
554 	return 0;
555 }
556 
557 static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
558 {
559 	register unsigned long reg2 asm("2") = old;
560 	register unsigned long reg3 asm("3") = new;
561 	unsigned long address = (unsigned long)ptr | 1;
562 
563 	asm volatile(
564 		"	csp	%0,%3"
565 		: "+d" (reg2), "+m" (*ptr)
566 		: "d" (reg3), "d" (address)
567 		: "cc");
568 }
569 
570 static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
571 {
572 	register unsigned long reg2 asm("2") = old;
573 	register unsigned long reg3 asm("3") = new;
574 	unsigned long address = (unsigned long)ptr | 1;
575 
576 	asm volatile(
577 		"	.insn	rre,0xb98a0000,%0,%3"
578 		: "+d" (reg2), "+m" (*ptr)
579 		: "d" (reg3), "d" (address)
580 		: "cc");
581 }
582 
583 #define CRDTE_DTT_PAGE		0x00UL
584 #define CRDTE_DTT_SEGMENT	0x10UL
585 #define CRDTE_DTT_REGION3	0x14UL
586 #define CRDTE_DTT_REGION2	0x18UL
587 #define CRDTE_DTT_REGION1	0x1cUL
588 
589 static inline void crdte(unsigned long old, unsigned long new,
590 			 unsigned long table, unsigned long dtt,
591 			 unsigned long address, unsigned long asce)
592 {
593 	register unsigned long reg2 asm("2") = old;
594 	register unsigned long reg3 asm("3") = new;
595 	register unsigned long reg4 asm("4") = table | dtt;
596 	register unsigned long reg5 asm("5") = address;
597 
598 	asm volatile(".insn rrf,0xb98f0000,%0,%2,%4,0"
599 		     : "+d" (reg2)
600 		     : "d" (reg3), "d" (reg4), "d" (reg5), "a" (asce)
601 		     : "memory", "cc");
602 }
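
/*
 * Illustrative sketch (hypothetical caller): atomically replace a
 * segment table entry with crdte, where sto is the origin of the
 * segment table containing the entry for addr:
 *
 *	crdte(old_entry, new_entry, sto, CRDTE_DTT_SEGMENT, addr, asce);
 */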
603 
604 /*
605  * pgd/p4d/pud/pmd/pte query functions
606  */
607 static inline int pgd_folded(pgd_t pgd)
608 {
609 	return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
610 }
611 
612 static inline int pgd_present(pgd_t pgd)
613 {
614 	if (pgd_folded(pgd))
615 		return 1;
616 	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
617 }
618 
619 static inline int pgd_none(pgd_t pgd)
620 {
621 	if (pgd_folded(pgd))
622 		return 0;
623 	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
624 }
625 
626 static inline int pgd_bad(pgd_t pgd)
627 {
628 	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1)
629 		return 0;
630 	return (pgd_val(pgd) & ~_REGION_ENTRY_BITS) != 0;
631 }
632 
633 static inline unsigned long pgd_pfn(pgd_t pgd)
634 {
635 	unsigned long origin_mask;
636 
637 	origin_mask = _REGION_ENTRY_ORIGIN;
638 	return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT;
639 }
640 
641 static inline int p4d_folded(p4d_t p4d)
642 {
643 	return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
644 }
645 
646 static inline int p4d_present(p4d_t p4d)
647 {
648 	if (p4d_folded(p4d))
649 		return 1;
650 	return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
651 }
652 
653 static inline int p4d_none(p4d_t p4d)
654 {
655 	if (p4d_folded(p4d))
656 		return 0;
657 	return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
658 }
659 
660 static inline unsigned long p4d_pfn(p4d_t p4d)
661 {
662 	unsigned long origin_mask;
663 
664 	origin_mask = _REGION_ENTRY_ORIGIN;
665 	return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
666 }
667 
668 static inline int pud_folded(pud_t pud)
669 {
670 	return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
671 }
672 
673 static inline int pud_present(pud_t pud)
674 {
675 	if (pud_folded(pud))
676 		return 1;
677 	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
678 }
679 
680 static inline int pud_none(pud_t pud)
681 {
682 	if (pud_folded(pud))
683 		return 0;
684 	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
685 }
686 
687 #define pud_leaf	pud_large
688 static inline int pud_large(pud_t pud)
689 {
690 	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
691 		return 0;
692 	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
693 }
694 
695 #define pmd_leaf	pmd_large
696 static inline int pmd_large(pmd_t pmd)
697 {
698 	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
699 }
700 
701 static inline int pmd_bad(pmd_t pmd)
702 {
703 	if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_large(pmd))
704 		return 1;
705 	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
706 }
707 
708 static inline int pud_bad(pud_t pud)
709 {
710 	unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;
711 
712 	if (type > _REGION_ENTRY_TYPE_R3 || pud_large(pud))
713 		return 1;
714 	if (type < _REGION_ENTRY_TYPE_R3)
715 		return 0;
716 	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
717 }
718 
719 static inline int p4d_bad(p4d_t p4d)
720 {
721 	unsigned long type = p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK;
722 
723 	if (type > _REGION_ENTRY_TYPE_R2)
724 		return 1;
725 	if (type < _REGION_ENTRY_TYPE_R2)
726 		return 0;
727 	return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
728 }
729 
730 static inline int pmd_present(pmd_t pmd)
731 {
732 	return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
733 }
734 
735 static inline int pmd_none(pmd_t pmd)
736 {
737 	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
738 }
739 
740 #define pmd_write pmd_write
741 static inline int pmd_write(pmd_t pmd)
742 {
743 	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
744 }
745 
746 #define pud_write pud_write
747 static inline int pud_write(pud_t pud)
748 {
749 	return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
750 }
751 
752 static inline int pmd_dirty(pmd_t pmd)
753 {
754 	return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
755 }
756 
757 static inline int pmd_young(pmd_t pmd)
758 {
759 	return (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
760 }
761 
762 static inline int pte_present(pte_t pte)
763 {
764 	/* Bit pattern: (pte & 0x001) == 0x001 */
765 	return (pte_val(pte) & _PAGE_PRESENT) != 0;
766 }
767 
768 static inline int pte_none(pte_t pte)
769 {
770 	/* Bit pattern: pte == 0x400 */
771 	return pte_val(pte) == _PAGE_INVALID;
772 }
773 
774 static inline int pte_swap(pte_t pte)
775 {
776 	/* Bit pattern: (pte & 0x201) == 0x200 */
777 	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
778 		== _PAGE_PROTECT;
779 }
780 
781 static inline int pte_special(pte_t pte)
782 {
783 	return (pte_val(pte) & _PAGE_SPECIAL);
784 }
785 
786 #define __HAVE_ARCH_PTE_SAME
787 static inline int pte_same(pte_t a, pte_t b)
788 {
789 	return pte_val(a) == pte_val(b);
790 }
791 
792 #ifdef CONFIG_NUMA_BALANCING
793 static inline int pte_protnone(pte_t pte)
794 {
795 	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
796 }
797 
798 static inline int pmd_protnone(pmd_t pmd)
799 {
800 	/* pmd_large(pmd) implies pmd_present(pmd) */
801 	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
802 }
803 #endif
804 
805 static inline int pte_soft_dirty(pte_t pte)
806 {
807 	return pte_val(pte) & _PAGE_SOFT_DIRTY;
808 }
809 #define pte_swp_soft_dirty pte_soft_dirty
810 
811 static inline pte_t pte_mksoft_dirty(pte_t pte)
812 {
813 	pte_val(pte) |= _PAGE_SOFT_DIRTY;
814 	return pte;
815 }
816 #define pte_swp_mksoft_dirty pte_mksoft_dirty
817 
818 static inline pte_t pte_clear_soft_dirty(pte_t pte)
819 {
820 	pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
821 	return pte;
822 }
823 #define pte_swp_clear_soft_dirty pte_clear_soft_dirty
824 
825 static inline int pmd_soft_dirty(pmd_t pmd)
826 {
827 	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
828 }
829 
830 static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
831 {
832 	pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
833 	return pmd;
834 }
835 
836 static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
837 {
838 	pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
839 	return pmd;
840 }
841 
842 /*
843  * The query functions pte_write/pte_dirty/pte_young only work if
844  * pte_present() is true. Undefined behaviour if not.
845  */
846 static inline int pte_write(pte_t pte)
847 {
848 	return (pte_val(pte) & _PAGE_WRITE) != 0;
849 }
850 
851 static inline int pte_dirty(pte_t pte)
852 {
853 	return (pte_val(pte) & _PAGE_DIRTY) != 0;
854 }
855 
856 static inline int pte_young(pte_t pte)
857 {
858 	return (pte_val(pte) & _PAGE_YOUNG) != 0;
859 }
860 
861 #define __HAVE_ARCH_PTE_UNUSED
862 static inline int pte_unused(pte_t pte)
863 {
864 	return pte_val(pte) & _PAGE_UNUSED;
865 }
866 
867 /*
868  * pgd/pmd/pte modification functions
869  */
870 
871 static inline void pgd_clear(pgd_t *pgd)
872 {
873 	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
874 		pgd_val(*pgd) = _REGION1_ENTRY_EMPTY;
875 }
876 
877 static inline void p4d_clear(p4d_t *p4d)
878 {
879 	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
880 		p4d_val(*p4d) = _REGION2_ENTRY_EMPTY;
881 }
882 
883 static inline void pud_clear(pud_t *pud)
884 {
885 	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
886 		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
887 }
888 
889 static inline void pmd_clear(pmd_t *pmdp)
890 {
891 	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
892 }
893 
894 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
895 {
896 	pte_val(*ptep) = _PAGE_INVALID;
897 }
898 
899 /*
900  * The following pte modification functions only work if
901  * pte_present() is true. Undefined behaviour if not.
902  */
903 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
904 {
905 	pte_val(pte) &= _PAGE_CHG_MASK;
906 	pte_val(pte) |= pgprot_val(newprot);
907 	/*
908 	 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
909 	 * has the invalid bit set, clear it again for readable, young pages
910 	 */
911 	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
912 		pte_val(pte) &= ~_PAGE_INVALID;
913 	/*
914 	 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
915 	 * protection bit set, clear it again for writable, dirty pages
916 	 */
917 	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
918 		pte_val(pte) &= ~_PAGE_PROTECT;
919 	return pte;
920 }
921 
922 static inline pte_t pte_wrprotect(pte_t pte)
923 {
924 	pte_val(pte) &= ~_PAGE_WRITE;
925 	pte_val(pte) |= _PAGE_PROTECT;
926 	return pte;
927 }
928 
929 static inline pte_t pte_mkwrite(pte_t pte)
930 {
931 	pte_val(pte) |= _PAGE_WRITE;
932 	if (pte_val(pte) & _PAGE_DIRTY)
933 		pte_val(pte) &= ~_PAGE_PROTECT;
934 	return pte;
935 }
936 
937 static inline pte_t pte_mkclean(pte_t pte)
938 {
939 	pte_val(pte) &= ~_PAGE_DIRTY;
940 	pte_val(pte) |= _PAGE_PROTECT;
941 	return pte;
942 }
943 
944 static inline pte_t pte_mkdirty(pte_t pte)
945 {
946 	pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
947 	if (pte_val(pte) & _PAGE_WRITE)
948 		pte_val(pte) &= ~_PAGE_PROTECT;
949 	return pte;
950 }
951 
952 static inline pte_t pte_mkold(pte_t pte)
953 {
954 	pte_val(pte) &= ~_PAGE_YOUNG;
955 	pte_val(pte) |= _PAGE_INVALID;
956 	return pte;
957 }
958 
959 static inline pte_t pte_mkyoung(pte_t pte)
960 {
961 	pte_val(pte) |= _PAGE_YOUNG;
962 	if (pte_val(pte) & _PAGE_READ)
963 		pte_val(pte) &= ~_PAGE_INVALID;
964 	return pte;
965 }
966 
967 static inline pte_t pte_mkspecial(pte_t pte)
968 {
969 	pte_val(pte) |= _PAGE_SPECIAL;
970 	return pte;
971 }
972 
973 #ifdef CONFIG_HUGETLB_PAGE
974 static inline pte_t pte_mkhuge(pte_t pte)
975 {
976 	pte_val(pte) |= _PAGE_LARGE;
977 	return pte;
978 }
979 #endif
980 
981 #define IPTE_GLOBAL	0
982 #define	IPTE_LOCAL	1
983 
984 #define IPTE_NODAT	0x400
985 #define IPTE_GUEST_ASCE	0x800
986 
987 static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
988 					unsigned long opt, unsigned long asce,
989 					int local)
990 {
991 	unsigned long pto = (unsigned long) ptep;
992 
993 	if (__builtin_constant_p(opt) && opt == 0) {
994 		/* Invalidation + TLB flush for the pte */
995 		asm volatile(
996 			"	.insn	rrf,0xb2210000,%[r1],%[r2],0,%[m4]"
997 			: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
998 			  [m4] "i" (local));
999 		return;
1000 	}
1001 
1002 	/* Invalidate ptes with options + TLB flush of the ptes */
1003 	opt = opt | (asce & _ASCE_ORIGIN);
1004 	asm volatile(
1005 		"	.insn	rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
1006 		: [r2] "+a" (address), [r3] "+a" (opt)
1007 		: [r1] "a" (pto), [m4] "i" (local) : "memory");
1008 }
1009 
1010 static __always_inline void __ptep_ipte_range(unsigned long address, int nr,
1011 					      pte_t *ptep, int local)
1012 {
1013 	unsigned long pto = (unsigned long) ptep;
1014 
1015 	/* Invalidate a range of ptes + TLB flush of the ptes */
1016 	do {
1017 		asm volatile(
1018 			"       .insn rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
1019 			: [r2] "+a" (address), [r3] "+a" (nr)
1020 			: [r1] "a" (pto), [m4] "i" (local) : "memory");
1021 	} while (nr != 255);
1022 }
1023 
1024 /*
1025  * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
1026  * both clear the TLB for the unmapped pte. The reason is that
1027  * ptep_get_and_clear is used in common code (e.g. change_pte_range)
1028  * to modify an active pte. The sequence is
1029  *   1) ptep_get_and_clear
1030  *   2) set_pte_at
1031  *   3) flush_tlb_range
1032  * On s390 the tlb needs to get flushed with the modification of the pte
1033  * if the pte is active. The only way this can be implemented is to
1034  * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
1035  * is a nop.
1036  */
1037 pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
1038 pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);
1039 
1040 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
1041 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
1042 					    unsigned long addr, pte_t *ptep)
1043 {
1044 	pte_t pte = *ptep;
1045 
1046 	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
1047 	return pte_young(pte);
1048 }
1049 
1050 #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
1051 static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
1052 					 unsigned long address, pte_t *ptep)
1053 {
1054 	return ptep_test_and_clear_young(vma, address, ptep);
1055 }
1056 
1057 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
1058 static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
1059 				       unsigned long addr, pte_t *ptep)
1060 {
1061 	pte_t res;
1062 
1063 	res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
1064 	if (mm_is_protected(mm) && pte_present(res))
1065 		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
1066 	return res;
1067 }
1068 
1069 #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
1070 pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
1071 void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
1072 			     pte_t *, pte_t, pte_t);
1073 
1074 #define __HAVE_ARCH_PTEP_CLEAR_FLUSH
1075 static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
1076 				     unsigned long addr, pte_t *ptep)
1077 {
1078 	pte_t res;
1079 
1080 	res = ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
1081 	if (mm_is_protected(vma->vm_mm) && pte_present(res))
1082 		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
1083 	return res;
1084 }
1085 
1086 /*
1087  * The batched pte unmap code uses ptep_get_and_clear_full to clear the
1088  * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
1089  * tlbs of an mm if it can guarantee that the ptes of the mm_struct
1090  * cannot be accessed while the batched unmap is running. In this case
1091  * full==1 and a simple pte_clear is enough. See tlb.h.
1092  */
1093 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
1094 static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
1095 					    unsigned long addr,
1096 					    pte_t *ptep, int full)
1097 {
1098 	pte_t res;
1099 
1100 	if (full) {
1101 		res = *ptep;
1102 		*ptep = __pte(_PAGE_INVALID);
1103 	} else {
1104 		res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
1105 	}
1106 	if (mm_is_protected(mm) && pte_present(res))
1107 		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
1108 	return res;
1109 }
1110 
1111 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
1112 static inline void ptep_set_wrprotect(struct mm_struct *mm,
1113 				      unsigned long addr, pte_t *ptep)
1114 {
1115 	pte_t pte = *ptep;
1116 
1117 	if (pte_write(pte))
1118 		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
1119 }
1120 
1121 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
1122 static inline int ptep_set_access_flags(struct vm_area_struct *vma,
1123 					unsigned long addr, pte_t *ptep,
1124 					pte_t entry, int dirty)
1125 {
1126 	if (pte_same(*ptep, entry))
1127 		return 0;
1128 	ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
1129 	return 1;
1130 }
1131 
1132 /*
1133  * Additional functions to handle KVM guest page tables
1134  */
1135 void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
1136 		     pte_t *ptep, pte_t entry);
1137 void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
1138 void ptep_notify(struct mm_struct *mm, unsigned long addr,
1139 		 pte_t *ptep, unsigned long bits);
1140 int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
1141 		    pte_t *ptep, int prot, unsigned long bit);
1142 void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
1143 		     pte_t *ptep, int reset);
1144 void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
1145 int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
1146 		    pte_t *sptep, pte_t *tptep, pte_t pte);
1147 void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);
1148 
1149 bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address,
1150 			    pte_t *ptep);
1151 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
1152 			  unsigned char key, bool nq);
1153 int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
1154 			       unsigned char key, unsigned char *oldkey,
1155 			       bool nq, bool mr, bool mc);
1156 int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
1157 int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
1158 			  unsigned char *key);
1159 
1160 int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
1161 				unsigned long bits, unsigned long value);
1162 int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
1163 int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
1164 			unsigned long *oldpte, unsigned long *oldpgste);
1165 void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr);
1166 void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
1167 void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
1168 void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);
1169 
1170 #define pgprot_writecombine	pgprot_writecombine
1171 pgprot_t pgprot_writecombine(pgprot_t prot);
1172 
1173 #define pgprot_writethrough	pgprot_writethrough
1174 pgprot_t pgprot_writethrough(pgprot_t prot);
1175 
1176 /*
1177  * Certain architectures need to do special things when PTEs
1178  * within a page table are directly modified.  Thus, the following
1179  * hook is made available.
1180  */
1181 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
1182 			      pte_t *ptep, pte_t entry)
1183 {
1184 	if (pte_present(entry))
1185 		pte_val(entry) &= ~_PAGE_UNUSED;
1186 	if (mm_has_pgste(mm))
1187 		ptep_set_pte_at(mm, addr, ptep, entry);
1188 	else
1189 		*ptep = entry;
1190 }
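
/*
 * Illustrative sketch (hypothetical, simplified) of the
 * ptep_get_and_clear/set_pte_at sequence described in the comment
 * above ptep_xchg_direct, as used by generic code like
 * change_pte_range:
 */
static inline void __change_prot_demo(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep, pgprot_t newprot)
{
	pte_t pte;

	pte = ptep_get_and_clear(mm, addr, ptep);  /* includes the TLB flush */
	pte = pte_modify(pte, newprot);
	set_pte_at(mm, addr, ptep, pte);	   /* flush_tlb_range is a nop */
}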
1191 
1192 /*
1193  * Conversion functions: convert a page and protection to a page entry,
1194  * and a page entry and page directory to the page they refer to.
1195  */
1196 static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
1197 {
1198 	pte_t __pte;
1199 
1200 	pte_val(__pte) = physpage | pgprot_val(pgprot);
1201 	if (!MACHINE_HAS_NX)
1202 		pte_val(__pte) &= ~_PAGE_NOEXEC;
1203 	return pte_mkyoung(__pte);
1204 }
1205 
1206 static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
1207 {
1208 	unsigned long physpage = page_to_phys(page);
1209 	pte_t __pte = mk_pte_phys(physpage, pgprot);
1210 
1211 	if (pte_write(__pte) && PageDirty(page))
1212 		__pte = pte_mkdirty(__pte);
1213 	return __pte;
1214 }
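
/*
 * Illustrative sketch (hypothetical): installing a writable mapping
 * for a page with the helpers above:
 *
 *	set_pte_at(mm, addr, ptep, mk_pte(page, PAGE_RW));
 */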
1215 
1216 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
1217 #define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
1218 #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
1219 #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
1220 
1221 #define p4d_deref(pud) (p4d_val(pud) & _REGION_ENTRY_ORIGIN)
1222 #define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)
1223 
1224 static inline unsigned long pmd_deref(pmd_t pmd)
1225 {
1226 	unsigned long origin_mask;
1227 
1228 	origin_mask = _SEGMENT_ENTRY_ORIGIN;
1229 	if (pmd_large(pmd))
1230 		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
1231 	return pmd_val(pmd) & origin_mask;
1232 }
1233 
1234 static inline unsigned long pmd_pfn(pmd_t pmd)
1235 {
1236 	return pmd_deref(pmd) >> PAGE_SHIFT;
1237 }
1238 
1239 static inline unsigned long pud_deref(pud_t pud)
1240 {
1241 	unsigned long origin_mask;
1242 
1243 	origin_mask = _REGION_ENTRY_ORIGIN;
1244 	if (pud_large(pud))
1245 		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
1246 	return pud_val(pud) & origin_mask;
1247 }
1248 
1249 static inline unsigned long pud_pfn(pud_t pud)
1250 {
1251 	return pud_deref(pud) >> PAGE_SHIFT;
1252 }
1253 
1254 /*
1255  * The pgd_offset function *always* adds the index for the top-level
1256  * region/segment table. This is done to get a sequence like the
1257  * following to work:
1258  *	pgdp = pgd_offset(current->mm, addr);
1259  *	pgd = READ_ONCE(*pgdp);
1260  *	p4dp = p4d_offset(&pgd, addr);
1261  *	...
1262  * The subsequent p4d_offset, pud_offset and pmd_offset functions
1263  * only add an index if they dereferenced the pointer.
1264  */
1265 static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
1266 {
1267 	unsigned long rste;
1268 	unsigned int shift;
1269 
1270 	/* Get the first entry of the top level table */
1271 	rste = pgd_val(*pgd);
1272 	/* Pick up the shift from the table type of the first entry */
1273 	shift = ((rste & _REGION_ENTRY_TYPE_MASK) >> 2) * 11 + 20;
1274 	return pgd + ((address >> shift) & (PTRS_PER_PGD - 1));
1275 }
1276 
1277 #define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)
1278 
1279 static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address)
1280 {
1281 	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
1282 		return (p4d_t *) pgd_deref(pgd) + p4d_index(address);
1283 	return (p4d_t *) pgdp;
1284 }
1285 #define p4d_offset_lockless p4d_offset_lockless
1286 
1287 static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address)
1288 {
1289 	return p4d_offset_lockless(pgdp, *pgdp, address);
1290 }
1291 
1292 static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address)
1293 {
1294 	if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
1295 		return (pud_t *) p4d_deref(p4d) + pud_index(address);
1296 	return (pud_t *) p4dp;
1297 }
1298 #define pud_offset_lockless pud_offset_lockless
1299 
1300 static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address)
1301 {
1302 	return pud_offset_lockless(p4dp, *p4dp, address);
1303 }
1304 #define pud_offset pud_offset
1305 
1306 static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address)
1307 {
1308 	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
1309 		return (pmd_t *) pud_deref(pud) + pmd_index(address);
1310 	return (pmd_t *) pudp;
1311 }
1312 #define pmd_offset_lockless pmd_offset_lockless
1313 
1314 static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address)
1315 {
1316 	return pmd_offset_lockless(pudp, *pudp, address);
1317 }
1318 #define pmd_offset pmd_offset
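
/*
 * Illustrative sketch (hypothetical helper; no locking and no
 * pmd_none/pmd_bad checks) of the walk described above pgd_offset_raw:
 */
static inline pmd_t *__walk_to_pmd_demo(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgdp = pgd_offset(mm, addr);
	p4d_t *p4dp = p4d_offset(pgdp, addr);
	pud_t *pudp = pud_offset(p4dp, addr);

	return pmd_offset(pudp, addr);
}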
1319 
1320 static inline unsigned long pmd_page_vaddr(pmd_t pmd)
1321 {
1322 	return (unsigned long) pmd_deref(pmd);
1323 }
1324 
1325 static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
1326 {
1327 	return end <= current->mm->context.asce_limit;
1328 }
1329 #define gup_fast_permitted gup_fast_permitted
1330 
1331 #define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
1332 #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
1333 #define pte_page(x) pfn_to_page(pte_pfn(x))
1334 
1335 #define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
1336 #define pud_page(pud) pfn_to_page(pud_pfn(pud))
1337 #define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
1338 #define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))
1339 
1340 static inline pmd_t pmd_wrprotect(pmd_t pmd)
1341 {
1342 	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
1343 	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
1344 	return pmd;
1345 }
1346 
1347 static inline pmd_t pmd_mkwrite(pmd_t pmd)
1348 {
1349 	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
1350 	if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)
1351 		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
1352 	return pmd;
1353 }
1354 
1355 static inline pmd_t pmd_mkclean(pmd_t pmd)
1356 {
1357 	pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
1358 	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
1359 	return pmd;
1360 }
1361 
1362 static inline pmd_t pmd_mkdirty(pmd_t pmd)
1363 {
1364 	pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY;
1365 	if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
1366 		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
1367 	return pmd;
1368 }
1369 
1370 static inline pud_t pud_wrprotect(pud_t pud)
1371 {
1372 	pud_val(pud) &= ~_REGION3_ENTRY_WRITE;
1373 	pud_val(pud) |= _REGION_ENTRY_PROTECT;
1374 	return pud;
1375 }
1376 
1377 static inline pud_t pud_mkwrite(pud_t pud)
1378 {
1379 	pud_val(pud) |= _REGION3_ENTRY_WRITE;
1380 	if (pud_val(pud) & _REGION3_ENTRY_DIRTY)
1381 		pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
1382 	return pud;
1383 }
1384 
1385 static inline pud_t pud_mkclean(pud_t pud)
1386 {
1387 	pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
1388 	pud_val(pud) |= _REGION_ENTRY_PROTECT;
1389 	return pud;
1390 }
1391 
1392 static inline pud_t pud_mkdirty(pud_t pud)
1393 {
1394 	pud_val(pud) |= _REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY;
1395 	if (pud_val(pud) & _REGION3_ENTRY_WRITE)
1396 		pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
1397 	return pud;
1398 }
1399 
1400 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
1401 static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
1402 {
1403 	/*
1404 	 * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
1405 	 * (see __Pxxx / __Sxxx). Convert to segment table entry format.
1406 	 */
1407 	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
1408 		return pgprot_val(SEGMENT_NONE);
1409 	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
1410 		return pgprot_val(SEGMENT_RO);
1411 	if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
1412 		return pgprot_val(SEGMENT_RX);
1413 	if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
1414 		return pgprot_val(SEGMENT_RW);
1415 	return pgprot_val(SEGMENT_RWX);
1416 }
1417 
1418 static inline pmd_t pmd_mkyoung(pmd_t pmd)
1419 {
1420 	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
1421 	if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
1422 		pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
1423 	return pmd;
1424 }
1425 
1426 static inline pmd_t pmd_mkold(pmd_t pmd)
1427 {
1428 	pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
1429 	pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
1430 	return pmd;
1431 }
1432 
1433 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
1434 {
1435 	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
1436 		_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
1437 		_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
1438 	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
1439 	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
1440 		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
1441 	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
1442 		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
1443 	return pmd;
1444 }
1445 
1446 static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
1447 {
1448 	pmd_t __pmd;
1449 	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
1450 	return __pmd;
1451 }
1452 
1453 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
1454 
1455 static inline void __pmdp_csp(pmd_t *pmdp)
1456 {
1457 	csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
1458 	    pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
1459 }
1460 
1461 #define IDTE_GLOBAL	0
1462 #define IDTE_LOCAL	1
1463 
1464 #define IDTE_PTOA	0x0800
1465 #define IDTE_NODAT	0x1000
1466 #define IDTE_GUEST_ASCE	0x2000
1467 
1468 static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
1469 					unsigned long opt, unsigned long asce,
1470 					int local)
1471 {
1472 	unsigned long sto;
1473 
1474 	sto = (unsigned long) pmdp - pmd_index(addr) * sizeof(pmd_t);
1475 	if (__builtin_constant_p(opt) && opt == 0) {
1476 		/* flush without guest asce */
1477 		asm volatile(
1478 			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
1479 			: "+m" (*pmdp)
1480 			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
1481 			  [m4] "i" (local)
1482 			: "cc" );
1483 	} else {
1484 		/* flush with guest asce */
1485 		asm volatile(
1486 			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
1487 			: "+m" (*pmdp)
1488 			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
1489 			  [r3] "a" (asce), [m4] "i" (local)
1490 			: "cc" );
1491 	}
1492 }
1493 
1494 static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp,
1495 					unsigned long opt, unsigned long asce,
1496 					int local)
1497 {
1498 	unsigned long r3o;
1499 
1500 	r3o = (unsigned long) pudp - pud_index(addr) * sizeof(pud_t);
1501 	r3o |= _ASCE_TYPE_REGION3;
1502 	if (__builtin_constant_p(opt) && opt == 0) {
1503 		/* flush without guest asce */
1504 		asm volatile(
1505 			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
1506 			: "+m" (*pudp)
1507 			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)),
1508 			  [m4] "i" (local)
1509 			: "cc");
1510 	} else {
1511 		/* flush with guest asce */
1512 		asm volatile(
1513 			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
1514 			: "+m" (*pudp)
1515 			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
1516 			  [r3] "a" (asce), [m4] "i" (local)
1517 			: "cc" );
1518 	}
1519 }
1520 
1521 pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
1522 pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
1523 pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);
1524 
1525 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1526 
1527 #define __HAVE_ARCH_PGTABLE_DEPOSIT
1528 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
1529 				pgtable_t pgtable);
1530 
1531 #define __HAVE_ARCH_PGTABLE_WITHDRAW
1532 pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
1533 
1534 #define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
1535 static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
1536 					unsigned long addr, pmd_t *pmdp,
1537 					pmd_t entry, int dirty)
1538 {
1539 	VM_BUG_ON(addr & ~HPAGE_MASK);
1540 
1541 	entry = pmd_mkyoung(entry);
1542 	if (dirty)
1543 		entry = pmd_mkdirty(entry);
1544 	if (pmd_val(*pmdp) == pmd_val(entry))
1545 		return 0;
1546 	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
1547 	return 1;
1548 }
1549 
1550 #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
1551 static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
1552 					    unsigned long addr, pmd_t *pmdp)
1553 {
1554 	pmd_t pmd = *pmdp;
1555 
1556 	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
1557 	return pmd_young(pmd);
1558 }
1559 
1560 #define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
1561 static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
1562 					 unsigned long addr, pmd_t *pmdp)
1563 {
1564 	VM_BUG_ON(addr & ~HPAGE_MASK);
1565 	return pmdp_test_and_clear_young(vma, addr, pmdp);
1566 }
1567 
1568 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
1569 			      pmd_t *pmdp, pmd_t entry)
1570 {
1571 	if (!MACHINE_HAS_NX)
1572 		pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC;
1573 	*pmdp = entry;
1574 }
1575 
1576 static inline pmd_t pmd_mkhuge(pmd_t pmd)
1577 {
1578 	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
1579 	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
1580 	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
1581 	return pmd;
1582 }
1583 
1584 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
1585 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
1586 					    unsigned long addr, pmd_t *pmdp)
1587 {
1588 	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
1589 }
1590 
1591 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
1592 static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
1593 						 unsigned long addr,
1594 						 pmd_t *pmdp, int full)
1595 {
1596 	if (full) {
1597 		pmd_t pmd = *pmdp;
1598 		*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
1599 		return pmd;
1600 	}
1601 	return pmdp_xchg_lazy(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
1602 }
1603 
1604 #define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
1605 static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
1606 					  unsigned long addr, pmd_t *pmdp)
1607 {
1608 	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
1609 }
1610 
1611 #define __HAVE_ARCH_PMDP_INVALIDATE
1612 static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
1613 				   unsigned long addr, pmd_t *pmdp)
1614 {
1615 	pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
1616 
1617 	return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
1618 }
1619 
1620 #define __HAVE_ARCH_PMDP_SET_WRPROTECT
1621 static inline void pmdp_set_wrprotect(struct mm_struct *mm,
1622 				      unsigned long addr, pmd_t *pmdp)
1623 {
1624 	pmd_t pmd = *pmdp;
1625 
1626 	if (pmd_write(pmd))
1627 		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
1628 }
1629 
1630 static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
1631 					unsigned long address,
1632 					pmd_t *pmdp)
1633 {
1634 	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
1635 }
1636 #define pmdp_collapse_flush pmdp_collapse_flush
1637 
1638 #define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
1639 #define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))
1640 
1641 static inline int pmd_trans_huge(pmd_t pmd)
1642 {
1643 	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
1644 }
1645 
1646 #define has_transparent_hugepage has_transparent_hugepage
1647 static inline int has_transparent_hugepage(void)
1648 {
1649 	return MACHINE_HAS_EDAT1 ? 1 : 0;
1650 }
1651 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1652 
1653 /*
1654  * 64 bit swap entry format:
1655  * A page-table entry has some bits we have to treat in a special way.
1656  * Bits 52 and 55 have to be zero, otherwise a specification
1657  * exception will occur instead of a page translation exception. The
1658  * specification exception has the bad habit of not storing the
1659  * necessary information in the lowcore.
1660  * Bits 54 and 63 are used to indicate the page type.
1661  * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200
1662  * This leaves the bits 0-51 and bits 56-62 to store type and offset.
1663  * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51
1664  * for the offset.
1665  * |			  offset			|01100|type |00|
1666  * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
1667  * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
1668  */
1669 
1670 #define __SWP_OFFSET_MASK	((1UL << 52) - 1)
1671 #define __SWP_OFFSET_SHIFT	12
1672 #define __SWP_TYPE_MASK		((1UL << 5) - 1)
1673 #define __SWP_TYPE_SHIFT	2
1674 
1675 static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
1676 {
1677 	pte_t pte;
1678 
1679 	pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
1680 	pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
1681 	pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
1682 	return pte;
1683 }
1684 
1685 static inline unsigned long __swp_type(swp_entry_t entry)
1686 {
1687 	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
1688 }
1689 
1690 static inline unsigned long __swp_offset(swp_entry_t entry)
1691 {
1692 	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
1693 }
1694 
1695 static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
1696 {
1697 	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
1698 }
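
/*
 * Illustrative sketch (hypothetical): round-tripping a swap entry
 * through the encoding above; type and offset come back unchanged and
 * the resulting pte satisfies (pte & 0x201) == 0x200.
 */
static inline void __swp_roundtrip_demo(void)
{
	swp_entry_t entry = __swp_entry(3, 0x1234UL);

	BUG_ON(__swp_type(entry) != 3);
	BUG_ON(__swp_offset(entry) != 0x1234UL);
}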
1699 
1700 #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
1701 #define __swp_entry_to_pte(x)	((pte_t) { (x).val })
1702 
1703 #define kern_addr_valid(addr)   (1)
1704 
1705 extern int vmem_add_mapping(unsigned long start, unsigned long size);
1706 extern void vmem_remove_mapping(unsigned long start, unsigned long size);
1707 extern int s390_enable_sie(void);
1708 extern int s390_enable_skey(void);
1709 extern void s390_reset_cmma(struct mm_struct *mm);
1710 
1711 /* s390 has a private copy of get unmapped area to deal with cache synonyms */
1712 #define HAVE_ARCH_UNMAPPED_AREA
1713 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
1714 
1715 #endif /* _ASM_S390_PGTABLE_H */
1716