/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[];
extern void paging_init(void);

enum {
	PG_DIRECT_MAP_4K = 0,
	PG_DIRECT_MAP_1M,
	PG_DIRECT_MAP_2G,
	PG_DIRECT_MAP_MAX
};

extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

static inline void update_page_count(int level, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[level]);
}

struct seq_file;
void arch_report_meminfo(struct seq_file *m);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)     do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
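
/*
 * Illustrative sketch (editor's addition, not from the original source):
 * ZERO_PAGE uses zero_page_mask to pick one of several pre-zeroed pages
 * so that the returned page has the same cache colour as the faulting
 * address. The selection reduces to:
 *
 *	unsigned long offset = (unsigned long)vaddr & zero_page_mask;
 *	struct page *zp = virt_to_page((void *)(empty_zero_page + offset));
 *
 * empty_zero_page is the virtual base of the block of zero pages and
 * zero_page_mask covers the colour bits, so differently coloured
 * addresses map to different zero pages.
 */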

/* TODO: s390 cannot support io_remap_pfn_range... */

#define FIRST_USER_ADDRESS  0UL

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define p4d_ERROR(e) \
	printk("%s:%d: bad p4d %p.\n", __FILE__, __LINE__, (void *) p4d_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. We reserve 128GB (64-bit) for vmalloc and modules.
 * On 64-bit kernels we have a 2GB area at the top of the vmalloc area where
 * modules will reside. That makes sure that inter-module branches always
 * happen without trampolines and in addition the placement within a 2GB frame
 * is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
#define VMALLOC_DEFAULT_SIZE	((128UL << 30) - MODULES_LEN)
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)

static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}

/*
 * A 64 bit page table entry of S390 has the following format:
 * |			 PFRA			      |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit: Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_NOEXEC	0x100		/* HW no-execute bit  */
#define _PAGE_PROTECT	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)

/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the page
 * table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte
 * (. unassigned bit, x don't care, t swap type):
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR.uswrdy.p
 * empty			.10.00000000
 * swap				.11..ttttt.0
 * prot-none, clean, old	.11.xx0000.1
 * prot-none, clean, young	.11.xx0001.1
 * prot-none, dirty, old	.11.xx0010.1
 * prot-none, dirty, young	.11.xx0011.1
 * read-only, clean, old	.11.xx0100.1
 * read-only, clean, young	.01.xx0101.1
 * read-only, dirty, old	.11.xx0110.1
 * read-only, dirty, young	.01.xx0111.1
 * read-write, clean, old	.11.xx1100.1
 * read-write, clean, young	.01.xx1101.1
 * read-write, dirty, old	.10.xx1110.1
 * read-write, dirty, young	.00.xx1111.1
 * HW-bits: R read-only, I invalid
 * SW-bits: p present, y young, d dirty, r read, w write, s special,
 *	    u unused, l large
 *
 * pte_none    is true for the bit pattern .10.00000000, pte == 0x400
 * pte_swap    is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200
 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
 */
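
/*
 * Worked example (editor's addition): a pte whose last 12 bits are 0x03d
 * sets _PAGE_PRESENT (0x001), _PAGE_YOUNG (0x004), _PAGE_DIRTY (0x008),
 * _PAGE_READ (0x010) and _PAGE_WRITE (0x020), with _PAGE_PROTECT (0x200)
 * and _PAGE_INVALID (0x400) clear. Checking it against the rules above:
 *	pte & 0x001 == 0x001	-> pte_present() is true
 *	pte & 0x201 == 0x001	-> not 0x200, so not a swap entry
 * which matches the "read-write, dirty, young" row, .00.xx1111.1.
 */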

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL/* region/segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit	    */
#define _REGION_ENTRY_NOEXEC	0x100	/* region no-execute bit	    */
#define _REGION_ENTRY_OFFSET	0xc0	/* region table offset		    */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region table type mask	    */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address	     */
#define _REGION3_ENTRY_DIRTY	0x2000	/* SW region dirty bit */
#define _REGION3_ENTRY_YOUNG	0x1000	/* SW region young bit */
#define _REGION3_ENTRY_LARGE	0x0400	/* RTTE-format control, large page  */
#define _REGION3_ENTRY_READ	0x0002	/* SW region read bit */
#define _REGION3_ENTRY_WRITE	0x0001	/* SW region write bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
#else
#define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
#endif

#define _REGION_ENTRY_BITS	 0xfffffffffffff22fUL

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS			0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_HARDWARE_BITS		0xfffffffffffffe30UL
#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE	0xfffffffffff00730UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address	    */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* page table origin		    */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* segment protection bit	    */
#define _SEGMENT_ENTRY_NOEXEC	0x100	/* segment no-execute bit	    */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry	    */
#define _SEGMENT_ENTRY_TYPE_MASK 0x0c	/* segment table type mask	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_WRITE	0x0002	/* SW segment write bit */
#define _SEGMENT_ENTRY_READ	0x0001	/* SW segment read bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif

#define _CRST_ENTRIES	2048	/* number of region/segment table entries */
#define _PAGE_ENTRIES	256	/* number of page table entries	*/

#define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8)
#define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8)

#define _REGION1_SHIFT	53
#define _REGION2_SHIFT	42
#define _REGION3_SHIFT	31
#define _SEGMENT_SHIFT	20

#define _REGION1_INDEX	(0x7ffUL << _REGION1_SHIFT)
#define _REGION2_INDEX	(0x7ffUL << _REGION2_SHIFT)
#define _REGION3_INDEX	(0x7ffUL << _REGION3_SHIFT)
#define _SEGMENT_INDEX	(0x7ffUL << _SEGMENT_SHIFT)
#define _PAGE_INDEX	(0xffUL  << _PAGE_SHIFT)

#define _REGION1_SIZE	(1UL << _REGION1_SHIFT)
#define _REGION2_SIZE	(1UL << _REGION2_SHIFT)
#define _REGION3_SIZE	(1UL << _REGION3_SHIFT)
#define _SEGMENT_SIZE	(1UL << _SEGMENT_SHIFT)

#define _REGION1_MASK	(~(_REGION1_SIZE - 1))
#define _REGION2_MASK	(~(_REGION2_SIZE - 1))
#define _REGION3_MASK	(~(_REGION3_SIZE - 1))
#define _SEGMENT_MASK	(~(_SEGMENT_SIZE - 1))

#define PMD_SHIFT	_SEGMENT_SHIFT
#define PUD_SHIFT	_REGION3_SHIFT
#define P4D_SHIFT	_REGION2_SHIFT
#define PGDIR_SHIFT	_REGION1_SHIFT

#define PMD_SIZE	_SEGMENT_SIZE
#define PUD_SIZE	_REGION3_SIZE
#define P4D_SIZE	_REGION2_SIZE
#define PGDIR_SIZE	_REGION1_SIZE

#define PMD_MASK	_SEGMENT_MASK
#define PUD_MASK	_REGION3_MASK
#define P4D_MASK	_REGION2_MASK
#define PGDIR_MASK	_REGION1_MASK

#define PTRS_PER_PTE	_PAGE_ENTRIES
#define PTRS_PER_PMD	_CRST_ENTRIES
#define PTRS_PER_PUD	_CRST_ENTRIES
#define PTRS_PER_P4D	_CRST_ENTRIES
#define PTRS_PER_PGD	_CRST_ENTRIES

#define MAX_PTRS_PER_P4D	PTRS_PER_P4D
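
/*
 * Worked example (editor's addition): with the shifts above, the virtual
 * address 0x0000140180405000 decomposes into the table indices that the
 * generic pXd_index() macros further down compute:
 *	(addr >> 53) & 0x7ff = 0	region-first (pgd) index
 *	(addr >> 42) & 0x7ff = 5	region-second (p4d) index
 *	(addr >> 31) & 0x7ff = 3	region-third (pud) index
 *	(addr >> 20) & 0x7ff = 4	segment (pmd) index
 *	(addr >> 12) & 0xff  = 5	page table (pte) index
 */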

/*
 * Segment table and region3 table entry encoding
 * (R = read-only, I = invalid, y = young bit):
 *				dy..R...I...wr
 * prot-none, clean, old	00..1...1...00
 * prot-none, clean, young	01..1...1...00
 * prot-none, dirty, old	10..1...1...00
 * prot-none, dirty, young	11..1...1...00
 * read-only, clean, old	00..1...1...01
 * read-only, clean, young	01..1...0...01
 * read-only, dirty, old	10..1...1...01
 * read-only, dirty, young	11..1...0...01
 * read-write, clean, old	00..1...1...11
 * read-write, clean, young	01..1...0...11
 * read-write, dirty, old	10..0...1...11
 * read-write, dirty, young	11..0...0...11
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 * HW-bits: R read-only, I invalid
 * SW-bits: y young, d dirty, r read, w write
 */
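
/*
 * Worked example (editor's addition): an entry with the read, write,
 * dirty and young software bits set and both the protect and invalid
 * hardware bits clear matches the "read-write, dirty, young" row
 * (11..0...0...11) above; SEGMENT_KERNEL below encodes exactly this
 * state, plus the large-page control and the no-execute bit.
 */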

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */
#define PGSTE_VSIE_BIT	0x0000200000000000UL	/* ref'd in a shadow table */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO			0x0000000080000000UL
#define _PGSTE_GPS_NODAT		0x0000000040000000UL
#define _PGSTE_GPS_USAGE_MASK		0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE		0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED		0x0000000001000000UL
#define _PGSTE_GPS_USAGE_POT_VOLATILE	0x0000000002000000UL
#define _PGSTE_GPS_USAGE_VOLATILE	_PGSTE_GPS_USAGE_MASK

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RO		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RX		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RW		__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT | _PAGE_NOEXEC)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				  _PAGE_YOUNG |	_PAGE_DIRTY)

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
         /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RX
#define __P101	PAGE_RX
#define __P110	PAGE_RX
#define __P111	PAGE_RX

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RX
#define __S101	PAGE_RX
#define __S110	PAGE_RWX
#define __S111	PAGE_RWX

/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_RO	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RX	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_RW	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RWX	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)
#define SEGMENT_KERNEL	__pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_DIRTY | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_EXEC __pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_DIRTY)

/*
 * Region3 entry (large page) protection definitions.
 */

#define REGION3_KERNEL	__pgprot(_REGION_ENTRY_TYPE_R3 | \
				 _REGION3_ENTRY_LARGE |	 \
				 _REGION3_ENTRY_READ |	 \
				 _REGION3_ENTRY_WRITE |	 \
				 _REGION3_ENTRY_YOUNG |	 \
				 _REGION3_ENTRY_DIRTY | \
				 _REGION_ENTRY_NOEXEC)
#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
				   _REGION3_ENTRY_LARGE |  \
				   _REGION3_ENTRY_READ |   \
				   _REGION3_ENTRY_YOUNG |  \
				   _REGION_ENTRY_PROTECT | \
				   _REGION_ENTRY_NOEXEC)

static inline bool mm_p4d_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION1_SIZE;
}
#define mm_p4d_folded(mm) mm_p4d_folded(mm)

static inline bool mm_pud_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION2_SIZE;
}
#define mm_pud_folded(mm) mm_pud_folded(mm)

static inline bool mm_pmd_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION3_SIZE;
}
#define mm_pmd_folded(mm) mm_pmd_folded(mm)

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.alloc_pgste))
		return 1;
#endif
	return 0;
}

/*
 * In the case that a guest uses storage keys,
 * faults should no longer be backed by zero pages.
 */
#define mm_forbids_zeropage mm_has_pgste
static inline int mm_uses_skeys(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.uses_skeys)
		return 1;
#endif
	return 0;
}

static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	csp	%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	.insn	rre,0xb98a0000,%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

#define CRDTE_DTT_PAGE		0x00UL
#define CRDTE_DTT_SEGMENT	0x10UL
#define CRDTE_DTT_REGION3	0x14UL
#define CRDTE_DTT_REGION2	0x18UL
#define CRDTE_DTT_REGION1	0x1cUL

static inline void crdte(unsigned long old, unsigned long new,
			 unsigned long table, unsigned long dtt,
			 unsigned long address, unsigned long asce)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	register unsigned long reg4 asm("4") = table | dtt;
	register unsigned long reg5 asm("5") = address;

	asm volatile(".insn rrf,0xb98f0000,%0,%2,%4,0"
		     : "+d" (reg2)
		     : "d" (reg3), "d" (reg4), "d" (reg5), "a" (asce)
		     : "memory", "cc");
}
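
/*
 * Usage sketch (editor's addition, simplified and assumption-laden): to
 * exchange a segment table entry with compare-and-replace-DAT-table-entry,
 * the origin of the containing table is combined with the segment DTT code:
 *
 *	unsigned long table = (unsigned long)pmdp & ~(_CRST_TABLE_SIZE - 1);
 *
 *	crdte(pmd_val(old), pmd_val(new), table, CRDTE_DTT_SEGMENT,
 *	      addr, mm->context.asce);
 *
 * where pmdp, old, new, addr and mm are assumed to describe the entry
 * being replaced; the DTT bits tell the instruction which table level
 * the origin refers to.
 */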

/*
 * pgd/p4d/pud/pmd/pte query functions
 */
static inline int pgd_folded(pgd_t pgd)
{
	return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
}

static inline int pgd_present(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1)
		return 0;
	return (pgd_val(pgd) & ~_REGION_ENTRY_BITS) != 0;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT;
}

static inline int p4d_folded(p4d_t p4d)
{
	return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
}

static inline int p4d_present(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 1;
	return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int p4d_none(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 0;
	return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
}

static inline int pud_folded(pud_t pud)
{
	return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
}

static inline int pud_present(pud_t pud)
{
	if (pud_folded(pud))
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if (pud_folded(pud))
		return 0;
	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}

#define pud_leaf	pud_large
static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline unsigned long pud_pfn(pud_t pud)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	if (pud_large(pud))
		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
	return (pud_val(pud) & origin_mask) >> PAGE_SHIFT;
}

#define pmd_leaf	pmd_large
static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline int pmd_bad(pmd_t pmd)
{
	if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_large(pmd))
		return 1;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

static inline int pud_bad(pud_t pud)
{
	unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R3 || pud_large(pud))
		return 1;
	if (type < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int p4d_bad(p4d_t p4d)
{
	unsigned long type = p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R2)
		return 1;
	if (type < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
}

static inline int pmd_young(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x201) == 0x200 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		== _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
	/* pmd_large(pmd) implies pmd_present(pmd) */
	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif

static inline int pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

/*
 * The query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}

/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		pgd_val(*pgd) = _REGION1_ENTRY_EMPTY;
}

static inline void p4d_clear(p4d_t *p4d)
{
	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		p4d_val(*p4d) = _REGION2_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
	 * has the invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte_val(pte) &= ~_PAGE_INVALID;
	/*
	 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
	 * protection bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}
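
/*
 * Editor's sketch of the net effect (not from the original source):
 * downgrading a young, dirty, writable pte to PAGE_RO keeps the young
 * and dirty software bits (they are part of _PAGE_CHG_MASK) but drops
 * the write permission:
 *
 *	pte = pte_modify(pte, PAGE_RO);
 *	// _PAGE_WRITE is gone, _PAGE_PROTECT stays set because the pte
 *	// is no longer writable, and _PAGE_INVALID is cleared again
 *	// because the pte is readable and young.
 */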

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_YOUNG;
	pte_val(pte) |= _PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_YOUNG;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) &= ~_PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_LARGE;
	return pte;
}
#endif

#define IPTE_GLOBAL	0
#define	IPTE_LOCAL	1

#define IPTE_NODAT	0x400
#define IPTE_GUEST_ASCE	0x800

static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long pto = (unsigned long) ptep;

	if (__builtin_constant_p(opt) && opt == 0) {
		/* Invalidation + TLB flush for the pte */
		asm volatile(
			"	.insn	rrf,0xb2210000,%[r1],%[r2],0,%[m4]"
			: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
			  [m4] "i" (local));
		return;
	}

	/* Invalidate ptes with options + TLB flush of the ptes */
	opt = opt | (asce & _ASCE_ORIGIN);
	asm volatile(
		"	.insn	rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
		: [r2] "+a" (address), [r3] "+a" (opt)
		: [r1] "a" (pto), [m4] "i" (local) : "memory");
}

static __always_inline void __ptep_ipte_range(unsigned long address, int nr,
					      pte_t *ptep, int local)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidate a range of ptes + TLB flush of the ptes */
	do {
		asm volatile(
			"       .insn rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
			: [r2] "+a" (address), [r3] "+a" (nr)
			: [r1] "a" (pto), [m4] "i" (local) : "memory");
	} while (nr != 255);
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);
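
/*
 * Editor's illustration of the common-code sequence described above
 * (simplified sketch, not the literal mm code):
 *
 *	pte_t old = ptep_get_and_clear(mm, addr, ptep);	// flushes the TLB
 *	set_pte_at(mm, addr, ptep, pte_modify(old, newprot));
 *	flush_tlb_range(vma, start, end);		// nop on s390
 *
 * Because the flush already happened in the first step, no CPU can use
 * the old translation between the clear and the final flush_tlb_range.
 */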

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return pte_young(pte);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
			     pte_t *, pte_t, pte_t);

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep)
{
	return ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	if (full) {
		pte_t pte = *ptep;
		*ptep = __pte(_PAGE_INVALID);
		return pte;
	}
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_write(pte))
		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (pte_same(*ptep, entry))
		return 0;
	ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
	return 1;
}

/*
 * Additional functions to handle KVM guest page tables
 */
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry);
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void ptep_notify(struct mm_struct *mm, unsigned long addr,
		 pte_t *ptep, unsigned long bits);
int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
		    pte_t *ptep, int prot, unsigned long bit);
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset);
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte);
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);

bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address,
			    pte_t *ptep);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq);
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc);
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key);

int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
				unsigned long bits, unsigned long value);
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
			unsigned long *oldpte, unsigned long *oldpgste);
void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	if (pte_present(entry))
		pte_val(entry) &= ~_PAGE_UNUSED;
	if (mm_has_pgste(mm))
		ptep_set_pte_at(mm, addr, ptep, entry);
	else
		*ptep = entry;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	if (!MACHINE_HAS_NX)
		pte_val(__pte) &= ~_PAGE_NOEXEC;
	return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define p4d_deref(p4d) (p4d_val(p4d) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

/*
 * The pgd_offset function *always* adds the index for the top-level
 * region/segment table. This is done to get a sequence like the
 * following to work:
 *	pgdp = pgd_offset(current->mm, addr);
 *	pgd = READ_ONCE(*pgdp);
 *	p4dp = p4d_offset(&pgd, addr);
 *	...
 * The subsequent p4d_offset, pud_offset and pmd_offset functions
 * only add an index if they dereferenced the pointer.
 */
static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
{
	unsigned long rste;
	unsigned int shift;

	/* Get the first entry of the top level table */
	rste = pgd_val(*pgd);
	/* Pick up the shift from the table type of the first entry */
	shift = ((rste & _REGION_ENTRY_TYPE_MASK) >> 2) * 11 + 20;
	return pgd + ((address >> shift) & (PTRS_PER_PGD - 1));
}
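
/*
 * Worked example (editor's addition): the shift formula above maps the
 * table type bits straight to the matching *_SHIFT constants:
 *	region-first  (0x0c): (0x0c >> 2) * 11 + 20 = 3 * 11 + 20 = 53
 *	region-second (0x08): (0x08 >> 2) * 11 + 20 = 2 * 11 + 20 = 42
 *	region-third  (0x04): (0x04 >> 2) * 11 + 20 = 1 * 11 + 20 = 31
 *	segment       (0x00): (0x00 >> 2) * 11 + 20 = 0 * 11 + 20 = 20
 * i.e. _REGION1_SHIFT, _REGION2_SHIFT, _REGION3_SHIFT and _SEGMENT_SHIFT.
 */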

#define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
		return (p4d_t *) pgd_deref(*pgd) + p4d_index(address);
	return (p4d_t *) pgd;
}

static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
		return (pud_t *) p4d_deref(*p4d) + pud_index(address);
	return (pud_t *) p4d;
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
		return (pmd_t *) pud_deref(*pud) + pmd_index(address);
	return (pmd_t *) pud;
}

static inline pte_t *pte_offset(pmd_t *pmd, unsigned long address)
{
	return (pte_t *) pmd_deref(*pmd) + pte_index(address);
}

#define pte_offset_kernel(pmd, address) pte_offset(pmd, address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)

static inline void pte_unmap(pte_t *pte) { }

static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
{
	return end <= current->mm->context.asce_limit;
}
#define gup_fast_permitted gup_fast_permitted

#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
#define pud_page(pud) pfn_to_page(pud_pfn(pud))
#define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
#define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
	if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY;
	if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	pud_val(pud) &= ~_REGION3_ENTRY_WRITE;
	pud_val(pud) |= _REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	pud_val(pud) |= _REGION3_ENTRY_WRITE;
	if (pud_val(pud) & _REGION3_ENTRY_DIRTY)
		pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkclean(pud_t pud)
{
	pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
	pud_val(pud) |= _REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	pud_val(pud) |= _REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY;
	if (pud_val(pud) & _REGION3_ENTRY_WRITE)
		pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	return pud;
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
	 * (see __Pxxx / __Sxxx). Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
		return pgprot_val(SEGMENT_RO);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
		return pgprot_val(SEGMENT_RX);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
		return pgprot_val(SEGMENT_RW);
	return pgprot_val(SEGMENT_RWX);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	return pmd;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
		_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
		_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;
	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */

static inline void __pmdp_csp(pmd_t *pmdp)
{
	csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
	    pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
}

#define IDTE_GLOBAL	0
#define IDTE_LOCAL	1

#define IDTE_PTOA	0x0800
#define IDTE_NODAT	0x1000
#define IDTE_GUEST_ASCE	0x2000

static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(addr) * sizeof(pmd_t);
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
			  [m4] "i" (local)
			: "cc" );
	} else {
		/* flush with guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long r3o;

	r3o = (unsigned long) pudp - pud_index(addr) * sizeof(pud_t);
	r3o |= _ASCE_TYPE_REGION3;
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)),
			  [m4] "i" (local)
			: "cc");
	} else {
		/* flush with guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define  __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);

	entry = pmd_mkyoung(entry);
	if (dirty)
		entry = pmd_mkdirty(entry);
	if (pmd_val(*pmdp) == pmd_val(entry))
		return 0;
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
	return 1;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
	return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);
	return pmdp_test_and_clear_young(vma, addr, pmdp);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	if (!MACHINE_HAS_NX)
		pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC;
	*pmdp = entry;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
						 unsigned long addr,
						 pmd_t *pmdp, int full)
{
	if (full) {
		pmd_t pmd = *pmdp;
		*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
		return pmd;
	}
	return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
				   unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);

	return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd))
		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_EDAT1 ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bits 54 and 63 are used to indicate the page type.
 * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200
 * This leaves the bits 0-51 and bits 56-62 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51
 * for the offset.
 * |			  offset			|01100|type |00|
 * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
 * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
 */

#define __SWP_OFFSET_MASK	((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_TYPE_MASK		((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT	2

static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
	pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
	pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
	return pte;
}

static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
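
/*
 * Round-trip sketch (editor's addition): encoding type 3 and offset
 * 0x1234 with the helpers above yields
 *	pte_val = _PAGE_INVALID | _PAGE_PROTECT	// 0x600
 *		| (0x1234 << __SWP_OFFSET_SHIFT)	// 0x1234000
 *		| (3 << __SWP_TYPE_SHIFT)		// 0x00c
 *		= 0x000000000123460c
 * __swp_type() and __swp_offset() recover 3 and 0x1234 again, and
 * (pte & 0x201) == 0x200 holds, so pte_swap() recognizes the entry.
 */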

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/* s390 has a private copy of get unmapped area to deal with cache synonyms */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */