/*
 *  include/asm-s390/pgtable.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

/*
 * The Linux memory management assumes a three-level page table setup. For
 * s390 31 bit we "fold" the mid level into the top-level page table, so
 * that we physically have the same two-level page table as the s390 mmu
 * expects in 31 bit mode. For s390 64 bit we use three of the five levels
 * the hardware provides (region first and region second tables are not
 * used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <asm/bitops.h>
#include <asm/bug.h>
#include <asm/processor.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, pte)     do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#ifndef __s390x__
# define PMD_SHIFT	20
# define PUD_SHIFT	20
# define PGDIR_SHIFT	20
#else /* __s390x__ */
# define PMD_SHIFT	20
# define PUD_SHIFT	31
# define PGDIR_SHIFT	42
#endif /* __s390x__ */

#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
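
/*
 * Editorial note, a worked example of what the shifts above buy us on
 * 64 bit (not part of the original header):
 *   PMD_SIZE   = 1UL << 20  ->  1 MB mapped per segment table entry
 *   PUD_SIZE   = 1UL << 31  ->  2 GB mapped per region third table entry
 *   PGDIR_SIZE = 1UL << 42  ->  4 TB mapped per region second table entry
 * On 31 bit all three shifts are 20, i.e. the upper levels are folded
 * and everything above the pte level works in 1 MB segments.
 */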

/*
 * entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * For S390 segment-table entries are combined into one PGD,
 * which leads to 1024 ptes per pgd.
 */
#define PTRS_PER_PTE	256
#ifndef __s390x__
#define PTRS_PER_PMD	1
#define PTRS_PER_PUD	1
#else /* __s390x__ */
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#endif /* __s390x__ */
#define PTRS_PER_PGD	2048

#define FIRST_USER_ADDRESS  0

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

#ifndef __ASSEMBLY__
/*
 * The vmalloc area will always be on the topmost area of the kernel
 * mapping. We reserve 96MB (31bit) / 1GB (64bit) for vmalloc,
 * which should be enough for any sane case.
 * By putting vmalloc at the top, we maximise the gap between physical
 * memory and vmalloc to catch misplaced memory accesses. As a side
 * effect, this also makes sure that 64 bit module code cannot be used
 * as a system call address.
 */

extern unsigned long VMALLOC_START;

#ifndef __s390x__
#define VMALLOC_SIZE	(96UL << 20)
#define VMALLOC_END	0x7e000000UL
#define VMEM_MAP_END	0x80000000UL
#else /* __s390x__ */
#define VMALLOC_SIZE	(1UL << 30)
#define VMALLOC_END	0x3e040000000UL
#define VMEM_MAP_END	0x40000000000UL
#endif /* __s390x__ */

/*
 * VMEM_MAX_PHYS is the highest physical address that can be added to the 1:1
 * mapping. This needs to be calculated at compile time since the size of the
 * VMEM_MAP is static but the size of struct page can change.
 */
#define VMEM_MAX_PAGES	((VMEM_MAP_END - VMALLOC_END) / sizeof(struct page))
#define VMEM_MAX_PFN	min(VMALLOC_START >> PAGE_SHIFT, VMEM_MAX_PAGES)
#define VMEM_MAX_PHYS	((VMEM_MAX_PFN << PAGE_SHIFT) & ~((16 << 20) - 1))
#define vmemmap		((struct page *) VMALLOC_END)
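
/*
 * Editorial sketch of the arithmetic above, assuming the 64 bit values
 * and a 64 byte struct page (the real size depends on the config):
 *   VMEM_MAP_END - VMALLOC_END = 0x40000000000 - 0x3e040000000
 *                              = 0x1fc0000000 bytes for the vmem map
 *   VMEM_MAX_PAGES             = 0x1fc0000000 / 64 = 0x7f000000
 * VMEM_MAX_PFN additionally caps this at VMALLOC_START >> PAGE_SHIFT and
 * VMEM_MAX_PHYS rounds the result down to a 16 MB boundary.
 */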

/*
 * A 31 bit page table entry of S390 has the following format:
 *  |   PFRA          |    |  OS  |
 * 0                   0IP0
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 31 bit segment table entry of S390 has the following format:
 *  |   P-table origin      |  |PTL
 * 0                         IC
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * PTL Page-Table-Length:    Page-table length ((PTL+1)*16 entries -> up to 256)
 *
 * The 31 bit segment table origin of S390 has the following format:
 *
 *  |S-table origin   |     | STL |
 * X                   **GPS
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:     *
 * P Private-Space Bit:       Segment is not private (PoP 3-30)
 * S Storage-Alteration:
 * STL Segment-Table-Length:  Segment-table length ((STL+1)*16 entries -> up to 2048)
 *
 * A 64 bit page table entry of S390 has the following format:
 * |                     PFRA                         |0IP0|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit:    Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin                           |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_RO	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */

/* Software bits in the page table entry */
#define _PAGE_SWT	0x001		/* SW pte type bit t */
#define _PAGE_SWX	0x002		/* SW pte type bit x */
#define _PAGE_SPECIAL	0x004		/* SW associated with special page */
#define __HAVE_ARCH_PTE_SPECIAL

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_SPECIAL)

/* Eight different types of pages. */
#define _PAGE_TYPE_EMPTY	0x400
#define _PAGE_TYPE_NONE		0x401
#define _PAGE_TYPE_SWAP		0x403
#define _PAGE_TYPE_FILE		0x601	/* bit 0x002 is used for offset !! */
#define _PAGE_TYPE_RO		0x200
#define _PAGE_TYPE_RW		0x000
#define _PAGE_TYPE_EX_RO	0x202
#define _PAGE_TYPE_EX_RW	0x002

/*
 * Only four types for huge pages, using the invalid bit and protection bit
 * of a segment table entry.
 */
#define _HPAGE_TYPE_EMPTY	0x020	/* _SEGMENT_ENTRY_INV */
#define _HPAGE_TYPE_NONE	0x220
#define _HPAGE_TYPE_RO		0x200	/* _SEGMENT_ENTRY_RO  */
#define _HPAGE_TYPE_RW		0x000

/*
 * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
 * pte_none and pte_file to find out the pte type WITHOUT holding the page
 * table lock. ptep_clear_flush on the other hand uses the ipte instruction
 * to invalidate a given pte. ipte sets the hw invalid bit and clears all tlbs
 * for the page. The page table entry is set to _PAGE_TYPE_EMPTY afterwards.
 * This change is done while holding the lock, but the intermediate step
 * of a previously valid pte with the hw invalid bit set can be observed by
 * handle_pte_fault. That makes it necessary that all valid pte types with
 * the hw invalid bit set must be distinguishable from the four pte types
 * empty, none, swap and file.
 *
 *			irxt  ipte  irxt
 * _PAGE_TYPE_EMPTY	1000   ->   1000
 * _PAGE_TYPE_NONE	1001   ->   1001
 * _PAGE_TYPE_SWAP	1011   ->   1011
 * _PAGE_TYPE_FILE	11?1   ->   11?1
 * _PAGE_TYPE_RO	0100   ->   1100
 * _PAGE_TYPE_RW	0000   ->   1000
 * _PAGE_TYPE_EX_RO	0110   ->   1110
 * _PAGE_TYPE_EX_RW	0010   ->   1010
 *
 * pte_none is true for the bit combinations 1000, 1010, 1100, 1110
 * pte_present is true for the bit combinations 0000, 0010, 0100, 0110, 1001
 * pte_file is true for the bit combinations 1101, 1111
 * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
 */
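
/*
 * Editorial sketch, not part of the original header: a hypothetical helper
 * that extracts the irxt nibble used in the table above.
 *
 *	static inline unsigned int pte_irxt(pte_t pte)
 *	{
 *		return (!!(pte_val(pte) & _PAGE_INVALID) << 3) |
 *		       (!!(pte_val(pte) & _PAGE_RO) << 2) |
 *		       (!!(pte_val(pte) & _PAGE_SWX) << 1) |
 *			!!(pte_val(pte) & _PAGE_SWT);
 *	}
 *
 * With this encoding _PAGE_TYPE_RW reads as 0000 and becomes 1000 after an
 * ipte, which is why pte_none() accepts 1000, 1010, 1100 and 1110.
 */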

/* Page status table bits for virtualization */
#define RCP_PCL_BIT	55
#define RCP_HR_BIT	54
#define RCP_HC_BIT	53
#define RCP_GR_BIT	50
#define RCP_GC_BIT	49

/* User dirty bit for KVM's migration feature */
#define KVM_UD_BIT	47

#ifndef __s390x__

/* Bits in the segment table address-space-control-element */
#define _ASCE_SPACE_SWITCH	0x80000000UL	/* space switch event	    */
#define _ASCE_ORIGIN_MASK	0x7ffff000UL	/* segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_TABLE_LENGTH	0x7f	/* 128 x 64 entries = 8k	    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	0x7fffffc0UL	/* page table origin	    */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */
#define _SEGMENT_ENTRY_COMMON	0x10	/* common segment bit		    */
#define _SEGMENT_ENTRY_PTL	0x0f	/* page table length		    */

#define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

#else /* __s390x__ */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL/* segment table origin		    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
#define _REGION_ENTRY_INV	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region table length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* segment table origin		    */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

#define _SEGMENT_ENTRY_LARGE	0x400	/* STE-format control, large page   */
#define _SEGMENT_ENTRY_CO	0x100	/* change-recording override	    */

#endif /* __s390x__ */

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/* Bits in the storage key */
#define _PAGE_CHANGED    0x02          /* HW changed bit                   */
#define _PAGE_REFERENCED 0x04          /* HW referenced bit                */

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_TYPE_NONE)
#define PAGE_RO		__pgprot(_PAGE_TYPE_RO)
#define PAGE_RW		__pgprot(_PAGE_TYPE_RW)
#define PAGE_EX_RO	__pgprot(_PAGE_TYPE_EX_RO)
#define PAGE_EX_RW	__pgprot(_PAGE_TYPE_EX_RW)

#define PAGE_KERNEL	PAGE_RW
#define PAGE_COPY	PAGE_RO

/*
 * Depending on the EXEC_PROTECT option s390 can do execute protection.
 * Write permission always implies read permission. In theory an
 * execute-only mode could be implemented with a primary/secondary page
 * table, but it would cost an additional bit in the pte to distinguish
 * all the different pte types. To avoid that, execute permission
 * currently implies read permission as well.
 */
         /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_EX_RO
#define __P101	PAGE_EX_RO
#define __P110	PAGE_EX_RO
#define __P111	PAGE_EX_RO

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_EX_RO
#define __S101	PAGE_EX_RO
#define __S110	PAGE_EX_RW
#define __S111	PAGE_EX_RW

#ifndef __s390x__
# define PxD_SHADOW_SHIFT	1
#else /* __s390x__ */
# define PxD_SHADOW_SHIFT	2
#endif /* __s390x__ */

static inline void *get_shadow_table(void *table)
{
	unsigned long addr, offset;
	struct page *page;

	addr = (unsigned long) table;
	offset = addr & ((PAGE_SIZE << PxD_SHADOW_SHIFT) - 1);
	page = virt_to_page((void *)(addr ^ offset));
	return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
}
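
/*
 * Editorial usage note: the shadow table address is stored in page->index
 * of the page that backs the primary table, so every update of a table
 * entry is mirrored on the shadow, as the clear functions below do:
 *
 *	pmd_t *shadow = get_shadow_table(pmd);
 *	pmd_clear_kernel(pmd);
 *	if (shadow)
 *		pmd_clear_kernel(shadow);
 */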

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	*ptep = entry;
	if (mm->context.noexec) {
		if (!(pte_val(entry) & _PAGE_INVALID) &&
		    (pte_val(entry) & _PAGE_SWX))
			pte_val(entry) |= _PAGE_RO;
		else
			pte_val(entry) = _PAGE_TYPE_EMPTY;
		ptep[PTRS_PER_PTE] = entry;
	}
}
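
/*
 * Editorial example for the noexec handling above: a PAGE_RW pte (irxt
 * 0000, no _PAGE_SWX) is shadowed as _PAGE_TYPE_EMPTY, while a PAGE_EX_RW
 * pte (irxt 0010, _PAGE_SWX set) is shadowed with _PAGE_RO added, so the
 * secondary space copy is at most read-only.
 */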

/*
 * pgd/pmd/pte query functions
 */
#ifndef __s390x__

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd)    { return 0; }
static inline int pgd_bad(pgd_t pgd)     { return 0; }

static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud)	 { return 0; }
static inline int pud_bad(pud_t pud)	 { return 0; }

#else /* __s390x__ */

static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}

#endif /* __s390x__ */

static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) != 0UL;
}

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) != 0UL;
}

static inline int pmd_bad(pmd_t pmd)
{
	unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
	return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
}

static inline int pte_present(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;
	return (pte_val(pte) & mask) == _PAGE_TYPE_NONE ||
		(!(pte_val(pte) & _PAGE_INVALID) &&
		 !(pte_val(pte) & _PAGE_SWT));
}

static inline int pte_file(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;
	return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(a,b)  (pte_val(a) == pte_val(b))

static inline void rcp_lock(pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
	preempt_disable();
	while (test_and_set_bit(RCP_PCL_BIT, pgste))
		;
#endif
}

static inline void rcp_unlock(pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
	clear_bit(RCP_PCL_BIT, pgste);
	preempt_enable();
#endif
}
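
/*
 * Editorial sketch: rcp_lock/rcp_unlock form a bit spinlock on RCP_PCL_BIT
 * of the pgste that sits PTRS_PER_PTE entries after the pte. Callers
 * bracket their storage key and RCP_*/KVM_UD_BIT updates with it:
 *
 *	rcp_lock(ptep);
 *	... read storage key, move referenced/changed state into the pgste ...
 *	rcp_unlock(ptep);
 */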

/* forward declaration for SetPageUptodate in page-flags.h */
static inline void page_clear_dirty(struct page *page);
#include <linux/page-flags.h>

static inline void ptep_rcp_copy(pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	struct page *page = virt_to_page(pte_val(*ptep));
	unsigned int skey;
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);

	skey = page_get_storage_key(page_to_phys(page));
	if (skey & _PAGE_CHANGED) {
		set_bit_simple(RCP_GC_BIT, pgste);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	if (skey & _PAGE_REFERENCED)
		set_bit_simple(RCP_GR_BIT, pgste);
	if (test_and_clear_bit_simple(RCP_HC_BIT, pgste)) {
		SetPageDirty(page);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	if (test_and_clear_bit_simple(RCP_HR_BIT, pgste))
		SetPageReferenced(page);
#endif
}

/*
 * The query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_RO) == 0;
}

static inline int pte_dirty(pte_t pte)
{
	/* A pte is neither clean nor dirty on s/390. The dirty bit
	 * is in the storage key. See page_test_dirty and
	 * page_clear_dirty for details.
	 */
	return 0;
}

static inline int pte_young(pte_t pte)
{
	/* A pte is neither young nor old on s/390. The young bit
	 * is in the storage key. See page_test_and_clear_young for
	 * details.
	 */
	return 0;
}

/*
 * pgd/pmd/pte modification functions
 */

#ifndef __s390x__

#define pgd_clear(pgd)		do { } while (0)
#define pud_clear(pud)		do { } while (0)

#else /* __s390x__ */

static inline void pgd_clear_kernel(pgd_t * pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
}

static inline void pgd_clear(pgd_t * pgd)
{
	pgd_t *shadow = get_shadow_table(pgd);

	pgd_clear_kernel(pgd);
	if (shadow)
		pgd_clear_kernel(shadow);
}

static inline void pud_clear_kernel(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	pud_t *shadow = get_shadow_table(pud);

	pud_clear_kernel(pud);
	if (shadow)
		pud_clear_kernel(shadow);
}

#endif /* __s390x__ */

static inline void pmd_clear_kernel(pmd_t * pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmd)
{
	pmd_t *shadow = get_shadow_table(pmd);

	pmd_clear_kernel(pmd);
	if (shadow)
		pmd_clear_kernel(shadow);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
	if (mm->context.noexec)
		pte_val(ptep[PTRS_PER_PTE]) = _PAGE_TYPE_EMPTY;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	/* Do not clobber _PAGE_TYPE_NONE pages!  */
	if (!(pte_val(pte) & _PAGE_INVALID))
		pte_val(pte) |= _PAGE_RO;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_RO;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	/* The only user of pte_mkclean is the fork() code.
	   We must *not* clear the *physical* page dirty bit
	   just because fork() wants to clear the dirty bit in
	   *one* of the page's mappings.  So we just do nothing. */
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	/* We do not explicitly set the dirty bit because the
	 * sske instruction is slow. It is faster to let the
	 * next instruction set the dirty bit.
	 */
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	/* S/390 doesn't keep its dirty/referenced bit in the pte.
	 * There is no point in clearing the real referenced bit.
	 */
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	/* S/390 doesn't keep its dirty/referenced bit in the pte.
	 * There is no point in setting the real referenced bit.
	 */
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_PGSTE
/*
 * Get (and clear) the user dirty bit for a PTE.
 */
static inline int kvm_s390_test_and_clear_page_dirty(struct mm_struct *mm,
						     pte_t *ptep)
{
	int dirty;
	unsigned long *pgste;
	struct page *page;
	unsigned int skey;

	if (!mm->context.has_pgste)
		return -EINVAL;
	rcp_lock(ptep);
	pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
	page = virt_to_page(pte_val(*ptep));
	skey = page_get_storage_key(page_to_phys(page));
	if (skey & _PAGE_CHANGED) {
		set_bit_simple(RCP_GC_BIT, pgste);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	if (test_and_clear_bit_simple(RCP_HC_BIT, pgste)) {
		SetPageDirty(page);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	dirty = test_and_clear_bit_simple(KVM_UD_BIT, pgste);
	if (skey & _PAGE_CHANGED)
		page_clear_dirty(page);
	rcp_unlock(ptep);
	return dirty;
}
#endif

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	unsigned long physpage;
	int young;
	unsigned long *pgste;

	if (!vma->vm_mm->context.has_pgste)
		return 0;
	physpage = pte_val(*ptep) & PAGE_MASK;
	pgste = (unsigned long *) (ptep + PTRS_PER_PTE);

	young = ((page_get_storage_key(physpage) & _PAGE_REFERENCED) != 0);
	rcp_lock(ptep);
	if (young)
		set_bit_simple(RCP_GR_BIT, pgste);
	young |= test_and_clear_bit_simple(RCP_HR_BIT, pgste);
	rcp_unlock(ptep);
	return young;
#endif
	return 0;
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/* No need to flush the TLB;
	 * on s390 the reference bits are in the storage key and never in
	 * the TLB. With virtualization we handle the reference bit,
	 * without it we can simply return. */
#ifdef CONFIG_PGSTE
	return ptep_test_and_clear_young(vma, address, ptep);
#endif
	return 0;
}

static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
#ifndef __s390x__
		/* pto must point to the start of the segment table */
		pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
#else
		/* ipte in zarch mode can do the math */
		pte_t *pto = ptep;
#endif
		asm volatile(
			"	ipte	%2,%3"
			: "=m" (*ptep) : "m" (*ptep),
			  "a" (pto), "a" (address));
	}
}

static inline void ptep_invalidate(struct mm_struct *mm,
				   unsigned long address, pte_t *ptep)
{
	if (mm->context.has_pgste) {
		rcp_lock(ptep);
		__ptep_ipte(address, ptep);
		ptep_rcp_copy(ptep);
		pte_val(*ptep) = _PAGE_TYPE_EMPTY;
		rcp_unlock(ptep);
		return;
	}
	__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
	if (mm->context.noexec) {
		__ptep_ipte(address, ptep + PTRS_PER_PTE);
		pte_val(*(ptep + PTRS_PER_PTE)) = _PAGE_TYPE_EMPTY;
	}
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear(__mm, __address, __ptep)			\
({									\
	pte_t __pte = *(__ptep);					\
	if (atomic_read(&(__mm)->mm_users) > 1 ||			\
	    (__mm) != current->active_mm)				\
		ptep_invalidate(__mm, __address, __ptep);		\
	else								\
		pte_clear((__mm), (__address), (__ptep));		\
	__pte;								\
})
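
/*
 * Editorial sketch of the common code sequence described above, roughly
 * what change_pte_range does:
 *
 *	pte_t pte = ptep_get_and_clear(mm, addr, ptep); // flushes on s390
 *	pte = pte_modify(pte, newprot);
 *	set_pte_at(mm, addr, ptep, pte);
 *	flush_tlb_range(vma, start, end);               // nop on s390
 */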

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
	ptep_invalidate(vma->vm_mm, address, ptep);
	return pte;
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	pte_t pte = *ptep;

	if (full)
		pte_clear(mm, addr, ptep);
	else
		ptep_invalidate(mm, addr, ptep);
	return pte;
}
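
/*
 * Editorial usage note: the batched unmap path passes full==1 when
 * tlb_gather_mmu could flush the whole mm up front, so the expensive
 * ptep_invalidate is skipped:
 *
 *	ptep_get_and_clear_full(mm, addr, ptep, 1);  // plain pte_clear
 *	ptep_get_and_clear_full(mm, addr, ptep, 0);  // ipte + clear
 */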

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define ptep_set_wrprotect(__mm, __addr, __ptep)			\
({									\
	pte_t __pte = *(__ptep);					\
	if (pte_write(__pte)) {						\
		if (atomic_read(&(__mm)->mm_users) > 1 ||		\
		    (__mm) != current->active_mm)			\
			ptep_invalidate(__mm, __addr, __ptep);		\
		set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte));	\
	}								\
})

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty)	\
({									\
	int __changed = !pte_same(*(__ptep), __entry);			\
	if (__changed) {						\
		ptep_invalidate((__vma)->vm_mm, __addr, __ptep);	\
		set_pte_at((__vma)->vm_mm, __addr, __ptep, __entry);	\
	}								\
	__changed;							\
})

/*
 * Test and clear dirty bit in storage key.
 * We can't clear the changed bit atomically. This is a potential
 * race against modification of the referenced bit. This function
 * should therefore only be called if the page is not mapped in any
 * address space.
 */
#define __HAVE_ARCH_PAGE_TEST_DIRTY
static inline int page_test_dirty(struct page *page)
{
	return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0;
}

#define __HAVE_ARCH_PAGE_CLEAR_DIRTY
static inline void page_clear_dirty(struct page *page)
{
	page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY);
}

/*
 * Test and clear referenced bit in storage key.
 */
#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
static inline int page_test_and_clear_young(struct page *page)
{
	unsigned long physpage = page_to_phys(page);
	int ccode;

	asm volatile(
		"	rrbe	0,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (ccode) : "a" (physpage) : "cc" );
	return ccode & 2;
}
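
/*
 * Editorial note: "rrbe" resets the reference bit in the storage key of
 * the frame addressed by %1 and sets the condition code from its old
 * state; "ipm"/"srl" extract the cc, and "ccode & 2" is non-zero exactly
 * when the reference bit was set before the reset. A rough C-level
 * equivalent of the result:
 *
 *	int was_young = (page_get_storage_key(physpage) &
 *			 _PAGE_REFERENCED) != 0;
 */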

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return __pte;
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);

	return mk_pte_phys(physpage, pgprot);
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#ifndef __s390x__

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pmd) ({ BUG(); 0UL; })
#define pgd_deref(pmd) ({ BUG(); 0UL; })

#define pud_offset(pgd, address) ((pud_t *) pgd)
#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))

#else /* __s390x__ */

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	pud_t *pud = (pud_t *) pgd;
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) pgd_deref(*pgd);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#endif /* __s390x__ */

#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/* Find an entry in the lowest level page table. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
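
/*
 * Editorial sketch, a full software walk with the offset functions above,
 * the way generic mm code does it:
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_map(pmd, addr);
 *
 * With dynamic page table levels pud_offset and pmd_offset inspect the
 * entry type and pass the pointer through unchanged when that level is
 * folded.
 */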

/*
 * 31 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 0, 20 and bit 23 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit of not storing the
 * necessary information in the lowcore.
 * Bit 21 and bit 22 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 30 and 31 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 1-19 and bits 24-29 to store type and offset.
 * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
 * plus 24 for the offset.
 * 0|     offset        |0110|o|type |00|
 * 0 0000000001111111111 2222 2 22222 33
 * 0 1234567890123456789 0123 4 56789 01
 *
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit of not storing the
 * necessary information in the lowcore.
 * Bit 53 and bit 54 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 62 and 63 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 0-51 and bits 56-61 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
 * plus 56 for the offset.
 * |                      offset                        |0110|o|type |00|
 *  0000000000111111111122222222223333333333444444444455 5555 5 55566 66
 *  0123456789012345678901234567890123456789012345678901 2345 6 78901 23
 */
#ifndef __s390x__
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else
#define __SWP_OFFSET_MASK (~0UL >> 11)
#endif
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;
	offset &= __SWP_OFFSET_MASK;
	pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
		((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
	return pte;
}

#define __swp_type(entry)	(((entry).val >> 2) & 0x1f)
#define __swp_offset(entry)	(((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
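
/*
 * Editorial example, a round trip through the swap encoding above for
 * type 3, offset 0x1234 (64 bit layout):
 *
 *	swp_entry_t e = __swp_entry(3, 0x1234);
 *	// pte_val == _PAGE_TYPE_SWAP | (3 << 2) | (0x1234 << 11) == 0x91a40f
 *	__swp_type(e);    // == 3
 *	__swp_offset(e);  // == 0x1234
 */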

#ifndef __s390x__
# define PTE_FILE_MAX_BITS	26
#else /* __s390x__ */
# define PTE_FILE_MAX_BITS	59
#endif /* __s390x__ */

#define pte_to_pgoff(__pte) \
	((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))

#define pgoff_to_pte(__off) \
	((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
		   | _PAGE_TYPE_FILE })
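
/*
 * Editorial example for the file pte encoding above, pgoff 0x1ff:
 *
 *	pgoff_to_pte(0x1ff);  // (0x7f << 1) + (0x3 << 12) | _PAGE_TYPE_FILE
 *	                      // == 0x36ff
 *	pte_to_pgoff(pte);    // (0x3 << 7) + 0x7f == 0x1ff
 */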

#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */