1 /* SPDX-License-Identifier: GPL-2.0 */ 2 /* 3 * S390 version 4 * Copyright IBM Corp. 1999, 2000 5 * Author(s): Hartmut Penner (hp@de.ibm.com) 6 * Ulrich Weigand (weigand@de.ibm.com) 7 * Martin Schwidefsky (schwidefsky@de.ibm.com) 8 * 9 * Derived from "include/asm-i386/pgtable.h" 10 */ 11 12 #ifndef _ASM_S390_PGTABLE_H 13 #define _ASM_S390_PGTABLE_H 14 15 #include <linux/sched.h> 16 #include <linux/mm_types.h> 17 #include <linux/page-flags.h> 18 #include <linux/radix-tree.h> 19 #include <linux/atomic.h> 20 #include <asm/bug.h> 21 #include <asm/page.h> 22 23 extern pgd_t swapper_pg_dir[]; 24 extern void paging_init(void); 25 26 enum { 27 PG_DIRECT_MAP_4K = 0, 28 PG_DIRECT_MAP_1M, 29 PG_DIRECT_MAP_2G, 30 PG_DIRECT_MAP_MAX 31 }; 32 33 extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX]; 34 35 static inline void update_page_count(int level, long count) 36 { 37 if (IS_ENABLED(CONFIG_PROC_FS)) 38 atomic_long_add(count, &direct_pages_count[level]); 39 } 40 41 struct seq_file; 42 void arch_report_meminfo(struct seq_file *m); 43 44 /* 45 * The S390 doesn't have any external MMU info: the kernel page 46 * tables contain all the necessary information. 47 */ 48 #define update_mmu_cache(vma, address, ptep) do { } while (0) 49 #define update_mmu_cache_pmd(vma, address, ptep) do { } while (0) 50 51 /* 52 * ZERO_PAGE is a global shared page that is always zero; used 53 * for zero-mapped memory areas etc.. 54 */ 55 56 extern unsigned long empty_zero_page; 57 extern unsigned long zero_page_mask; 58 59 #define ZERO_PAGE(vaddr) \ 60 (virt_to_page((void *)(empty_zero_page + \ 61 (((unsigned long)(vaddr)) &zero_page_mask)))) 62 #define __HAVE_COLOR_ZERO_PAGE 63 64 /* TODO: s390 cannot support io_remap_pfn_range... */ 65 66 #define FIRST_USER_ADDRESS 0UL 67 68 #define pte_ERROR(e) \ 69 printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e)) 70 #define pmd_ERROR(e) \ 71 printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e)) 72 #define pud_ERROR(e) \ 73 printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e)) 74 #define p4d_ERROR(e) \ 75 printk("%s:%d: bad p4d %p.\n", __FILE__, __LINE__, (void *) p4d_val(e)) 76 #define pgd_ERROR(e) \ 77 printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e)) 78 79 /* 80 * The vmalloc and module area will always be on the topmost area of the 81 * kernel mapping. We reserve 128GB (64bit) for vmalloc and modules. 82 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where 83 * modules will reside. That makes sure that inter module branches always 84 * happen without trampolines and in addition the placement within a 2GB frame 85 * is branch prediction unit friendly. 86 */ 87 extern unsigned long VMALLOC_START; 88 extern unsigned long VMALLOC_END; 89 extern struct page *vmemmap; 90 91 #define VMEM_MAX_PHYS ((unsigned long) vmemmap) 92 93 extern unsigned long MODULES_VADDR; 94 extern unsigned long MODULES_END; 95 #define MODULES_VADDR MODULES_VADDR 96 #define MODULES_END MODULES_END 97 #define MODULES_LEN (1UL << 31) 98 99 static inline int is_module_addr(void *addr) 100 { 101 BUILD_BUG_ON(MODULES_LEN > (1UL << 31)); 102 if (addr < (void *)MODULES_VADDR) 103 return 0; 104 if (addr > (void *)MODULES_END) 105 return 0; 106 return 1; 107 } 108 109 /* 110 * A 64 bit pagetable entry of S390 has following format: 111 * | PFRA |0IPC| OS | 112 * 0000000000111111111122222222223333333333444444444455555555556666 113 * 0123456789012345678901234567890123456789012345678901234567890123 114 * 115 * I Page-Invalid Bit: Page is not available for address-translation 116 * P Page-Protection Bit: Store access not possible for page 117 * C Change-bit override: HW is not required to set change bit 118 * 119 * A 64 bit segmenttable entry of S390 has following format: 120 * | P-table origin | TT 121 * 0000000000111111111122222222223333333333444444444455555555556666 122 * 0123456789012345678901234567890123456789012345678901234567890123 123 * 124 * I Segment-Invalid Bit: Segment is not available for address-translation 125 * C Common-Segment Bit: Segment is not private (PoP 3-30) 126 * P Page-Protection Bit: Store access not possible for page 127 * TT Type 00 128 * 129 * A 64 bit region table entry of S390 has following format: 130 * | S-table origin | TF TTTL 131 * 0000000000111111111122222222223333333333444444444455555555556666 132 * 0123456789012345678901234567890123456789012345678901234567890123 133 * 134 * I Segment-Invalid Bit: Segment is not available for address-translation 135 * TT Type 01 136 * TF 137 * TL Table length 138 * 139 * The 64 bit regiontable origin of S390 has following format: 140 * | region table origon | DTTL 141 * 0000000000111111111122222222223333333333444444444455555555556666 142 * 0123456789012345678901234567890123456789012345678901234567890123 143 * 144 * X Space-Switch event: 145 * G Segment-Invalid Bit: 146 * P Private-Space Bit: 147 * S Storage-Alteration: 148 * R Real space 149 * TL Table-Length: 150 * 151 * A storage key has the following format: 152 * | ACC |F|R|C|0| 153 * 0 3 4 5 6 7 154 * ACC: access key 155 * F : fetch protection bit 156 * R : referenced bit 157 * C : changed bit 158 */ 159 160 /* Hardware bits in the page table entry */ 161 #define _PAGE_NOEXEC 0x100 /* HW no-execute bit */ 162 #define _PAGE_PROTECT 0x200 /* HW read-only bit */ 163 #define _PAGE_INVALID 0x400 /* HW invalid bit */ 164 #define _PAGE_LARGE 0x800 /* Bit to mark a large pte */ 165 166 /* Software bits in the page table entry */ 167 #define _PAGE_PRESENT 0x001 /* SW pte present bit */ 168 #define _PAGE_YOUNG 0x004 /* SW pte young bit */ 169 #define _PAGE_DIRTY 0x008 /* SW pte dirty bit */ 170 #define _PAGE_READ 0x010 /* SW pte read bit */ 171 #define _PAGE_WRITE 0x020 /* SW pte write bit */ 172 #define _PAGE_SPECIAL 0x040 /* SW associated with special page */ 173 #define _PAGE_UNUSED 0x080 /* SW bit for pgste usage state */ 174 #define __HAVE_ARCH_PTE_SPECIAL 175 176 #ifdef CONFIG_MEM_SOFT_DIRTY 177 #define _PAGE_SOFT_DIRTY 0x002 /* SW pte soft dirty bit */ 178 #else 179 #define _PAGE_SOFT_DIRTY 0x000 180 #endif 181 182 /* Set of bits not changed in pte_modify */ 183 #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \ 184 _PAGE_YOUNG | _PAGE_SOFT_DIRTY) 185 186 /* 187 * handle_pte_fault uses pte_present and pte_none to find out the pte type 188 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to 189 * distinguish present from not-present ptes. It is changed only with the page 190 * table lock held. 191 * 192 * The following table gives the different possible bit combinations for 193 * the pte hardware and software bits in the last 12 bits of a pte 194 * (. unassigned bit, x don't care, t swap type): 195 * 196 * 842100000000 197 * 000084210000 198 * 000000008421 199 * .IR.uswrdy.p 200 * empty .10.00000000 201 * swap .11..ttttt.0 202 * prot-none, clean, old .11.xx0000.1 203 * prot-none, clean, young .11.xx0001.1 204 * prot-none, dirty, old .11.xx0010.1 205 * prot-none, dirty, young .11.xx0011.1 206 * read-only, clean, old .11.xx0100.1 207 * read-only, clean, young .01.xx0101.1 208 * read-only, dirty, old .11.xx0110.1 209 * read-only, dirty, young .01.xx0111.1 210 * read-write, clean, old .11.xx1100.1 211 * read-write, clean, young .01.xx1101.1 212 * read-write, dirty, old .10.xx1110.1 213 * read-write, dirty, young .00.xx1111.1 214 * HW-bits: R read-only, I invalid 215 * SW-bits: p present, y young, d dirty, r read, w write, s special, 216 * u unused, l large 217 * 218 * pte_none is true for the bit pattern .10.00000000, pte == 0x400 219 * pte_swap is true for the bit pattern .11..ooooo.0, (pte & 0x201) == 0x200 220 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001 221 */ 222 223 /* Bits in the segment/region table address-space-control-element */ 224 #define _ASCE_ORIGIN ~0xfffUL/* region/segment table origin */ 225 #define _ASCE_PRIVATE_SPACE 0x100 /* private space control */ 226 #define _ASCE_ALT_EVENT 0x80 /* storage alteration event control */ 227 #define _ASCE_SPACE_SWITCH 0x40 /* space switch event */ 228 #define _ASCE_REAL_SPACE 0x20 /* real space control */ 229 #define _ASCE_TYPE_MASK 0x0c /* asce table type mask */ 230 #define _ASCE_TYPE_REGION1 0x0c /* region first table type */ 231 #define _ASCE_TYPE_REGION2 0x08 /* region second table type */ 232 #define _ASCE_TYPE_REGION3 0x04 /* region third table type */ 233 #define _ASCE_TYPE_SEGMENT 0x00 /* segment table type */ 234 #define _ASCE_TABLE_LENGTH 0x03 /* region table length */ 235 236 /* Bits in the region table entry */ 237 #define _REGION_ENTRY_ORIGIN ~0xfffUL/* region/segment table origin */ 238 #define _REGION_ENTRY_PROTECT 0x200 /* region protection bit */ 239 #define _REGION_ENTRY_NOEXEC 0x100 /* region no-execute bit */ 240 #define _REGION_ENTRY_OFFSET 0xc0 /* region table offset */ 241 #define _REGION_ENTRY_INVALID 0x20 /* invalid region table entry */ 242 #define _REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */ 243 #define _REGION_ENTRY_TYPE_R1 0x0c /* region first table type */ 244 #define _REGION_ENTRY_TYPE_R2 0x08 /* region second table type */ 245 #define _REGION_ENTRY_TYPE_R3 0x04 /* region third table type */ 246 #define _REGION_ENTRY_LENGTH 0x03 /* region third length */ 247 248 #define _REGION1_ENTRY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH) 249 #define _REGION1_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID) 250 #define _REGION2_ENTRY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH) 251 #define _REGION2_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID) 252 #define _REGION3_ENTRY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH) 253 #define _REGION3_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID) 254 255 #define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address */ 256 #define _REGION3_ENTRY_DIRTY 0x2000 /* SW region dirty bit */ 257 #define _REGION3_ENTRY_YOUNG 0x1000 /* SW region young bit */ 258 #define _REGION3_ENTRY_LARGE 0x0400 /* RTTE-format control, large page */ 259 #define _REGION3_ENTRY_READ 0x0002 /* SW region read bit */ 260 #define _REGION3_ENTRY_WRITE 0x0001 /* SW region write bit */ 261 262 #ifdef CONFIG_MEM_SOFT_DIRTY 263 #define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */ 264 #else 265 #define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */ 266 #endif 267 268 #define _REGION_ENTRY_BITS 0xfffffffffffff22fUL 269 #define _REGION_ENTRY_BITS_LARGE 0xffffffff8000fe2fUL 270 271 /* Bits in the segment table entry */ 272 #define _SEGMENT_ENTRY_BITS 0xfffffffffffffe33UL 273 #define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL 274 #define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */ 275 #define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* page table origin */ 276 #define _SEGMENT_ENTRY_PROTECT 0x200 /* segment protection bit */ 277 #define _SEGMENT_ENTRY_NOEXEC 0x100 /* segment no-execute bit */ 278 #define _SEGMENT_ENTRY_INVALID 0x20 /* invalid segment table entry */ 279 280 #define _SEGMENT_ENTRY (0) 281 #define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INVALID) 282 283 #define _SEGMENT_ENTRY_DIRTY 0x2000 /* SW segment dirty bit */ 284 #define _SEGMENT_ENTRY_YOUNG 0x1000 /* SW segment young bit */ 285 #define _SEGMENT_ENTRY_LARGE 0x0400 /* STE-format control, large page */ 286 #define _SEGMENT_ENTRY_WRITE 0x0002 /* SW segment write bit */ 287 #define _SEGMENT_ENTRY_READ 0x0001 /* SW segment read bit */ 288 289 #ifdef CONFIG_MEM_SOFT_DIRTY 290 #define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */ 291 #else 292 #define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */ 293 #endif 294 295 #define _CRST_ENTRIES 2048 /* number of region/segment table entries */ 296 #define _PAGE_ENTRIES 256 /* number of page table entries */ 297 298 #define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8) 299 #define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8) 300 301 #define _REGION1_SHIFT 53 302 #define _REGION2_SHIFT 42 303 #define _REGION3_SHIFT 31 304 #define _SEGMENT_SHIFT 20 305 306 #define _REGION1_INDEX (0x7ffUL << _REGION1_SHIFT) 307 #define _REGION2_INDEX (0x7ffUL << _REGION2_SHIFT) 308 #define _REGION3_INDEX (0x7ffUL << _REGION3_SHIFT) 309 #define _SEGMENT_INDEX (0x7ffUL << _SEGMENT_SHIFT) 310 #define _PAGE_INDEX (0xffUL << _PAGE_SHIFT) 311 312 #define _REGION1_SIZE (1UL << _REGION1_SHIFT) 313 #define _REGION2_SIZE (1UL << _REGION2_SHIFT) 314 #define _REGION3_SIZE (1UL << _REGION3_SHIFT) 315 #define _SEGMENT_SIZE (1UL << _SEGMENT_SHIFT) 316 317 #define _REGION1_MASK (~(_REGION1_SIZE - 1)) 318 #define _REGION2_MASK (~(_REGION2_SIZE - 1)) 319 #define _REGION3_MASK (~(_REGION3_SIZE - 1)) 320 #define _SEGMENT_MASK (~(_SEGMENT_SIZE - 1)) 321 322 #define PMD_SHIFT _SEGMENT_SHIFT 323 #define PUD_SHIFT _REGION3_SHIFT 324 #define P4D_SHIFT _REGION2_SHIFT 325 #define PGDIR_SHIFT _REGION1_SHIFT 326 327 #define PMD_SIZE _SEGMENT_SIZE 328 #define PUD_SIZE _REGION3_SIZE 329 #define P4D_SIZE _REGION2_SIZE 330 #define PGDIR_SIZE _REGION1_SIZE 331 332 #define PMD_MASK _SEGMENT_MASK 333 #define PUD_MASK _REGION3_MASK 334 #define P4D_MASK _REGION2_MASK 335 #define PGDIR_MASK _REGION1_MASK 336 337 #define PTRS_PER_PTE _PAGE_ENTRIES 338 #define PTRS_PER_PMD _CRST_ENTRIES 339 #define PTRS_PER_PUD _CRST_ENTRIES 340 #define PTRS_PER_P4D _CRST_ENTRIES 341 #define PTRS_PER_PGD _CRST_ENTRIES 342 343 /* 344 * Segment table and region3 table entry encoding 345 * (R = read-only, I = invalid, y = young bit): 346 * dy..R...I...wr 347 * prot-none, clean, old 00..1...1...00 348 * prot-none, clean, young 01..1...1...00 349 * prot-none, dirty, old 10..1...1...00 350 * prot-none, dirty, young 11..1...1...00 351 * read-only, clean, old 00..1...1...01 352 * read-only, clean, young 01..1...0...01 353 * read-only, dirty, old 10..1...1...01 354 * read-only, dirty, young 11..1...0...01 355 * read-write, clean, old 00..1...1...11 356 * read-write, clean, young 01..1...0...11 357 * read-write, dirty, old 10..0...1...11 358 * read-write, dirty, young 11..0...0...11 359 * The segment table origin is used to distinguish empty (origin==0) from 360 * read-write, old segment table entries (origin!=0) 361 * HW-bits: R read-only, I invalid 362 * SW-bits: y young, d dirty, r read, w write 363 */ 364 365 /* Page status table bits for virtualization */ 366 #define PGSTE_ACC_BITS 0xf000000000000000UL 367 #define PGSTE_FP_BIT 0x0800000000000000UL 368 #define PGSTE_PCL_BIT 0x0080000000000000UL 369 #define PGSTE_HR_BIT 0x0040000000000000UL 370 #define PGSTE_HC_BIT 0x0020000000000000UL 371 #define PGSTE_GR_BIT 0x0004000000000000UL 372 #define PGSTE_GC_BIT 0x0002000000000000UL 373 #define PGSTE_UC_BIT 0x0000800000000000UL /* user dirty (migration) */ 374 #define PGSTE_IN_BIT 0x0000400000000000UL /* IPTE notify bit */ 375 #define PGSTE_VSIE_BIT 0x0000200000000000UL /* ref'd in a shadow table */ 376 377 /* Guest Page State used for virtualization */ 378 #define _PGSTE_GPS_ZERO 0x0000000080000000UL 379 #define _PGSTE_GPS_NODAT 0x0000000040000000UL 380 #define _PGSTE_GPS_USAGE_MASK 0x0000000003000000UL 381 #define _PGSTE_GPS_USAGE_STABLE 0x0000000000000000UL 382 #define _PGSTE_GPS_USAGE_UNUSED 0x0000000001000000UL 383 #define _PGSTE_GPS_USAGE_POT_VOLATILE 0x0000000002000000UL 384 #define _PGSTE_GPS_USAGE_VOLATILE _PGSTE_GPS_USAGE_MASK 385 386 /* 387 * A user page table pointer has the space-switch-event bit, the 388 * private-space-control bit and the storage-alteration-event-control 389 * bit set. A kernel page table pointer doesn't need them. 390 */ 391 #define _ASCE_USER_BITS (_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \ 392 _ASCE_ALT_EVENT) 393 394 /* 395 * Page protection definitions. 396 */ 397 #define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT) 398 #define PAGE_RO __pgprot(_PAGE_PRESENT | _PAGE_READ | \ 399 _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT) 400 #define PAGE_RX __pgprot(_PAGE_PRESENT | _PAGE_READ | \ 401 _PAGE_INVALID | _PAGE_PROTECT) 402 #define PAGE_RW __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ 403 _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT) 404 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ 405 _PAGE_INVALID | _PAGE_PROTECT) 406 407 #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ 408 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC) 409 #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ 410 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC) 411 #define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \ 412 _PAGE_PROTECT | _PAGE_NOEXEC) 413 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ 414 _PAGE_YOUNG | _PAGE_DIRTY) 415 416 /* 417 * On s390 the page table entry has an invalid bit and a read-only bit. 418 * Read permission implies execute permission and write permission 419 * implies read permission. 420 */ 421 /*xwr*/ 422 #define __P000 PAGE_NONE 423 #define __P001 PAGE_RO 424 #define __P010 PAGE_RO 425 #define __P011 PAGE_RO 426 #define __P100 PAGE_RX 427 #define __P101 PAGE_RX 428 #define __P110 PAGE_RX 429 #define __P111 PAGE_RX 430 431 #define __S000 PAGE_NONE 432 #define __S001 PAGE_RO 433 #define __S010 PAGE_RW 434 #define __S011 PAGE_RW 435 #define __S100 PAGE_RX 436 #define __S101 PAGE_RX 437 #define __S110 PAGE_RWX 438 #define __S111 PAGE_RWX 439 440 /* 441 * Segment entry (large page) protection definitions. 442 */ 443 #define SEGMENT_NONE __pgprot(_SEGMENT_ENTRY_INVALID | \ 444 _SEGMENT_ENTRY_PROTECT) 445 #define SEGMENT_RO __pgprot(_SEGMENT_ENTRY_PROTECT | \ 446 _SEGMENT_ENTRY_READ | \ 447 _SEGMENT_ENTRY_NOEXEC) 448 #define SEGMENT_RX __pgprot(_SEGMENT_ENTRY_PROTECT | \ 449 _SEGMENT_ENTRY_READ) 450 #define SEGMENT_RW __pgprot(_SEGMENT_ENTRY_READ | \ 451 _SEGMENT_ENTRY_WRITE | \ 452 _SEGMENT_ENTRY_NOEXEC) 453 #define SEGMENT_RWX __pgprot(_SEGMENT_ENTRY_READ | \ 454 _SEGMENT_ENTRY_WRITE) 455 #define SEGMENT_KERNEL __pgprot(_SEGMENT_ENTRY | \ 456 _SEGMENT_ENTRY_LARGE | \ 457 _SEGMENT_ENTRY_READ | \ 458 _SEGMENT_ENTRY_WRITE | \ 459 _SEGMENT_ENTRY_YOUNG | \ 460 _SEGMENT_ENTRY_DIRTY | \ 461 _SEGMENT_ENTRY_NOEXEC) 462 #define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY | \ 463 _SEGMENT_ENTRY_LARGE | \ 464 _SEGMENT_ENTRY_READ | \ 465 _SEGMENT_ENTRY_YOUNG | \ 466 _SEGMENT_ENTRY_PROTECT | \ 467 _SEGMENT_ENTRY_NOEXEC) 468 469 /* 470 * Region3 entry (large page) protection definitions. 471 */ 472 473 #define REGION3_KERNEL __pgprot(_REGION_ENTRY_TYPE_R3 | \ 474 _REGION3_ENTRY_LARGE | \ 475 _REGION3_ENTRY_READ | \ 476 _REGION3_ENTRY_WRITE | \ 477 _REGION3_ENTRY_YOUNG | \ 478 _REGION3_ENTRY_DIRTY | \ 479 _REGION_ENTRY_NOEXEC) 480 #define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \ 481 _REGION3_ENTRY_LARGE | \ 482 _REGION3_ENTRY_READ | \ 483 _REGION3_ENTRY_YOUNG | \ 484 _REGION_ENTRY_PROTECT | \ 485 _REGION_ENTRY_NOEXEC) 486 487 static inline int mm_has_pgste(struct mm_struct *mm) 488 { 489 #ifdef CONFIG_PGSTE 490 if (unlikely(mm->context.has_pgste)) 491 return 1; 492 #endif 493 return 0; 494 } 495 496 static inline int mm_alloc_pgste(struct mm_struct *mm) 497 { 498 #ifdef CONFIG_PGSTE 499 if (unlikely(mm->context.alloc_pgste)) 500 return 1; 501 #endif 502 return 0; 503 } 504 505 /* 506 * In the case that a guest uses storage keys 507 * faults should no longer be backed by zero pages 508 */ 509 #define mm_forbids_zeropage mm_has_pgste 510 static inline int mm_use_skey(struct mm_struct *mm) 511 { 512 #ifdef CONFIG_PGSTE 513 if (mm->context.use_skey) 514 return 1; 515 #endif 516 return 0; 517 } 518 519 static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new) 520 { 521 register unsigned long reg2 asm("2") = old; 522 register unsigned long reg3 asm("3") = new; 523 unsigned long address = (unsigned long)ptr | 1; 524 525 asm volatile( 526 " csp %0,%3" 527 : "+d" (reg2), "+m" (*ptr) 528 : "d" (reg3), "d" (address) 529 : "cc"); 530 } 531 532 static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new) 533 { 534 register unsigned long reg2 asm("2") = old; 535 register unsigned long reg3 asm("3") = new; 536 unsigned long address = (unsigned long)ptr | 1; 537 538 asm volatile( 539 " .insn rre,0xb98a0000,%0,%3" 540 : "+d" (reg2), "+m" (*ptr) 541 : "d" (reg3), "d" (address) 542 : "cc"); 543 } 544 545 #define CRDTE_DTT_PAGE 0x00UL 546 #define CRDTE_DTT_SEGMENT 0x10UL 547 #define CRDTE_DTT_REGION3 0x14UL 548 #define CRDTE_DTT_REGION2 0x18UL 549 #define CRDTE_DTT_REGION1 0x1cUL 550 551 static inline void crdte(unsigned long old, unsigned long new, 552 unsigned long table, unsigned long dtt, 553 unsigned long address, unsigned long asce) 554 { 555 register unsigned long reg2 asm("2") = old; 556 register unsigned long reg3 asm("3") = new; 557 register unsigned long reg4 asm("4") = table | dtt; 558 register unsigned long reg5 asm("5") = address; 559 560 asm volatile(".insn rrf,0xb98f0000,%0,%2,%4,0" 561 : "+d" (reg2) 562 : "d" (reg3), "d" (reg4), "d" (reg5), "a" (asce) 563 : "memory", "cc"); 564 } 565 566 /* 567 * pgd/p4d/pud/pmd/pte query functions 568 */ 569 static inline int pgd_folded(pgd_t pgd) 570 { 571 return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1; 572 } 573 574 static inline int pgd_present(pgd_t pgd) 575 { 576 if (pgd_folded(pgd)) 577 return 1; 578 return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL; 579 } 580 581 static inline int pgd_none(pgd_t pgd) 582 { 583 if (pgd_folded(pgd)) 584 return 0; 585 return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL; 586 } 587 588 static inline int pgd_bad(pgd_t pgd) 589 { 590 /* 591 * With dynamic page table levels the pgd can be a region table 592 * entry or a segment table entry. Check for the bit that are 593 * invalid for either table entry. 594 */ 595 unsigned long mask = 596 ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID & 597 ~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH; 598 return (pgd_val(pgd) & mask) != 0; 599 } 600 601 static inline int p4d_folded(p4d_t p4d) 602 { 603 return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2; 604 } 605 606 static inline int p4d_present(p4d_t p4d) 607 { 608 if (p4d_folded(p4d)) 609 return 1; 610 return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL; 611 } 612 613 static inline int p4d_none(p4d_t p4d) 614 { 615 if (p4d_folded(p4d)) 616 return 0; 617 return p4d_val(p4d) == _REGION2_ENTRY_EMPTY; 618 } 619 620 static inline unsigned long p4d_pfn(p4d_t p4d) 621 { 622 unsigned long origin_mask; 623 624 origin_mask = _REGION_ENTRY_ORIGIN; 625 return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT; 626 } 627 628 static inline int pud_folded(pud_t pud) 629 { 630 return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3; 631 } 632 633 static inline int pud_present(pud_t pud) 634 { 635 if (pud_folded(pud)) 636 return 1; 637 return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL; 638 } 639 640 static inline int pud_none(pud_t pud) 641 { 642 if (pud_folded(pud)) 643 return 0; 644 return pud_val(pud) == _REGION3_ENTRY_EMPTY; 645 } 646 647 static inline int pud_large(pud_t pud) 648 { 649 if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3) 650 return 0; 651 return !!(pud_val(pud) & _REGION3_ENTRY_LARGE); 652 } 653 654 static inline unsigned long pud_pfn(pud_t pud) 655 { 656 unsigned long origin_mask; 657 658 origin_mask = _REGION_ENTRY_ORIGIN; 659 if (pud_large(pud)) 660 origin_mask = _REGION3_ENTRY_ORIGIN_LARGE; 661 return (pud_val(pud) & origin_mask) >> PAGE_SHIFT; 662 } 663 664 static inline int pmd_large(pmd_t pmd) 665 { 666 return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0; 667 } 668 669 static inline int pmd_bad(pmd_t pmd) 670 { 671 if (pmd_large(pmd)) 672 return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0; 673 return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0; 674 } 675 676 static inline int pud_bad(pud_t pud) 677 { 678 if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3) 679 return pmd_bad(__pmd(pud_val(pud))); 680 if (pud_large(pud)) 681 return (pud_val(pud) & ~_REGION_ENTRY_BITS_LARGE) != 0; 682 return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0; 683 } 684 685 static inline int p4d_bad(p4d_t p4d) 686 { 687 if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2) 688 return pud_bad(__pud(p4d_val(p4d))); 689 return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0; 690 } 691 692 static inline int pmd_present(pmd_t pmd) 693 { 694 return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY; 695 } 696 697 static inline int pmd_none(pmd_t pmd) 698 { 699 return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY; 700 } 701 702 static inline unsigned long pmd_pfn(pmd_t pmd) 703 { 704 unsigned long origin_mask; 705 706 origin_mask = _SEGMENT_ENTRY_ORIGIN; 707 if (pmd_large(pmd)) 708 origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE; 709 return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT; 710 } 711 712 #define pmd_write pmd_write 713 static inline int pmd_write(pmd_t pmd) 714 { 715 return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0; 716 } 717 718 static inline int pmd_dirty(pmd_t pmd) 719 { 720 int dirty = 1; 721 if (pmd_large(pmd)) 722 dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0; 723 return dirty; 724 } 725 726 static inline int pmd_young(pmd_t pmd) 727 { 728 int young = 1; 729 if (pmd_large(pmd)) 730 young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0; 731 return young; 732 } 733 734 static inline int pte_present(pte_t pte) 735 { 736 /* Bit pattern: (pte & 0x001) == 0x001 */ 737 return (pte_val(pte) & _PAGE_PRESENT) != 0; 738 } 739 740 static inline int pte_none(pte_t pte) 741 { 742 /* Bit pattern: pte == 0x400 */ 743 return pte_val(pte) == _PAGE_INVALID; 744 } 745 746 static inline int pte_swap(pte_t pte) 747 { 748 /* Bit pattern: (pte & 0x201) == 0x200 */ 749 return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT)) 750 == _PAGE_PROTECT; 751 } 752 753 static inline int pte_special(pte_t pte) 754 { 755 return (pte_val(pte) & _PAGE_SPECIAL); 756 } 757 758 #define __HAVE_ARCH_PTE_SAME 759 static inline int pte_same(pte_t a, pte_t b) 760 { 761 return pte_val(a) == pte_val(b); 762 } 763 764 #ifdef CONFIG_NUMA_BALANCING 765 static inline int pte_protnone(pte_t pte) 766 { 767 return pte_present(pte) && !(pte_val(pte) & _PAGE_READ); 768 } 769 770 static inline int pmd_protnone(pmd_t pmd) 771 { 772 /* pmd_large(pmd) implies pmd_present(pmd) */ 773 return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ); 774 } 775 #endif 776 777 static inline int pte_soft_dirty(pte_t pte) 778 { 779 return pte_val(pte) & _PAGE_SOFT_DIRTY; 780 } 781 #define pte_swp_soft_dirty pte_soft_dirty 782 783 static inline pte_t pte_mksoft_dirty(pte_t pte) 784 { 785 pte_val(pte) |= _PAGE_SOFT_DIRTY; 786 return pte; 787 } 788 #define pte_swp_mksoft_dirty pte_mksoft_dirty 789 790 static inline pte_t pte_clear_soft_dirty(pte_t pte) 791 { 792 pte_val(pte) &= ~_PAGE_SOFT_DIRTY; 793 return pte; 794 } 795 #define pte_swp_clear_soft_dirty pte_clear_soft_dirty 796 797 static inline int pmd_soft_dirty(pmd_t pmd) 798 { 799 return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY; 800 } 801 802 static inline pmd_t pmd_mksoft_dirty(pmd_t pmd) 803 { 804 pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY; 805 return pmd; 806 } 807 808 static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd) 809 { 810 pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY; 811 return pmd; 812 } 813 814 /* 815 * query functions pte_write/pte_dirty/pte_young only work if 816 * pte_present() is true. Undefined behaviour if not.. 817 */ 818 static inline int pte_write(pte_t pte) 819 { 820 return (pte_val(pte) & _PAGE_WRITE) != 0; 821 } 822 823 static inline int pte_dirty(pte_t pte) 824 { 825 return (pte_val(pte) & _PAGE_DIRTY) != 0; 826 } 827 828 static inline int pte_young(pte_t pte) 829 { 830 return (pte_val(pte) & _PAGE_YOUNG) != 0; 831 } 832 833 #define __HAVE_ARCH_PTE_UNUSED 834 static inline int pte_unused(pte_t pte) 835 { 836 return pte_val(pte) & _PAGE_UNUSED; 837 } 838 839 /* 840 * pgd/pmd/pte modification functions 841 */ 842 843 static inline void pgd_clear(pgd_t *pgd) 844 { 845 if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1) 846 pgd_val(*pgd) = _REGION1_ENTRY_EMPTY; 847 } 848 849 static inline void p4d_clear(p4d_t *p4d) 850 { 851 if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) 852 p4d_val(*p4d) = _REGION2_ENTRY_EMPTY; 853 } 854 855 static inline void pud_clear(pud_t *pud) 856 { 857 if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) 858 pud_val(*pud) = _REGION3_ENTRY_EMPTY; 859 } 860 861 static inline void pmd_clear(pmd_t *pmdp) 862 { 863 pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY; 864 } 865 866 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 867 { 868 pte_val(*ptep) = _PAGE_INVALID; 869 } 870 871 /* 872 * The following pte modification functions only work if 873 * pte_present() is true. Undefined behaviour if not.. 874 */ 875 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 876 { 877 pte_val(pte) &= _PAGE_CHG_MASK; 878 pte_val(pte) |= pgprot_val(newprot); 879 /* 880 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX 881 * has the invalid bit set, clear it again for readable, young pages 882 */ 883 if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ)) 884 pte_val(pte) &= ~_PAGE_INVALID; 885 /* 886 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page 887 * protection bit set, clear it again for writable, dirty pages 888 */ 889 if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE)) 890 pte_val(pte) &= ~_PAGE_PROTECT; 891 return pte; 892 } 893 894 static inline pte_t pte_wrprotect(pte_t pte) 895 { 896 pte_val(pte) &= ~_PAGE_WRITE; 897 pte_val(pte) |= _PAGE_PROTECT; 898 return pte; 899 } 900 901 static inline pte_t pte_mkwrite(pte_t pte) 902 { 903 pte_val(pte) |= _PAGE_WRITE; 904 if (pte_val(pte) & _PAGE_DIRTY) 905 pte_val(pte) &= ~_PAGE_PROTECT; 906 return pte; 907 } 908 909 static inline pte_t pte_mkclean(pte_t pte) 910 { 911 pte_val(pte) &= ~_PAGE_DIRTY; 912 pte_val(pte) |= _PAGE_PROTECT; 913 return pte; 914 } 915 916 static inline pte_t pte_mkdirty(pte_t pte) 917 { 918 pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY; 919 if (pte_val(pte) & _PAGE_WRITE) 920 pte_val(pte) &= ~_PAGE_PROTECT; 921 return pte; 922 } 923 924 static inline pte_t pte_mkold(pte_t pte) 925 { 926 pte_val(pte) &= ~_PAGE_YOUNG; 927 pte_val(pte) |= _PAGE_INVALID; 928 return pte; 929 } 930 931 static inline pte_t pte_mkyoung(pte_t pte) 932 { 933 pte_val(pte) |= _PAGE_YOUNG; 934 if (pte_val(pte) & _PAGE_READ) 935 pte_val(pte) &= ~_PAGE_INVALID; 936 return pte; 937 } 938 939 static inline pte_t pte_mkspecial(pte_t pte) 940 { 941 pte_val(pte) |= _PAGE_SPECIAL; 942 return pte; 943 } 944 945 #ifdef CONFIG_HUGETLB_PAGE 946 static inline pte_t pte_mkhuge(pte_t pte) 947 { 948 pte_val(pte) |= _PAGE_LARGE; 949 return pte; 950 } 951 #endif 952 953 #define IPTE_GLOBAL 0 954 #define IPTE_LOCAL 1 955 956 #define IPTE_NODAT 0x400 957 #define IPTE_GUEST_ASCE 0x800 958 959 static inline void __ptep_ipte(unsigned long address, pte_t *ptep, 960 unsigned long opt, unsigned long asce, 961 int local) 962 { 963 unsigned long pto = (unsigned long) ptep; 964 965 if (__builtin_constant_p(opt) && opt == 0) { 966 /* Invalidation + TLB flush for the pte */ 967 asm volatile( 968 " .insn rrf,0xb2210000,%[r1],%[r2],0,%[m4]" 969 : "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address), 970 [m4] "i" (local)); 971 return; 972 } 973 974 /* Invalidate ptes with options + TLB flush of the ptes */ 975 opt = opt | (asce & _ASCE_ORIGIN); 976 asm volatile( 977 " .insn rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]" 978 : [r2] "+a" (address), [r3] "+a" (opt) 979 : [r1] "a" (pto), [m4] "i" (local) : "memory"); 980 } 981 982 static inline void __ptep_ipte_range(unsigned long address, int nr, 983 pte_t *ptep, int local) 984 { 985 unsigned long pto = (unsigned long) ptep; 986 987 /* Invalidate a range of ptes + TLB flush of the ptes */ 988 do { 989 asm volatile( 990 " .insn rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]" 991 : [r2] "+a" (address), [r3] "+a" (nr) 992 : [r1] "a" (pto), [m4] "i" (local) : "memory"); 993 } while (nr != 255); 994 } 995 996 /* 997 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush 998 * both clear the TLB for the unmapped pte. The reason is that 999 * ptep_get_and_clear is used in common code (e.g. change_pte_range) 1000 * to modify an active pte. The sequence is 1001 * 1) ptep_get_and_clear 1002 * 2) set_pte_at 1003 * 3) flush_tlb_range 1004 * On s390 the tlb needs to get flushed with the modification of the pte 1005 * if the pte is active. The only way how this can be implemented is to 1006 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range 1007 * is a nop. 1008 */ 1009 pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t); 1010 pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t); 1011 1012 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG 1013 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, 1014 unsigned long addr, pte_t *ptep) 1015 { 1016 pte_t pte = *ptep; 1017 1018 pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte)); 1019 return pte_young(pte); 1020 } 1021 1022 #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH 1023 static inline int ptep_clear_flush_young(struct vm_area_struct *vma, 1024 unsigned long address, pte_t *ptep) 1025 { 1026 return ptep_test_and_clear_young(vma, address, ptep); 1027 } 1028 1029 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR 1030 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, 1031 unsigned long addr, pte_t *ptep) 1032 { 1033 return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID)); 1034 } 1035 1036 #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION 1037 pte_t ptep_modify_prot_start(struct mm_struct *, unsigned long, pte_t *); 1038 void ptep_modify_prot_commit(struct mm_struct *, unsigned long, pte_t *, pte_t); 1039 1040 #define __HAVE_ARCH_PTEP_CLEAR_FLUSH 1041 static inline pte_t ptep_clear_flush(struct vm_area_struct *vma, 1042 unsigned long addr, pte_t *ptep) 1043 { 1044 return ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID)); 1045 } 1046 1047 /* 1048 * The batched pte unmap code uses ptep_get_and_clear_full to clear the 1049 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all 1050 * tlbs of an mm if it can guarantee that the ptes of the mm_struct 1051 * cannot be accessed while the batched unmap is running. In this case 1052 * full==1 and a simple pte_clear is enough. See tlb.h. 1053 */ 1054 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL 1055 static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, 1056 unsigned long addr, 1057 pte_t *ptep, int full) 1058 { 1059 if (full) { 1060 pte_t pte = *ptep; 1061 *ptep = __pte(_PAGE_INVALID); 1062 return pte; 1063 } 1064 return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID)); 1065 } 1066 1067 #define __HAVE_ARCH_PTEP_SET_WRPROTECT 1068 static inline void ptep_set_wrprotect(struct mm_struct *mm, 1069 unsigned long addr, pte_t *ptep) 1070 { 1071 pte_t pte = *ptep; 1072 1073 if (pte_write(pte)) 1074 ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte)); 1075 } 1076 1077 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS 1078 static inline int ptep_set_access_flags(struct vm_area_struct *vma, 1079 unsigned long addr, pte_t *ptep, 1080 pte_t entry, int dirty) 1081 { 1082 if (pte_same(*ptep, entry)) 1083 return 0; 1084 ptep_xchg_direct(vma->vm_mm, addr, ptep, entry); 1085 return 1; 1086 } 1087 1088 /* 1089 * Additional functions to handle KVM guest page tables 1090 */ 1091 void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr, 1092 pte_t *ptep, pte_t entry); 1093 void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep); 1094 void ptep_notify(struct mm_struct *mm, unsigned long addr, 1095 pte_t *ptep, unsigned long bits); 1096 int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr, 1097 pte_t *ptep, int prot, unsigned long bit); 1098 void ptep_zap_unused(struct mm_struct *mm, unsigned long addr, 1099 pte_t *ptep , int reset); 1100 void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep); 1101 int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr, 1102 pte_t *sptep, pte_t *tptep, pte_t pte); 1103 void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep); 1104 1105 bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long address); 1106 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, 1107 unsigned char key, bool nq); 1108 int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr, 1109 unsigned char key, unsigned char *oldkey, 1110 bool nq, bool mr, bool mc); 1111 int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr); 1112 int get_guest_storage_key(struct mm_struct *mm, unsigned long addr, 1113 unsigned char *key); 1114 1115 int set_pgste_bits(struct mm_struct *mm, unsigned long addr, 1116 unsigned long bits, unsigned long value); 1117 int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep); 1118 int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc, 1119 unsigned long *oldpte, unsigned long *oldpgste); 1120 1121 /* 1122 * Certain architectures need to do special things when PTEs 1123 * within a page table are directly modified. Thus, the following 1124 * hook is made available. 1125 */ 1126 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, 1127 pte_t *ptep, pte_t entry) 1128 { 1129 if (!MACHINE_HAS_NX) 1130 pte_val(entry) &= ~_PAGE_NOEXEC; 1131 if (pte_present(entry)) 1132 pte_val(entry) &= ~_PAGE_UNUSED; 1133 if (mm_has_pgste(mm)) 1134 ptep_set_pte_at(mm, addr, ptep, entry); 1135 else 1136 *ptep = entry; 1137 } 1138 1139 /* 1140 * Conversion functions: convert a page and protection to a page entry, 1141 * and a page entry and page directory to the page they refer to. 1142 */ 1143 static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot) 1144 { 1145 pte_t __pte; 1146 pte_val(__pte) = physpage + pgprot_val(pgprot); 1147 return pte_mkyoung(__pte); 1148 } 1149 1150 static inline pte_t mk_pte(struct page *page, pgprot_t pgprot) 1151 { 1152 unsigned long physpage = page_to_phys(page); 1153 pte_t __pte = mk_pte_phys(physpage, pgprot); 1154 1155 if (pte_write(__pte) && PageDirty(page)) 1156 __pte = pte_mkdirty(__pte); 1157 return __pte; 1158 } 1159 1160 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) 1161 #define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1)) 1162 #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) 1163 #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) 1164 #define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1)) 1165 1166 #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) 1167 #define pgd_offset_k(address) pgd_offset(&init_mm, address) 1168 1169 #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) 1170 #define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN) 1171 #define p4d_deref(pud) (p4d_val(pud) & _REGION_ENTRY_ORIGIN) 1172 #define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) 1173 1174 static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address) 1175 { 1176 p4d_t *p4d = (p4d_t *) pgd; 1177 1178 if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1) 1179 p4d = (p4d_t *) pgd_deref(*pgd); 1180 return p4d + p4d_index(address); 1181 } 1182 1183 static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address) 1184 { 1185 pud_t *pud = (pud_t *) p4d; 1186 1187 if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) 1188 pud = (pud_t *) p4d_deref(*p4d); 1189 return pud + pud_index(address); 1190 } 1191 1192 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) 1193 { 1194 pmd_t *pmd = (pmd_t *) pud; 1195 1196 if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) 1197 pmd = (pmd_t *) pud_deref(*pud); 1198 return pmd + pmd_index(address); 1199 } 1200 1201 #define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot)) 1202 #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT) 1203 #define pte_page(x) pfn_to_page(pte_pfn(x)) 1204 1205 #define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd)) 1206 #define pud_page(pud) pfn_to_page(pud_pfn(pud)) 1207 #define p4d_page(pud) pfn_to_page(p4d_pfn(p4d)) 1208 1209 /* Find an entry in the lowest level page table.. */ 1210 #define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr)) 1211 #define pte_offset_kernel(pmd, address) pte_offset(pmd,address) 1212 #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address) 1213 #define pte_unmap(pte) do { } while (0) 1214 1215 static inline pmd_t pmd_wrprotect(pmd_t pmd) 1216 { 1217 pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE; 1218 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; 1219 return pmd; 1220 } 1221 1222 static inline pmd_t pmd_mkwrite(pmd_t pmd) 1223 { 1224 pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE; 1225 if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)) 1226 return pmd; 1227 pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT; 1228 return pmd; 1229 } 1230 1231 static inline pmd_t pmd_mkclean(pmd_t pmd) 1232 { 1233 if (pmd_large(pmd)) { 1234 pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY; 1235 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; 1236 } 1237 return pmd; 1238 } 1239 1240 static inline pmd_t pmd_mkdirty(pmd_t pmd) 1241 { 1242 if (pmd_large(pmd)) { 1243 pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY | 1244 _SEGMENT_ENTRY_SOFT_DIRTY; 1245 if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) 1246 pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT; 1247 } 1248 return pmd; 1249 } 1250 1251 static inline pud_t pud_wrprotect(pud_t pud) 1252 { 1253 pud_val(pud) &= ~_REGION3_ENTRY_WRITE; 1254 pud_val(pud) |= _REGION_ENTRY_PROTECT; 1255 return pud; 1256 } 1257 1258 static inline pud_t pud_mkwrite(pud_t pud) 1259 { 1260 pud_val(pud) |= _REGION3_ENTRY_WRITE; 1261 if (pud_large(pud) && !(pud_val(pud) & _REGION3_ENTRY_DIRTY)) 1262 return pud; 1263 pud_val(pud) &= ~_REGION_ENTRY_PROTECT; 1264 return pud; 1265 } 1266 1267 static inline pud_t pud_mkclean(pud_t pud) 1268 { 1269 if (pud_large(pud)) { 1270 pud_val(pud) &= ~_REGION3_ENTRY_DIRTY; 1271 pud_val(pud) |= _REGION_ENTRY_PROTECT; 1272 } 1273 return pud; 1274 } 1275 1276 static inline pud_t pud_mkdirty(pud_t pud) 1277 { 1278 if (pud_large(pud)) { 1279 pud_val(pud) |= _REGION3_ENTRY_DIRTY | 1280 _REGION3_ENTRY_SOFT_DIRTY; 1281 if (pud_val(pud) & _REGION3_ENTRY_WRITE) 1282 pud_val(pud) &= ~_REGION_ENTRY_PROTECT; 1283 } 1284 return pud; 1285 } 1286 1287 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE) 1288 static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot) 1289 { 1290 /* 1291 * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX 1292 * (see __Pxxx / __Sxxx). Convert to segment table entry format. 1293 */ 1294 if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE)) 1295 return pgprot_val(SEGMENT_NONE); 1296 if (pgprot_val(pgprot) == pgprot_val(PAGE_RO)) 1297 return pgprot_val(SEGMENT_RO); 1298 if (pgprot_val(pgprot) == pgprot_val(PAGE_RX)) 1299 return pgprot_val(SEGMENT_RX); 1300 if (pgprot_val(pgprot) == pgprot_val(PAGE_RW)) 1301 return pgprot_val(SEGMENT_RW); 1302 return pgprot_val(SEGMENT_RWX); 1303 } 1304 1305 static inline pmd_t pmd_mkyoung(pmd_t pmd) 1306 { 1307 if (pmd_large(pmd)) { 1308 pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG; 1309 if (pmd_val(pmd) & _SEGMENT_ENTRY_READ) 1310 pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID; 1311 } 1312 return pmd; 1313 } 1314 1315 static inline pmd_t pmd_mkold(pmd_t pmd) 1316 { 1317 if (pmd_large(pmd)) { 1318 pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG; 1319 pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID; 1320 } 1321 return pmd; 1322 } 1323 1324 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) 1325 { 1326 if (pmd_large(pmd)) { 1327 pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE | 1328 _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG | 1329 _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY; 1330 pmd_val(pmd) |= massage_pgprot_pmd(newprot); 1331 if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)) 1332 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; 1333 if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG)) 1334 pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID; 1335 return pmd; 1336 } 1337 pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN; 1338 pmd_val(pmd) |= massage_pgprot_pmd(newprot); 1339 return pmd; 1340 } 1341 1342 static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot) 1343 { 1344 pmd_t __pmd; 1345 pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot); 1346 return __pmd; 1347 } 1348 1349 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */ 1350 1351 static inline void __pmdp_csp(pmd_t *pmdp) 1352 { 1353 csp((unsigned int *)pmdp + 1, pmd_val(*pmdp), 1354 pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID); 1355 } 1356 1357 #define IDTE_GLOBAL 0 1358 #define IDTE_LOCAL 1 1359 1360 #define IDTE_PTOA 0x0800 1361 #define IDTE_NODAT 0x1000 1362 #define IDTE_GUEST_ASCE 0x2000 1363 1364 static inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp, 1365 unsigned long opt, unsigned long asce, 1366 int local) 1367 { 1368 unsigned long sto; 1369 1370 sto = (unsigned long) pmdp - pmd_index(addr) * sizeof(pmd_t); 1371 if (__builtin_constant_p(opt) && opt == 0) { 1372 /* flush without guest asce */ 1373 asm volatile( 1374 " .insn rrf,0xb98e0000,%[r1],%[r2],0,%[m4]" 1375 : "+m" (*pmdp) 1376 : [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)), 1377 [m4] "i" (local) 1378 : "cc" ); 1379 } else { 1380 /* flush with guest asce */ 1381 asm volatile( 1382 " .insn rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]" 1383 : "+m" (*pmdp) 1384 : [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt), 1385 [r3] "a" (asce), [m4] "i" (local) 1386 : "cc" ); 1387 } 1388 } 1389 1390 static inline void __pudp_idte(unsigned long addr, pud_t *pudp, 1391 unsigned long opt, unsigned long asce, 1392 int local) 1393 { 1394 unsigned long r3o; 1395 1396 r3o = (unsigned long) pudp - pud_index(addr) * sizeof(pud_t); 1397 r3o |= _ASCE_TYPE_REGION3; 1398 if (__builtin_constant_p(opt) && opt == 0) { 1399 /* flush without guest asce */ 1400 asm volatile( 1401 " .insn rrf,0xb98e0000,%[r1],%[r2],0,%[m4]" 1402 : "+m" (*pudp) 1403 : [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)), 1404 [m4] "i" (local) 1405 : "cc"); 1406 } else { 1407 /* flush with guest asce */ 1408 asm volatile( 1409 " .insn rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]" 1410 : "+m" (*pudp) 1411 : [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt), 1412 [r3] "a" (asce), [m4] "i" (local) 1413 : "cc" ); 1414 } 1415 } 1416 1417 pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t); 1418 pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t); 1419 pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t); 1420 1421 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1422 1423 #define __HAVE_ARCH_PGTABLE_DEPOSIT 1424 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, 1425 pgtable_t pgtable); 1426 1427 #define __HAVE_ARCH_PGTABLE_WITHDRAW 1428 pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); 1429 1430 #define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS 1431 static inline int pmdp_set_access_flags(struct vm_area_struct *vma, 1432 unsigned long addr, pmd_t *pmdp, 1433 pmd_t entry, int dirty) 1434 { 1435 VM_BUG_ON(addr & ~HPAGE_MASK); 1436 1437 entry = pmd_mkyoung(entry); 1438 if (dirty) 1439 entry = pmd_mkdirty(entry); 1440 if (pmd_val(*pmdp) == pmd_val(entry)) 1441 return 0; 1442 pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry); 1443 return 1; 1444 } 1445 1446 #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG 1447 static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, 1448 unsigned long addr, pmd_t *pmdp) 1449 { 1450 pmd_t pmd = *pmdp; 1451 1452 pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd)); 1453 return pmd_young(pmd); 1454 } 1455 1456 #define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH 1457 static inline int pmdp_clear_flush_young(struct vm_area_struct *vma, 1458 unsigned long addr, pmd_t *pmdp) 1459 { 1460 VM_BUG_ON(addr & ~HPAGE_MASK); 1461 return pmdp_test_and_clear_young(vma, addr, pmdp); 1462 } 1463 1464 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, 1465 pmd_t *pmdp, pmd_t entry) 1466 { 1467 if (!MACHINE_HAS_NX) 1468 pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC; 1469 *pmdp = entry; 1470 } 1471 1472 static inline pmd_t pmd_mkhuge(pmd_t pmd) 1473 { 1474 pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE; 1475 pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG; 1476 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; 1477 return pmd; 1478 } 1479 1480 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR 1481 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, 1482 unsigned long addr, pmd_t *pmdp) 1483 { 1484 return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY)); 1485 } 1486 1487 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL 1488 static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm, 1489 unsigned long addr, 1490 pmd_t *pmdp, int full) 1491 { 1492 if (full) { 1493 pmd_t pmd = *pmdp; 1494 *pmdp = __pmd(_SEGMENT_ENTRY_EMPTY); 1495 return pmd; 1496 } 1497 return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY)); 1498 } 1499 1500 #define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH 1501 static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, 1502 unsigned long addr, pmd_t *pmdp) 1503 { 1504 return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp); 1505 } 1506 1507 #define __HAVE_ARCH_PMDP_INVALIDATE 1508 static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma, 1509 unsigned long addr, pmd_t *pmdp) 1510 { 1511 pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID); 1512 1513 return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd); 1514 } 1515 1516 #define __HAVE_ARCH_PMDP_SET_WRPROTECT 1517 static inline void pmdp_set_wrprotect(struct mm_struct *mm, 1518 unsigned long addr, pmd_t *pmdp) 1519 { 1520 pmd_t pmd = *pmdp; 1521 1522 if (pmd_write(pmd)) 1523 pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd)); 1524 } 1525 1526 static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, 1527 unsigned long address, 1528 pmd_t *pmdp) 1529 { 1530 return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp); 1531 } 1532 #define pmdp_collapse_flush pmdp_collapse_flush 1533 1534 #define pfn_pmd(pfn, pgprot) mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot)) 1535 #define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot)) 1536 1537 static inline int pmd_trans_huge(pmd_t pmd) 1538 { 1539 return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE; 1540 } 1541 1542 #define has_transparent_hugepage has_transparent_hugepage 1543 static inline int has_transparent_hugepage(void) 1544 { 1545 return MACHINE_HAS_EDAT1 ? 1 : 0; 1546 } 1547 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 1548 1549 /* 1550 * 64 bit swap entry format: 1551 * A page-table entry has some bits we have to treat in a special way. 1552 * Bits 52 and bit 55 have to be zero, otherwise a specification 1553 * exception will occur instead of a page translation exception. The 1554 * specification exception has the bad habit not to store necessary 1555 * information in the lowcore. 1556 * Bits 54 and 63 are used to indicate the page type. 1557 * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200 1558 * This leaves the bits 0-51 and bits 56-62 to store type and offset. 1559 * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51 1560 * for the offset. 1561 * | offset |01100|type |00| 1562 * |0000000000111111111122222222223333333333444444444455|55555|55566|66| 1563 * |0123456789012345678901234567890123456789012345678901|23456|78901|23| 1564 */ 1565 1566 #define __SWP_OFFSET_MASK ((1UL << 52) - 1) 1567 #define __SWP_OFFSET_SHIFT 12 1568 #define __SWP_TYPE_MASK ((1UL << 5) - 1) 1569 #define __SWP_TYPE_SHIFT 2 1570 1571 static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) 1572 { 1573 pte_t pte; 1574 1575 pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT; 1576 pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT; 1577 pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT; 1578 return pte; 1579 } 1580 1581 static inline unsigned long __swp_type(swp_entry_t entry) 1582 { 1583 return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK; 1584 } 1585 1586 static inline unsigned long __swp_offset(swp_entry_t entry) 1587 { 1588 return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK; 1589 } 1590 1591 static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset) 1592 { 1593 return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) }; 1594 } 1595 1596 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) 1597 #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) 1598 1599 #define kern_addr_valid(addr) (1) 1600 1601 extern int vmem_add_mapping(unsigned long start, unsigned long size); 1602 extern int vmem_remove_mapping(unsigned long start, unsigned long size); 1603 extern int s390_enable_sie(void); 1604 extern int s390_enable_skey(void); 1605 extern void s390_reset_cmma(struct mm_struct *mm); 1606 1607 /* s390 has a private copy of get unmapped area to deal with cache synonyms */ 1608 #define HAVE_ARCH_UNMAPPED_AREA 1609 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN 1610 1611 /* 1612 * No page table caches to initialise 1613 */ 1614 static inline void pgtable_cache_init(void) { } 1615 static inline void check_pgt_cache(void) { } 1616 1617 #include <asm-generic/pgtable.h> 1618 1619 #endif /* _S390_PAGE_H */ 1620