1 /* SPDX-License-Identifier: GPL-2.0 */ 2 /* 3 * S390 version 4 * Copyright IBM Corp. 1999, 2000 5 * Author(s): Hartmut Penner (hp@de.ibm.com) 6 * Ulrich Weigand (weigand@de.ibm.com) 7 * Martin Schwidefsky (schwidefsky@de.ibm.com) 8 * 9 * Derived from "include/asm-i386/pgtable.h" 10 */ 11 12 #ifndef _ASM_S390_PGTABLE_H 13 #define _ASM_S390_PGTABLE_H 14 15 #include <linux/sched.h> 16 #include <linux/mm_types.h> 17 #include <linux/page-flags.h> 18 #include <linux/radix-tree.h> 19 #include <linux/atomic.h> 20 #include <asm/bug.h> 21 #include <asm/page.h> 22 23 extern pgd_t swapper_pg_dir[]; 24 extern void paging_init(void); 25 26 enum { 27 PG_DIRECT_MAP_4K = 0, 28 PG_DIRECT_MAP_1M, 29 PG_DIRECT_MAP_2G, 30 PG_DIRECT_MAP_MAX 31 }; 32 33 extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX]; 34 35 static inline void update_page_count(int level, long count) 36 { 37 if (IS_ENABLED(CONFIG_PROC_FS)) 38 atomic_long_add(count, &direct_pages_count[level]); 39 } 40 41 struct seq_file; 42 void arch_report_meminfo(struct seq_file *m); 43 44 /* 45 * The S390 doesn't have any external MMU info: the kernel page 46 * tables contain all the necessary information. 47 */ 48 #define update_mmu_cache(vma, address, ptep) do { } while (0) 49 #define update_mmu_cache_pmd(vma, address, ptep) do { } while (0) 50 51 /* 52 * ZERO_PAGE is a global shared page that is always zero; used 53 * for zero-mapped memory areas etc.. 54 */ 55 56 extern unsigned long empty_zero_page; 57 extern unsigned long zero_page_mask; 58 59 #define ZERO_PAGE(vaddr) \ 60 (virt_to_page((void *)(empty_zero_page + \ 61 (((unsigned long)(vaddr)) &zero_page_mask)))) 62 #define __HAVE_COLOR_ZERO_PAGE 63 64 /* TODO: s390 cannot support io_remap_pfn_range... */ 65 66 #define FIRST_USER_ADDRESS 0UL 67 68 #define pte_ERROR(e) \ 69 printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e)) 70 #define pmd_ERROR(e) \ 71 printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e)) 72 #define pud_ERROR(e) \ 73 printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e)) 74 #define p4d_ERROR(e) \ 75 printk("%s:%d: bad p4d %p.\n", __FILE__, __LINE__, (void *) p4d_val(e)) 76 #define pgd_ERROR(e) \ 77 printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e)) 78 79 /* 80 * The vmalloc and module area will always be on the topmost area of the 81 * kernel mapping. We reserve 128GB (64bit) for vmalloc and modules. 82 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where 83 * modules will reside. That makes sure that inter module branches always 84 * happen without trampolines and in addition the placement within a 2GB frame 85 * is branch prediction unit friendly. 86 */ 87 extern unsigned long VMALLOC_START; 88 extern unsigned long VMALLOC_END; 89 extern struct page *vmemmap; 90 91 #define VMEM_MAX_PHYS ((unsigned long) vmemmap) 92 93 extern unsigned long MODULES_VADDR; 94 extern unsigned long MODULES_END; 95 #define MODULES_VADDR MODULES_VADDR 96 #define MODULES_END MODULES_END 97 #define MODULES_LEN (1UL << 31) 98 99 static inline int is_module_addr(void *addr) 100 { 101 BUILD_BUG_ON(MODULES_LEN > (1UL << 31)); 102 if (addr < (void *)MODULES_VADDR) 103 return 0; 104 if (addr > (void *)MODULES_END) 105 return 0; 106 return 1; 107 } 108 109 /* 110 * A 64 bit pagetable entry of S390 has following format: 111 * | PFRA |0IPC| OS | 112 * 0000000000111111111122222222223333333333444444444455555555556666 113 * 0123456789012345678901234567890123456789012345678901234567890123 114 * 115 * I Page-Invalid Bit: Page is not available for address-translation 116 * P Page-Protection Bit: Store access not possible for page 117 * C Change-bit override: HW is not required to set change bit 118 * 119 * A 64 bit segmenttable entry of S390 has following format: 120 * | P-table origin | TT 121 * 0000000000111111111122222222223333333333444444444455555555556666 122 * 0123456789012345678901234567890123456789012345678901234567890123 123 * 124 * I Segment-Invalid Bit: Segment is not available for address-translation 125 * C Common-Segment Bit: Segment is not private (PoP 3-30) 126 * P Page-Protection Bit: Store access not possible for page 127 * TT Type 00 128 * 129 * A 64 bit region table entry of S390 has following format: 130 * | S-table origin | TF TTTL 131 * 0000000000111111111122222222223333333333444444444455555555556666 132 * 0123456789012345678901234567890123456789012345678901234567890123 133 * 134 * I Segment-Invalid Bit: Segment is not available for address-translation 135 * TT Type 01 136 * TF 137 * TL Table length 138 * 139 * The 64 bit regiontable origin of S390 has following format: 140 * | region table origon | DTTL 141 * 0000000000111111111122222222223333333333444444444455555555556666 142 * 0123456789012345678901234567890123456789012345678901234567890123 143 * 144 * X Space-Switch event: 145 * G Segment-Invalid Bit: 146 * P Private-Space Bit: 147 * S Storage-Alteration: 148 * R Real space 149 * TL Table-Length: 150 * 151 * A storage key has the following format: 152 * | ACC |F|R|C|0| 153 * 0 3 4 5 6 7 154 * ACC: access key 155 * F : fetch protection bit 156 * R : referenced bit 157 * C : changed bit 158 */ 159 160 /* Hardware bits in the page table entry */ 161 #define _PAGE_NOEXEC 0x100 /* HW no-execute bit */ 162 #define _PAGE_PROTECT 0x200 /* HW read-only bit */ 163 #define _PAGE_INVALID 0x400 /* HW invalid bit */ 164 #define _PAGE_LARGE 0x800 /* Bit to mark a large pte */ 165 166 /* Software bits in the page table entry */ 167 #define _PAGE_PRESENT 0x001 /* SW pte present bit */ 168 #define _PAGE_YOUNG 0x004 /* SW pte young bit */ 169 #define _PAGE_DIRTY 0x008 /* SW pte dirty bit */ 170 #define _PAGE_READ 0x010 /* SW pte read bit */ 171 #define _PAGE_WRITE 0x020 /* SW pte write bit */ 172 #define _PAGE_SPECIAL 0x040 /* SW associated with special page */ 173 #define _PAGE_UNUSED 0x080 /* SW bit for pgste usage state */ 174 175 #ifdef CONFIG_MEM_SOFT_DIRTY 176 #define _PAGE_SOFT_DIRTY 0x002 /* SW pte soft dirty bit */ 177 #else 178 #define _PAGE_SOFT_DIRTY 0x000 179 #endif 180 181 /* Set of bits not changed in pte_modify */ 182 #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \ 183 _PAGE_YOUNG | _PAGE_SOFT_DIRTY) 184 185 /* 186 * handle_pte_fault uses pte_present and pte_none to find out the pte type 187 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to 188 * distinguish present from not-present ptes. It is changed only with the page 189 * table lock held. 190 * 191 * The following table gives the different possible bit combinations for 192 * the pte hardware and software bits in the last 12 bits of a pte 193 * (. unassigned bit, x don't care, t swap type): 194 * 195 * 842100000000 196 * 000084210000 197 * 000000008421 198 * .IR.uswrdy.p 199 * empty .10.00000000 200 * swap .11..ttttt.0 201 * prot-none, clean, old .11.xx0000.1 202 * prot-none, clean, young .11.xx0001.1 203 * prot-none, dirty, old .11.xx0010.1 204 * prot-none, dirty, young .11.xx0011.1 205 * read-only, clean, old .11.xx0100.1 206 * read-only, clean, young .01.xx0101.1 207 * read-only, dirty, old .11.xx0110.1 208 * read-only, dirty, young .01.xx0111.1 209 * read-write, clean, old .11.xx1100.1 210 * read-write, clean, young .01.xx1101.1 211 * read-write, dirty, old .10.xx1110.1 212 * read-write, dirty, young .00.xx1111.1 213 * HW-bits: R read-only, I invalid 214 * SW-bits: p present, y young, d dirty, r read, w write, s special, 215 * u unused, l large 216 * 217 * pte_none is true for the bit pattern .10.00000000, pte == 0x400 218 * pte_swap is true for the bit pattern .11..ooooo.0, (pte & 0x201) == 0x200 219 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001 220 */ 221 222 /* Bits in the segment/region table address-space-control-element */ 223 #define _ASCE_ORIGIN ~0xfffUL/* region/segment table origin */ 224 #define _ASCE_PRIVATE_SPACE 0x100 /* private space control */ 225 #define _ASCE_ALT_EVENT 0x80 /* storage alteration event control */ 226 #define _ASCE_SPACE_SWITCH 0x40 /* space switch event */ 227 #define _ASCE_REAL_SPACE 0x20 /* real space control */ 228 #define _ASCE_TYPE_MASK 0x0c /* asce table type mask */ 229 #define _ASCE_TYPE_REGION1 0x0c /* region first table type */ 230 #define _ASCE_TYPE_REGION2 0x08 /* region second table type */ 231 #define _ASCE_TYPE_REGION3 0x04 /* region third table type */ 232 #define _ASCE_TYPE_SEGMENT 0x00 /* segment table type */ 233 #define _ASCE_TABLE_LENGTH 0x03 /* region table length */ 234 235 /* Bits in the region table entry */ 236 #define _REGION_ENTRY_ORIGIN ~0xfffUL/* region/segment table origin */ 237 #define _REGION_ENTRY_PROTECT 0x200 /* region protection bit */ 238 #define _REGION_ENTRY_NOEXEC 0x100 /* region no-execute bit */ 239 #define _REGION_ENTRY_OFFSET 0xc0 /* region table offset */ 240 #define _REGION_ENTRY_INVALID 0x20 /* invalid region table entry */ 241 #define _REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */ 242 #define _REGION_ENTRY_TYPE_R1 0x0c /* region first table type */ 243 #define _REGION_ENTRY_TYPE_R2 0x08 /* region second table type */ 244 #define _REGION_ENTRY_TYPE_R3 0x04 /* region third table type */ 245 #define _REGION_ENTRY_LENGTH 0x03 /* region third length */ 246 247 #define _REGION1_ENTRY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH) 248 #define _REGION1_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID) 249 #define _REGION2_ENTRY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH) 250 #define _REGION2_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID) 251 #define _REGION3_ENTRY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH) 252 #define _REGION3_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID) 253 254 #define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address */ 255 #define _REGION3_ENTRY_DIRTY 0x2000 /* SW region dirty bit */ 256 #define _REGION3_ENTRY_YOUNG 0x1000 /* SW region young bit */ 257 #define _REGION3_ENTRY_LARGE 0x0400 /* RTTE-format control, large page */ 258 #define _REGION3_ENTRY_READ 0x0002 /* SW region read bit */ 259 #define _REGION3_ENTRY_WRITE 0x0001 /* SW region write bit */ 260 261 #ifdef CONFIG_MEM_SOFT_DIRTY 262 #define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */ 263 #else 264 #define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */ 265 #endif 266 267 #define _REGION_ENTRY_BITS 0xfffffffffffff22fUL 268 #define _REGION_ENTRY_BITS_LARGE 0xffffffff8000fe2fUL 269 270 /* Bits in the segment table entry */ 271 #define _SEGMENT_ENTRY_BITS 0xfffffffffffffe33UL 272 #define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL 273 #define _SEGMENT_ENTRY_HARDWARE_BITS 0xfffffffffffffe30UL 274 #define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE 0xfffffffffff00730UL 275 #define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */ 276 #define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* page table origin */ 277 #define _SEGMENT_ENTRY_PROTECT 0x200 /* segment protection bit */ 278 #define _SEGMENT_ENTRY_NOEXEC 0x100 /* segment no-execute bit */ 279 #define _SEGMENT_ENTRY_INVALID 0x20 /* invalid segment table entry */ 280 281 #define _SEGMENT_ENTRY (0) 282 #define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INVALID) 283 284 #define _SEGMENT_ENTRY_DIRTY 0x2000 /* SW segment dirty bit */ 285 #define _SEGMENT_ENTRY_YOUNG 0x1000 /* SW segment young bit */ 286 #define _SEGMENT_ENTRY_LARGE 0x0400 /* STE-format control, large page */ 287 #define _SEGMENT_ENTRY_WRITE 0x0002 /* SW segment write bit */ 288 #define _SEGMENT_ENTRY_READ 0x0001 /* SW segment read bit */ 289 290 #ifdef CONFIG_MEM_SOFT_DIRTY 291 #define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */ 292 #else 293 #define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */ 294 #endif 295 296 #define _CRST_ENTRIES 2048 /* number of region/segment table entries */ 297 #define _PAGE_ENTRIES 256 /* number of page table entries */ 298 299 #define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8) 300 #define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8) 301 302 #define _REGION1_SHIFT 53 303 #define _REGION2_SHIFT 42 304 #define _REGION3_SHIFT 31 305 #define _SEGMENT_SHIFT 20 306 307 #define _REGION1_INDEX (0x7ffUL << _REGION1_SHIFT) 308 #define _REGION2_INDEX (0x7ffUL << _REGION2_SHIFT) 309 #define _REGION3_INDEX (0x7ffUL << _REGION3_SHIFT) 310 #define _SEGMENT_INDEX (0x7ffUL << _SEGMENT_SHIFT) 311 #define _PAGE_INDEX (0xffUL << _PAGE_SHIFT) 312 313 #define _REGION1_SIZE (1UL << _REGION1_SHIFT) 314 #define _REGION2_SIZE (1UL << _REGION2_SHIFT) 315 #define _REGION3_SIZE (1UL << _REGION3_SHIFT) 316 #define _SEGMENT_SIZE (1UL << _SEGMENT_SHIFT) 317 318 #define _REGION1_MASK (~(_REGION1_SIZE - 1)) 319 #define _REGION2_MASK (~(_REGION2_SIZE - 1)) 320 #define _REGION3_MASK (~(_REGION3_SIZE - 1)) 321 #define _SEGMENT_MASK (~(_SEGMENT_SIZE - 1)) 322 323 #define PMD_SHIFT _SEGMENT_SHIFT 324 #define PUD_SHIFT _REGION3_SHIFT 325 #define P4D_SHIFT _REGION2_SHIFT 326 #define PGDIR_SHIFT _REGION1_SHIFT 327 328 #define PMD_SIZE _SEGMENT_SIZE 329 #define PUD_SIZE _REGION3_SIZE 330 #define P4D_SIZE _REGION2_SIZE 331 #define PGDIR_SIZE _REGION1_SIZE 332 333 #define PMD_MASK _SEGMENT_MASK 334 #define PUD_MASK _REGION3_MASK 335 #define P4D_MASK _REGION2_MASK 336 #define PGDIR_MASK _REGION1_MASK 337 338 #define PTRS_PER_PTE _PAGE_ENTRIES 339 #define PTRS_PER_PMD _CRST_ENTRIES 340 #define PTRS_PER_PUD _CRST_ENTRIES 341 #define PTRS_PER_P4D _CRST_ENTRIES 342 #define PTRS_PER_PGD _CRST_ENTRIES 343 344 /* 345 * Segment table and region3 table entry encoding 346 * (R = read-only, I = invalid, y = young bit): 347 * dy..R...I...wr 348 * prot-none, clean, old 00..1...1...00 349 * prot-none, clean, young 01..1...1...00 350 * prot-none, dirty, old 10..1...1...00 351 * prot-none, dirty, young 11..1...1...00 352 * read-only, clean, old 00..1...1...01 353 * read-only, clean, young 01..1...0...01 354 * read-only, dirty, old 10..1...1...01 355 * read-only, dirty, young 11..1...0...01 356 * read-write, clean, old 00..1...1...11 357 * read-write, clean, young 01..1...0...11 358 * read-write, dirty, old 10..0...1...11 359 * read-write, dirty, young 11..0...0...11 360 * The segment table origin is used to distinguish empty (origin==0) from 361 * read-write, old segment table entries (origin!=0) 362 * HW-bits: R read-only, I invalid 363 * SW-bits: y young, d dirty, r read, w write 364 */ 365 366 /* Page status table bits for virtualization */ 367 #define PGSTE_ACC_BITS 0xf000000000000000UL 368 #define PGSTE_FP_BIT 0x0800000000000000UL 369 #define PGSTE_PCL_BIT 0x0080000000000000UL 370 #define PGSTE_HR_BIT 0x0040000000000000UL 371 #define PGSTE_HC_BIT 0x0020000000000000UL 372 #define PGSTE_GR_BIT 0x0004000000000000UL 373 #define PGSTE_GC_BIT 0x0002000000000000UL 374 #define PGSTE_UC_BIT 0x0000800000000000UL /* user dirty (migration) */ 375 #define PGSTE_IN_BIT 0x0000400000000000UL /* IPTE notify bit */ 376 #define PGSTE_VSIE_BIT 0x0000200000000000UL /* ref'd in a shadow table */ 377 378 /* Guest Page State used for virtualization */ 379 #define _PGSTE_GPS_ZERO 0x0000000080000000UL 380 #define _PGSTE_GPS_NODAT 0x0000000040000000UL 381 #define _PGSTE_GPS_USAGE_MASK 0x0000000003000000UL 382 #define _PGSTE_GPS_USAGE_STABLE 0x0000000000000000UL 383 #define _PGSTE_GPS_USAGE_UNUSED 0x0000000001000000UL 384 #define _PGSTE_GPS_USAGE_POT_VOLATILE 0x0000000002000000UL 385 #define _PGSTE_GPS_USAGE_VOLATILE _PGSTE_GPS_USAGE_MASK 386 387 /* 388 * A user page table pointer has the space-switch-event bit, the 389 * private-space-control bit and the storage-alteration-event-control 390 * bit set. A kernel page table pointer doesn't need them. 391 */ 392 #define _ASCE_USER_BITS (_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \ 393 _ASCE_ALT_EVENT) 394 395 /* 396 * Page protection definitions. 397 */ 398 #define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT) 399 #define PAGE_RO __pgprot(_PAGE_PRESENT | _PAGE_READ | \ 400 _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT) 401 #define PAGE_RX __pgprot(_PAGE_PRESENT | _PAGE_READ | \ 402 _PAGE_INVALID | _PAGE_PROTECT) 403 #define PAGE_RW __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ 404 _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT) 405 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ 406 _PAGE_INVALID | _PAGE_PROTECT) 407 408 #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ 409 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC) 410 #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ 411 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC) 412 #define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \ 413 _PAGE_PROTECT | _PAGE_NOEXEC) 414 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ 415 _PAGE_YOUNG | _PAGE_DIRTY) 416 417 /* 418 * On s390 the page table entry has an invalid bit and a read-only bit. 419 * Read permission implies execute permission and write permission 420 * implies read permission. 421 */ 422 /*xwr*/ 423 #define __P000 PAGE_NONE 424 #define __P001 PAGE_RO 425 #define __P010 PAGE_RO 426 #define __P011 PAGE_RO 427 #define __P100 PAGE_RX 428 #define __P101 PAGE_RX 429 #define __P110 PAGE_RX 430 #define __P111 PAGE_RX 431 432 #define __S000 PAGE_NONE 433 #define __S001 PAGE_RO 434 #define __S010 PAGE_RW 435 #define __S011 PAGE_RW 436 #define __S100 PAGE_RX 437 #define __S101 PAGE_RX 438 #define __S110 PAGE_RWX 439 #define __S111 PAGE_RWX 440 441 /* 442 * Segment entry (large page) protection definitions. 443 */ 444 #define SEGMENT_NONE __pgprot(_SEGMENT_ENTRY_INVALID | \ 445 _SEGMENT_ENTRY_PROTECT) 446 #define SEGMENT_RO __pgprot(_SEGMENT_ENTRY_PROTECT | \ 447 _SEGMENT_ENTRY_READ | \ 448 _SEGMENT_ENTRY_NOEXEC) 449 #define SEGMENT_RX __pgprot(_SEGMENT_ENTRY_PROTECT | \ 450 _SEGMENT_ENTRY_READ) 451 #define SEGMENT_RW __pgprot(_SEGMENT_ENTRY_READ | \ 452 _SEGMENT_ENTRY_WRITE | \ 453 _SEGMENT_ENTRY_NOEXEC) 454 #define SEGMENT_RWX __pgprot(_SEGMENT_ENTRY_READ | \ 455 _SEGMENT_ENTRY_WRITE) 456 #define SEGMENT_KERNEL __pgprot(_SEGMENT_ENTRY | \ 457 _SEGMENT_ENTRY_LARGE | \ 458 _SEGMENT_ENTRY_READ | \ 459 _SEGMENT_ENTRY_WRITE | \ 460 _SEGMENT_ENTRY_YOUNG | \ 461 _SEGMENT_ENTRY_DIRTY | \ 462 _SEGMENT_ENTRY_NOEXEC) 463 #define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY | \ 464 _SEGMENT_ENTRY_LARGE | \ 465 _SEGMENT_ENTRY_READ | \ 466 _SEGMENT_ENTRY_YOUNG | \ 467 _SEGMENT_ENTRY_PROTECT | \ 468 _SEGMENT_ENTRY_NOEXEC) 469 470 /* 471 * Region3 entry (large page) protection definitions. 472 */ 473 474 #define REGION3_KERNEL __pgprot(_REGION_ENTRY_TYPE_R3 | \ 475 _REGION3_ENTRY_LARGE | \ 476 _REGION3_ENTRY_READ | \ 477 _REGION3_ENTRY_WRITE | \ 478 _REGION3_ENTRY_YOUNG | \ 479 _REGION3_ENTRY_DIRTY | \ 480 _REGION_ENTRY_NOEXEC) 481 #define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \ 482 _REGION3_ENTRY_LARGE | \ 483 _REGION3_ENTRY_READ | \ 484 _REGION3_ENTRY_YOUNG | \ 485 _REGION_ENTRY_PROTECT | \ 486 _REGION_ENTRY_NOEXEC) 487 488 static inline int mm_has_pgste(struct mm_struct *mm) 489 { 490 #ifdef CONFIG_PGSTE 491 if (unlikely(mm->context.has_pgste)) 492 return 1; 493 #endif 494 return 0; 495 } 496 497 static inline int mm_alloc_pgste(struct mm_struct *mm) 498 { 499 #ifdef CONFIG_PGSTE 500 if (unlikely(mm->context.alloc_pgste)) 501 return 1; 502 #endif 503 return 0; 504 } 505 506 /* 507 * In the case that a guest uses storage keys 508 * faults should no longer be backed by zero pages 509 */ 510 #define mm_forbids_zeropage mm_has_pgste 511 static inline int mm_uses_skeys(struct mm_struct *mm) 512 { 513 #ifdef CONFIG_PGSTE 514 if (mm->context.uses_skeys) 515 return 1; 516 #endif 517 return 0; 518 } 519 520 static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new) 521 { 522 register unsigned long reg2 asm("2") = old; 523 register unsigned long reg3 asm("3") = new; 524 unsigned long address = (unsigned long)ptr | 1; 525 526 asm volatile( 527 " csp %0,%3" 528 : "+d" (reg2), "+m" (*ptr) 529 : "d" (reg3), "d" (address) 530 : "cc"); 531 } 532 533 static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new) 534 { 535 register unsigned long reg2 asm("2") = old; 536 register unsigned long reg3 asm("3") = new; 537 unsigned long address = (unsigned long)ptr | 1; 538 539 asm volatile( 540 " .insn rre,0xb98a0000,%0,%3" 541 : "+d" (reg2), "+m" (*ptr) 542 : "d" (reg3), "d" (address) 543 : "cc"); 544 } 545 546 #define CRDTE_DTT_PAGE 0x00UL 547 #define CRDTE_DTT_SEGMENT 0x10UL 548 #define CRDTE_DTT_REGION3 0x14UL 549 #define CRDTE_DTT_REGION2 0x18UL 550 #define CRDTE_DTT_REGION1 0x1cUL 551 552 static inline void crdte(unsigned long old, unsigned long new, 553 unsigned long table, unsigned long dtt, 554 unsigned long address, unsigned long asce) 555 { 556 register unsigned long reg2 asm("2") = old; 557 register unsigned long reg3 asm("3") = new; 558 register unsigned long reg4 asm("4") = table | dtt; 559 register unsigned long reg5 asm("5") = address; 560 561 asm volatile(".insn rrf,0xb98f0000,%0,%2,%4,0" 562 : "+d" (reg2) 563 : "d" (reg3), "d" (reg4), "d" (reg5), "a" (asce) 564 : "memory", "cc"); 565 } 566 567 /* 568 * pgd/p4d/pud/pmd/pte query functions 569 */ 570 static inline int pgd_folded(pgd_t pgd) 571 { 572 return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1; 573 } 574 575 static inline int pgd_present(pgd_t pgd) 576 { 577 if (pgd_folded(pgd)) 578 return 1; 579 return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL; 580 } 581 582 static inline int pgd_none(pgd_t pgd) 583 { 584 if (pgd_folded(pgd)) 585 return 0; 586 return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL; 587 } 588 589 static inline int pgd_bad(pgd_t pgd) 590 { 591 /* 592 * With dynamic page table levels the pgd can be a region table 593 * entry or a segment table entry. Check for the bit that are 594 * invalid for either table entry. 595 */ 596 unsigned long mask = 597 ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID & 598 ~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH; 599 return (pgd_val(pgd) & mask) != 0; 600 } 601 602 static inline int p4d_folded(p4d_t p4d) 603 { 604 return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2; 605 } 606 607 static inline int p4d_present(p4d_t p4d) 608 { 609 if (p4d_folded(p4d)) 610 return 1; 611 return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL; 612 } 613 614 static inline int p4d_none(p4d_t p4d) 615 { 616 if (p4d_folded(p4d)) 617 return 0; 618 return p4d_val(p4d) == _REGION2_ENTRY_EMPTY; 619 } 620 621 static inline unsigned long p4d_pfn(p4d_t p4d) 622 { 623 unsigned long origin_mask; 624 625 origin_mask = _REGION_ENTRY_ORIGIN; 626 return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT; 627 } 628 629 static inline int pud_folded(pud_t pud) 630 { 631 return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3; 632 } 633 634 static inline int pud_present(pud_t pud) 635 { 636 if (pud_folded(pud)) 637 return 1; 638 return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL; 639 } 640 641 static inline int pud_none(pud_t pud) 642 { 643 if (pud_folded(pud)) 644 return 0; 645 return pud_val(pud) == _REGION3_ENTRY_EMPTY; 646 } 647 648 static inline int pud_large(pud_t pud) 649 { 650 if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3) 651 return 0; 652 return !!(pud_val(pud) & _REGION3_ENTRY_LARGE); 653 } 654 655 static inline unsigned long pud_pfn(pud_t pud) 656 { 657 unsigned long origin_mask; 658 659 origin_mask = _REGION_ENTRY_ORIGIN; 660 if (pud_large(pud)) 661 origin_mask = _REGION3_ENTRY_ORIGIN_LARGE; 662 return (pud_val(pud) & origin_mask) >> PAGE_SHIFT; 663 } 664 665 static inline int pmd_large(pmd_t pmd) 666 { 667 return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0; 668 } 669 670 static inline int pmd_bad(pmd_t pmd) 671 { 672 if (pmd_large(pmd)) 673 return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0; 674 return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0; 675 } 676 677 static inline int pud_bad(pud_t pud) 678 { 679 if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3) 680 return pmd_bad(__pmd(pud_val(pud))); 681 if (pud_large(pud)) 682 return (pud_val(pud) & ~_REGION_ENTRY_BITS_LARGE) != 0; 683 return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0; 684 } 685 686 static inline int p4d_bad(p4d_t p4d) 687 { 688 if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2) 689 return pud_bad(__pud(p4d_val(p4d))); 690 return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0; 691 } 692 693 static inline int pmd_present(pmd_t pmd) 694 { 695 return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY; 696 } 697 698 static inline int pmd_none(pmd_t pmd) 699 { 700 return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY; 701 } 702 703 static inline unsigned long pmd_pfn(pmd_t pmd) 704 { 705 unsigned long origin_mask; 706 707 origin_mask = _SEGMENT_ENTRY_ORIGIN; 708 if (pmd_large(pmd)) 709 origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE; 710 return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT; 711 } 712 713 #define pmd_write pmd_write 714 static inline int pmd_write(pmd_t pmd) 715 { 716 return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0; 717 } 718 719 static inline int pmd_dirty(pmd_t pmd) 720 { 721 int dirty = 1; 722 if (pmd_large(pmd)) 723 dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0; 724 return dirty; 725 } 726 727 static inline int pmd_young(pmd_t pmd) 728 { 729 int young = 1; 730 if (pmd_large(pmd)) 731 young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0; 732 return young; 733 } 734 735 static inline int pte_present(pte_t pte) 736 { 737 /* Bit pattern: (pte & 0x001) == 0x001 */ 738 return (pte_val(pte) & _PAGE_PRESENT) != 0; 739 } 740 741 static inline int pte_none(pte_t pte) 742 { 743 /* Bit pattern: pte == 0x400 */ 744 return pte_val(pte) == _PAGE_INVALID; 745 } 746 747 static inline int pte_swap(pte_t pte) 748 { 749 /* Bit pattern: (pte & 0x201) == 0x200 */ 750 return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT)) 751 == _PAGE_PROTECT; 752 } 753 754 static inline int pte_special(pte_t pte) 755 { 756 return (pte_val(pte) & _PAGE_SPECIAL); 757 } 758 759 #define __HAVE_ARCH_PTE_SAME 760 static inline int pte_same(pte_t a, pte_t b) 761 { 762 return pte_val(a) == pte_val(b); 763 } 764 765 #ifdef CONFIG_NUMA_BALANCING 766 static inline int pte_protnone(pte_t pte) 767 { 768 return pte_present(pte) && !(pte_val(pte) & _PAGE_READ); 769 } 770 771 static inline int pmd_protnone(pmd_t pmd) 772 { 773 /* pmd_large(pmd) implies pmd_present(pmd) */ 774 return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ); 775 } 776 #endif 777 778 static inline int pte_soft_dirty(pte_t pte) 779 { 780 return pte_val(pte) & _PAGE_SOFT_DIRTY; 781 } 782 #define pte_swp_soft_dirty pte_soft_dirty 783 784 static inline pte_t pte_mksoft_dirty(pte_t pte) 785 { 786 pte_val(pte) |= _PAGE_SOFT_DIRTY; 787 return pte; 788 } 789 #define pte_swp_mksoft_dirty pte_mksoft_dirty 790 791 static inline pte_t pte_clear_soft_dirty(pte_t pte) 792 { 793 pte_val(pte) &= ~_PAGE_SOFT_DIRTY; 794 return pte; 795 } 796 #define pte_swp_clear_soft_dirty pte_clear_soft_dirty 797 798 static inline int pmd_soft_dirty(pmd_t pmd) 799 { 800 return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY; 801 } 802 803 static inline pmd_t pmd_mksoft_dirty(pmd_t pmd) 804 { 805 pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY; 806 return pmd; 807 } 808 809 static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd) 810 { 811 pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY; 812 return pmd; 813 } 814 815 /* 816 * query functions pte_write/pte_dirty/pte_young only work if 817 * pte_present() is true. Undefined behaviour if not.. 818 */ 819 static inline int pte_write(pte_t pte) 820 { 821 return (pte_val(pte) & _PAGE_WRITE) != 0; 822 } 823 824 static inline int pte_dirty(pte_t pte) 825 { 826 return (pte_val(pte) & _PAGE_DIRTY) != 0; 827 } 828 829 static inline int pte_young(pte_t pte) 830 { 831 return (pte_val(pte) & _PAGE_YOUNG) != 0; 832 } 833 834 #define __HAVE_ARCH_PTE_UNUSED 835 static inline int pte_unused(pte_t pte) 836 { 837 return pte_val(pte) & _PAGE_UNUSED; 838 } 839 840 /* 841 * pgd/pmd/pte modification functions 842 */ 843 844 static inline void pgd_clear(pgd_t *pgd) 845 { 846 if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1) 847 pgd_val(*pgd) = _REGION1_ENTRY_EMPTY; 848 } 849 850 static inline void p4d_clear(p4d_t *p4d) 851 { 852 if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) 853 p4d_val(*p4d) = _REGION2_ENTRY_EMPTY; 854 } 855 856 static inline void pud_clear(pud_t *pud) 857 { 858 if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) 859 pud_val(*pud) = _REGION3_ENTRY_EMPTY; 860 } 861 862 static inline void pmd_clear(pmd_t *pmdp) 863 { 864 pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY; 865 } 866 867 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 868 { 869 pte_val(*ptep) = _PAGE_INVALID; 870 } 871 872 /* 873 * The following pte modification functions only work if 874 * pte_present() is true. Undefined behaviour if not.. 875 */ 876 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 877 { 878 pte_val(pte) &= _PAGE_CHG_MASK; 879 pte_val(pte) |= pgprot_val(newprot); 880 /* 881 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX 882 * has the invalid bit set, clear it again for readable, young pages 883 */ 884 if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ)) 885 pte_val(pte) &= ~_PAGE_INVALID; 886 /* 887 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page 888 * protection bit set, clear it again for writable, dirty pages 889 */ 890 if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE)) 891 pte_val(pte) &= ~_PAGE_PROTECT; 892 return pte; 893 } 894 895 static inline pte_t pte_wrprotect(pte_t pte) 896 { 897 pte_val(pte) &= ~_PAGE_WRITE; 898 pte_val(pte) |= _PAGE_PROTECT; 899 return pte; 900 } 901 902 static inline pte_t pte_mkwrite(pte_t pte) 903 { 904 pte_val(pte) |= _PAGE_WRITE; 905 if (pte_val(pte) & _PAGE_DIRTY) 906 pte_val(pte) &= ~_PAGE_PROTECT; 907 return pte; 908 } 909 910 static inline pte_t pte_mkclean(pte_t pte) 911 { 912 pte_val(pte) &= ~_PAGE_DIRTY; 913 pte_val(pte) |= _PAGE_PROTECT; 914 return pte; 915 } 916 917 static inline pte_t pte_mkdirty(pte_t pte) 918 { 919 pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY; 920 if (pte_val(pte) & _PAGE_WRITE) 921 pte_val(pte) &= ~_PAGE_PROTECT; 922 return pte; 923 } 924 925 static inline pte_t pte_mkold(pte_t pte) 926 { 927 pte_val(pte) &= ~_PAGE_YOUNG; 928 pte_val(pte) |= _PAGE_INVALID; 929 return pte; 930 } 931 932 static inline pte_t pte_mkyoung(pte_t pte) 933 { 934 pte_val(pte) |= _PAGE_YOUNG; 935 if (pte_val(pte) & _PAGE_READ) 936 pte_val(pte) &= ~_PAGE_INVALID; 937 return pte; 938 } 939 940 static inline pte_t pte_mkspecial(pte_t pte) 941 { 942 pte_val(pte) |= _PAGE_SPECIAL; 943 return pte; 944 } 945 946 #ifdef CONFIG_HUGETLB_PAGE 947 static inline pte_t pte_mkhuge(pte_t pte) 948 { 949 pte_val(pte) |= _PAGE_LARGE; 950 return pte; 951 } 952 #endif 953 954 #define IPTE_GLOBAL 0 955 #define IPTE_LOCAL 1 956 957 #define IPTE_NODAT 0x400 958 #define IPTE_GUEST_ASCE 0x800 959 960 static inline void __ptep_ipte(unsigned long address, pte_t *ptep, 961 unsigned long opt, unsigned long asce, 962 int local) 963 { 964 unsigned long pto = (unsigned long) ptep; 965 966 if (__builtin_constant_p(opt) && opt == 0) { 967 /* Invalidation + TLB flush for the pte */ 968 asm volatile( 969 " .insn rrf,0xb2210000,%[r1],%[r2],0,%[m4]" 970 : "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address), 971 [m4] "i" (local)); 972 return; 973 } 974 975 /* Invalidate ptes with options + TLB flush of the ptes */ 976 opt = opt | (asce & _ASCE_ORIGIN); 977 asm volatile( 978 " .insn rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]" 979 : [r2] "+a" (address), [r3] "+a" (opt) 980 : [r1] "a" (pto), [m4] "i" (local) : "memory"); 981 } 982 983 static inline void __ptep_ipte_range(unsigned long address, int nr, 984 pte_t *ptep, int local) 985 { 986 unsigned long pto = (unsigned long) ptep; 987 988 /* Invalidate a range of ptes + TLB flush of the ptes */ 989 do { 990 asm volatile( 991 " .insn rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]" 992 : [r2] "+a" (address), [r3] "+a" (nr) 993 : [r1] "a" (pto), [m4] "i" (local) : "memory"); 994 } while (nr != 255); 995 } 996 997 /* 998 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush 999 * both clear the TLB for the unmapped pte. The reason is that 1000 * ptep_get_and_clear is used in common code (e.g. change_pte_range) 1001 * to modify an active pte. The sequence is 1002 * 1) ptep_get_and_clear 1003 * 2) set_pte_at 1004 * 3) flush_tlb_range 1005 * On s390 the tlb needs to get flushed with the modification of the pte 1006 * if the pte is active. The only way how this can be implemented is to 1007 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range 1008 * is a nop. 1009 */ 1010 pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t); 1011 pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t); 1012 1013 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG 1014 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, 1015 unsigned long addr, pte_t *ptep) 1016 { 1017 pte_t pte = *ptep; 1018 1019 pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte)); 1020 return pte_young(pte); 1021 } 1022 1023 #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH 1024 static inline int ptep_clear_flush_young(struct vm_area_struct *vma, 1025 unsigned long address, pte_t *ptep) 1026 { 1027 return ptep_test_and_clear_young(vma, address, ptep); 1028 } 1029 1030 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR 1031 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, 1032 unsigned long addr, pte_t *ptep) 1033 { 1034 return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID)); 1035 } 1036 1037 #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION 1038 pte_t ptep_modify_prot_start(struct mm_struct *, unsigned long, pte_t *); 1039 void ptep_modify_prot_commit(struct mm_struct *, unsigned long, pte_t *, pte_t); 1040 1041 #define __HAVE_ARCH_PTEP_CLEAR_FLUSH 1042 static inline pte_t ptep_clear_flush(struct vm_area_struct *vma, 1043 unsigned long addr, pte_t *ptep) 1044 { 1045 return ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID)); 1046 } 1047 1048 /* 1049 * The batched pte unmap code uses ptep_get_and_clear_full to clear the 1050 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all 1051 * tlbs of an mm if it can guarantee that the ptes of the mm_struct 1052 * cannot be accessed while the batched unmap is running. In this case 1053 * full==1 and a simple pte_clear is enough. See tlb.h. 1054 */ 1055 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL 1056 static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, 1057 unsigned long addr, 1058 pte_t *ptep, int full) 1059 { 1060 if (full) { 1061 pte_t pte = *ptep; 1062 *ptep = __pte(_PAGE_INVALID); 1063 return pte; 1064 } 1065 return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID)); 1066 } 1067 1068 #define __HAVE_ARCH_PTEP_SET_WRPROTECT 1069 static inline void ptep_set_wrprotect(struct mm_struct *mm, 1070 unsigned long addr, pte_t *ptep) 1071 { 1072 pte_t pte = *ptep; 1073 1074 if (pte_write(pte)) 1075 ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte)); 1076 } 1077 1078 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS 1079 static inline int ptep_set_access_flags(struct vm_area_struct *vma, 1080 unsigned long addr, pte_t *ptep, 1081 pte_t entry, int dirty) 1082 { 1083 if (pte_same(*ptep, entry)) 1084 return 0; 1085 ptep_xchg_direct(vma->vm_mm, addr, ptep, entry); 1086 return 1; 1087 } 1088 1089 /* 1090 * Additional functions to handle KVM guest page tables 1091 */ 1092 void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr, 1093 pte_t *ptep, pte_t entry); 1094 void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep); 1095 void ptep_notify(struct mm_struct *mm, unsigned long addr, 1096 pte_t *ptep, unsigned long bits); 1097 int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr, 1098 pte_t *ptep, int prot, unsigned long bit); 1099 void ptep_zap_unused(struct mm_struct *mm, unsigned long addr, 1100 pte_t *ptep , int reset); 1101 void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep); 1102 int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr, 1103 pte_t *sptep, pte_t *tptep, pte_t pte); 1104 void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep); 1105 1106 bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address, 1107 pte_t *ptep); 1108 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, 1109 unsigned char key, bool nq); 1110 int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr, 1111 unsigned char key, unsigned char *oldkey, 1112 bool nq, bool mr, bool mc); 1113 int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr); 1114 int get_guest_storage_key(struct mm_struct *mm, unsigned long addr, 1115 unsigned char *key); 1116 1117 int set_pgste_bits(struct mm_struct *mm, unsigned long addr, 1118 unsigned long bits, unsigned long value); 1119 int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep); 1120 int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc, 1121 unsigned long *oldpte, unsigned long *oldpgste); 1122 void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr); 1123 void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr); 1124 void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr); 1125 void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr); 1126 1127 /* 1128 * Certain architectures need to do special things when PTEs 1129 * within a page table are directly modified. Thus, the following 1130 * hook is made available. 1131 */ 1132 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, 1133 pte_t *ptep, pte_t entry) 1134 { 1135 if (!MACHINE_HAS_NX) 1136 pte_val(entry) &= ~_PAGE_NOEXEC; 1137 if (pte_present(entry)) 1138 pte_val(entry) &= ~_PAGE_UNUSED; 1139 if (mm_has_pgste(mm)) 1140 ptep_set_pte_at(mm, addr, ptep, entry); 1141 else 1142 *ptep = entry; 1143 } 1144 1145 /* 1146 * Conversion functions: convert a page and protection to a page entry, 1147 * and a page entry and page directory to the page they refer to. 1148 */ 1149 static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot) 1150 { 1151 pte_t __pte; 1152 pte_val(__pte) = physpage + pgprot_val(pgprot); 1153 return pte_mkyoung(__pte); 1154 } 1155 1156 static inline pte_t mk_pte(struct page *page, pgprot_t pgprot) 1157 { 1158 unsigned long physpage = page_to_phys(page); 1159 pte_t __pte = mk_pte_phys(physpage, pgprot); 1160 1161 if (pte_write(__pte) && PageDirty(page)) 1162 __pte = pte_mkdirty(__pte); 1163 return __pte; 1164 } 1165 1166 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) 1167 #define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1)) 1168 #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) 1169 #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) 1170 #define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1)) 1171 1172 #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) 1173 #define pgd_offset_k(address) pgd_offset(&init_mm, address) 1174 1175 #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) 1176 #define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN) 1177 #define p4d_deref(pud) (p4d_val(pud) & _REGION_ENTRY_ORIGIN) 1178 #define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) 1179 1180 static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address) 1181 { 1182 p4d_t *p4d = (p4d_t *) pgd; 1183 1184 if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1) 1185 p4d = (p4d_t *) pgd_deref(*pgd); 1186 return p4d + p4d_index(address); 1187 } 1188 1189 static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address) 1190 { 1191 pud_t *pud = (pud_t *) p4d; 1192 1193 if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) 1194 pud = (pud_t *) p4d_deref(*p4d); 1195 return pud + pud_index(address); 1196 } 1197 1198 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) 1199 { 1200 pmd_t *pmd = (pmd_t *) pud; 1201 1202 if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) 1203 pmd = (pmd_t *) pud_deref(*pud); 1204 return pmd + pmd_index(address); 1205 } 1206 1207 #define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot)) 1208 #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT) 1209 #define pte_page(x) pfn_to_page(pte_pfn(x)) 1210 1211 #define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd)) 1212 #define pud_page(pud) pfn_to_page(pud_pfn(pud)) 1213 #define p4d_page(pud) pfn_to_page(p4d_pfn(p4d)) 1214 1215 /* Find an entry in the lowest level page table.. */ 1216 #define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr)) 1217 #define pte_offset_kernel(pmd, address) pte_offset(pmd,address) 1218 #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address) 1219 #define pte_unmap(pte) do { } while (0) 1220 1221 static inline pmd_t pmd_wrprotect(pmd_t pmd) 1222 { 1223 pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE; 1224 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; 1225 return pmd; 1226 } 1227 1228 static inline pmd_t pmd_mkwrite(pmd_t pmd) 1229 { 1230 pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE; 1231 if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)) 1232 return pmd; 1233 pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT; 1234 return pmd; 1235 } 1236 1237 static inline pmd_t pmd_mkclean(pmd_t pmd) 1238 { 1239 if (pmd_large(pmd)) { 1240 pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY; 1241 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; 1242 } 1243 return pmd; 1244 } 1245 1246 static inline pmd_t pmd_mkdirty(pmd_t pmd) 1247 { 1248 if (pmd_large(pmd)) { 1249 pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY | 1250 _SEGMENT_ENTRY_SOFT_DIRTY; 1251 if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) 1252 pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT; 1253 } 1254 return pmd; 1255 } 1256 1257 static inline pud_t pud_wrprotect(pud_t pud) 1258 { 1259 pud_val(pud) &= ~_REGION3_ENTRY_WRITE; 1260 pud_val(pud) |= _REGION_ENTRY_PROTECT; 1261 return pud; 1262 } 1263 1264 static inline pud_t pud_mkwrite(pud_t pud) 1265 { 1266 pud_val(pud) |= _REGION3_ENTRY_WRITE; 1267 if (pud_large(pud) && !(pud_val(pud) & _REGION3_ENTRY_DIRTY)) 1268 return pud; 1269 pud_val(pud) &= ~_REGION_ENTRY_PROTECT; 1270 return pud; 1271 } 1272 1273 static inline pud_t pud_mkclean(pud_t pud) 1274 { 1275 if (pud_large(pud)) { 1276 pud_val(pud) &= ~_REGION3_ENTRY_DIRTY; 1277 pud_val(pud) |= _REGION_ENTRY_PROTECT; 1278 } 1279 return pud; 1280 } 1281 1282 static inline pud_t pud_mkdirty(pud_t pud) 1283 { 1284 if (pud_large(pud)) { 1285 pud_val(pud) |= _REGION3_ENTRY_DIRTY | 1286 _REGION3_ENTRY_SOFT_DIRTY; 1287 if (pud_val(pud) & _REGION3_ENTRY_WRITE) 1288 pud_val(pud) &= ~_REGION_ENTRY_PROTECT; 1289 } 1290 return pud; 1291 } 1292 1293 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE) 1294 static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot) 1295 { 1296 /* 1297 * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX 1298 * (see __Pxxx / __Sxxx). Convert to segment table entry format. 1299 */ 1300 if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE)) 1301 return pgprot_val(SEGMENT_NONE); 1302 if (pgprot_val(pgprot) == pgprot_val(PAGE_RO)) 1303 return pgprot_val(SEGMENT_RO); 1304 if (pgprot_val(pgprot) == pgprot_val(PAGE_RX)) 1305 return pgprot_val(SEGMENT_RX); 1306 if (pgprot_val(pgprot) == pgprot_val(PAGE_RW)) 1307 return pgprot_val(SEGMENT_RW); 1308 return pgprot_val(SEGMENT_RWX); 1309 } 1310 1311 static inline pmd_t pmd_mkyoung(pmd_t pmd) 1312 { 1313 if (pmd_large(pmd)) { 1314 pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG; 1315 if (pmd_val(pmd) & _SEGMENT_ENTRY_READ) 1316 pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID; 1317 } 1318 return pmd; 1319 } 1320 1321 static inline pmd_t pmd_mkold(pmd_t pmd) 1322 { 1323 if (pmd_large(pmd)) { 1324 pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG; 1325 pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID; 1326 } 1327 return pmd; 1328 } 1329 1330 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) 1331 { 1332 if (pmd_large(pmd)) { 1333 pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE | 1334 _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG | 1335 _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY; 1336 pmd_val(pmd) |= massage_pgprot_pmd(newprot); 1337 if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)) 1338 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; 1339 if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG)) 1340 pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID; 1341 return pmd; 1342 } 1343 pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN; 1344 pmd_val(pmd) |= massage_pgprot_pmd(newprot); 1345 return pmd; 1346 } 1347 1348 static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot) 1349 { 1350 pmd_t __pmd; 1351 pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot); 1352 return __pmd; 1353 } 1354 1355 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */ 1356 1357 static inline void __pmdp_csp(pmd_t *pmdp) 1358 { 1359 csp((unsigned int *)pmdp + 1, pmd_val(*pmdp), 1360 pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID); 1361 } 1362 1363 #define IDTE_GLOBAL 0 1364 #define IDTE_LOCAL 1 1365 1366 #define IDTE_PTOA 0x0800 1367 #define IDTE_NODAT 0x1000 1368 #define IDTE_GUEST_ASCE 0x2000 1369 1370 static inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp, 1371 unsigned long opt, unsigned long asce, 1372 int local) 1373 { 1374 unsigned long sto; 1375 1376 sto = (unsigned long) pmdp - pmd_index(addr) * sizeof(pmd_t); 1377 if (__builtin_constant_p(opt) && opt == 0) { 1378 /* flush without guest asce */ 1379 asm volatile( 1380 " .insn rrf,0xb98e0000,%[r1],%[r2],0,%[m4]" 1381 : "+m" (*pmdp) 1382 : [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)), 1383 [m4] "i" (local) 1384 : "cc" ); 1385 } else { 1386 /* flush with guest asce */ 1387 asm volatile( 1388 " .insn rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]" 1389 : "+m" (*pmdp) 1390 : [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt), 1391 [r3] "a" (asce), [m4] "i" (local) 1392 : "cc" ); 1393 } 1394 } 1395 1396 static inline void __pudp_idte(unsigned long addr, pud_t *pudp, 1397 unsigned long opt, unsigned long asce, 1398 int local) 1399 { 1400 unsigned long r3o; 1401 1402 r3o = (unsigned long) pudp - pud_index(addr) * sizeof(pud_t); 1403 r3o |= _ASCE_TYPE_REGION3; 1404 if (__builtin_constant_p(opt) && opt == 0) { 1405 /* flush without guest asce */ 1406 asm volatile( 1407 " .insn rrf,0xb98e0000,%[r1],%[r2],0,%[m4]" 1408 : "+m" (*pudp) 1409 : [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)), 1410 [m4] "i" (local) 1411 : "cc"); 1412 } else { 1413 /* flush with guest asce */ 1414 asm volatile( 1415 " .insn rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]" 1416 : "+m" (*pudp) 1417 : [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt), 1418 [r3] "a" (asce), [m4] "i" (local) 1419 : "cc" ); 1420 } 1421 } 1422 1423 pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t); 1424 pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t); 1425 pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t); 1426 1427 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1428 1429 #define __HAVE_ARCH_PGTABLE_DEPOSIT 1430 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, 1431 pgtable_t pgtable); 1432 1433 #define __HAVE_ARCH_PGTABLE_WITHDRAW 1434 pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); 1435 1436 #define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS 1437 static inline int pmdp_set_access_flags(struct vm_area_struct *vma, 1438 unsigned long addr, pmd_t *pmdp, 1439 pmd_t entry, int dirty) 1440 { 1441 VM_BUG_ON(addr & ~HPAGE_MASK); 1442 1443 entry = pmd_mkyoung(entry); 1444 if (dirty) 1445 entry = pmd_mkdirty(entry); 1446 if (pmd_val(*pmdp) == pmd_val(entry)) 1447 return 0; 1448 pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry); 1449 return 1; 1450 } 1451 1452 #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG 1453 static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, 1454 unsigned long addr, pmd_t *pmdp) 1455 { 1456 pmd_t pmd = *pmdp; 1457 1458 pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd)); 1459 return pmd_young(pmd); 1460 } 1461 1462 #define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH 1463 static inline int pmdp_clear_flush_young(struct vm_area_struct *vma, 1464 unsigned long addr, pmd_t *pmdp) 1465 { 1466 VM_BUG_ON(addr & ~HPAGE_MASK); 1467 return pmdp_test_and_clear_young(vma, addr, pmdp); 1468 } 1469 1470 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, 1471 pmd_t *pmdp, pmd_t entry) 1472 { 1473 if (!MACHINE_HAS_NX) 1474 pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC; 1475 *pmdp = entry; 1476 } 1477 1478 static inline pmd_t pmd_mkhuge(pmd_t pmd) 1479 { 1480 pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE; 1481 pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG; 1482 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; 1483 return pmd; 1484 } 1485 1486 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR 1487 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, 1488 unsigned long addr, pmd_t *pmdp) 1489 { 1490 return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY)); 1491 } 1492 1493 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL 1494 static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm, 1495 unsigned long addr, 1496 pmd_t *pmdp, int full) 1497 { 1498 if (full) { 1499 pmd_t pmd = *pmdp; 1500 *pmdp = __pmd(_SEGMENT_ENTRY_EMPTY); 1501 return pmd; 1502 } 1503 return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY)); 1504 } 1505 1506 #define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH 1507 static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, 1508 unsigned long addr, pmd_t *pmdp) 1509 { 1510 return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp); 1511 } 1512 1513 #define __HAVE_ARCH_PMDP_INVALIDATE 1514 static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma, 1515 unsigned long addr, pmd_t *pmdp) 1516 { 1517 pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID); 1518 1519 return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd); 1520 } 1521 1522 #define __HAVE_ARCH_PMDP_SET_WRPROTECT 1523 static inline void pmdp_set_wrprotect(struct mm_struct *mm, 1524 unsigned long addr, pmd_t *pmdp) 1525 { 1526 pmd_t pmd = *pmdp; 1527 1528 if (pmd_write(pmd)) 1529 pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd)); 1530 } 1531 1532 static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, 1533 unsigned long address, 1534 pmd_t *pmdp) 1535 { 1536 return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp); 1537 } 1538 #define pmdp_collapse_flush pmdp_collapse_flush 1539 1540 #define pfn_pmd(pfn, pgprot) mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot)) 1541 #define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot)) 1542 1543 static inline int pmd_trans_huge(pmd_t pmd) 1544 { 1545 return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE; 1546 } 1547 1548 #define has_transparent_hugepage has_transparent_hugepage 1549 static inline int has_transparent_hugepage(void) 1550 { 1551 return MACHINE_HAS_EDAT1 ? 1 : 0; 1552 } 1553 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 1554 1555 /* 1556 * 64 bit swap entry format: 1557 * A page-table entry has some bits we have to treat in a special way. 1558 * Bits 52 and bit 55 have to be zero, otherwise a specification 1559 * exception will occur instead of a page translation exception. The 1560 * specification exception has the bad habit not to store necessary 1561 * information in the lowcore. 1562 * Bits 54 and 63 are used to indicate the page type. 1563 * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200 1564 * This leaves the bits 0-51 and bits 56-62 to store type and offset. 1565 * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51 1566 * for the offset. 1567 * | offset |01100|type |00| 1568 * |0000000000111111111122222222223333333333444444444455|55555|55566|66| 1569 * |0123456789012345678901234567890123456789012345678901|23456|78901|23| 1570 */ 1571 1572 #define __SWP_OFFSET_MASK ((1UL << 52) - 1) 1573 #define __SWP_OFFSET_SHIFT 12 1574 #define __SWP_TYPE_MASK ((1UL << 5) - 1) 1575 #define __SWP_TYPE_SHIFT 2 1576 1577 static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) 1578 { 1579 pte_t pte; 1580 1581 pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT; 1582 pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT; 1583 pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT; 1584 return pte; 1585 } 1586 1587 static inline unsigned long __swp_type(swp_entry_t entry) 1588 { 1589 return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK; 1590 } 1591 1592 static inline unsigned long __swp_offset(swp_entry_t entry) 1593 { 1594 return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK; 1595 } 1596 1597 static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset) 1598 { 1599 return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) }; 1600 } 1601 1602 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) 1603 #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) 1604 1605 #define kern_addr_valid(addr) (1) 1606 1607 extern int vmem_add_mapping(unsigned long start, unsigned long size); 1608 extern int vmem_remove_mapping(unsigned long start, unsigned long size); 1609 extern int s390_enable_sie(void); 1610 extern int s390_enable_skey(void); 1611 extern void s390_reset_cmma(struct mm_struct *mm); 1612 1613 /* s390 has a private copy of get unmapped area to deal with cache synonyms */ 1614 #define HAVE_ARCH_UNMAPPED_AREA 1615 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN 1616 1617 /* 1618 * No page table caches to initialise 1619 */ 1620 static inline void pgtable_cache_init(void) { } 1621 static inline void check_pgt_cache(void) { } 1622 1623 #include <asm-generic/pgtable.h> 1624 1625 #endif /* _S390_PAGE_H */ 1626