/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/bug.h>
#include <asm/page.h>
#include <asm/uv.h>

extern pgd_t swapper_pg_dir[];
extern void paging_init(void);

enum {
	PG_DIRECT_MAP_4K = 0,
	PG_DIRECT_MAP_1M,
	PG_DIRECT_MAP_2G,
	PG_DIRECT_MAP_MAX
};

extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

static inline void update_page_count(int level, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[level]);
}

struct seq_file;
void arch_report_meminfo(struct seq_file *m);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)     do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
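
/*
 * Illustrative sketch (the mask value is made up): with a hypothetical
 * zero_page_mask of 0x3000 there are four zero pages to spread cache
 * colours over, so
 *
 *	ZERO_PAGE(0x12000);	// page at empty_zero_page + 0x2000
 *	ZERO_PAGE(0x13000);	// page at empty_zero_page + 0x3000
 *
 * return two different struct pages that both read as zeroes.
 */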

/* TODO: s390 cannot support io_remap_pfn_range... */

#define FIRST_USER_ADDRESS  0UL

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define p4d_ERROR(e) \
	printk("%s:%d: bad p4d %p.\n", __FILE__, __LINE__, (void *) p4d_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. We reserve 128GB (64bit) for vmalloc and modules.
 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
 * modules will reside. That makes sure that inter-module branches always
 * happen without trampolines and in addition the placement within a 2GB frame
 * is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
#define VMALLOC_DEFAULT_SIZE	((128UL << 30) - MODULES_LEN)
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)

static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}

/*
 * A 64 bit page table entry of S390 has the following format:
 * |			 PFRA			      |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin				      |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit: Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin				     |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin				     |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_NOEXEC	0x100		/* HW no-execute bit  */
#define _PAGE_PROTECT	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)

/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the page
 * table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte
 * (. unassigned bit, x don't care, t swap type):
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR.uswrdy.p
 * empty			.10.00000000
 * swap				.11..ttttt.0
 * prot-none, clean, old	.11.xx0000.1
 * prot-none, clean, young	.11.xx0001.1
 * prot-none, dirty, old	.11.xx0010.1
 * prot-none, dirty, young	.11.xx0011.1
 * read-only, clean, old	.11.xx0100.1
 * read-only, clean, young	.01.xx0101.1
 * read-only, dirty, old	.11.xx0110.1
 * read-only, dirty, young	.01.xx0111.1
 * read-write, clean, old	.11.xx1100.1
 * read-write, clean, young	.01.xx1101.1
 * read-write, dirty, old	.10.xx1110.1
 * read-write, dirty, young	.00.xx1111.1
 * HW-bits: R read-only, I invalid
 * SW-bits: p present, y young, d dirty, r read, w write, s special,
 *	    u unused, l large
 *
 * pte_none    is true for the bit pattern .10.00000000, pte == 0x400
 * pte_swap    is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200
 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
 */
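
/*
 * Worked example for the table above (illustrative only): a read-only,
 * clean, young pte matches the ".01.xx0101.1" line, i.e. the value
 *
 *	_PAGE_PROTECT | _PAGE_READ | _PAGE_YOUNG | _PAGE_PRESENT
 *
 * HW protect set, HW invalid clear, and the SW read/young/present
 * bits set.
 */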

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL	/* region/segment table origin	  */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	  */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		  */
#define _ASCE_REAL_SPACE	0x20	/* real space control		  */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		  */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	  */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	  */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	  */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		  */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		  */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL	/* region/segment table origin	  */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit	  */
#define _REGION_ENTRY_NOEXEC	0x100	/* region no-execute bit	  */
#define _REGION_ENTRY_OFFSET	0xc0	/* region table offset		  */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry	  */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region table type mask	  */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	  */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	  */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	  */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length		  */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address */
#define _REGION3_ENTRY_DIRTY	0x2000	/* SW region dirty bit */
#define _REGION3_ENTRY_YOUNG	0x1000	/* SW region young bit */
#define _REGION3_ENTRY_LARGE	0x0400	/* RTTE-format control, large page */
#define _REGION3_ENTRY_READ	0x0002	/* SW region read bit */
#define _REGION3_ENTRY_WRITE	0x0001	/* SW region write bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
#else
#define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
#endif

#define _REGION_ENTRY_BITS	0xfffffffffffff22fUL

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS			0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_HARDWARE_BITS		0xfffffffffffffe30UL
#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE	0xfffffffffff00730UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE	~0xfffffUL /* large page address */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL	/* page table origin	  */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* segment protection bit	  */
#define _SEGMENT_ENTRY_NOEXEC	0x100	/* segment no-execute bit	  */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry	  */
#define _SEGMENT_ENTRY_TYPE_MASK 0x0c	/* segment table type mask	  */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_WRITE	0x0002	/* SW segment write bit */
#define _SEGMENT_ENTRY_READ	0x0001	/* SW segment read bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif

#define _CRST_ENTRIES	2048	/* number of region/segment table entries */
#define _PAGE_ENTRIES	256	/* number of page table entries */

#define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8)
#define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8)

#define _REGION1_SHIFT	53
#define _REGION2_SHIFT	42
#define _REGION3_SHIFT	31
#define _SEGMENT_SHIFT	20

#define _REGION1_INDEX	(0x7ffUL << _REGION1_SHIFT)
#define _REGION2_INDEX	(0x7ffUL << _REGION2_SHIFT)
#define _REGION3_INDEX	(0x7ffUL << _REGION3_SHIFT)
#define _SEGMENT_INDEX	(0x7ffUL << _SEGMENT_SHIFT)
#define _PAGE_INDEX	(0xffUL << _PAGE_SHIFT)

#define _REGION1_SIZE	(1UL << _REGION1_SHIFT)
#define _REGION2_SIZE	(1UL << _REGION2_SHIFT)
#define _REGION3_SIZE	(1UL << _REGION3_SHIFT)
#define _SEGMENT_SIZE	(1UL << _SEGMENT_SHIFT)

#define _REGION1_MASK	(~(_REGION1_SIZE - 1))
#define _REGION2_MASK	(~(_REGION2_SIZE - 1))
#define _REGION3_MASK	(~(_REGION3_SIZE - 1))
#define _SEGMENT_MASK	(~(_SEGMENT_SIZE - 1))

#define PMD_SHIFT	_SEGMENT_SHIFT
#define PUD_SHIFT	_REGION3_SHIFT
#define P4D_SHIFT	_REGION2_SHIFT
#define PGDIR_SHIFT	_REGION1_SHIFT

#define PMD_SIZE	_SEGMENT_SIZE
#define PUD_SIZE	_REGION3_SIZE
#define P4D_SIZE	_REGION2_SIZE
#define PGDIR_SIZE	_REGION1_SIZE

#define PMD_MASK	_SEGMENT_MASK
#define PUD_MASK	_REGION3_MASK
#define P4D_MASK	_REGION2_MASK
#define PGDIR_MASK	_REGION1_MASK

#define PTRS_PER_PTE	_PAGE_ENTRIES
#define PTRS_PER_PMD	_CRST_ENTRIES
#define PTRS_PER_PUD	_CRST_ENTRIES
#define PTRS_PER_P4D	_CRST_ENTRIES
#define PTRS_PER_PGD	_CRST_ENTRIES

#define MAX_PTRS_PER_P4D	PTRS_PER_P4D

/*
 * Segment table and region3 table entry encoding
 * (R = read-only, I = invalid, y = young bit):
 *				dy..R...I...wr
 * prot-none, clean, old	00..1...1...00
 * prot-none, clean, young	01..1...1...00
 * prot-none, dirty, old	10..1...1...00
 * prot-none, dirty, young	11..1...1...00
 * read-only, clean, old	00..1...1...01
 * read-only, clean, young	01..1...0...01
 * read-only, dirty, old	10..1...1...01
 * read-only, dirty, young	11..1...0...01
 * read-write, clean, old	00..1...1...11
 * read-write, clean, young	01..1...0...11
 * read-write, dirty, old	10..0...1...11
 * read-write, dirty, young	11..0...0...11
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 * HW-bits: R read-only, I invalid
 * SW-bits: y young, d dirty, r read, w write
 */
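
/*
 * Worked example for the encoding above (illustrative only): a
 * read-write, dirty, young entry is "11..0...0...11", which is what
 * SEGMENT_KERNEL further down combines:
 *
 *	_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
 *	_SEGMENT_ENTRY_WRITE | _SEGMENT_ENTRY_READ
 *
 * with both the hardware protect and invalid bits clear.
 */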

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */
#define PGSTE_VSIE_BIT	0x0000200000000000UL	/* ref'd in a shadow table */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO			0x0000000080000000UL
#define _PGSTE_GPS_NODAT		0x0000000040000000UL
#define _PGSTE_GPS_USAGE_MASK		0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE		0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED		0x0000000001000000UL
#define _PGSTE_GPS_USAGE_POT_VOLATILE	0x0000000002000000UL
#define _PGSTE_GPS_USAGE_VOLATILE	_PGSTE_GPS_USAGE_MASK

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RO		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RX		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RW		__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT | _PAGE_NOEXEC)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				  _PAGE_YOUNG | _PAGE_DIRTY)

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
	 /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RX
#define __P101	PAGE_RX
#define __P110	PAGE_RX
#define __P111	PAGE_RX

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RX
#define __S101	PAGE_RX
#define __S110	PAGE_RWX
#define __S111	PAGE_RWX

/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_RO	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RX	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_RW	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RWX	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)
#define SEGMENT_KERNEL	__pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_WRITE |	\
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_DIRTY |	\
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_EXEC __pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_WRITE |	\
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_DIRTY)

/*
 * Region3 entry (large page) protection definitions.
 */

#define REGION3_KERNEL	__pgprot(_REGION_ENTRY_TYPE_R3 | \
				 _REGION3_ENTRY_LARGE |	 \
				 _REGION3_ENTRY_READ |	 \
				 _REGION3_ENTRY_WRITE |	 \
				 _REGION3_ENTRY_YOUNG |	 \
				 _REGION3_ENTRY_DIRTY |	 \
				 _REGION_ENTRY_NOEXEC)
#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
				   _REGION3_ENTRY_LARGE |  \
				   _REGION3_ENTRY_READ |   \
				   _REGION3_ENTRY_YOUNG |  \
				   _REGION_ENTRY_PROTECT | \
				   _REGION_ENTRY_NOEXEC)

static inline bool mm_p4d_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION1_SIZE;
}
#define mm_p4d_folded(mm) mm_p4d_folded(mm)

static inline bool mm_pud_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION2_SIZE;
}
#define mm_pud_folded(mm) mm_pud_folded(mm)

static inline bool mm_pmd_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION3_SIZE;
}
#define mm_pmd_folded(mm) mm_pmd_folded(mm)
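
/*
 * Example (illustrative): a task that starts with the usual three
 * level page table has context.asce_limit == _REGION2_SIZE. For such
 * an mm, mm_p4d_folded() and mm_pud_folded() are true and only
 * mm_pmd_folded() is false, so a walk goes straight from the pgd (a
 * region-third table) to the segment table entries.
 */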

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_is_protected(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(atomic_read(&mm->context.is_protected)))
		return 1;
#endif
	return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.alloc_pgste))
		return 1;
#endif
	return 0;
}

/*
 * In the case that a guest uses storage keys
 * faults should no longer be backed by zero pages
 */
#define mm_forbids_zeropage mm_has_pgste
static inline int mm_uses_skeys(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.uses_skeys)
		return 1;
#endif
	return 0;
}

/* Compare and swap and purge (CSP): replace a word and flush matching TLB entries */
static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	csp	%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

/* 64-bit variant (CSPG) */
static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	.insn	rre,0xb98a0000,%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

#define CRDTE_DTT_PAGE		0x00UL
#define CRDTE_DTT_SEGMENT	0x10UL
#define CRDTE_DTT_REGION3	0x14UL
#define CRDTE_DTT_REGION2	0x18UL
#define CRDTE_DTT_REGION1	0x1cUL

/* Compare and replace DAT table entry (CRDTE) */
static inline void crdte(unsigned long old, unsigned long new,
			 unsigned long table, unsigned long dtt,
			 unsigned long address, unsigned long asce)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	register unsigned long reg4 asm("4") = table | dtt;
	register unsigned long reg5 asm("5") = address;

	asm volatile(".insn rrf,0xb98f0000,%0,%2,%4,0"
		     : "+d" (reg2)
		     : "d" (reg3), "d" (reg4), "d" (reg5), "a" (asce)
		     : "memory", "cc");
}

/*
 * pgd/p4d/pud/pmd/pte query functions
 */
static inline int pgd_folded(pgd_t pgd)
{
	return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
}

static inline int pgd_present(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1)
		return 0;
	return (pgd_val(pgd) & ~_REGION_ENTRY_BITS) != 0;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT;
}

static inline int p4d_folded(p4d_t p4d)
{
	return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
}

static inline int p4d_present(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 1;
	return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int p4d_none(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 0;
	return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
}

static inline int pud_folded(pud_t pud)
{
	return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
}

static inline int pud_present(pud_t pud)
{
	if (pud_folded(pud))
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if (pud_folded(pud))
		return 0;
	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}

#define pud_leaf	pud_large
static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline unsigned long pud_pfn(pud_t pud)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	if (pud_large(pud))
		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
	return (pud_val(pud) & origin_mask) >> PAGE_SHIFT;
}

#define pmd_leaf	pmd_large
static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline int pmd_bad(pmd_t pmd)
{
	if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_large(pmd))
		return 1;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

static inline int pud_bad(pud_t pud)
{
	unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R3 || pud_large(pud))
		return 1;
	if (type < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int p4d_bad(p4d_t p4d)
{
	unsigned long type = p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R2)
		return 1;
	if (type < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
}

static inline int pmd_young(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x201) == 0x200 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		== _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
	/* pmd_large(pmd) implies pmd_present(pmd) */
	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif

static inline int pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}

/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		pgd_val(*pgd) = _REGION1_ENTRY_EMPTY;
}

static inline void p4d_clear(p4d_t *p4d)
{
	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		p4d_val(*p4d) = _REGION2_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
	 * has the invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte_val(pte) &= ~_PAGE_INVALID;
	/*
	 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
	 * protection bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_YOUNG;
	pte_val(pte) |= _PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_YOUNG;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) &= ~_PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_LARGE;
	return pte;
}
#endif

#define IPTE_GLOBAL	0
#define IPTE_LOCAL	1

#define IPTE_NODAT	0x400
#define IPTE_GUEST_ASCE	0x800

static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long pto = (unsigned long) ptep;

	if (__builtin_constant_p(opt) && opt == 0) {
		/* Invalidation + TLB flush for the pte */
		asm volatile(
			"	.insn	rrf,0xb2210000,%[r1],%[r2],0,%[m4]"
			: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
			  [m4] "i" (local));
		return;
	}

	/* Invalidate ptes with options + TLB flush of the ptes */
	opt = opt | (asce & _ASCE_ORIGIN);
	asm volatile(
		"	.insn	rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
		: [r2] "+a" (address), [r3] "+a" (opt)
		: [r1] "a" (pto), [m4] "i" (local) : "memory");
}

static __always_inline void __ptep_ipte_range(unsigned long address, int nr,
					      pte_t *ptep, int local)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidate a range of ptes + TLB flush of the ptes */
	do {
		asm volatile(
			"	.insn	rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
			: [r2] "+a" (address), [r3] "+a" (nr)
			: [r1] "a" (pto), [m4] "i" (local) : "memory");
	} while (nr != 255);
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);
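
/*
 * Sketch of the common-code sequence described above (simplified from
 * the generic change protection loop, not a drop-in):
 *
 *	pte = ptep_get_and_clear(mm, addr, ptep);	// flushes on s390
 *	pte = pte_modify(pte, newprot);
 *	set_pte_at(mm, addr, ptep, pte);
 *	...
 *	flush_tlb_range(vma, start, end);		// nop on s390
 */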

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return pte_young(pte);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	pte_t res;

	res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
	if (mm_is_protected(mm) && pte_present(res))
		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
	return res;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
			     pte_t *, pte_t, pte_t);

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep)
{
	pte_t res;

	res = ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
	if (mm_is_protected(vma->vm_mm) && pte_present(res))
		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
	return res;
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	pte_t res;

	if (full) {
		res = *ptep;
		*ptep = __pte(_PAGE_INVALID);
	} else {
		res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
	}
	if (mm_is_protected(mm) && pte_present(res))
		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
	return res;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_write(pte))
		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (pte_same(*ptep, entry))
		return 0;
	ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
	return 1;
}

/*
 * Additional functions to handle KVM guest page tables
 */
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry);
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void ptep_notify(struct mm_struct *mm, unsigned long addr,
		 pte_t *ptep, unsigned long bits);
int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
		    pte_t *ptep, int prot, unsigned long bit);
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset);
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte);
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);

bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address,
			    pte_t *ptep);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq);
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc);
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key);

int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
		   unsigned long bits, unsigned long value);
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
		       unsigned long *oldpte, unsigned long *oldpgste);
void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	if (pte_present(entry))
		pte_val(entry) &= ~_PAGE_UNUSED;
	if (mm_has_pgste(mm))
		ptep_set_pte_at(mm, addr, ptep, entry);
	else
		*ptep = entry;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;

	pte_val(__pte) = physpage + pgprot_val(pgprot);
	if (!MACHINE_HAS_NX)
		pte_val(__pte) &= ~_PAGE_NOEXEC;
	return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}
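
/*
 * Usage sketch (illustrative): building a read-write kernel pte for a
 * physical address; mk_pte_phys() also drops _PAGE_NOEXEC when the
 * machine has no NX facility:
 *
 *	pte_t pte = mk_pte_phys(__pa(vaddr), PAGE_KERNEL);
 */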

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define p4d_deref(pud) (p4d_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

/*
 * The pgd_offset function *always* adds the index for the top-level
 * region/segment table. This is done to get a sequence like the
 * following to work:
 *	pgdp = pgd_offset(current->mm, addr);
 *	pgd  = READ_ONCE(*pgdp);
 *	p4dp = p4d_offset(&pgd, addr);
 *	...
 * The subsequent p4d_offset, pud_offset and pmd_offset functions
 * only add an index if they dereferenced the pointer.
 */
static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
{
	unsigned long rste;
	unsigned int shift;

	/* Get the first entry of the top level table */
	rste = pgd_val(*pgd);
	/* Pick up the shift from the table type of the first entry */
	shift = ((rste & _REGION_ENTRY_TYPE_MASK) >> 2) * 11 + 20;
	return pgd + ((address >> shift) & (PTRS_PER_PGD - 1));
}

#define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)

static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
		return (p4d_t *) pgd_deref(*pgd) + p4d_index(address);
	return (p4d_t *) pgd;
}

static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
		return (pud_t *) p4d_deref(*p4d) + pud_index(address);
	return (pud_t *) p4d;
}
#define pud_offset pud_offset

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
		return (pmd_t *) pud_deref(*pud) + pmd_index(address);
	return (pmd_t *) pud;
}
#define pmd_offset pmd_offset
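
/*
 * Walk sketch using the helpers above (illustrative, no locking):
 *
 *	pgdp = pgd_offset(mm, addr);
 *	pgd  = READ_ONCE(*pgdp);
 *	p4dp = p4d_offset(&pgd, addr);
 *	pudp = pud_offset(p4dp, addr);
 *	pmdp = pmd_offset(pudp, addr);
 *
 * On a three level mm, p4d_offset() and pud_offset() return their
 * argument unchanged because the entry type is below the region
 * first/second table threshold; only pmd_offset() dereferences and
 * adds the segment index.
 */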

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long) pmd_deref(pmd);
}

static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
{
	return end <= current->mm->context.asce_limit;
}
#define gup_fast_permitted gup_fast_permitted

#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
#define pud_page(pud) pfn_to_page(pud_pfn(pud))
#define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
#define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
	if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY;
	if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	pud_val(pud) &= ~_REGION3_ENTRY_WRITE;
	pud_val(pud) |= _REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	pud_val(pud) |= _REGION3_ENTRY_WRITE;
	if (pud_val(pud) & _REGION3_ENTRY_DIRTY)
		pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkclean(pud_t pud)
{
	pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
	pud_val(pud) |= _REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	pud_val(pud) |= _REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY;
	if (pud_val(pud) & _REGION3_ENTRY_WRITE)
		pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	return pud;
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
	 * (see __Pxxx / __Sxxx). Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
		return pgprot_val(SEGMENT_RO);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
		return pgprot_val(SEGMENT_RX);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
		return pgprot_val(SEGMENT_RW);
	return pgprot_val(SEGMENT_RWX);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	return pmd;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
		_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
		_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;

	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */

static inline void __pmdp_csp(pmd_t *pmdp)
{
	csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
	    pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
}

#define IDTE_GLOBAL	0
#define IDTE_LOCAL	1

#define IDTE_PTOA	0x0800
#define IDTE_NODAT	0x1000
#define IDTE_GUEST_ASCE	0x2000

static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(addr) * sizeof(pmd_t);
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
			  [m4] "i" (local)
			: "cc" );
	} else {
		/* flush with guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long r3o;

	r3o = (unsigned long) pudp - pud_index(addr) * sizeof(pud_t);
	r3o |= _ASCE_TYPE_REGION3;
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)),
			  [m4] "i" (local)
			: "cc");
	} else {
		/* flush with guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);

	entry = pmd_mkyoung(entry);
	if (dirty)
		entry = pmd_mkdirty(entry);
	if (pmd_val(*pmdp) == pmd_val(entry))
		return 0;
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
	return 1;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
	return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);
	return pmdp_test_and_clear_young(vma, addr, pmdp);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	if (!MACHINE_HAS_NX)
		pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC;
	*pmdp = entry;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
						 unsigned long addr,
						 pmd_t *pmdp, int full)
{
	if (full) {
		pmd_t pmd = *pmdp;
		*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
		return pmd;
	}
	return pmdp_xchg_lazy(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);

	return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd))
		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_EDAT1 ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit of not storing the
 * necessary information in the lowcore.
 * Bits 54 and 63 are used to indicate the page type.
 * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200
 * This leaves the bits 0-51 and bits 56-62 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51
 * for the offset.
 * |			  offset			|01100|type |00|
 * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
 * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
 */

#define __SWP_OFFSET_MASK	((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_TYPE_MASK		((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT	2

static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
	pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
	pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
	return pte;
}

static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
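
/*
 * Round-trip sketch (illustrative): an entry built from (type, offset)
 * decodes back to the same pair, and the resulting pte carries the
 * swap pattern (pte & 0x201) == 0x200 checked by pte_swap():
 *
 *	swp_entry_t entry = __swp_entry(type, offset);
 *	__swp_type(entry);			// == type
 *	__swp_offset(entry);			// == offset
 *	pte_swap(__swp_entry_to_pte(entry));	// != 0
 */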

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/* s390 has a private copy of get unmapped area to deal with cache synonyms */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* _ASM_S390_PGTABLE_H */