/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

/*
 * The Linux memory management assumes a three-level page table setup.
 * For s390 64 bit we use up to four of the five levels the hardware
 * provides (region first tables are not used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)	do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep)	do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

/* TODO: s390 cannot support io_remap_pfn_range... */
#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#define PMD_SHIFT	20
#define PUD_SHIFT	31
#define PGDIR_SHIFT	42

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * Entries per page directory level: the lowest level page table holds
 * 256 pte entries; the pmd, pud and pgd levels hold 2048 entries each.
 */
#define PTRS_PER_PTE	256
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#define PTRS_PER_PGD	2048

#define FIRST_USER_ADDRESS	0UL

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

#ifndef __ASSEMBLY__
/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. We reserve 128GB (64bit) for vmalloc and modules.
 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
 * modules will reside. That makes sure that inter module branches always
 * happen without trampolines and in addition the placement within a 2GB frame
 * is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)

static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}

/*
 * A 64 bit page table entry of S390 has the following format:
 * |                     PFRA                           |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit: Segment is not available for address-translation
 * C Common-Segment Bit:  Segment is not private (PoP 3-30)
 * P Page-Protection Bit: Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit: Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_PROTECT	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */
#define __HAVE_ARCH_PTE_SPECIAL

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)

/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the page
 * table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte
 * (. unassigned bit, x don't care, t swap type):
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR.uswrdy.p
 * empty			.10.00000000
 * swap				.11..ttttt.0
 * prot-none, clean, old	.11.xx0000.1
 * prot-none, clean, young	.11.xx0001.1
 * prot-none, dirty, old	.10.xx0010.1
 * prot-none, dirty, young	.10.xx0011.1
 * read-only, clean, old	.11.xx0100.1
 * read-only, clean, young	.01.xx0101.1
 * read-only, dirty, old	.11.xx0110.1
 * read-only, dirty, young	.01.xx0111.1
 * read-write, clean, old	.11.xx1100.1
 * read-write, clean, young	.01.xx1101.1
 * read-write, dirty, old	.10.xx1110.1
 * read-write, dirty, young	.00.xx1111.1
 * HW-bits: R read-only, I invalid
 * SW-bits: p present, y young, d dirty, r read, w write, s special,
 *	    u unused, l large
 *
 * pte_none    is true for the bit pattern .10.00000000, pte == 0x400
 * pte_swap    is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200
 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
 */

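/*
 * For illustration only (not an additional definition): a present,
 * readable, writable, dirty and young pte combines
 * _PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_YOUNG
 * = 0x03d, which is the "read-write, dirty, young" line of the table
 * above: both hardware bits (I and R) are clear, so the page is mapped
 * and writable without protection faults. This is the bit combination
 * used for PAGE_KERNEL further down.
 */
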
/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL /* segment table origin	     */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	     */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control  */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		     */
#define _ASCE_REAL_SPACE	0x20	/* real space control		     */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		     */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	     */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	     */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	     */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		     */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		     */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL /* region/segment table origin	     */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit	     */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry	     */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask    */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	     */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	     */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	     */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length		     */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_LARGE	0x400	/* RTTE-format control, large page */
#define _REGION3_ENTRY_RO	0x200	/* page protection bit		   */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS	0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address	   */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL /* segment table origin	   */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* page protection bit		   */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry	   */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_READ	0x0002	/* SW segment read bit */
#define _SEGMENT_ENTRY_WRITE	0x0001	/* SW segment write bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif

/*
 * Segment table entry encoding (R = read-only, I = invalid, y = young bit):
 *				dy..R...I...wr
 * prot-none, clean, old	00..1...1...00
 * prot-none, clean, young	01..1...1...00
 * prot-none, dirty, old	10..1...1...00
 * prot-none, dirty, young	11..1...1...00
 * read-only, clean, old	00..1...1...01
 * read-only, clean, young	01..1...0...01
 * read-only, dirty, old	10..1...1...01
 * read-only, dirty, young	11..1...0...01
 * read-write, clean, old	00..1...1...11
 * read-write, clean, young	01..1...0...11
 * read-write, dirty, old	10..0...1...11
 * read-write, dirty, young	11..0...0...11
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 * HW-bits: R read-only, I invalid
 * SW-bits: y young, d dirty, r read, w write
 */

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO		0x0000000080000000UL
#define _PGSTE_GPS_USAGE_MASK	0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE 0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED 0x0000000001000000UL

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

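/*
 * For illustration (assumed usage, not an additional definition): an ASCE
 * for a user process whose topmost table is a full-length region third
 * table would combine the table origin with
 * _ASCE_USER_BITS | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH.
 */
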
/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID)
#define PAGE_READ	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_WRITE	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT)

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
	 /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_READ
#define __P010	PAGE_READ
#define __P011	PAGE_READ
#define __P100	PAGE_READ
#define __P101	PAGE_READ
#define __P110	PAGE_READ
#define __P111	PAGE_READ

#define __S000	PAGE_NONE
#define __S001	PAGE_READ
#define __S010	PAGE_WRITE
#define __S011	PAGE_WRITE
#define __S100	PAGE_READ
#define __S101	PAGE_READ
#define __S110	PAGE_WRITE
#define __S111	PAGE_WRITE

/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_READ	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_WRITE	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.alloc_pgste))
		return 1;
#endif
	return 0;
}

/*
 * If a guest uses storage keys, faults should no longer be backed by
 * zero pages.
 */
#define mm_forbids_zeropage mm_use_skey
static inline int mm_use_skey(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.use_skey)
		return 1;
#endif
	return 0;
}

/*
 * pgd/pmd/pte query functions
 */
static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_INVALID;
}

static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}

static inline int pmd_bad(pmd_t pmd)
{
	if (pmd_large(pmd))
		return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	int dirty = 1;
	if (pmd_large(pmd))
		dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
	return dirty;
}

static inline int pmd_young(pmd_t pmd)
{
	int young = 1;
	if (pmd_large(pmd))
		young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
	return young;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x201) == 0x200 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		== _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
	/* pmd_large(pmd) implies pmd_present(pmd) */
	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif

static inline int pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	preempt_disable();
	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear PCL bit in old */
		"	oihh	%1,0x0080\n"	/* set PCL bit in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
#endif
	return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear PCL bit */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
		: "cc", "memory");
	preempt_enable();
#endif
}

static inline pgste_t pgste_get(pte_t *ptep)
{
	unsigned long pgste = 0;
#ifdef CONFIG_PGSTE
	pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
#endif
	return __pgste(pgste);
}

static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
#endif
}

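/*
 * Typical usage pattern of the helpers above, as used by the pte
 * manipulation functions further down (illustrative sketch only):
 *
 *	pgste_t pgste;
 *
 *	if (mm_has_pgste(mm)) {
 *		pgste = pgste_get_lock(ptep);	// takes PCL bit, disables preemption
 *		... inspect or modify *ptep and pgste ...
 *		pgste_set_unlock(ptep, pgste);	// stores pgste, releases PCL bit
 *	}
 */
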
static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste,
				       struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits, skey;

	if (!mm_use_skey(mm) || pte_val(*ptep) & _PAGE_INVALID)
		return pgste;
	address = pte_val(*ptep) & PAGE_MASK;
	skey = (unsigned long) page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
#endif
	return pgste;
}

static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
				 struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long nkey;

	if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID)
		return;
	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
	address = pte_val(entry) & PAGE_MASK;
	/*
	 * Set page access key and fetch protection bit from pgste.
	 * The guest C/R information is still in the PGSTE, set real
	 * key C/R to 0.
	 */
	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	page_set_storage_key(address, nkey, 0);
#endif
}

static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
{
	if ((pte_val(entry) & _PAGE_PRESENT) &&
	    (pte_val(entry) & _PAGE_WRITE) &&
	    !(pte_val(entry) & _PAGE_INVALID)) {
		if (!MACHINE_HAS_ESOP) {
			/*
			 * Without enhanced suppression-on-protection force
			 * the dirty bit on for all writable ptes.
			 */
			pte_val(entry) |= _PAGE_DIRTY;
			pte_val(entry) &= ~_PAGE_PROTECT;
		}
		if (!(pte_val(entry) & _PAGE_PROTECT))
			/* This pte allows write access, set user-dirty */
			pgste_val(pgste) |= PGSTE_UC_BIT;
	}
	*ptep = entry;
	return pgste;
}

/**
 * struct gmap - guest address space
 * @crst_list: list of all crst tables used in the guest address space
 * @mm: pointer to the parent mm_struct
 * @guest_to_host: radix tree with guest to host address translation
 * @host_to_guest: radix tree with pointer to segment table entries
 * @guest_table_lock: spinlock to protect all entries in the guest page table
 * @table: pointer to the page directory
 * @asce: address space control element for gmap page table
 * @pfault_enabled: defines if pfaults are applicable for the guest
 */
struct gmap {
	struct list_head list;
	struct list_head crst_list;
	struct mm_struct *mm;
	struct radix_tree_root guest_to_host;
	struct radix_tree_root host_to_guest;
	spinlock_t guest_table_lock;
	unsigned long *table;
	unsigned long asce;
	unsigned long asce_end;
	void *private;
	bool pfault_enabled;
};

/**
 * struct gmap_notifier - notify function block for page invalidation
 * @notifier_call: address of callback function
 */
struct gmap_notifier {
	struct list_head list;
	void (*notifier_call)(struct gmap *gmap, unsigned long gaddr);
};

struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit);
void gmap_free(struct gmap *gmap);
void gmap_enable(struct gmap *gmap);
void gmap_disable(struct gmap *gmap);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
unsigned long __gmap_translate(struct gmap *, unsigned long gaddr);
unsigned long gmap_translate(struct gmap *, unsigned long gaddr);
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr);
int gmap_fault(struct gmap *, unsigned long gaddr, unsigned int fault_flags);
void gmap_discard(struct gmap *, unsigned long from, unsigned long to);
void __gmap_zap(struct gmap *, unsigned long gaddr);
bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *);

void gmap_register_ipte_notifier(struct gmap_notifier *);
void gmap_unregister_ipte_notifier(struct gmap_notifier *);
int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len);
void gmap_do_ipte_notify(struct mm_struct *, unsigned long addr, pte_t *);

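/*
 * Rough usage sketch of the gmap interface above (illustrative only; see
 * the s390 mm code and KVM for the real callers):
 *
 *	struct gmap *gmap = gmap_alloc(current->mm, limit);
 *	gmap_map_segment(gmap, from, to, len);	// map a host range into the guest
 *	gmap_enable(gmap);			// switch to the guest address space
 *	gmap_fault(gmap, gaddr, fault_flags);	// resolve a guest page fault
 *	gmap_disable(gmap);
 *	gmap_free(gmap);
 */
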
static inline pgste_t pgste_ipte_notify(struct mm_struct *mm,
					unsigned long addr,
					pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	if (pgste_val(pgste) & PGSTE_IN_BIT) {
		pgste_val(pgste) &= ~PGSTE_IN_BIT;
		gmap_do_ipte_notify(mm, addr, ptep);
	}
#endif
	return pgste;
}

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
		pgste_set_key(ptep, pgste, entry, mm);
		pgste = pgste_set_pte(ptep, pgste, entry);
		pgste_set_unlock(ptep, pgste);
	} else {
		*ptep = entry;
	}
}

/*
 * The query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}

/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_INVALID;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_READ and PAGE_WRITE has the
	 * invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte_val(pte) &= ~_PAGE_INVALID;
	/*
	 * newprot for PAGE_READ and PAGE_WRITE has the page protection
	 * bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_YOUNG;
	pte_val(pte) |= _PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_YOUNG;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) &= ~_PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_LARGE;
	return pte;
}
#endif

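/*
 * For illustration: starting from a read-write, dirty, young pte (value
 * 0x03d, see the bit table above), pte_wrprotect() clears _PAGE_WRITE and
 * sets _PAGE_PROTECT, giving 0x21d; a later pte_mkwrite() sets _PAGE_WRITE
 * again and, because the pte is dirty, also clears _PAGE_PROTECT, which
 * restores 0x03d.
 */
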
static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidation + global TLB flush for the pte */
	asm volatile(
		"	ipte	%2,%3"
		: "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
}

static inline void __ptep_ipte_local(unsigned long address, pte_t *ptep)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidation + local TLB flush for the pte */
	asm volatile(
		"	.insn rrf,0xb2210000,%2,%3,0,1"
		: "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
}

static inline void __ptep_ipte_range(unsigned long address, int nr, pte_t *ptep)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidate a range of ptes + global TLB flush of the ptes */
	do {
		asm volatile(
			"	.insn rrf,0xb2210000,%2,%0,%1,0"
			: "+a" (address), "+a" (nr) : "a" (pto) : "memory");
	} while (nr != 255);
}

static inline void ptep_flush_direct(struct mm_struct *mm,
				     unsigned long address, pte_t *ptep)
{
	int active, count;

	if (pte_val(*ptep) & _PAGE_INVALID)
		return;
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__ptep_ipte_local(address, ptep);
	else
		__ptep_ipte(address, ptep);
	atomic_sub(0x10000, &mm->context.attach_count);
}

static inline void ptep_flush_lazy(struct mm_struct *mm,
				   unsigned long address, pte_t *ptep)
{
	int active, count;

	if (pte_val(*ptep) & _PAGE_INVALID)
		return;
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if ((count & 0xffff) <= active) {
		pte_val(*ptep) |= _PAGE_INVALID;
		mm->context.flush_mm = 1;
	} else
		__ptep_ipte(address, ptep);
	atomic_sub(0x10000, &mm->context.attach_count);
}

/*
 * Get (and clear) the user dirty bit for a pte.
 */
static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
						 unsigned long addr,
						 pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;
	int dirty;

	if (!mm_has_pgste(mm))
		return 0;
	pgste = pgste_get_lock(ptep);
	dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
	pgste_val(pgste) &= ~PGSTE_UC_BIT;
	pte = *ptep;
	if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
		pgste = pgste_ipte_notify(mm, addr, ptep, pgste);
		__ptep_ipte(addr, ptep);
		if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
			pte_val(pte) |= _PAGE_PROTECT;
		else
			pte_val(pte) |= _PAGE_INVALID;
		*ptep = pte;
	}
	pgste_set_unlock(ptep, pgste);
	return dirty;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte, oldpte;
	int young;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(vma->vm_mm, addr, ptep, pgste);
	}

	oldpte = pte = *ptep;
	ptep_flush_direct(vma->vm_mm, addr, ptep);
	young = pte_young(pte);
	pte = pte_mkold(pte);

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_update_all(&oldpte, pgste, vma->vm_mm);
		pgste = pgste_set_pte(ptep, pgste, pte);
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = pte;

	return young;
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(mm, address, ptep, pgste);
	}

	pte = *ptep;
	ptep_flush_lazy(mm, address, ptep);
	pte_val(*ptep) = _PAGE_INVALID;

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste, mm);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste_ipte_notify(mm, address, ptep, pgste);
	}

	pte = *ptep;
	ptep_flush_lazy(mm, address, ptep);

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste, mm);
		pgste_set(ptep, pgste);
	}
	return pte;
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep, pte_t pte)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get(ptep);
		pgste_set_key(ptep, pgste, pte, mm);
		pgste = pgste_set_pte(ptep, pgste, pte);
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = pte;
}

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
	}

	pte = *ptep;
	ptep_flush_direct(vma->vm_mm, address, ptep);
	pte_val(*ptep) = _PAGE_INVALID;

	if (mm_has_pgste(vma->vm_mm)) {
		if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
		    _PGSTE_GPS_USAGE_UNUSED)
			pte_val(pte) |= _PAGE_UNUSED;
		pgste = pgste_update_all(&pte, pgste, vma->vm_mm);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

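/*
 * The common-code sequence mentioned in the comment above roughly looks
 * like this (illustrative sketch of a generic caller, not an s390 API):
 *
 *	pte = ptep_get_and_clear(mm, addr, ptep);	// flushes the TLB here
 *	pte = pte_modify(pte, newprot);
 *	set_pte_at(mm, addr, ptep, pte);
 *	flush_tlb_range(vma, start, end);		// a nop on s390
 */
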
/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address,
					    pte_t *ptep, int full)
{
	pgste_t pgste;
	pte_t pte;

	if (!full && mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(mm, address, ptep, pgste);
	}

	pte = *ptep;
	if (!full)
		ptep_flush_lazy(mm, address, ptep);
	pte_val(*ptep) = _PAGE_INVALID;

	if (!full && mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste, mm);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte = *ptep;

	if (pte_write(pte)) {
		if (mm_has_pgste(mm)) {
			pgste = pgste_get_lock(ptep);
			pgste = pgste_ipte_notify(mm, address, ptep, pgste);
		}

		ptep_flush_lazy(mm, address, ptep);
		pte = pte_wrprotect(pte);

		if (mm_has_pgste(mm)) {
			pgste = pgste_set_pte(ptep, pgste, pte);
			pgste_set_unlock(ptep, pgste);
		} else
			*ptep = pte;
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	pgste_t pgste;
	pte_t oldpte;

	oldpte = *ptep;
	if (pte_same(oldpte, entry))
		return 0;
	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
	}

	ptep_flush_direct(vma->vm_mm, address, ptep);

	if (mm_has_pgste(vma->vm_mm)) {
		if (pte_val(oldpte) & _PAGE_INVALID)
			pgste_set_key(ptep, pgste, entry, vma->vm_mm);
		pgste = pgste_set_pte(ptep, pgste, entry);
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = entry;
	return 1;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	pud_t *pud = (pud_t *) pgd;
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) pgd_deref(*pgd);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)

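/*
 * A software walk with the helpers above looks roughly like this
 * (illustrative sketch only; the pgd_none/pud_none/pmd_none and *_bad
 * checks a real walker needs are omitted):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_map(pmd, addr);
 *	...
 *	pte_unmap(pte);
 */
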
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_READ, or PAGE_WRITE (see __Pxxx / __Sxxx)
	 * Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_READ))
		return pgprot_val(SEGMENT_READ);
	return pgprot_val(SEGMENT_WRITE);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
	if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
		return pmd;
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY |
				_SEGMENT_ENTRY_SOFT_DIRTY;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
			_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
			_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
		pmd_val(pmd) |= massage_pgprot_pmd(newprot);
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
			pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
			pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
		return pmd;
	}
	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;
	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */

static inline void __pmdp_csp(pmd_t *pmdp)
{
	register unsigned long reg2 asm("2") = pmd_val(*pmdp);
	register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
					       _SEGMENT_ENTRY_INVALID;
	register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;

	asm volatile(
		"	csp %1,%3"
		: "=m" (*pmdp)
		: "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
}

static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
	asm volatile(
		"	.insn	rrf,0xb98e0000,%2,%3,0,0"
		: "=m" (*pmdp)
		: "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
		: "cc" );
}

1475 : "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK)) 1476 : "cc" ); 1477 } 1478 1479 static inline void pmdp_flush_direct(struct mm_struct *mm, 1480 unsigned long address, pmd_t *pmdp) 1481 { 1482 int active, count; 1483 1484 if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID) 1485 return; 1486 if (!MACHINE_HAS_IDTE) { 1487 __pmdp_csp(pmdp); 1488 return; 1489 } 1490 active = (mm == current->active_mm) ? 1 : 0; 1491 count = atomic_add_return(0x10000, &mm->context.attach_count); 1492 if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active && 1493 cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) 1494 __pmdp_idte_local(address, pmdp); 1495 else 1496 __pmdp_idte(address, pmdp); 1497 atomic_sub(0x10000, &mm->context.attach_count); 1498 } 1499 1500 static inline void pmdp_flush_lazy(struct mm_struct *mm, 1501 unsigned long address, pmd_t *pmdp) 1502 { 1503 int active, count; 1504 1505 if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID) 1506 return; 1507 active = (mm == current->active_mm) ? 1 : 0; 1508 count = atomic_add_return(0x10000, &mm->context.attach_count); 1509 if ((count & 0xffff) <= active) { 1510 pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID; 1511 mm->context.flush_mm = 1; 1512 } else if (MACHINE_HAS_IDTE) 1513 __pmdp_idte(address, pmdp); 1514 else 1515 __pmdp_csp(pmdp); 1516 atomic_sub(0x10000, &mm->context.attach_count); 1517 } 1518 1519 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1520 1521 #define __HAVE_ARCH_PGTABLE_DEPOSIT 1522 extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, 1523 pgtable_t pgtable); 1524 1525 #define __HAVE_ARCH_PGTABLE_WITHDRAW 1526 extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); 1527 1528 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, 1529 pmd_t *pmdp, pmd_t entry) 1530 { 1531 *pmdp = entry; 1532 } 1533 1534 static inline pmd_t pmd_mkhuge(pmd_t pmd) 1535 { 1536 pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE; 1537 pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG; 1538 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; 1539 return pmd; 1540 } 1541 1542 #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG 1543 static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, 1544 unsigned long address, pmd_t *pmdp) 1545 { 1546 pmd_t pmd; 1547 1548 pmd = *pmdp; 1549 pmdp_flush_direct(vma->vm_mm, address, pmdp); 1550 *pmdp = pmd_mkold(pmd); 1551 return pmd_young(pmd); 1552 } 1553 1554 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR 1555 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, 1556 unsigned long address, pmd_t *pmdp) 1557 { 1558 pmd_t pmd = *pmdp; 1559 1560 pmdp_flush_direct(mm, address, pmdp); 1561 pmd_clear(pmdp); 1562 return pmd; 1563 } 1564 1565 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL 1566 static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm, 1567 unsigned long address, 1568 pmd_t *pmdp, int full) 1569 { 1570 pmd_t pmd = *pmdp; 1571 1572 if (!full) 1573 pmdp_flush_lazy(mm, address, pmdp); 1574 pmd_clear(pmdp); 1575 return pmd; 1576 } 1577 1578 #define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH 1579 static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, 1580 unsigned long address, pmd_t *pmdp) 1581 { 1582 return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp); 1583 } 1584 1585 #define __HAVE_ARCH_PMDP_INVALIDATE 1586 static inline void pmdp_invalidate(struct vm_area_struct *vma, 1587 unsigned long address, pmd_t *pmdp) 1588 { 1589 pmdp_flush_direct(vma->vm_mm, address, pmdp); 1590 } 1591 1592 #define __HAVE_ARCH_PMDP_SET_WRPROTECT 1593 static inline void 
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd)) {
		pmdp_flush_direct(mm, address, pmdp);
		set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd));
	}
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_HPAGE ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bits 54 and 63 are used to indicate the page type.
 * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200
 * This leaves the bits 0-51 and bits 56-62 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51
 * for the offset.
 * |			  offset			|01100|type |00|
 * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
 * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
 */

#define __SWP_OFFSET_MASK	((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_TYPE_MASK		((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT	2

static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
	pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
	pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
	return pte;
}

static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

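/*
 * Worked example (illustrative only): __swp_entry(3, 0x1234) builds
 * _PAGE_INVALID | _PAGE_PROTECT | (0x1234 << 12) | (3 << 2) = 0x123460c.
 * __swp_type() recovers 3 and __swp_offset() recovers 0x1234 from that
 * value, and since (0x123460c & 0x201) == 0x200, pte_swap() is true for it.
 */
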
#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/* s390 has a private copy of get unmapped area to deal with cache synonyms */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/*
 * No page table caches to initialise
 */
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */