/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

/*
 * The Linux memory management assumes a three-level page table setup. For
 * s390 31 bit we "fold" the mid level into the top-level page table, so
 * that we physically have the same two-level page table as the s390 mmu
 * expects in 31 bit mode. For s390 64 bit we use three of the five levels
 * the hardware provides (region first and region second tables are not
 * used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)     do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

/* TODO: s390 cannot support io_remap_pfn_range... */
#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#ifndef CONFIG_64BIT
# define PMD_SHIFT	20
# define PUD_SHIFT	20
# define PGDIR_SHIFT	20
#else /* CONFIG_64BIT */
# define PMD_SHIFT	20
# define PUD_SHIFT	31
# define PGDIR_SHIFT	42
#endif /* CONFIG_64BIT */

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
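/*
 * Editorial note, a worked example rather than part of the original
 * header: with the 64 bit shifts above, one entry at each level maps
 *	PMD_SIZE   = 1UL << 20 =  1 MB	(one segment)
 *	PUD_SIZE   = 1UL << 31 =  2 GB	(one region-third table entry)
 *	PGDIR_SIZE = 1UL << 42 =  4 TB	(one region-second table entry)
 * On 31 bit all three shifts collapse to 20, which is the "folded"
 * two-level layout described at the top of this file.
 */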
/*
 * entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * for S390 segment-table entries are combined into one PGD entry,
 * which leads to 1024 PTEs per PGD
 */
#define PTRS_PER_PTE	256
#ifndef CONFIG_64BIT
#define PTRS_PER_PMD	1
#define PTRS_PER_PUD	1
#else /* CONFIG_64BIT */
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#endif /* CONFIG_64BIT */
#define PTRS_PER_PGD	2048

#define FIRST_USER_ADDRESS	0

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

#ifndef __ASSEMBLY__
/*
 * The vmalloc and module area will always be on the topmost area of the kernel
 * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc and modules.
 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
 * modules will reside. That makes sure that inter module branches always
 * happen without trampolines and in addition the placement within a 2GB frame
 * is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

#ifdef CONFIG_64BIT
extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)
#endif
/*
 * A 31 bit pagetable entry of S390 has the following format:
 *  |   PFRA          |    |  OS  |
 * 0                   0IP0
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 31 bit segmenttable entry of S390 has the following format:
 *  |   P-table origin      |  |PTL
 * 0                         IC
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * PTL Page-Table-Length:    Page-table length ((PTL+1)*16 entries -> up to 256)
 *
 * The 31 bit segmenttable origin of S390 has the following format:
 *
 *  |S-table origin   |     | STL |
 * X                   **GPS
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:       Segment is not private (PoP 3-30)
 * S Storage-Alteration:
 * STL Segment-Table-Length:  Segment-table length ((STL+1)*16 entries -> up to 2048)
 *
 * A 64 bit pagetable entry of S390 has the following format:
 * |			 PFRA			      |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segmenttable entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit:    Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit regiontable origin of S390 has the following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */
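/*
 * Editorial sketch (not part of this header's API, the function name is
 * made up for illustration): how the storage key bits described above
 * can be tested. page_get_storage_key and the _PAGE_CHANGED mask come
 * from <asm/page.h>, which is included at the top of this file.
 */
static inline int __example_page_was_modified(unsigned long address)
{
	unsigned long skey = (unsigned long) page_get_storage_key(address);

	return (skey & _PAGE_CHANGED) != 0;	/* the C bit, bit 6 */
}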
/* Hardware bits in the page table entry */
#define _PAGE_CO	0x100		/* HW Change-bit override */
#define _PAGE_PROTECT	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_TYPE	0x002		/* SW pte type bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */
#define __HAVE_ARCH_PTE_SPECIAL

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_CO | \
				 _PAGE_DIRTY | _PAGE_YOUNG)

/*
 * handle_pte_fault uses pte_present, pte_none and pte_file to find out the
 * pte type WITHOUT holding the page table lock. The _PAGE_PRESENT bit
 * is used to distinguish present from not-present ptes. It is changed only
 * with the page table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte:
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR...wrdytp
 * empty			.10...000000
 * swap				.10...xxxx10
 * file				.11...xxxxx0
 * prot-none, clean, old	.11...000001
 * prot-none, clean, young	.11...000101
 * prot-none, dirty, old	.10...001001
 * prot-none, dirty, young	.10...001101
 * read-only, clean, old	.11...010001
 * read-only, clean, young	.01...010101
 * read-only, dirty, old	.11...011001
 * read-only, dirty, young	.01...011101
 * read-write, clean, old	.11...110001
 * read-write, clean, young	.01...110101
 * read-write, dirty, old	.10...111001
 * read-write, dirty, young	.00...111101
 *
 * pte_present is true for the bit pattern .xx...xxxxx1, (pte & 0x001) == 0x001
 * pte_none    is true for the bit pattern .10...xxxx00, (pte & 0x603) == 0x400
 * pte_file    is true for the bit pattern .11...xxxxx0, (pte & 0x601) == 0x600
 * pte_swap    is true for the bit pattern .10...xxxx10, (pte & 0x603) == 0x402
 */
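/*
 * Editorial sketch: the four predicates above written out with the raw
 * masks from the table. This only restates pte_present/pte_none/
 * pte_file/pte_swap defined further down; the function name is made up
 * for illustration.
 */
static inline int __example_pte_type(pte_t pte)
{
	if ((pte_val(pte) & 0x001) == 0x001)
		return 1;	/* present */
	if ((pte_val(pte) & 0x603) == 0x400)
		return 2;	/* empty (none) */
	if ((pte_val(pte) & 0x601) == 0x600)
		return 3;	/* file */
	if ((pte_val(pte) & 0x603) == 0x402)
		return 4;	/* swap */
	return 0;
}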
#ifndef CONFIG_64BIT

/* Bits in the segment table address-space-control-element */
#define _ASCE_SPACE_SWITCH	0x80000000UL	/* space switch event	    */
#define _ASCE_ORIGIN_MASK	0x7ffff000UL	/* segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_TABLE_LENGTH	0x7f	/* 128 x 64 entries = 8k	    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS	0x7fffffffUL	/* Valid segment table bits */
#define _SEGMENT_ENTRY_ORIGIN	0x7fffffc0UL	/* page table origin	    */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry	    */
#define _SEGMENT_ENTRY_COMMON	0x10	/* common segment bit		    */
#define _SEGMENT_ENTRY_PTL	0x0f	/* page table length		    */

#define _SEGMENT_ENTRY_DIRTY	0	/* No sw dirty bit for 31-bit */
#define _SEGMENT_ENTRY_YOUNG	0	/* No sw young bit for 31-bit */
#define _SEGMENT_ENTRY_READ	0	/* No sw read bit for 31-bit */
#define _SEGMENT_ENTRY_WRITE	0	/* No sw write bit for 31-bit */
#define _SEGMENT_ENTRY_LARGE	0	/* No large pages for 31-bit */
#define _SEGMENT_ENTRY_BITS_LARGE 0
#define _SEGMENT_ENTRY_ORIGIN_LARGE 0

#define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

/*
 * Segment table entry encoding (I = invalid, R = read-only bit):
 *		..R...I.....
 * prot-none	..1...1.....
 * read-only	..1...0.....
 * read-write	..0...0.....
 * empty	..0...1.....
 */

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf0000000UL
#define PGSTE_FP_BIT	0x08000000UL
#define PGSTE_PCL_BIT	0x00800000UL
#define PGSTE_HR_BIT	0x00400000UL
#define PGSTE_HC_BIT	0x00200000UL
#define PGSTE_GR_BIT	0x00040000UL
#define PGSTE_GC_BIT	0x00020000UL
#define PGSTE_UC_BIT	0x00008000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x00004000UL	/* IPTE notify bit */

#else /* CONFIG_64BIT */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL/* segment table origin		    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit	    */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_LARGE	0x400	/* RTTE-format control, large page  */
#define _REGION3_ENTRY_RO	0x200	/* page protection bit		    */
#define _REGION3_ENTRY_CO	0x100	/* change-recording override	    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS	0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address	    */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* segment table origin		    */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_SPLIT	0x0800	/* THP splitting bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_CO	0x0100	/* change-recording override */
#define _SEGMENT_ENTRY_READ	0x0002	/* SW segment read bit */
#define _SEGMENT_ENTRY_WRITE	0x0001	/* SW segment write bit */
/*
 * Segment table entry encoding (R = read-only, I = invalid, y = young bit):
 *				dy..R...I...wr
 * prot-none, clean, old	00..1...1...00
 * prot-none, clean, young	01..1...1...00
 * prot-none, dirty, old	10..1...1...00
 * prot-none, dirty, young	11..1...1...00
 * read-only, clean, old	00..1...1...01
 * read-only, clean, young	01..1...0...01
 * read-only, dirty, old	10..1...1...01
 * read-only, dirty, young	11..1...0...01
 * read-write, clean, old	00..1...1...11
 * read-write, clean, young	01..1...0...11
 * read-write, dirty, old	10..0...1...11
 * read-write, dirty, young	11..0...0...11
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 */

#define _SEGMENT_ENTRY_SPLIT_BIT 11	/* THP splitting bit number */

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */

#endif /* CONFIG_64BIT */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO		0x0000000080000000UL
#define _PGSTE_GPS_USAGE_MASK	0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE 0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED 0x0000000001000000UL

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID)
#define PAGE_READ	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_WRITE	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT)

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
         /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_READ
#define __P010	PAGE_READ
#define __P011	PAGE_READ
#define __P100	PAGE_READ
#define __P101	PAGE_READ
#define __P110	PAGE_READ
#define __P111	PAGE_READ

#define __S000	PAGE_NONE
#define __S001	PAGE_READ
#define __S010	PAGE_WRITE
#define __S011	PAGE_WRITE
#define __S100	PAGE_READ
#define __S101	PAGE_READ
#define __S110	PAGE_WRITE
#define __S111	PAGE_WRITE
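/*
 * Editorial example of how the tables above get used by common code:
 * a private PROT_READ|PROT_WRITE mapping starts out with __P011, i.e.
 * PAGE_READ, so the first store faults and the fault handler can do
 * copy-on-write; a shared mapping with the same protection gets __S011,
 * i.e. PAGE_WRITE, and stores proceed directly.
 */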
/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_READ	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_WRITE	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_use_skey(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.use_skey)
		return 1;
#endif
	return 0;
}

/*
 * pgd/pmd/pte query functions
 */
#ifndef CONFIG_64BIT

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd)    { return 0; }
static inline int pgd_bad(pgd_t pgd)     { return 0; }

static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud)	 { return 0; }
static inline int pud_large(pud_t pud)	 { return 0; }
static inline int pud_bad(pud_t pud)	 { return 0; }

#else /* CONFIG_64BIT */

static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}
static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}

#endif /* CONFIG_64BIT */

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_INVALID;
}

static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}

static inline int pmd_bad(pmd_t pmd)
{
	if (pmd_large(pmd))
		return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	int dirty = 1;
	if (pmd_large(pmd))
		dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
	return dirty;
}

static inline int pmd_young(pmd_t pmd)
{
	int young = 1;
	if (pmd_large(pmd))
		young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
	return young;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x603) == 0x402 */
	return (pte_val(pte) & (_PAGE_INVALID | _PAGE_PROTECT |
				_PAGE_TYPE | _PAGE_PRESENT))
		== (_PAGE_INVALID | _PAGE_TYPE);
}

static inline int pte_file(pte_t pte)
{
	/* Bit pattern: (pte & 0x601) == 0x600 */
	return (pte_val(pte) & (_PAGE_INVALID | _PAGE_PROTECT | _PAGE_PRESENT))
		== (_PAGE_INVALID | _PAGE_PROTECT);
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	preempt_disable();
	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear PCL bit in old */
		"	oihh	%1,0x0080\n"	/* set PCL bit in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
#endif
	return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear PCL bit */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
		: "cc", "memory");
	preempt_enable();
#endif
}
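/*
 * Editorial sketch of what the inline assembly in pgste_get_lock and
 * pgste_set_unlock above implements: a bit spinlock on the PCL bit of
 * the pgste, which is stored PTRS_PER_PTE entries behind the pte. The
 * csg loop is roughly equivalent to
 *
 *	unsigned long *p = (unsigned long *)(ptep + PTRS_PER_PTE);
 *	unsigned long old, new;
 *	do {
 *		old = *p & ~PGSTE_PCL_BIT;	(succeeds only if unlocked)
 *		new = old | PGSTE_PCL_BIT;	(locked value)
 *	} while (cmpxchg(p, old, new) != old);
 *
 * and pgste_set_unlock simply stores the new pgste with the PCL bit
 * cleared again.
 */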
static inline pgste_t pgste_get(pte_t *ptep)
{
	unsigned long pgste = 0;
#ifdef CONFIG_PGSTE
	pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
#endif
	return __pgste(pgste);
}

static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
#endif
}

static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste,
				       struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits, skey;

	if (!mm_use_skey(mm) || pte_val(*ptep) & _PAGE_INVALID)
		return pgste;
	address = pte_val(*ptep) & PAGE_MASK;
	skey = (unsigned long) page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
#endif
	return pgste;
}

static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
				 struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long nkey;

	if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID)
		return;
	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
	address = pte_val(entry) & PAGE_MASK;
	/*
	 * Set page access key and fetch protection bit from pgste.
	 * The guest C/R information is still in the PGSTE, set real
	 * key C/R to 0.
	 */
	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	page_set_storage_key(address, nkey, 0);
#endif
}

static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
{
	if ((pte_val(entry) & _PAGE_PRESENT) &&
	    (pte_val(entry) & _PAGE_WRITE) &&
	    !(pte_val(entry) & _PAGE_INVALID)) {
		if (!MACHINE_HAS_ESOP) {
			/*
			 * Without enhanced suppression-on-protection force
			 * the dirty bit on for all writable ptes.
			 */
			pte_val(entry) |= _PAGE_DIRTY;
			pte_val(entry) &= ~_PAGE_PROTECT;
		}
		if (!(pte_val(entry) & _PAGE_PROTECT))
			/* This pte allows write access, set user-dirty */
			pgste_val(pgste) |= PGSTE_UC_BIT;
	}
	*ptep = entry;
	return pgste;
}
/**
 * struct gmap - guest address space
 * @list: list head, linked into the gmap list of the parent mm
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the page directory
 * @asce: address space control element for gmap page table
 * @private: pointer for use by the creator of the gmap (e.g. KVM)
 * @crst_list: list of all crst tables used in the guest address space
 * @pfault_enabled: defines if pfaults are applicable for the guest
 */
struct gmap {
	struct list_head list;
	struct mm_struct *mm;
	unsigned long *table;
	unsigned long asce;
	void *private;
	struct list_head crst_list;
	bool pfault_enabled;
};

/**
 * struct gmap_rmap - reverse mapping for segment table entries
 * @gmap: pointer to the gmap_struct
 * @entry: pointer to a segment table entry
 * @vmaddr: virtual address in the guest address space
 */
struct gmap_rmap {
	struct list_head list;
	struct gmap *gmap;
	unsigned long *entry;
	unsigned long vmaddr;
};

/**
 * struct gmap_pgtable - gmap information attached to a page table
 * @vmaddr: address of the 1MB segment in the process virtual memory
 * @mapper: list of segment table entries mapping a page table
 */
struct gmap_pgtable {
	unsigned long vmaddr;
	struct list_head mapper;
};

/**
 * struct gmap_notifier - notify function block for page invalidation
 * @notifier_call: address of callback function
 */
struct gmap_notifier {
	struct list_head list;
	void (*notifier_call)(struct gmap *gmap, unsigned long address);
};

struct gmap *gmap_alloc(struct mm_struct *mm);
void gmap_free(struct gmap *gmap);
void gmap_enable(struct gmap *gmap);
void gmap_disable(struct gmap *gmap);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
unsigned long __gmap_translate(unsigned long address, struct gmap *);
unsigned long gmap_translate(unsigned long address, struct gmap *);
unsigned long __gmap_fault(unsigned long address, struct gmap *);
unsigned long gmap_fault(unsigned long address, struct gmap *);
void gmap_discard(unsigned long from, unsigned long to, struct gmap *);
void __gmap_zap(unsigned long address, struct gmap *);
bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *);

void gmap_register_ipte_notifier(struct gmap_notifier *);
void gmap_unregister_ipte_notifier(struct gmap_notifier *);
int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len);
void gmap_do_ipte_notify(struct mm_struct *, pte_t *);
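/*
 * Editorial sketch of the intended gmap life cycle (error handling is
 * omitted, "from", "to" and "len" are made-up example values):
 *
 *	struct gmap *gmap = gmap_alloc(current->mm);
 *
 *	gmap_map_segment(gmap, from, to, len);	map process memory ...
 *	gmap_enable(gmap);			... run on the gmap asce,
 *	gmap_fault(address, gmap);		resolving faults on demand
 *	gmap_disable(gmap);
 *	gmap_free(gmap);
 */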
static inline pgste_t pgste_ipte_notify(struct mm_struct *mm,
					pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	if (pgste_val(pgste) & PGSTE_IN_BIT) {
		pgste_val(pgste) &= ~PGSTE_IN_BIT;
		gmap_do_ipte_notify(mm, ptep);
	}
#endif
	return pgste;
}

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
		pgste_set_key(ptep, pgste, entry, mm);
		pgste = pgste_set_pte(ptep, pgste, entry);
		pgste_set_unlock(ptep, pgste);
	} else {
		if (!(pte_val(entry) & _PAGE_INVALID) && MACHINE_HAS_EDAT1)
			pte_val(entry) |= _PAGE_CO;
		*ptep = entry;
	}
}

/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}

/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
#ifdef CONFIG_64BIT
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
#endif
}

static inline void pud_clear(pud_t *pud)
{
#ifdef CONFIG_64BIT
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
#endif
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_INVALID;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_READ and PAGE_WRITE has the
	 * invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte_val(pte) &= ~_PAGE_INVALID;
	/*
	 * newprot for PAGE_READ and PAGE_WRITE has the page protection
	 * bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_YOUNG;
	pte_val(pte) |= _PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_YOUNG;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) &= ~_PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_LARGE;
	return pte;
}
#endif
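/*
 * Editorial note on the helpers above: the software DIRTY/YOUNG bits
 * are kept consistent with the hardware PROTECT/INVALID bits, because
 * the hardware has no per-pte dirty or referenced bit. For example:
 *
 *	pte = pte_mkwrite(pte_mkclean(pte));	_PAGE_PROTECT stays set,
 *						so the first store faults
 *						and the fault path ...
 *	pte = pte_mkdirty(pte);			... clears _PAGE_PROTECT
 *
 * pte_mkold/pte_mkyoung play the same game with _PAGE_INVALID to get
 * referenced-bit semantics out of ordinary page faults.
 */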
static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	unsigned long pto = (unsigned long) ptep;

#ifndef CONFIG_64BIT
	/* pto in ESA mode must point to the start of the segment table */
	pto &= 0x7ffffc00;
#endif
	/* Invalidation + global TLB flush for the pte */
	asm volatile(
		"	ipte	%2,%3"
		: "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
}

static inline void __ptep_ipte_local(unsigned long address, pte_t *ptep)
{
	unsigned long pto = (unsigned long) ptep;

#ifndef CONFIG_64BIT
	/* pto in ESA mode must point to the start of the segment table */
	pto &= 0x7ffffc00;
#endif
	/* Invalidation + local TLB flush for the pte */
	asm volatile(
		"	.insn rrf,0xb2210000,%2,%3,0,1"
		: "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
}

static inline void ptep_flush_direct(struct mm_struct *mm,
				     unsigned long address, pte_t *ptep)
{
	int active, count;

	if (pte_val(*ptep) & _PAGE_INVALID)
		return;
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__ptep_ipte_local(address, ptep);
	else
		__ptep_ipte(address, ptep);
	atomic_sub(0x10000, &mm->context.attach_count);
}

static inline void ptep_flush_lazy(struct mm_struct *mm,
				   unsigned long address, pte_t *ptep)
{
	int active, count;

	if (pte_val(*ptep) & _PAGE_INVALID)
		return;
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if ((count & 0xffff) <= active) {
		pte_val(*ptep) |= _PAGE_INVALID;
		mm->context.flush_mm = 1;
	} else
		__ptep_ipte(address, ptep);
	atomic_sub(0x10000, &mm->context.attach_count);
}
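/*
 * Editorial note: mm->context.attach_count packs two counters, the
 * number of CPUs the mm is attached to in the lower 16 bits and, in
 * the upper half, the number of flush operations in flight (that is
 * what the 0x10000 add/sub above is for). The test
 *
 *	(count & 0xffff) <= active
 *
 * therefore means "no CPU except possibly the current one has this mm
 * attached", in which case ptep_flush_lazy may just mark the pte
 * invalid and defer the real IPTE.
 */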
/*
 * Get (and clear) the user dirty bit for a pte.
 */
static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
						 unsigned long addr,
						 pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;
	int dirty;

	if (!mm_has_pgste(mm))
		return 0;
	pgste = pgste_get_lock(ptep);
	dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
	pgste_val(pgste) &= ~PGSTE_UC_BIT;
	pte = *ptep;
	if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
		pgste = pgste_ipte_notify(mm, ptep, pgste);
		__ptep_ipte(addr, ptep);
		if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
			pte_val(pte) |= _PAGE_PROTECT;
		else
			pte_val(pte) |= _PAGE_INVALID;
		*ptep = pte;
	}
	pgste_set_unlock(ptep, pgste);
	return dirty;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;
	int young;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(vma->vm_mm, ptep, pgste);
	}

	pte = *ptep;
	ptep_flush_direct(vma->vm_mm, addr, ptep);
	young = pte_young(pte);
	pte = pte_mkold(pte);

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_set_pte(ptep, pgste, pte);
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = pte;

	return young;
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
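/*
 * Editorial sketch of the common-code sequence the comment above
 * refers to (compare change_pte_range in mm/mprotect.c):
 *
 *	pte = ptep_get_and_clear(mm, addr, ptep);	flushes on s390
 *	pte = pte_modify(pte, newprot);
 *	set_pte_at(mm, addr, ptep, pte);
 *	...
 *	flush_tlb_range(vma, start, end);		nop on s390
 */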
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(mm, ptep, pgste);
	}

	pte = *ptep;
	ptep_flush_lazy(mm, address, ptep);
	pte_val(*ptep) = _PAGE_INVALID;

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste, mm);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste_ipte_notify(mm, ptep, pgste);
	}

	pte = *ptep;
	ptep_flush_lazy(mm, address, ptep);

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste, mm);
		pgste_set(ptep, pgste);
	}
	return pte;
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep, pte_t pte)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get(ptep);
		pgste_set_key(ptep, pgste, pte, mm);
		pgste = pgste_set_pte(ptep, pgste, pte);
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = pte;
}

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(vma->vm_mm, ptep, pgste);
	}

	pte = *ptep;
	ptep_flush_direct(vma->vm_mm, address, ptep);
	pte_val(*ptep) = _PAGE_INVALID;

	if (mm_has_pgste(vma->vm_mm)) {
		if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
		    _PGSTE_GPS_USAGE_UNUSED)
			pte_val(pte) |= _PAGE_UNUSED;
		pgste = pgste_update_all(&pte, pgste, vma->vm_mm);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
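/*
 * Editorial sketch: the batched unmap caller looks roughly like this
 * (compare zap_pte_range in mm/memory.c):
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	...
 *	pte = ptep_get_and_clear_full(mm, addr, ptep, tlb.fullmm);
 *	...
 *	tlb_finish_mmu(&tlb, start, end);
 *
 * tlb.fullmm is only set when the whole address space goes away; in
 * that case full == 1 and the pte is cleared without an IPTE.
 */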
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address,
					    pte_t *ptep, int full)
{
	pgste_t pgste;
	pte_t pte;

	if (!full && mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(mm, ptep, pgste);
	}

	pte = *ptep;
	if (!full)
		ptep_flush_lazy(mm, address, ptep);
	pte_val(*ptep) = _PAGE_INVALID;

	if (!full && mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste, mm);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte = *ptep;

	if (pte_write(pte)) {
		if (mm_has_pgste(mm)) {
			pgste = pgste_get_lock(ptep);
			pgste = pgste_ipte_notify(mm, ptep, pgste);
		}

		ptep_flush_lazy(mm, address, ptep);
		pte = pte_wrprotect(pte);

		if (mm_has_pgste(mm)) {
			pgste = pgste_set_pte(ptep, pgste, pte);
			pgste_set_unlock(ptep, pgste);
		} else
			*ptep = pte;
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	pgste_t pgste;

	if (pte_same(*ptep, entry))
		return 0;
	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(vma->vm_mm, ptep, pgste);
	}

	ptep_flush_direct(vma->vm_mm, address, ptep);

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_set_pte(ptep, pgste, entry);
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = entry;
	return 1;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#ifndef CONFIG_64BIT

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pmd) ({ BUG(); 0UL; })
#define pgd_deref(pmd) ({ BUG(); 0UL; })

#define pud_offset(pgd, address) ((pud_t *) pgd)
#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))

#else /* CONFIG_64BIT */

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	pud_t *pud = (pud_t *) pgd;
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) pgd_deref(*pgd);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#endif /* CONFIG_64BIT */

#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
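/*
 * Editorial sketch (not part of this header's API, the name is made
 * up): a software walk from an mm to the pte of a user address using
 * the helpers above. A real walk must hold the page table locks and,
 * for THP, recheck the pmd under the lock.
 */
static inline pte_t *__example_walk_to_pte(struct mm_struct *mm,
					   unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_large(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);
}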
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_READ, or PAGE_WRITE (see __Pxxx / __Sxxx)
	 * Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_READ))
		return pgprot_val(SEGMENT_READ);
	return pgprot_val(SEGMENT_WRITE);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
	if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
		return pmd;
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
			_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
			_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SPLIT;
		pmd_val(pmd) |= massage_pgprot_pmd(newprot);
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
			pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
			pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
		return pmd;
	}
	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;
	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
static inline void __pmdp_csp(pmd_t *pmdp)
{
	register unsigned long reg2 asm("2") = pmd_val(*pmdp);
	register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
					       _SEGMENT_ENTRY_INVALID;
	register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;

	asm volatile(
		"	csp %1,%3"
		: "=m" (*pmdp)
		: "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
}

static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
	asm volatile(
		"	.insn	rrf,0xb98e0000,%2,%3,0,0"
		: "=m" (*pmdp)
		: "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
		: "cc" );
}

static inline void __pmdp_idte_local(unsigned long address, pmd_t *pmdp)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
	asm volatile(
		"	.insn	rrf,0xb98e0000,%2,%3,0,1"
		: "=m" (*pmdp)
		: "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
		: "cc" );
}

static inline void pmdp_flush_direct(struct mm_struct *mm,
				     unsigned long address, pmd_t *pmdp)
{
	int active, count;

	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
		return;
	if (!MACHINE_HAS_IDTE) {
		__pmdp_csp(pmdp);
		return;
	}
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__pmdp_idte_local(address, pmdp);
	else
		__pmdp_idte(address, pmdp);
	atomic_sub(0x10000, &mm->context.attach_count);
}

static inline void pmdp_flush_lazy(struct mm_struct *mm,
				   unsigned long address, pmd_t *pmdp)
{
	int active, count;

	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
		return;
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if ((count & 0xffff) <= active) {
		pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
		mm->context.flush_mm = 1;
	} else if (MACHINE_HAS_IDTE)
		__pmdp_idte(address, pmdp);
	else
		__pmdp_csp(pmdp);
	atomic_sub(0x10000, &mm->context.attach_count);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

static inline int pmd_trans_splitting(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) &&
		(pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	*pmdp = entry;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd;

	pmd = *pmdp;
	pmdp_flush_direct(vma->vm_mm, address, pmdp);
	*pmdp = pmd_mkold(pmd);
	return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmdp_flush_direct(mm, address, pmdp);
	pmd_clear(pmdp);
	return pmd;
}

#define __HAVE_ARCH_PMDP_CLEAR_FLUSH
static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pmd_t *pmdp)
{
	return pmdp_get_and_clear(vma->vm_mm, address, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline void pmdp_invalidate(struct vm_area_struct *vma,
				   unsigned long address, pmd_t *pmdp)
{
	pmdp_flush_direct(vma->vm_mm, address, pmdp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd)) {
		pmdp_flush_direct(mm, address, pmdp);
		set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd));
	}
}

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_HPAGE ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * 31 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 0, 20 and bit 23 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bits 21, 22, 30 and 31 are used to indicate the page type.
 * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402
 * This leaves the bits 1-19 and bits 24-29 to store type and offset.
 * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
 * plus 24 for the offset.
 * 0|     offset        |0110|o|type |00|
 * 0 0000000001111111111 2222 2 22222 33
 * 0 1234567890123456789 0123 4 56789 01
 *
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bits 53, 54, 62 and 63 are used to indicate the page type.
 * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402
 * This leaves the bits 0-51 and bits 56-61 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
 * plus 56 for the offset.
 * |                     offset                        |0110|o|type |00|
 * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
 * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
 */
#ifndef CONFIG_64BIT
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else
#define __SWP_OFFSET_MASK (~0UL >> 11)
#endif
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;
	offset &= __SWP_OFFSET_MASK;
	pte_val(pte) = _PAGE_INVALID | _PAGE_TYPE | ((type & 0x1f) << 2) |
		((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
	return pte;
}

#define __swp_type(entry)	(((entry).val >> 2) & 0x1f)
#define __swp_offset(entry)	(((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
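/*
 * Editorial sketch (the function name is made up for illustration):
 * composing and decomposing a swap pte with the helpers above. The
 * type 3 and offset 0x1234 are arbitrary example values.
 */
static inline int __example_swap_pte_roundtrip(void)
{
	swp_entry_t entry = __swp_entry(3, 0x1234);
	pte_t pte = __swp_entry_to_pte(entry);

	return __swp_type(entry) == 3 && __swp_offset(entry) == 0x1234 &&
		pte_swap(pte);
}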
#ifndef CONFIG_64BIT
# define PTE_FILE_MAX_BITS	26
#else /* CONFIG_64BIT */
# define PTE_FILE_MAX_BITS	59
#endif /* CONFIG_64BIT */

#define pte_to_pgoff(__pte) \
	((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))

#define pgoff_to_pte(__off) \
	((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
		   | _PAGE_INVALID | _PAGE_PROTECT })

#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern void s390_enable_skey(void);

/*
 * No page table caches to initialise
 */
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */