/*
 * include/asm-s390/pgtable.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

/*
 * The Linux memory management assumes a three-level page table setup. For
 * s390 31 bit we "fold" the mid level into the top-level page table, so
 * that we physically have the same two-level page table as the s390 mmu
 * expects in 31 bit mode. For s390 64 bit we use three of the five levels
 * the hardware provides (region first and region second tables are not
 * used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <asm/bitops.h>
#include <asm/bug.h>
#include <asm/processor.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, pte)	do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#ifndef __s390x__
# define PMD_SHIFT	20
# define PUD_SHIFT	20
# define PGDIR_SHIFT	20
#else /* __s390x__ */
# define PMD_SHIFT	20
# define PUD_SHIFT	31
# define PGDIR_SHIFT	42
#endif /* __s390x__ */

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * for S390 segment-table entries are combined to one PGD
 * that leads to 1024 pte per pgd
 */
#define PTRS_PER_PTE	256
#ifndef __s390x__
#define PTRS_PER_PMD	1
#define PTRS_PER_PUD	1
#else /* __s390x__ */
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#endif /* __s390x__ */
#define PTRS_PER_PGD	2048
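
/*
 * Worked example for the 64 bit layout, derived from the constants
 * above: a page table has 256 entries mapping 4K each, so a segment
 * table entry covers 256 * 4K = 1M (PMD_SHIFT == 20); a region third
 * table has 2048 entries covering 2048 * 1M = 2G (PUD_SHIFT == 31);
 * a region second table covers 2048 * 2G = 4T (PGDIR_SHIFT == 42).
 */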

#define FIRST_USER_ADDRESS  0

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

#ifndef __ASSEMBLY__
/*
 * The vmalloc area will always be on the topmost area of the kernel
 * mapping. We reserve 96MB (31bit) / 1GB (64bit) for vmalloc,
 * which should be enough for any sane case.
 * By putting vmalloc at the top, we maximise the gap between physical
 * memory and vmalloc to catch misplaced memory accesses. As a side
 * effect, this also makes sure that 64 bit module code cannot be used
 * as system call address.
 */
#ifndef __s390x__
#define VMALLOC_START	0x78000000UL
#define VMALLOC_END	0x7e000000UL
#define VMEM_MAP_END	0x80000000UL
#else /* __s390x__ */
#define VMALLOC_START	0x3e000000000UL
#define VMALLOC_END	0x3e040000000UL
#define VMEM_MAP_END	0x40000000000UL
#endif /* __s390x__ */

/*
 * VMEM_MAX_PHYS is the highest physical address that can be added to the 1:1
 * mapping. This needs to be calculated at compile time since the size of the
 * VMEM_MAP is static but the size of struct page can change.
 */
#define VMEM_MAX_PAGES	((VMEM_MAP_END - VMALLOC_END) / sizeof(struct page))
#define VMEM_MAX_PFN	min(VMALLOC_START >> PAGE_SHIFT, VMEM_MAX_PAGES)
#define VMEM_MAX_PHYS	((VMEM_MAX_PFN << PAGE_SHIFT) & ~((16 << 20) - 1))
#define vmemmap		((struct page *) VMALLOC_END)
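
/*
 * Worked example for the 64 bit values above, assuming a hypothetical
 * 64 byte struct page: VMEM_MAP_END - VMALLOC_END == 0x1fc0000000, so
 * the vmemmap could describe up to 0x7f000000 pages. That is more than
 * VMALLOC_START >> PAGE_SHIFT == 0x3e000000, so in this case VMEM_MAX_PFN
 * is capped by the start of the vmalloc area rather than by the vmemmap.
 */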

/*
 * A 31 bit pagetable entry of S390 has following format:
 *  |   PFRA          |    |  OS  |
 * 0                   0IP0
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 31 bit segmenttable entry of S390 has following format:
 *  |   P-table origin      |  |PTL
 * 0                         IC
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * PTL Page-Table-Length:    Page-table length (PTL+1*16 entries -> up to 256)
 *
 * The 31 bit segmenttable origin of S390 has following format:
 *
 *  |S-table origin   |     | STL |
 * X                   **GPS
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:     *
 * P Private-Space Bit:       Segment is not private (PoP 3-30)
 * S Storage-Alteration:
 * STL Segment-Table-Length:  Segment-table length (STL+1*16 entries -> up to 2048)
 *
 * A 64 bit pagetable entry of S390 has following format:
 * |                     PFRA                         |0IP0|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 64 bit segmenttable entry of S390 has following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit:    Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit regiontable origin of S390 has following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_RO	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */

/* Software bits in the page table entry */
#define _PAGE_SWT	0x001		/* SW pte type bit t */
#define _PAGE_SWX	0x002		/* SW pte type bit x */
#define _PAGE_SPECIAL	0x004		/* SW associated with special page */
#define __HAVE_ARCH_PTE_SPECIAL

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_SPECIAL)

/* Six different types of pages. */
#define _PAGE_TYPE_EMPTY	0x400
#define _PAGE_TYPE_NONE		0x401
#define _PAGE_TYPE_SWAP		0x403
#define _PAGE_TYPE_FILE		0x601	/* bit 0x002 is used for offset !! */
#define _PAGE_TYPE_RO		0x200
#define _PAGE_TYPE_RW		0x000
#define _PAGE_TYPE_EX_RO	0x202
#define _PAGE_TYPE_EX_RW	0x002

/*
 * Only four types for huge pages, using the invalid bit and protection bit
 * of a segment table entry.
 */
#define _HPAGE_TYPE_EMPTY	0x020	/* _SEGMENT_ENTRY_INV */
#define _HPAGE_TYPE_NONE	0x220
#define _HPAGE_TYPE_RO		0x200	/* _SEGMENT_ENTRY_RO  */
#define _HPAGE_TYPE_RW		0x000

/*
 * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
 * pte_none and pte_file to find out the pte type WITHOUT holding the page
 * table lock. ptep_clear_flush on the other hand uses the ipte instruction
 * to invalidate a given pte. ipte sets the hw invalid bit and clears all
 * tlbs for the page. The page table entry is set to _PAGE_TYPE_EMPTY
 * afterwards. This change is done while holding the lock, but the
 * intermediate step of a previously valid pte with the hw invalid bit set
 * can be observed by handle_pte_fault. That makes it necessary that all
 * valid pte types with the hw invalid bit set must be distinguishable from
 * the four pte types empty, none, swap and file.
 *
 *			irxt  ipte  irxt
 * _PAGE_TYPE_EMPTY	1000   ->   1000
 * _PAGE_TYPE_NONE	1001   ->   1001
 * _PAGE_TYPE_SWAP	1011   ->   1011
 * _PAGE_TYPE_FILE	11?1   ->   11?1
 * _PAGE_TYPE_RO	0100   ->   1100
 * _PAGE_TYPE_RW	0000   ->   1000
 * _PAGE_TYPE_EX_RO	0110   ->   1110
 * _PAGE_TYPE_EX_RW	0010   ->   1010
 *
 * pte_none is true for the bit combinations 1000, 1010, 1100, 1110
 * pte_present is true for the bit combinations 0000, 0010, 0100, 0110, 1001
 * pte_file is true for the bit combinations 1101, 1111
 * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
 */
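
/*
 * Decoding example for the irxt table above: _PAGE_TYPE_SWAP == 0x403 ==
 * _PAGE_INVALID | _PAGE_SWX | _PAGE_SWT, i.e. irxt == 1011; likewise
 * _PAGE_TYPE_EX_RO == 0x202 == _PAGE_RO | _PAGE_SWX, i.e. irxt == 0110,
 * which ipte turns into 1110.
 */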

/* Page status table bits for virtualization */
#define RCP_PCL_BIT	55
#define RCP_HR_BIT	54
#define RCP_HC_BIT	53
#define RCP_GR_BIT	50
#define RCP_GC_BIT	49

/* User dirty bit for KVM's migration feature */
#define KVM_UD_BIT	47

#ifndef __s390x__

/* Bits in the segment table address-space-control-element */
#define _ASCE_SPACE_SWITCH	0x80000000UL	/* space switch event       */
#define _ASCE_ORIGIN_MASK	0x7ffff000UL	/* segment table origin     */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control            */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_TABLE_LENGTH	0x7f	/* 128 x 64 entries = 8k            */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	0x7fffffc0UL	/* page table origin        */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry      */
#define _SEGMENT_ENTRY_COMMON	0x10	/* common segment bit               */
#define _SEGMENT_ENTRY_PTL	0x0f	/* page table length                */

#define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

#else /* __s390x__ */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL/* segment table origin             */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control            */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event               */
#define _ASCE_REAL_SPACE	0x20	/* real space control               */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask             */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type          */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type         */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type          */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type               */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length              */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin      */
#define _REGION_ENTRY_INV	0x20	/* invalid region table entry       */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type          */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type         */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type          */
#define _REGION_ENTRY_LENGTH	0x03	/* region table length              */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* segment table origin             */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit              */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry      */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

#define _SEGMENT_ENTRY_LARGE	0x400	/* STE-format control, large page   */
#define _SEGMENT_ENTRY_CO	0x100	/* change-recording override        */

#endif /* __s390x__ */
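
/*
 * Composed values, for illustration: an empty region second table entry
 * is _REGION2_ENTRY_EMPTY == 0x08 | 0x20 == 0x28 and an empty region
 * third table entry is _REGION3_ENTRY_EMPTY == 0x04 | 0x20 == 0x24.
 * The pgd/pud query functions below test exactly these type and invalid
 * bits.
 */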

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/* Bits in the storage key */
#define _PAGE_CHANGED	0x02		/* HW changed bit    */
#define _PAGE_REFERENCED 0x04		/* HW referenced bit */

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_TYPE_NONE)
#define PAGE_RO		__pgprot(_PAGE_TYPE_RO)
#define PAGE_RW		__pgprot(_PAGE_TYPE_RW)
#define PAGE_EX_RO	__pgprot(_PAGE_TYPE_EX_RO)
#define PAGE_EX_RW	__pgprot(_PAGE_TYPE_EX_RW)

#define PAGE_KERNEL	PAGE_RW
#define PAGE_COPY	PAGE_RO

/*
 * Dependent on the EXEC_PROTECT option s390 can do execute protection.
 * Write permission always implies read permission. In theory with a
 * primary/secondary page table execute only can be implemented but
 * it would cost an additional bit in the pte to distinguish all the
 * different pte types. To avoid that execute permission currently
 * implies read permission as well.
 */
	 /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_EX_RO
#define __P101	PAGE_EX_RO
#define __P110	PAGE_EX_RO
#define __P111	PAGE_EX_RO

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_EX_RO
#define __S101	PAGE_EX_RO
#define __S110	PAGE_EX_RW
#define __S111	PAGE_EX_RW

#ifndef __s390x__
# define PxD_SHADOW_SHIFT	1
#else /* __s390x__ */
# define PxD_SHADOW_SHIFT	2
#endif /* __s390x__ */

static inline void *get_shadow_table(void *table)
{
	unsigned long addr, offset;
	struct page *page;

	addr = (unsigned long) table;
	offset = addr & ((PAGE_SIZE << PxD_SHADOW_SHIFT) - 1);
	page = virt_to_page((void *)(addr ^ offset));
	return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
}
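
/*
 * Note on get_shadow_table, a sketch of the convention assumed here: for
 * the noexec handling below the table allocation code is expected to keep
 * the address of a table's shadow in the page->index field of the struct
 * page backing the primary table. The offset within the
 * (PAGE_SIZE << PxD_SHADOW_SHIFT) sized table is masked off to find that
 * struct page, then re-applied to the shadow address; 0UL is returned if
 * no shadow table exists.
 */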

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	*ptep = entry;
	if (mm->context.noexec) {
		if (!(pte_val(entry) & _PAGE_INVALID) &&
		    (pte_val(entry) & _PAGE_SWX))
			pte_val(entry) |= _PAGE_RO;
		else
			pte_val(entry) = _PAGE_TYPE_EMPTY;
		ptep[PTRS_PER_PTE] = entry;
	}
}

/*
 * pgd/pmd/pte query functions
 */
#ifndef __s390x__

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd)	 { return 0; }
static inline int pgd_bad(pgd_t pgd)	 { return 0; }

static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud)	 { return 0; }
static inline int pud_bad(pud_t pud)	 { return 0; }

#else /* __s390x__ */

static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}
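
/*
 * Rationale for the type checks above: with dynamic page table levels an
 * address space may use fewer than the full five hardware levels, so a
 * "pgd" slot can really hold a region third or segment table entry. If
 * the type field is below the level's own type (e.g. below
 * _REGION_ENTRY_TYPE_R2 for a pgd) the upper level is folded, and
 * pgd_present/pgd_none report 1/0 for it unconditionally.
 */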

#endif /* __s390x__ */

static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) != 0UL;
}

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) != 0UL;
}

static inline int pmd_bad(pmd_t pmd)
{
	unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
	return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
}

static inline int pte_present(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;
	return (pte_val(pte) & mask) == _PAGE_TYPE_NONE ||
		(!(pte_val(pte) & _PAGE_INVALID) &&
		 !(pte_val(pte) & _PAGE_SWT));
}

static inline int pte_file(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;
	return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(a,b)  (pte_val(a) == pte_val(b))
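
/*
 * Worked example for pte_present: the mask is _PAGE_RO | _PAGE_INVALID |
 * _PAGE_SWT | _PAGE_SWX == 0x603. A _PAGE_TYPE_NONE pte (0x401) gives
 * (pte_val & 0x603) == 0x401 and is reported present even though its hw
 * invalid bit is set, while an empty pte (0x400) is caught by pte_none.
 */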

static inline void rcp_lock(pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
	preempt_disable();
	while (test_and_set_bit(RCP_PCL_BIT, pgste))
		;
#endif
}

static inline void rcp_unlock(pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
	clear_bit(RCP_PCL_BIT, pgste);
	preempt_enable();
#endif
}

/* forward declaration for SetPageUptodate in page-flags.h */
static inline void page_clear_dirty(struct page *page);
#include <linux/page-flags.h>

static inline void ptep_rcp_copy(pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	struct page *page = virt_to_page(pte_val(*ptep));
	unsigned int skey;
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);

	skey = page_get_storage_key(page_to_phys(page));
	if (skey & _PAGE_CHANGED) {
		set_bit_simple(RCP_GC_BIT, pgste);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	if (skey & _PAGE_REFERENCED)
		set_bit_simple(RCP_GR_BIT, pgste);
	if (test_and_clear_bit_simple(RCP_HC_BIT, pgste)) {
		SetPageDirty(page);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	if (test_and_clear_bit_simple(RCP_HR_BIT, pgste))
		SetPageReferenced(page);
#endif
}

/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_RO) == 0;
}

static inline int pte_dirty(pte_t pte)
{
	/* A pte is neither clean nor dirty on s/390. The dirty bit
	 * is in the storage key. See page_test_and_clear_dirty for
	 * details.
	 */
	return 0;
}

static inline int pte_young(pte_t pte)
{
	/* A pte is neither young nor old on s/390. The young bit
	 * is in the storage key. See page_test_and_clear_young for
	 * details.
	 */
	return 0;
}

/*
 * pgd/pmd/pte modification functions
 */

#ifndef __s390x__

#define pgd_clear(pgd)		do { } while (0)
#define pud_clear(pud)		do { } while (0)

#else /* __s390x__ */

static inline void pgd_clear_kernel(pgd_t * pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
}

static inline void pgd_clear(pgd_t * pgd)
{
	pgd_t *shadow = get_shadow_table(pgd);

	pgd_clear_kernel(pgd);
	if (shadow)
		pgd_clear_kernel(shadow);
}

static inline void pud_clear_kernel(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	pud_t *shadow = get_shadow_table(pud);

	pud_clear_kernel(pud);
	if (shadow)
		pud_clear_kernel(shadow);
}

#endif /* __s390x__ */

static inline void pmd_clear_kernel(pmd_t * pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmd)
{
	pmd_t *shadow = get_shadow_table(pmd);

	pmd_clear_kernel(pmd);
	if (shadow)
		pmd_clear_kernel(shadow);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	if (mm->context.has_pgste)
		ptep_rcp_copy(ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
	if (mm->context.noexec)
		pte_val(ptep[PTRS_PER_PTE]) = _PAGE_TYPE_EMPTY;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	/* Do not clobber _PAGE_TYPE_NONE pages!  */
	if (!(pte_val(pte) & _PAGE_INVALID))
		pte_val(pte) |= _PAGE_RO;
	return pte;
}
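
/*
 * Why the _PAGE_INVALID guard above matters, as a worked example:
 * blindly or'ing _PAGE_RO (0x200) into a _PAGE_TYPE_NONE pte (0x401)
 * would yield 0x601 == _PAGE_TYPE_FILE and silently change the pte type.
 */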

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_RO;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	/* The only user of pte_mkclean is the fork() code.
	   We must *not* clear the *physical* page dirty bit
	   just because fork() wants to clear the dirty bit in
	   *one* of the page's mappings.  So we just do nothing. */
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	/* We do not explicitly set the dirty bit because the
	 * sske instruction is slow. It is faster to let the
	 * next instruction set the dirty bit.
	 */
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	/* S/390 doesn't keep its dirty/referenced bit in the pte.
	 * There is no point in clearing the real referenced bit.
	 */
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	/* S/390 doesn't keep its dirty/referenced bit in the pte.
	 * There is no point in setting the real referenced bit.
	 */
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_PGSTE
/*
 * Get (and clear) the user dirty bit for a PTE.
 */
static inline int kvm_s390_test_and_clear_page_dirty(struct mm_struct *mm,
						     pte_t *ptep)
{
	int dirty;
	unsigned long *pgste;
	struct page *page;
	unsigned int skey;

	if (!mm->context.has_pgste)
		return -EINVAL;
	rcp_lock(ptep);
	pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
	page = virt_to_page(pte_val(*ptep));
	skey = page_get_storage_key(page_to_phys(page));
	if (skey & _PAGE_CHANGED) {
		set_bit_simple(RCP_GC_BIT, pgste);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	if (test_and_clear_bit_simple(RCP_HC_BIT, pgste)) {
		SetPageDirty(page);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	dirty = test_and_clear_bit_simple(KVM_UD_BIT, pgste);
	if (skey & _PAGE_CHANGED)
		page_clear_dirty(page);
	rcp_unlock(ptep);
	return dirty;
}
#endif

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	unsigned long physpage;
	int young;
	unsigned long *pgste;

	if (!vma->vm_mm->context.has_pgste)
		return 0;
	physpage = pte_val(*ptep) & PAGE_MASK;
	pgste = (unsigned long *) (ptep + PTRS_PER_PTE);

	young = ((page_get_storage_key(physpage) & _PAGE_REFERENCED) != 0);
	rcp_lock(ptep);
	if (young)
		set_bit_simple(RCP_GR_BIT, pgste);
	young |= test_and_clear_bit_simple(RCP_HR_BIT, pgste);
	rcp_unlock(ptep);
	return young;
#endif
	return 0;
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/* No need to flush TLB;
	 * on s390 reference bits are in the storage key and never in the
	 * TLB. With virtualization we handle the reference bit, without it
	 * we can simply return. */
#ifdef CONFIG_PGSTE
	return ptep_test_and_clear_young(vma, address, ptep);
#endif
	return 0;
}

static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
#ifndef __s390x__
		/* pto must point to the start of the page table */
		pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
#else
		/* ipte in zarch mode can do the math */
		pte_t *pto = ptep;
#endif
		asm volatile(
			"	ipte	%2,%3"
			: "=m" (*ptep) : "m" (*ptep),
			  "a" (pto), "a" (address));
	}
}
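
/*
 * On the 31 bit math above: ipte expects the page table origin as its
 * first operand. A 31 bit page table holds 256 four-byte entries, i.e.
 * 1K, so masking the pte pointer with 0x7ffffc00 (clearing the low 10
 * bits) yields the table origin; ipte then locates the entry from the
 * page index bits of the address in the second operand.
 */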

static inline void ptep_invalidate(struct mm_struct *mm,
				   unsigned long address, pte_t *ptep)
{
	if (mm->context.has_pgste) {
		rcp_lock(ptep);
		__ptep_ipte(address, ptep);
		ptep_rcp_copy(ptep);
		pte_val(*ptep) = _PAGE_TYPE_EMPTY;
		rcp_unlock(ptep);
		return;
	}
	__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
	if (mm->context.noexec) {
		__ptep_ipte(address, ptep + PTRS_PER_PTE);
		pte_val(*(ptep + PTRS_PER_PTE)) = _PAGE_TYPE_EMPTY;
	}
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear(__mm, __address, __ptep)			\
({									\
	pte_t __pte = *(__ptep);					\
	if (atomic_read(&(__mm)->mm_users) > 1 ||			\
	    (__mm) != current->active_mm)				\
		ptep_invalidate(__mm, __address, __ptep);		\
	else								\
		pte_clear((__mm), (__address), (__ptep));		\
	__pte;								\
})

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
	ptep_invalidate(vma->vm_mm, address, ptep);
	return pte;
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	pte_t pte = *ptep;

	if (full)
		pte_clear(mm, addr, ptep);
	else
		ptep_invalidate(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define ptep_set_wrprotect(__mm, __addr, __ptep)			\
({									\
	pte_t __pte = *(__ptep);					\
	if (pte_write(__pte)) {						\
		if (atomic_read(&(__mm)->mm_users) > 1 ||		\
		    (__mm) != current->active_mm)			\
			ptep_invalidate(__mm, __addr, __ptep);		\
		set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte));	\
	}								\
})

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty)	\
({									\
	int __changed = !pte_same(*(__ptep), __entry);			\
	if (__changed) {						\
		ptep_invalidate((__vma)->vm_mm, __addr, __ptep);	\
		set_pte_at((__vma)->vm_mm, __addr, __ptep, __entry);	\
	}								\
	__changed;							\
})

/*
 * Test and clear dirty bit in storage key.
 * We can't clear the changed bit atomically. This is a potential
 * race against modification of the referenced bit. This function
 * should therefore only be called if it is not mapped in any
 * address space.
 */
#define __HAVE_ARCH_PAGE_TEST_DIRTY
static inline int page_test_dirty(struct page *page)
{
	return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0;
}

#define __HAVE_ARCH_PAGE_CLEAR_DIRTY
static inline void page_clear_dirty(struct page *page)
{
	page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY);
}

/*
 * Test and clear referenced bit in storage key.
 */
#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
static inline int page_test_and_clear_young(struct page *page)
{
	unsigned long physpage = page_to_phys(page);
	int ccode;

	asm volatile(
		"	rrbe	0,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (ccode) : "a" (physpage) : "cc" );
	return ccode & 2;
}
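
/*
 * How the inline assembly above extracts its result: rrbe resets the
 * reference bit and sets the condition code, with cc 2 and 3 meaning
 * the reference bit was one. ipm/srl move the condition code into the
 * two low bits of ccode, so "ccode & 2" is non-zero exactly when the
 * page had been referenced.
 */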

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return __pte;
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);

	return mk_pte_phys(physpage, pgprot);
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#ifndef __s390x__

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pmd) ({ BUG(); 0UL; })
#define pgd_deref(pmd) ({ BUG(); 0UL; })

#define pud_offset(pgd, address) ((pud_t *) pgd)
#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))

#else /* __s390x__ */

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	pud_t *pud = (pud_t *) pgd;
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) pgd_deref(*pgd);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#endif /* __s390x__ */

#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
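
/*
 * Typical use of the walk macros above, as a sketch (a real walker would
 * also check p?d_none/p?d_bad at every level before descending):
 *
 *	pgd_t *pgd = pgd_offset_k(address);
 *	pud_t *pud = pud_offset(pgd, address);
 *	pmd_t *pmd = pmd_offset(pud, address);
 *	pte_t *pte = pte_offset_kernel(pmd, address);
 *
 * On 31 bit the mid levels are folded: pgd_offset already indexes the
 * segment table, and pud_offset/pmd_offset reduce to plain casts.
 */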

/*
 * 31 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 0, 20 and 23 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit of not storing necessary
 * information in the lowcore.
 * Bit 21 and bit 22 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 30 and 31 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 1-19 and bits 24-29 to store type and offset.
 * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
 * plus 24 for the offset.
 * 0|     offset        |0110|o|type |00|
 * 0 0000000001111111111 2222 2 22222 33
 * 0 1234567890123456789 0123 4 56789 01
 *
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit of not storing necessary
 * information in the lowcore.
 * Bit 53 and bit 54 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 62 and 63 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 0-51 and bits 56-61 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
 * plus 56 for the offset.
 * |                      offset                        |0110|o|type |00|
 * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
 * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
 */
#ifndef __s390x__
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else
#define __SWP_OFFSET_MASK (~0UL >> 11)
#endif
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;
	offset &= __SWP_OFFSET_MASK;
	pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
		((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
	return pte;
}

#define __swp_type(entry)	(((entry).val >> 2) & 0x1f)
#define __swp_offset(entry)	(((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#ifndef __s390x__
# define PTE_FILE_MAX_BITS	26
#else /* __s390x__ */
# define PTE_FILE_MAX_BITS	59
#endif /* __s390x__ */

#define pte_to_pgoff(__pte) \
	((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))

#define pgoff_to_pte(__off) \
	((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
		   | _PAGE_TYPE_FILE })

#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */