#ifndef _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
#define _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
/*
 * PowerPC64 memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 *   PPC64 rework.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/asm-compat.h>
#include <asm/page.h>
#include <asm/bug.h>

/*
 * This is necessary to get the definition of PGTABLE_RANGE, which we
 * need for various slice-related matters. Note that this isn't the
 * complete pgtable.h but only a portion of it.
 */
#include <asm/book3s/64/pgtable.h>
#include <asm/processor.h>
#include <asm/cpu_has_feature.h>

/*
 * SLB
 */

#define SLB_NUM_BOLTED		3
#define SLB_CACHE_ENTRIES	8
#define SLB_MIN_SIZE		32

/* Bits in the SLB ESID word */
#define SLB_ESID_V		ASM_CONST(0x0000000008000000) /* valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT		12
#define SLB_VSID_SHIFT_1T	24
#define SLB_VSID_SSIZE_SHIFT	62
#define SLB_VSID_B		ASM_CONST(0xc000000000000000)
#define SLB_VSID_B_256M		ASM_CONST(0x0000000000000000)
#define SLB_VSID_B_1T		ASM_CONST(0x4000000000000000)
#define SLB_VSID_KS		ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP		ASM_CONST(0x0000000000000400)
#define SLB_VSID_N		ASM_CONST(0x0000000000000200) /* no-execute */
#define SLB_VSID_L		ASM_CONST(0x0000000000000100)
#define SLB_VSID_C		ASM_CONST(0x0000000000000080) /* class */
#define SLB_VSID_LP		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LP_00		ASM_CONST(0x0000000000000000)
#define SLB_VSID_LP_01		ASM_CONST(0x0000000000000010)
#define SLB_VSID_LP_10		ASM_CONST(0x0000000000000020)
#define SLB_VSID_LP_11		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LLP		(SLB_VSID_L|SLB_VSID_LP)

#define SLB_VSID_KERNEL		(SLB_VSID_KP)
#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)

#define SLBIE_C			(0x08000000)
#define SLBIE_SSIZE_SHIFT	25

/*
 * Hash table
 */

#define HPTES_PER_GROUP	8

#define HPTE_V_SSIZE_SHIFT	62
#define HPTE_V_AVPN_SHIFT	7
#define HPTE_V_COMMON_BITS	ASM_CONST(0x000fffffffffffff)
#define HPTE_V_AVPN		ASM_CONST(0x3fffffffffffff80)
#define HPTE_V_AVPN_3_0		ASM_CONST(0x000fffffffffff80)
#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & 0xffffffffffffff80UL))
#define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY	ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID		ASM_CONST(0x0000000000000001)
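/*
 * Note (just restating the mask above, for orientation): HPTE_V_COMPARE()
 * ignores the low 7 bits of each first dword, so two values match if they
 * agree on B and the AVPN, regardless of the flag bits below bit 7
 * (BOLTED, LOCK, LARGE, SECONDARY, VALID).
 */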
/*
 * ISA 3.0 has a different HPTE format.
 */
#define HPTE_R_3_0_SSIZE_SHIFT	58
#define HPTE_R_3_0_SSIZE_MASK	(3ull << HPTE_R_3_0_SSIZE_SHIFT)
#define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
#define HPTE_R_TS		ASM_CONST(0x4000000000000000)
#define HPTE_R_KEY_HI		ASM_CONST(0x3000000000000000)
#define HPTE_R_RPN_SHIFT	12
#define HPTE_R_RPN		ASM_CONST(0x0ffffffffffff000)
#define HPTE_R_RPN_3_0		ASM_CONST(0x01fffffffffff000)
#define HPTE_R_PP		ASM_CONST(0x0000000000000003)
#define HPTE_R_PPP		ASM_CONST(0x8000000000000003)
#define HPTE_R_N		ASM_CONST(0x0000000000000004)
#define HPTE_R_G		ASM_CONST(0x0000000000000008)
#define HPTE_R_M		ASM_CONST(0x0000000000000010)
#define HPTE_R_I		ASM_CONST(0x0000000000000020)
#define HPTE_R_W		ASM_CONST(0x0000000000000040)
#define HPTE_R_WIMG		ASM_CONST(0x0000000000000078)
#define HPTE_R_C		ASM_CONST(0x0000000000000080)
#define HPTE_R_R		ASM_CONST(0x0000000000000100)
#define HPTE_R_KEY_LO		ASM_CONST(0x0000000000000e00)

#define HPTE_V_1TB_SEG		ASM_CONST(0x4000000000000000)
#define HPTE_V_VRMA_MASK	ASM_CONST(0x4001ffffff000000)

/* Values for PP (assumes Ks=0, Kp=1) */
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX	1	/* Supervisor read/write, User read */
#define PP_RWRW	2	/* Supervisor read/write, User read/write */
#define PP_RXRX	3	/* Supervisor read,       User read */
#define PP_RXXX	(HPTE_R_PP0 | 2)	/* Supervisor read, User none */

/* Fields for tlbiel instruction in architecture 2.06 */
#define TLBIEL_INVAL_SEL_MASK	0xc00	/* invalidation selector */
#define  TLBIEL_INVAL_PAGE	0x000	/* invalidate a single page */
#define  TLBIEL_INVAL_SET_LPID	0x800	/* invalidate a set for current LPID */
#define  TLBIEL_INVAL_SET	0xc00	/* invalidate a set for all LPIDs */
#define TLBIEL_INVAL_SET_MASK	0xfff000	/* set number to inval. */
#define TLBIEL_INVAL_SET_SHIFT	12

#define POWER7_TLB_SETS		128	/* # sets in POWER7 TLB */
#define POWER8_TLB_SETS		512	/* # sets in POWER8 TLB */
#define POWER9_TLB_SETS_HASH	256	/* # sets in POWER9 TLB Hash mode */
#define POWER9_TLB_SETS_RADIX	128	/* # sets in POWER9 TLB Radix mode */

#ifndef __ASSEMBLY__

struct mmu_hash_ops {
	void (*hpte_invalidate)(unsigned long slot,
				unsigned long vpn,
				int bpsize, int apsize,
				int ssize, int local);
	long (*hpte_updatepp)(unsigned long slot,
			      unsigned long newpp,
			      unsigned long vpn,
			      int bpsize, int apsize,
			      int ssize, unsigned long flags);
	void (*hpte_updateboltedpp)(unsigned long newpp,
				    unsigned long ea,
				    int psize, int ssize);
	long (*hpte_insert)(unsigned long hpte_group,
			    unsigned long vpn,
			    unsigned long prpn,
			    unsigned long rflags,
			    unsigned long vflags,
			    int psize, int apsize,
			    int ssize);
	long (*hpte_remove)(unsigned long hpte_group);
	int (*hpte_removebolted)(unsigned long ea,
				 int psize, int ssize);
	void (*flush_hash_range)(unsigned long number, int local);
	void (*hugepage_invalidate)(unsigned long vsid,
				    unsigned long addr,
				    unsigned char *hpte_slot_array,
				    int psize, int ssize, int local);
	/*
	 * Special for kexec.
	 * To be called in real mode with interrupts disabled. No locks are
	 * taken, so concurrent access on pre-POWER5 hardware could result
	 * in a deadlock.
	 * The linear mapping is destroyed as well.
	 */
	void (*hpte_clear_all)(void);
};
extern struct mmu_hash_ops mmu_hash_ops;
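/*
 * Illustrative sketch (the function and callback names here are
 * placeholders, not declarations from this header): a hash MMU backend
 * fills in mmu_hash_ops at boot, roughly as
 *
 *	void __init example_hpte_init(void)
 *	{
 *		mmu_hash_ops.hpte_invalidate = example_hpte_invalidate;
 *		mmu_hash_ops.hpte_insert     = example_hpte_insert;
 *		mmu_hash_ops.hpte_remove     = example_hpte_remove;
 *	}
 *
 * hpte_init_native() and hpte_init_pseries(), declared later in this
 * header, are the real entry points that set up these ops.
 */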
struct hash_pte {
	__be64 v;
	__be64 r;
};

extern struct hash_pte *htab_address;
extern unsigned long htab_size_bytes;
extern unsigned long htab_hash_mask;

static inline int shift_to_mmu_psize(unsigned int shift)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
		if (mmu_psize_defs[psize].shift == shift)
			return psize;
	return -1;
}

static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
	if (mmu_psize_defs[mmu_psize].shift)
		return mmu_psize_defs[mmu_psize].shift;
	BUG();
}

static inline unsigned long get_sllp_encoding(int psize)
{
	unsigned long sllp;

	sllp = ((mmu_psize_defs[psize].sllp & SLB_VSID_L) >> 6) |
		((mmu_psize_defs[psize].sllp & SLB_VSID_LP) >> 4);
	return sllp;
}

#endif /* __ASSEMBLY__ */

/*
 * Segment sizes.
 * These are the values used by hardware in the B field of
 * SLB entries and the first dword of MMU hashtable entries.
 * The B field is 2 bits; the values 2 and 3 are unused and reserved.
 */
#define MMU_SEGSIZE_256M	0
#define MMU_SEGSIZE_1T		1

/*
 * Encode the page number shift.
 * In order to fit the 78-bit VA in a 64-bit variable, we shift the VA by
 * 12 bits. This enables us to address up to a 76-bit VA.
 * For the hpt hash of a VA we can ignore the page-size bits of the VA,
 * and for HPTE encoding we ignore up to 23 bits of the VA. So ignoring
 * the lower 12 bits ensures we work in all cases, including 4k page size.
 */
#define VPN_SHIFT	12

/*
 * HPTE Large Page (LP) details
 */
#define LP_SHIFT	12
#define LP_BITS		8
#define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)

#ifndef __ASSEMBLY__

static inline int slb_vsid_shift(int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return SLB_VSID_SHIFT;
	return SLB_VSID_SHIFT_1T;
}

static inline int segment_shift(int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return SID_SHIFT;
	return SID_SHIFT_1T;
}
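/*
 * For orientation (values follow from the SID_SHIFT definitions): a 256M
 * segment gives segment_shift() == SID_SHIFT (28) and slb_vsid_shift()
 * == 12, while a 1T segment gives SID_SHIFT_1T (40) and 24.
 */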
/*
 * This array is indexed by the LP field of the HPTE second dword.
 * Since this field may contain some RPN bits, some entries are
 * replicated so that we get the same value irrespective of RPN.
 * The top 4 bits are the page size index (MMU_PAGE_*) for the
 * actual page size, the bottom 4 bits are the base page size.
 */
extern u8 hpte_page_sizes[1 << LP_BITS];

static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
					     bool is_base_size)
{
	unsigned int i, lp;

	if (!(h & HPTE_V_LARGE))
		return 1ul << 12;

	/* Look at the 8 bit LP value */
	lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
	i = hpte_page_sizes[lp];
	if (!i)
		return 0;
	if (!is_base_size)
		i >>= 4;
	return 1ul << mmu_psize_defs[i & 0xf].shift;
}

static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
{
	return __hpte_page_size(h, l, 0);
}

static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
{
	return __hpte_page_size(h, l, 1);
}

/*
 * The current system page and segment sizes
 */
extern int mmu_kernel_ssize;
extern int mmu_highuser_ssize;
extern u16 mmu_slb_size;
extern unsigned long tce_alloc_start, tce_alloc_end;

/*
 * If the processor supports 64k normal pages but not 64k cache-inhibited
 * pages, we have to be prepared to switch processes
 * to use 4k pages when they create cache-inhibited mappings.
 * If this is the case, mmu_ci_restrictions will be set to 1.
 */
extern int mmu_ci_restrictions;

/*
 * This computes the AVPN and B fields of the first dword of an HPTE,
 * for use when we want to match an existing PTE. The bottom 7 bits
 * of the returned value are zero.
 */
static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
					     int ssize)
{
	unsigned long v;
	/*
	 * The AVA field omits the low-order 23 bits of the 78-bit VA.
	 * These bits are not needed in the PTE, because the
	 * low-order b of these bits are part of the byte offset
	 * into the virtual page and, if b < 23, the high-order
	 * 23-b of these bits are always used in selecting the
	 * PTEGs to be searched.
	 */
	v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
	v <<= HPTE_V_AVPN_SHIFT;
	v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
	return v;
}

/*
 * ISA v3.0 defines a new HPTE format, which differs from the old
 * format in having smaller AVPN and ARPN fields, and the B field
 * in the second dword instead of the first.
 */
static inline unsigned long hpte_old_to_new_v(unsigned long v)
{
	/* trim AVPN, drop B */
	return v & HPTE_V_COMMON_BITS;
}

static inline unsigned long hpte_old_to_new_r(unsigned long v, unsigned long r)
{
	/* move B field from 1st to 2nd dword, trim ARPN */
	return (r & ~HPTE_R_3_0_SSIZE_MASK) |
	       (((v) >> HPTE_V_SSIZE_SHIFT) << HPTE_R_3_0_SSIZE_SHIFT);
}

static inline unsigned long hpte_new_to_old_v(unsigned long v, unsigned long r)
{
	/* insert B field */
	return (v & HPTE_V_COMMON_BITS) |
	       ((r & HPTE_R_3_0_SSIZE_MASK) <<
		(HPTE_V_SSIZE_SHIFT - HPTE_R_3_0_SSIZE_SHIFT));
}

static inline unsigned long hpte_new_to_old_r(unsigned long r)
{
	/* clear out B field */
	return r & ~HPTE_R_3_0_SSIZE_MASK;
}
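/*
 * Worked example (illustrative): an old-format HPTE for a 1T segment has
 * B = 0b01 in bits 63:62 of the first dword. hpte_old_to_new_v() drops B
 * (and any AVPN bits above HPTE_V_COMMON_BITS), while hpte_old_to_new_r()
 * re-encodes the same B value at bits 59:58 of the second dword;
 * hpte_new_to_old_v()/hpte_new_to_old_r() undo the transformation.
 */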
/*
 * This function sets the AVPN and L fields of the HPTE appropriately
 * using the base page size and actual page size.
 */
static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
					  int actual_psize, int ssize)
{
	unsigned long v;
	v = hpte_encode_avpn(vpn, base_psize, ssize);
	if (actual_psize != MMU_PAGE_4K)
		v |= HPTE_V_LARGE;
	return v;
}

/*
 * This function sets the ARPN and LP fields of the HPTE appropriately
 * for the page size. We assume the pa is already "clean", that is,
 * properly aligned for the requested page size.
 */
static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
					  int actual_psize)
{
	/* A 4K page needs no special encoding */
	if (actual_psize == MMU_PAGE_4K)
		return pa & HPTE_R_RPN;
	else {
		unsigned int penc = mmu_psize_defs[base_psize].penc[actual_psize];
		unsigned int shift = mmu_psize_defs[actual_psize].shift;
		return (pa & ~((1ul << shift) - 1)) | (penc << LP_SHIFT);
	}
}

/*
 * Build a VPN_SHIFT-bit shifted VA given the VSID, EA and segment size.
 */
static inline unsigned long hpt_vpn(unsigned long ea,
				    unsigned long vsid, int ssize)
{
	unsigned long mask;
	int s_shift = segment_shift(ssize);

	mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
	return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
}

/*
 * This hashes a virtual address
 */
static inline unsigned long hpt_hash(unsigned long vpn,
				     unsigned int shift, int ssize)
{
	int mask;
	unsigned long hash, vsid;

	/* VPN_SHIFT can be at most 12 */
	if (ssize == MMU_SEGSIZE_256M) {
		mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
		hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	} else {
		mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
		vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
		hash = vsid ^ (vsid << 25) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	}
	return hash & 0x7fffffffffUL;
}
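/*
 * Worked example (illustrative): for a 4K page (shift == 12 == VPN_SHIFT)
 * in a 256M segment, (shift - VPN_SHIFT) is 0, so the hash reduces to the
 * VSID part of the VPN xor'ed with the page index within the segment,
 * truncated to 39 bits by the final mask.
 */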
#define HPTE_LOCAL_UPDATE	0x1
#define HPTE_NOHPTE_UPDATE	0x2

extern int __hash_page_4K(unsigned long ea, unsigned long access,
			  unsigned long vsid, pte_t *ptep, unsigned long trap,
			  unsigned long flags, int ssize, int subpage_prot);
extern int __hash_page_64K(unsigned long ea, unsigned long access,
			   unsigned long vsid, pte_t *ptep, unsigned long trap,
			   unsigned long flags, int ssize);
struct mm_struct;
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
extern int hash_page_mm(struct mm_struct *mm, unsigned long ea,
			unsigned long access, unsigned long trap,
			unsigned long flags);
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
		     unsigned long dsisr);
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
		     pte_t *ptep, unsigned long trap, unsigned long flags,
		     int ssize, unsigned int shift, unsigned int mmu_psize);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int __hash_page_thp(unsigned long ea, unsigned long access,
			   unsigned long vsid, pmd_t *pmdp, unsigned long trap,
			   unsigned long flags, int ssize, unsigned int psize);
#else
static inline int __hash_page_thp(unsigned long ea, unsigned long access,
				  unsigned long vsid, pmd_t *pmdp,
				  unsigned long trap, unsigned long flags,
				  int ssize, unsigned int psize)
{
	BUG();
	return -1;
}
#endif
extern void hash_failure_debug(unsigned long ea, unsigned long access,
			       unsigned long vsid, unsigned long trap,
			       int ssize, int psize, int lpsize,
			       unsigned long pte);
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
			     unsigned long pstart, unsigned long prot,
			     int psize, int ssize);
int htab_remove_mapping(unsigned long vstart, unsigned long vend,
			int psize, int ssize);
extern void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);

#ifdef CONFIG_PPC_PSERIES
void hpte_init_pseries(void);
#else
static inline void hpte_init_pseries(void) { }
#endif

extern void hpte_init_native(void);

extern void slb_initialize(void);
extern void slb_flush_and_rebolt(void);

extern void slb_vmalloc_update(void);
extern void slb_set_size(u16 size);
#endif /* __ASSEMBLY__ */

/*
 * VSID allocation (256MB segment)
 *
 * We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated
 * from the mmu context id and the effective segment id of the address.
 *
 * For user processes the max context id is limited to ((1ul << 19) - 5);
 * for kernel space, we use the top 4 context ids to map addresses as below.
 * NOTE: each context only supports 64TB now.
 * 0x7fffc -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
 * 0x7fffd -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
 * 0x7fffe -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
 * 0x7ffff -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
 *
 *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
 *
 * VSID_MULTIPLIER is prime, so in particular it is
 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
 * Because the modulus is 2^n-1 we can compute it efficiently without
 * a divide or extra multiply (see below). The scramble function gives
 * robust scattering in the hash table (at least based on some initial
 * results).
 *
 * We also consider VSID 0 special. We use VSID 0 for SLB entries mapping
 * a bad address. This enables us to consolidate bad address handling in
 * hash_page.
 *
 * We also need to avoid the last segment of the last context, because that
 * would give a proto-VSID of 0x1fffffffff. That will result in VSID 0
 * because of the modulo operation in the VSID scramble. But the vmemmap
 * (which is what uses region 0xf) will never be close to 64TB in size
 * (it's 56 bytes per page of system memory).
 */

#define CONTEXT_BITS		19
#define ESID_BITS		18
#define ESID_BITS_1T		6

/*
 * 256MB segment
 * The proto-VSID space has 2^(CONTEXT_BITS + ESID_BITS) - 1 segments
 * available for user + kernel mapping. The top 4 contexts are used for
 * kernel mapping. Each segment contains 2^28 bytes. Each
 * context maps 2^46 bytes (64TB), so we can support 2^19-1 contexts
 * (19 == 37 + 28 - 46).
 */
#define MAX_USER_CONTEXT	((ASM_CONST(1) << CONTEXT_BITS) - 5)
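/*
 * Worked out from the comment above: MAX_USER_CONTEXT is
 * (1 << 19) - 5 = 0x7fffb, so user contexts occupy 0 .. 0x7fffb and the
 * four kernel contexts occupy 0x7fffc .. 0x7ffff.
 */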
/*
 * This should be computed such that protovsid * vsid_multiplier
 * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus.
 * (With a 37-bit proto-VSID and a 24-bit multiplier, the product stays
 * below 2^61, so this holds.)
 */
#define VSID_MULTIPLIER_256M	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_256M		(CONTEXT_BITS + ESID_BITS)
#define VSID_MODULUS_256M	((1UL<<VSID_BITS_256M)-1)

#define VSID_MULTIPLIER_1T	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_1T		(CONTEXT_BITS + ESID_BITS_1T)
#define VSID_MODULUS_1T		((1UL<<VSID_BITS_1T)-1)


#define USER_VSID_RANGE	(1UL << (ESID_BITS + SID_SHIFT))

/*
 * This macro generates asm code to compute the VSID scramble
 * function. Used in slb_allocate() and do_stab_bolted. The function
 * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
 *
 *	rt = register containing the proto-VSID and into which the
 *		VSID will be stored
 *	rx = scratch register (clobbered)
 *
 *	- rt and rx must be different registers
 *	- The answer will end up in the low VSID_BITS bits of rt. The higher
 *	  bits may contain other garbage, so you may need to mask the
 *	  result.
 */
#define ASM_VSID_SCRAMBLE(rt, rx, size)					\
	lis	rx,VSID_MULTIPLIER_##size@h;				\
	ori	rx,rx,VSID_MULTIPLIER_##size@l;				\
	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
									\
	srdi	rx,rt,VSID_BITS_##size;					\
	clrldi	rt,rt,(64-VSID_BITS_##size);				\
	add	rt,rt,rx;		/* add high and low bits */	\
	/* NOTE: explanation based on VSID_BITS_##size = 36		\
	 * Now, r3 == VSID (mod 2^36-1), and lies between 0 and		\
	 * 2^36-1+2^28-1. That in particular means that if r3 >=	\
	 * 2^36-1, then r3+1 has the 2^36 bit set. So, if r3+1 has	\
	 * the bit clear, r3 already has the answer we want, if it	\
	 * doesn't, the answer is the low 36 bits of r3+1. So in all	\
	 * cases the answer is the low 36 bits of (r3 + ((r3+1) >> 36))*/\
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS_##size;	/* extract 2^VSID_BITS bit */	\
	add	rt,rt,rx
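/*
 * Worked example of the 2^n - 1 folding above (illustrative, using a tiny
 * VSID_BITS = 4, i.e. modulus 15): for x = 55, high = 55 >> 4 = 3,
 * low = 55 & 15 = 7, and high + low = 10 == 55 mod 15. The final
 * addi/srdi/add sequence handles the boundary case where the sum reaches
 * 2^n - 1, in which case the answer is the low n bits of sum + 1.
 */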
/* 4 bits per slice and we have one slice per 1TB */
#define SLICE_ARRAY_SIZE	(H_PGTABLE_RANGE >> 41)

#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * For the sub-page protection option, we extend the PGD with one of
 * these. Basically we have a 3-level tree, with the top level being
 * the protptrs array. To optimize speed and memory consumption when
 * only addresses < 4GB are being protected, pointers to the first
 * four pages of sub-page protection words are stored in the low_prot
 * array.
 * Each page of sub-page protection words protects 1GB (4 bytes
 * protects 64k). For the 3-level tree, each page of pointers then
 * protects 8TB.
 */
struct subpage_prot_table {
	unsigned long maxaddr;	/* only addresses < this are protected */
	unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
	unsigned int *low_prot[4];
};

#define SBP_L1_BITS		(PAGE_SHIFT - 2)
#define SBP_L2_BITS		(PAGE_SHIFT - 3)
#define SBP_L1_COUNT		(1 << SBP_L1_BITS)
#define SBP_L2_COUNT		(1 << SBP_L2_BITS)
#define SBP_L2_SHIFT		(PAGE_SHIFT + SBP_L1_BITS)
#define SBP_L3_SHIFT		(SBP_L2_SHIFT + SBP_L2_BITS)

extern void subpage_prot_free(struct mm_struct *mm);
extern void subpage_prot_init_new_context(struct mm_struct *mm);
#else
static inline void subpage_prot_free(struct mm_struct *mm) {}
static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
#endif /* CONFIG_PPC_SUBPAGE_PROT */

#if 0
/*
 * The code below is equivalent to this function for arguments
 * < 2^VSID_BITS, which is all this should ever be called
 * with. However gcc is not clever enough to compute the
 * modulus (2^n-1) without a second multiply.
 */
#define vsid_scramble(protovsid, size) \
	((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))

#else /* 1 */
#define vsid_scramble(protovsid, size) \
	({								 \
		unsigned long x;					 \
		x = (protovsid) * VSID_MULTIPLIER_##size;		 \
		x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
		(x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
	})
#endif /* 1 */

/* Returns the segment size indicator for a user address */
static inline int user_segment_size(unsigned long addr)
{
	/* Use 1T segments if possible for addresses >= 1T */
	if (addr >= (1UL << SID_SHIFT_1T))
		return mmu_highuser_ssize;
	return MMU_SEGSIZE_256M;
}

static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
				     int ssize)
{
	/*
	 * Bad address. We return VSID 0 for that.
	 */
	if ((ea & ~REGION_MASK) >= H_PGTABLE_RANGE)
		return 0;

	if (ssize == MMU_SEGSIZE_256M)
		return vsid_scramble((context << ESID_BITS)
				     | (ea >> SID_SHIFT), 256M);
	return vsid_scramble((context << ESID_BITS_1T)
			     | (ea >> SID_SHIFT_1T), 1T);
}

/*
 * This is only valid for addresses >= PAGE_OFFSET
 *
 * For kernel space, we use the top 4 context ids to map addresses as below
 * 0x7fffc -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
 * 0x7fffd -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
 * 0x7fffe -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
 * 0x7ffff -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
 */
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
	unsigned long context;

	/*
	 * The kernel takes the top 4 contexts from the available range.
	 */
	context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1;
	return get_vsid(context, ea, ssize);
}

unsigned htab_shift_for_mem_size(unsigned long mem_size);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_ */