/*
 * pSeries_lpar.c
 * Copyright (C) 2001 Todd Inglett, IBM Corporation
 *
 * pSeries LPAR support.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

/* Enables debugging of low-level hash table routines - careful! */
#undef DEBUG
#define pr_fmt(fmt) "lpar: " fmt

#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/console.h>
#include <linux/export.h>
#include <linux/jump_label.h>
#include <linux/delay.h>
#include <linux/stop_machine.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/iommu.h>
#include <asm/tlb.h>
#include <asm/prom.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/trace.h>
#include <asm/firmware.h>
#include <asm/plpar_wrappers.h>
#include <asm/kexec.h>
#include <asm/fadump.h>
#include <asm/asm-prototypes.h>

#include "pseries.h"

/* Flag bits for H_BULK_REMOVE */
#define HBR_REQUEST	0x4000000000000000UL
#define HBR_RESPONSE	0x8000000000000000UL
#define HBR_END		0xc000000000000000UL
#define HBR_AVPN	0x0200000000000000UL
#define HBR_ANDCOND	0x0100000000000000UL
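
/*
 * Illustrative sketch (our reading of the code below, not quoted from the
 * hcall documentation): each translation passed to H_BULK_REMOVE is a pair
 * of parameter words - a control/slot word built from the HBR_* bits above
 * and an AVPN match word - with the list terminated by HBR_END.  A single
 * slot would be encoded as:
 *
 *	param[0] = HBR_REQUEST | HBR_AVPN | slot;
 *	param[1] = hpte_encode_avpn(vpn, psize, ssize);
 *	param[2] = HBR_END;
 *
 * pSeries_lpar_flush_hash_range() below batches four such pairs per hcall.
 */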

/* in hvCall.S */
EXPORT_SYMBOL(plpar_hcall);
EXPORT_SYMBOL(plpar_hcall9);
EXPORT_SYMBOL(plpar_hcall_norets);

void vpa_init(int cpu)
{
	int hwcpu = get_hard_smp_processor_id(cpu);
	unsigned long addr;
	long ret;
	struct paca_struct *pp;
	struct dtl_entry *dtl;

	/*
	 * The spec says it "may be problematic" if CPU x registers the VPA of
	 * CPU y. We should never do that, but wail if we ever do.
	 */
	WARN_ON(cpu != smp_processor_id());

	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		lppaca_of(cpu).vmxregs_in_use = 1;

	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		lppaca_of(cpu).ebb_regs_in_use = 1;

	addr = __pa(&lppaca_of(cpu));
	ret = register_vpa(hwcpu, addr);

	if (ret) {
		pr_err("WARNING: VPA registration for cpu %d (hw %d) of area "
		       "%lx failed with %ld\n", cpu, hwcpu, addr, ret);
		return;
	}

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * PAPR calls this feature SLB-Buffer, but firmware never reports it.
	 * All SPLPARs support the SLB shadow buffer.
	 */
	if (!radix_enabled() && firmware_has_feature(FW_FEATURE_SPLPAR)) {
		addr = __pa(paca_ptrs[cpu]->slb_shadow_ptr);
		ret = register_slb_shadow(hwcpu, addr);
		if (ret)
			pr_err("WARNING: SLB shadow buffer registration for "
			       "cpu %d (hw %d) of area %lx failed with %ld\n",
			       cpu, hwcpu, addr, ret);
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	/*
	 * Register dispatch trace log, if one has been allocated.
	 */
	pp = paca_ptrs[cpu];
	dtl = pp->dispatch_log;
	if (dtl) {
		pp->dtl_ridx = 0;
		pp->dtl_curr = dtl;
		lppaca_of(cpu).dtl_idx = 0;

		/* hypervisor reads buffer length from this field */
		dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
		ret = register_dtl(hwcpu, __pa(dtl));
		if (ret)
			pr_err("WARNING: DTL registration of cpu %d (hw %d) "
			       "failed with %ld\n", smp_processor_id(),
			       hwcpu, ret);
		lppaca_of(cpu).dtl_enable_mask = 2;
	}
}

#ifdef CONFIG_PPC_BOOK3S_64

static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
				     unsigned long vpn, unsigned long pa,
				     unsigned long rflags, unsigned long vflags,
				     int psize, int apsize, int ssize)
{
	unsigned long lpar_rc;
	unsigned long flags;
	unsigned long slot;
	unsigned long hpte_v, hpte_r;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel("hpte_insert(group=%lx, vpn=%016lx, "
			 "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n",
			 hpte_group, vpn, pa, rflags, vflags, psize);

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);

	/* Now fill in the actual HPTE */
	/* Set CEC cookie to 0 */
	/* Zero page = 0 */
	/* I-cache Invalidate = 0 */
	/* I-cache synchronize = 0 */
	/* Exact = 0 */
	flags = 0;

	if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
		flags |= H_COALESCE_CAND;

	lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot);
	if (unlikely(lpar_rc == H_PTEG_FULL)) {
		pr_devel("Hash table group is full\n");
		return -1;
	}

	/*
	 * Since we try and ioremap PHBs we don't own, the pte insert
	 * will fail. However we must catch the failure in hash_page
	 * or we will loop forever, so return -2 in this case.
	 */
	if (unlikely(lpar_rc != H_SUCCESS)) {
		pr_err("Failed hash pte insert with error %ld\n", lpar_rc);
		return -2;
	}
	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" -> slot: %lu\n", slot & 7);

	/* Because of iSeries, we have to pass down the secondary
	 * bucket bit here as well
	 */
	return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3);
}
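
/*
 * Illustrative sketch: a caller can split the value returned by
 * pSeries_lpar_hpte_insert() back into its two fields (the variable names
 * here are local to this example):
 *
 *	long ret = ...;			// >= 0 return from hpte_insert
 *	int group_slot = ret & 0x7;	// slot within the PTEG
 *	int secondary  = !!(ret & 0x8);	// set if the secondary bucket was used
 */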

static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock);

static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
{
	unsigned long slot_offset;
	unsigned long lpar_rc;
	int i;
	unsigned long dummy1, dummy2;

	/* pick a random slot to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {

		/* don't remove a bolted entry */
		lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset,
					   (0x1UL << 4), &dummy1, &dummy2);
		if (lpar_rc == H_SUCCESS)
			return i;

		/*
		 * The test for adjunct partition is performed before the
		 * ANDCOND test. H_RESOURCE may be returned, so we need to
		 * check for that as well.
		 */
		BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);

		slot_offset++;
		slot_offset &= 0x7;
	}

	return -1;
}

static void manual_hpte_clear_all(void)
{
	unsigned long size_bytes = 1UL << ppc64_pft_size;
	unsigned long hpte_count = size_bytes >> 4;
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];
	long lpar_rc;
	unsigned long i, j;

	/*
	 * Read in batches of 4, and invalidate only the valid entries that
	 * are not in the VRMA.  hpte_count will be a multiple of 4.
	 */
	for (i = 0; i < hpte_count; i += 4) {
		lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);
		if (lpar_rc != H_SUCCESS) {
			pr_info("Failed to read hash page table at %ld err %ld\n",
				i, lpar_rc);
			continue;
		}
		for (j = 0; j < 4; j++) {
			if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==
				HPTE_V_VRMA_MASK)
				continue;
			if (ptes[j].pteh & HPTE_V_VALID)
				plpar_pte_remove_raw(0, i + j, 0,
					&(ptes[j].pteh), &(ptes[j].ptel));
		}
	}
}

static int hcall_hpte_clear_all(void)
{
	int rc;

	do {
		rc = plpar_hcall_norets(H_CLEAR_HPT);
	} while (rc == H_CONTINUE);

	return rc;
}

static void pseries_hpte_clear_all(void)
{
	int rc;

	rc = hcall_hpte_clear_all();
	if (rc != H_SUCCESS)
		manual_hpte_clear_all();

#ifdef __LITTLE_ENDIAN__
	/*
	 * Reset exceptions to big endian.
	 *
	 * FIXME this is a hack for kexec, we need to reset the exception
	 * endian before starting the new kernel and this is a convenient place
	 * to do it.
	 *
	 * This is also called on boot when a fadump happens. In that case we
	 * must not change the exception endian mode.
	 */
	if (firmware_has_feature(FW_FEATURE_SET_MODE) && !is_fadump_active())
		pseries_big_endian_exceptions();
#endif
}

/*
 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
 * the low 3 bits of flags happen to line up, so no transform is needed.
 * We can probably optimize here and assume the high bits of newpp are
 * already zero.  For now I am paranoid.
 */
static long pSeries_lpar_hpte_updatepp(unsigned long slot,
				       unsigned long newpp,
				       unsigned long vpn,
				       int psize, int apsize,
				       int ssize, unsigned long inv_flags)
{
	unsigned long lpar_rc;
	unsigned long flags;
	unsigned long want_v;

	want_v = hpte_encode_avpn(vpn, psize, ssize);

	flags = (newpp & 7) | H_AVPN;
	if (mmu_has_feature(MMU_FTR_KERNEL_RO))
		/* Move pp0 into bit 8 (IBM 55) */
		flags |= (newpp & HPTE_R_PP0) >> 55;

	pr_devel(" update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
		 want_v, slot, flags, psize);

	lpar_rc = plpar_pte_protect(flags, slot, want_v);

	if (lpar_rc == H_NOT_FOUND) {
		pr_devel("not found !\n");
		return -1;
	}

	pr_devel("ok\n");

	BUG_ON(lpar_rc != H_SUCCESS);

	return 0;
}
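
/*
 * Illustrative note (our reading of the shift above, not quoted from PAPR):
 * HPTE_R_PP0 is the most-significant bit of the HPTE R word, so
 * "(newpp & HPTE_R_PP0) >> 55" moves it from bit 63 down to bit 8 of the
 * hcall flags - bit 55 in IBM big-endian numbering, which counts from the
 * MSB.  That is the "(IBM 55)" in the comment.
 */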

static long __pSeries_lpar_hpte_find(unsigned long want_v, unsigned long hpte_group)
{
	long lpar_rc;
	unsigned long i, j;
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];

	for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) {

		lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes);
		if (lpar_rc != H_SUCCESS) {
			pr_info("Failed to read hash page table at %ld err %ld\n",
				hpte_group, lpar_rc);
			continue;
		}

		for (j = 0; j < 4; j++) {
			if (HPTE_V_COMPARE(ptes[j].pteh, want_v) &&
			    (ptes[j].pteh & HPTE_V_VALID))
				return i + j;
		}
	}

	return -1;
}

static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize)
{
	long slot;
	unsigned long hash;
	unsigned long want_v;
	unsigned long hpte_group;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted entries are always in the primary group */
	hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot = __pSeries_lpar_hpte_find(want_v, hpte_group);
	if (slot < 0)
		return -1;
	return hpte_group + slot;
}

static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
					     unsigned long ea,
					     int psize, int ssize)
{
	unsigned long vpn;
	unsigned long lpar_rc, slot, vsid, flags;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	BUG_ON(slot == -1);

	flags = newpp & 7;
	if (mmu_has_feature(MMU_FTR_KERNEL_RO))
		/* Move pp0 into bit 8 (IBM 55) */
		flags |= (newpp & HPTE_R_PP0) >> 55;

	lpar_rc = plpar_pte_protect(flags, slot, 0);

	BUG_ON(lpar_rc != H_SUCCESS);
}

static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
					 int psize, int apsize,
					 int ssize, int local)
{
	unsigned long want_v;
	unsigned long lpar_rc;
	unsigned long dummy1, dummy2;

	pr_devel(" inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
		 slot, vpn, psize, local);

	want_v = hpte_encode_avpn(vpn, psize, ssize);
	lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2);
	if (lpar_rc == H_NOT_FOUND)
		return;

	BUG_ON(lpar_rc != H_SUCCESS);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * Limit iterations holding pSeries_lpar_tlbie_lock to 3. We also need
 * to make sure that we avoid bouncing the hypervisor tlbie lock.
 */
#define PPC64_HUGE_HPTE_BATCH 12

static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
					       unsigned long *vpn, int count,
					       int psize, int ssize)
{
	unsigned long param[PLPAR_HCALL9_BUFSIZE];
	int i = 0, pix = 0, rc;
	unsigned long flags = 0;
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	for (i = 0; i < count; i++) {

		if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
			pSeries_lpar_hpte_invalidate(slot[i], vpn[i], psize, 0,
						     ssize, 0);
		} else {
			param[pix] = HBR_REQUEST | HBR_AVPN | slot[i];
			param[pix+1] = hpte_encode_avpn(vpn[i], psize, ssize);
			pix += 2;
			if (pix == 8) {
				rc = plpar_hcall9(H_BULK_REMOVE, param,
					param[0], param[1], param[2],
					param[3], param[4], param[5],
					param[6], param[7]);
				BUG_ON(rc != H_SUCCESS);
				pix = 0;
			}
		}
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}
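
/*
 * Illustrative arithmetic: each H_BULK_REMOVE call above consumes up to 8
 * parameter words, i.e. four (control | slot, AVPN) pairs, so a full batch
 * of PPC64_HUGE_HPTE_BATCH (12) entries is flushed in 12 / 4 = 3 hcalls -
 * the "3 iterations" the comment above the #define refers to.
 */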

static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
					     unsigned long addr,
					     unsigned char *hpte_slot_array,
					     int psize, int ssize, int local)
{
	int i, index = 0;
	unsigned long s_addr = addr;
	unsigned int max_hpte_count, valid;
	unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long slot_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long shift, hidx, vpn = 0, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		slot_array[index] = slot;
		vpn_array[index] = vpn;
		if (index == PPC64_HUGE_HPTE_BATCH - 1) {
			/*
			 * Now do a bulk invalidate
			 */
			__pSeries_lpar_hugepage_invalidate(slot_array,
							   vpn_array,
							   PPC64_HUGE_HPTE_BATCH,
							   psize, ssize);
			index = 0;
		} else
			index++;
	}
	if (index)
		__pSeries_lpar_hugepage_invalidate(slot_array, vpn_array,
						   index, psize, ssize);
}
#else
static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
					     unsigned long addr,
					     unsigned char *hpte_slot_array,
					     int psize, int ssize, int local)
{
	WARN(1, "%s called without THP support\n", __func__);
}
#endif

static int pSeries_lpar_hpte_removebolted(unsigned long ea,
					  int psize, int ssize)
{
	unsigned long vpn;
	unsigned long slot, vsid;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		return -ENOENT;

	/*
	 * lpar doesn't use the passed actual page size
	 */
	pSeries_lpar_hpte_invalidate(slot, vpn, psize, 0, ssize, 0);
	return 0;
}
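
/*
 * Illustrative sketch: the slot recovery done in
 * pSeries_lpar_hugepage_invalidate() above is the standard hash-MMU pattern
 * for turning a stored hash index back into a global HPT slot:
 *
 *	hash = hpt_hash(vpn, shift, ssize);
 *	if (hidx & _PTEIDX_SECONDARY)
 *		hash = ~hash;				// secondary bucket
 *	slot  = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 *	slot += hidx & _PTEIDX_GROUP_IX;		// slot within group
 */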

/*
 * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
 * lock.
 */
static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long i, pix, rc;
	unsigned long flags = 0;
	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
	unsigned long param[PLPAR_HCALL9_BUFSIZE];
	unsigned long hash, index, shift, hidx, slot;
	real_pte_t pte;
	int psize, ssize;

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	psize = batch->psize;
	ssize = batch->ssize;
	pix = 0;
	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];
		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
				/*
				 * lpar doesn't use the passed actual page size
				 */
				pSeries_lpar_hpte_invalidate(slot, vpn, psize,
							     0, ssize, local);
			} else {
				param[pix] = HBR_REQUEST | HBR_AVPN | slot;
				param[pix+1] = hpte_encode_avpn(vpn, psize,
								ssize);
				pix += 2;
				if (pix == 8) {
					rc = plpar_hcall9(H_BULK_REMOVE, param,
						param[0], param[1], param[2],
						param[3], param[4], param[5],
						param[6], param[7]);
					BUG_ON(rc != H_SUCCESS);
					pix = 0;
				}
			}
		} pte_iterate_hashed_end();
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}

static int __init disable_bulk_remove(char *str)
{
	if (strcmp(str, "off") == 0 &&
	    firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
		pr_info("Disabling BULK_REMOVE firmware feature\n");
		powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE;
	}
	return 1;
}

__setup("bulk_remove=", disable_bulk_remove);
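
/*
 * Usage example: to fall back to invalidating HPTEs one at a time (for
 * instance when debugging a suspected H_BULK_REMOVE problem), boot with:
 *
 *	bulk_remove=off
 */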

#define HPT_RESIZE_TIMEOUT	10000 /* ms */

struct hpt_resize_state {
	unsigned long shift;
	int commit_rc;
};

static int pseries_lpar_resize_hpt_commit(void *data)
{
	struct hpt_resize_state *state = data;

	state->commit_rc = plpar_resize_hpt_commit(0, state->shift);
	if (state->commit_rc != H_SUCCESS)
		return -EIO;

	/* Hypervisor has transitioned the HTAB, update our globals */
	ppc64_pft_size = state->shift;
	htab_size_bytes = 1UL << ppc64_pft_size;
	htab_hash_mask = (htab_size_bytes >> 7) - 1;

	return 0;
}

/* Must be called in user context */
static int pseries_lpar_resize_hpt(unsigned long shift)
{
	struct hpt_resize_state state = {
		.shift = shift,
		.commit_rc = H_FUNCTION,
	};
	unsigned int delay, total_delay = 0;
	int rc;
	ktime_t t0, t1, t2;

	might_sleep();

	if (!firmware_has_feature(FW_FEATURE_HPT_RESIZE))
		return -ENODEV;

	pr_info("Attempting to resize HPT to shift %lu\n", shift);

	t0 = ktime_get();

	rc = plpar_resize_hpt_prepare(0, shift);
	while (H_IS_LONG_BUSY(rc)) {
		delay = get_longbusy_msecs(rc);
		total_delay += delay;
		if (total_delay > HPT_RESIZE_TIMEOUT) {
			/* prepare with shift==0 cancels an in-progress resize */
			rc = plpar_resize_hpt_prepare(0, 0);
			if (rc != H_SUCCESS)
				pr_warn("Unexpected error %d cancelling timed out HPT resize\n",
					rc);
			return -ETIMEDOUT;
		}
		msleep(delay);
		rc = plpar_resize_hpt_prepare(0, shift);
	}

	switch (rc) {
	case H_SUCCESS:
		/* Continue on */
		break;

	case H_PARAMETER:
		return -EINVAL;
	case H_RESOURCE:
		return -EPERM;
	default:
		pr_warn("Unexpected error %d from H_RESIZE_HPT_PREPARE\n", rc);
		return -EIO;
	}

	t1 = ktime_get();

	rc = stop_machine(pseries_lpar_resize_hpt_commit, &state, NULL);

	t2 = ktime_get();

	if (rc != 0) {
		switch (state.commit_rc) {
		case H_PTEG_FULL:
			pr_warn("Hash collision while resizing HPT\n");
			return -ENOSPC;

		default:
			pr_warn("Unexpected error %d from H_RESIZE_HPT_COMMIT\n",
				state.commit_rc);
			return -EIO;
		}
	}

	pr_info("HPT resize to shift %lu complete (%lld ms / %lld ms)\n",
		shift, (long long) ktime_ms_delta(t1, t0),
		(long long) ktime_ms_delta(t2, t1));

	return 0;
}
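
/*
 * Worked example (illustrative): an HPTE is 16 bytes and a PTEG holds 8 of
 * them, so a PTEG is 128 (1 << 7) bytes - that is where the ">> 7" in the
 * htab_hash_mask update in pseries_lpar_resize_hpt_commit() comes from.
 * Resizing to shift == 28, say, yields a 256MB HPT containing
 * (1UL << 28) >> 7 == 2^21 PTEGs.
 */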

static int pseries_lpar_register_process_table(unsigned long base,
			unsigned long page_size, unsigned long table_size)
{
	long rc;
	unsigned long flags = 0;

	if (table_size)
		flags |= PROC_TABLE_NEW;
	if (radix_enabled())
		flags |= PROC_TABLE_RADIX | PROC_TABLE_GTSE;
	else
		flags |= PROC_TABLE_HPT_SLB;
	for (;;) {
		rc = plpar_hcall_norets(H_REGISTER_PROC_TBL, flags, base,
					page_size, table_size);
		if (!H_IS_LONG_BUSY(rc))
			break;
		mdelay(get_longbusy_msecs(rc));
	}
	if (rc != H_SUCCESS) {
		pr_err("Failed to register process table (rc=%ld)\n", rc);
		BUG();
	}
	return rc;
}

void __init hpte_init_pseries(void)
{
	mmu_hash_ops.hpte_invalidate	 = pSeries_lpar_hpte_invalidate;
	mmu_hash_ops.hpte_updatepp	 = pSeries_lpar_hpte_updatepp;
	mmu_hash_ops.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp;
	mmu_hash_ops.hpte_insert	 = pSeries_lpar_hpte_insert;
	mmu_hash_ops.hpte_remove	 = pSeries_lpar_hpte_remove;
	mmu_hash_ops.hpte_removebolted	 = pSeries_lpar_hpte_removebolted;
	mmu_hash_ops.flush_hash_range	 = pSeries_lpar_flush_hash_range;
	mmu_hash_ops.hpte_clear_all	 = pseries_hpte_clear_all;
	mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
	register_process_table		 = pseries_lpar_register_process_table;

	if (firmware_has_feature(FW_FEATURE_HPT_RESIZE))
		mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt;
}

void radix_init_pseries(void)
{
	pr_info("Using radix MMU under hypervisor\n");
	register_process_table = pseries_lpar_register_process_table;
}

#ifdef CONFIG_PPC_SMLPAR
#define CMO_FREE_HINT_DEFAULT 1
static int cmo_free_hint_flag = CMO_FREE_HINT_DEFAULT;

static int __init cmo_free_hint(char *str)
{
	char *parm;
	parm = strstrip(str);

	if (strcasecmp(parm, "no") == 0 || strcasecmp(parm, "off") == 0) {
		pr_info("%s: CMO free page hinting is not active.\n", __func__);
		cmo_free_hint_flag = 0;
		return 1;
	}

	cmo_free_hint_flag = 1;
	pr_info("%s: CMO free page hinting is active.\n", __func__);

	if (strcasecmp(parm, "yes") == 0 || strcasecmp(parm, "on") == 0)
		return 1;

	return 0;
}

__setup("cmo_free_hint=", cmo_free_hint);
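
/*
 * Usage example: CMO free-page hinting defaults to on; it can be disabled
 * from the kernel command line via the parameter handled above:
 *
 *	cmo_free_hint=no
 */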

static void pSeries_set_page_state(struct page *page, int order,
				   unsigned long state)
{
	int i, j;
	unsigned long cmo_page_sz, addr;

	cmo_page_sz = cmo_get_page_size();
	addr = __pa((unsigned long)page_address(page));

	for (i = 0; i < (1 << order); i++, addr += PAGE_SIZE) {
		for (j = 0; j < PAGE_SIZE; j += cmo_page_sz)
			plpar_hcall_norets(H_PAGE_INIT, state, addr + j, 0);
	}
}

void arch_free_page(struct page *page, int order)
{
	if (radix_enabled())
		return;
	if (!cmo_free_hint_flag || !firmware_has_feature(FW_FEATURE_CMO))
		return;

	pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED);
}
EXPORT_SYMBOL(arch_free_page);

#endif /* CONFIG_PPC_SMLPAR */
#endif /* CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_TRACEPOINTS
#ifdef HAVE_JUMP_LABEL
struct static_key hcall_tracepoint_key = STATIC_KEY_INIT;

int hcall_tracepoint_regfunc(void)
{
	static_key_slow_inc(&hcall_tracepoint_key);
	return 0;
}

void hcall_tracepoint_unregfunc(void)
{
	static_key_slow_dec(&hcall_tracepoint_key);
}
#else
/*
 * We optimise our hcall path by placing hcall_tracepoint_refcount
 * directly in the TOC so we can check if the hcall tracepoints are
 * enabled via a single load.
 */

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
extern long hcall_tracepoint_refcount;

int hcall_tracepoint_regfunc(void)
{
	hcall_tracepoint_refcount++;
	return 0;
}

void hcall_tracepoint_unregfunc(void)
{
	hcall_tracepoint_refcount--;
}
#endif

/*
 * Since the tracing code might execute hcalls we need to guard against
 * recursion. One example of this are spinlocks calling H_YIELD on
 * shared processor partitions.
 */
static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);


void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
{
	unsigned long flags;
	unsigned int *depth;

	/*
	 * We cannot call tracepoints inside RCU idle regions which
	 * means we must not trace H_CEDE.
	 */
	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = this_cpu_ptr(&hcall_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	preempt_disable();
	trace_hcall_entry(opcode, args);
	(*depth)--;

out:
	local_irq_restore(flags);
}

void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf)
{
	unsigned long flags;
	unsigned int *depth;

	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = this_cpu_ptr(&hcall_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	trace_hcall_exit(opcode, retval, retbuf);
	preempt_enable();
	(*depth)--;

out:
	local_irq_restore(flags);
}
#endif

/**
 * h_get_mpp() - H_GET_MPP hcall returns info in 7 parms
 */
int h_get_mpp(struct hvcall_mpp_data *mpp_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	rc = plpar_hcall9(H_GET_MPP, retbuf);

	mpp_data->entitled_mem = retbuf[0];
	mpp_data->mapped_mem = retbuf[1];

	mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
	mpp_data->pool_num = retbuf[2] & 0xffff;

	mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
	mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
	mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffffUL;

	mpp_data->pool_size = retbuf[4];
	mpp_data->loan_request = retbuf[5];
	mpp_data->backing_mem = retbuf[6];

	return rc;
}
EXPORT_SYMBOL(h_get_mpp);

int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 };

	rc = plpar_hcall9(H_GET_MPP_X, retbuf);

	mpp_x_data->coalesced_bytes = retbuf[0];
	mpp_x_data->pool_coalesced_bytes = retbuf[1];
	mpp_x_data->pool_purr_cycles = retbuf[2];
	mpp_x_data->pool_spurr_cycles = retbuf[3];

	return rc;
}

static unsigned long vsid_unscramble(unsigned long vsid, int ssize)
{
	unsigned long protovsid;
	unsigned long va_bits = VA_BITS;
	unsigned long modinv, vsid_modulus;
	unsigned long max_mod_inv, tmp_modinv;

	if (!mmu_has_feature(MMU_FTR_68_BIT_VA))
		va_bits = 65;

	if (ssize == MMU_SEGSIZE_256M) {
		modinv = VSID_MULINV_256M;
		vsid_modulus = ((1UL << (va_bits - SID_SHIFT)) - 1);
	} else {
		modinv = VSID_MULINV_1T;
		vsid_modulus = ((1UL << (va_bits - SID_SHIFT_1T)) - 1);
	}

	/*
	 * vsid outside our range.
	 */
	if (vsid >= vsid_modulus)
		return 0;

	/*
	 * If modinv is the modular multiplicative inverse of (x % vsid_modulus)
	 * and vsid = (protovsid * x) % vsid_modulus, then we say:
	 *	protovsid = (vsid * modinv) % vsid_modulus
	 */

	/* Check if (vsid * modinv) overflows (63 bits) */
	max_mod_inv = 0x7fffffffffffffffull / vsid;
	if (modinv < max_mod_inv)
		return (vsid * modinv) % vsid_modulus;

	tmp_modinv = modinv / max_mod_inv;
	modinv %= max_mod_inv;

	protovsid = (((vsid * max_mod_inv) % vsid_modulus) * tmp_modinv) % vsid_modulus;
	protovsid = (protovsid + vsid * modinv) % vsid_modulus;

	return protovsid;
}
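
/*
 * Worked derivation (illustrative, our reading of the overflow avoidance
 * above): write modinv = q * max_mod_inv + r, with q = modinv / max_mod_inv
 * and r = modinv % max_mod_inv.  Then, modulo vsid_modulus:
 *
 *	vsid * modinv = (vsid * max_mod_inv) * q + vsid * r
 *
 * Each partial product fits in 63 bits: max_mod_inv is defined as
 * 0x7fffffffffffffff / vsid, so both vsid * max_mod_inv and vsid * r
 * (with r < max_mod_inv) stay below 2^63, and reducing the first term
 * modulo vsid_modulus before multiplying by q keeps it small as well.
 */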

static int __init reserve_vrma_context_id(void)
{
	unsigned long protovsid;

	/*
	 * Reserve context ids which map to reserved virtual addresses. For now
	 * we only reserve the context id which maps to the VRMA VSID. We ignore
	 * the addresses in "ibm,adjunct-virtual-addresses" because we don't
	 * enable adjunct support via the "ibm,client-architecture-support"
	 * interface.
	 */
	protovsid = vsid_unscramble(VRMA_VSID, MMU_SEGSIZE_1T);
	hash__reserve_context_id(protovsid >> ESID_BITS_1T);
	return 0;
}
machine_device_initcall(pseries, reserve_vrma_context_id);