/*
 * pSeries_lpar.c
 * Copyright (C) 2001 Todd Inglett, IBM Corporation
 *
 * pSeries LPAR support.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

/* Enables debugging of low-level hash table routines - careful! */
#undef DEBUG

#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/console.h>
#include <linux/export.h>
#include <linux/static_key.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/iommu.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/prom.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/trace.h>
#include <asm/firmware.h>
#include <asm/plpar_wrappers.h>
#include <asm/fadump.h>

#include "pseries.h"

/* Flag bits for H_BULK_REMOVE */
#define HBR_REQUEST	0x4000000000000000UL
#define HBR_RESPONSE	0x8000000000000000UL
#define HBR_END		0xc000000000000000UL
#define HBR_AVPN	0x0200000000000000UL
#define HBR_ANDCOND	0x0100000000000000UL


/* in hvCall.S */
EXPORT_SYMBOL(plpar_hcall);
EXPORT_SYMBOL(plpar_hcall9);
EXPORT_SYMBOL(plpar_hcall_norets);

void vpa_init(int cpu)
{
	int hwcpu = get_hard_smp_processor_id(cpu);
	unsigned long addr;
	long ret;
	struct paca_struct *pp;
	struct dtl_entry *dtl;

	/*
	 * The spec says it "may be problematic" if CPU x registers the VPA of
	 * CPU y. We should never do that, but wail if we ever do.
	 */
	WARN_ON(cpu != smp_processor_id());

	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		lppaca_of(cpu).vmxregs_in_use = 1;

	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		lppaca_of(cpu).ebb_regs_in_use = 1;

	addr = __pa(&lppaca_of(cpu));
	ret = register_vpa(hwcpu, addr);

	if (ret) {
		pr_err("WARNING: VPA registration for cpu %d (hw %d) of area "
		       "%lx failed with %ld\n", cpu, hwcpu, addr, ret);
		return;
	}
	/*
	 * PAPR says this feature is SLB-Buffer but firmware never
	 * reports that. All SPLPAR support SLB shadow buffer.
	 */
	addr = __pa(paca[cpu].slb_shadow_ptr);
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		ret = register_slb_shadow(hwcpu, addr);
		if (ret)
			pr_err("WARNING: SLB shadow buffer registration for "
			       "cpu %d (hw %d) of area %lx failed with %ld\n",
			       cpu, hwcpu, addr, ret);
	}

	/*
	 * Register dispatch trace log, if one has been allocated.
	 */
	pp = &paca[cpu];
	dtl = pp->dispatch_log;
	if (dtl) {
		pp->dtl_ridx = 0;
		pp->dtl_curr = dtl;
		lppaca_of(cpu).dtl_idx = 0;

		/* hypervisor reads buffer length from this field */
		dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
		ret = register_dtl(hwcpu, __pa(dtl));
		if (ret)
			pr_err("WARNING: DTL registration of cpu %d (hw %d) "
			       "failed with %ld\n", smp_processor_id(),
			       hwcpu, ret);
		/* 2 == DTL_LOG_PREEMPT: only log preemption dispatches */
		lppaca_of(cpu).dtl_enable_mask = 2;
	}
}
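
/*
 * Illustrative sketch, not part of the original file: register_vpa(),
 * register_slb_shadow() and register_dtl() above are thin inline wrappers
 * from <asm/plpar_wrappers.h> around a single hcall, roughly:
 *
 *	flags = subfunc << H_VPA_FUNC_SHIFT;	// which area to (de)register
 *	rc = plpar_hcall_norets(H_REGISTER_VPA, flags, hwcpu, __pa(area));
 *
 * The exact flag encoding is assumed here; see plpar_wrappers.h and
 * <asm/hvcall.h> for the authoritative definitions.
 */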

static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
				     unsigned long vpn, unsigned long pa,
				     unsigned long rflags, unsigned long vflags,
				     int psize, int apsize, int ssize)
{
	unsigned long lpar_rc;
	unsigned long flags;
	unsigned long slot;
	unsigned long hpte_v, hpte_r;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel("hpte_insert(group=%lx, vpn=%016lx, "
			 "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n",
			 hpte_group, vpn, pa, rflags, vflags, psize);

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);

	/* Now fill in the actual HPTE */
	/* Set CEC cookie to 0 */
	/* Zero page = 0 */
	/* I-cache Invalidate = 0 */
	/* I-cache synchronize = 0 */
	/* Exact = 0 */
	flags = 0;

	/* Make pHyp happy */
	if ((rflags & _PAGE_NO_CACHE) && !(rflags & _PAGE_WRITETHRU))
		hpte_r &= ~HPTE_R_M;

	if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
		flags |= H_COALESCE_CAND;

	lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot);
	if (unlikely(lpar_rc == H_PTEG_FULL)) {
		if (!(vflags & HPTE_V_BOLTED))
			pr_devel(" full\n");
		return -1;
	}

	/*
	 * Since we try to ioremap PHBs we don't own, the pte insert
	 * will fail. However we must catch the failure in hash_page
	 * or we will loop forever, so return -2 in this case.
	 */
	if (unlikely(lpar_rc != H_SUCCESS)) {
		if (!(vflags & HPTE_V_BOLTED))
			pr_devel(" lpar err %ld\n", lpar_rc);
		return -2;
	}
	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" -> slot: %lu\n", slot & 7);

	/*
	 * Because of iSeries, we have to pass down the secondary
	 * bucket bit here as well.
	 */
	return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3);
}
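
/*
 * Illustrative example, derived from the return statement above: a caller
 * going through ppc_md.hpte_insert can decode the result like this:
 *
 *	long rc = ppc_md.hpte_insert(group, vpn, pa, rflags, vflags,
 *				     psize, apsize, ssize);
 *	if (rc >= 0) {
 *		unsigned int group_idx = rc & 0x7;	// slot within the PTEG
 *		unsigned int secondary = !!(rc & 0x8);	// secondary bucket?
 *	}
 *
 * rc == -1 means the group was full (the caller may evict an entry via
 * hpte_remove() and retry); rc == -2 means a permanent hypervisor failure.
 */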

static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock);

static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
{
	unsigned long slot_offset;
	unsigned long lpar_rc;
	int i;
	unsigned long dummy1, dummy2;

	/* pick a random slot to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {

		/* don't remove a bolted entry */
		lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset,
					   (0x1UL << 4), &dummy1, &dummy2);
		if (lpar_rc == H_SUCCESS)
			return i;

		/*
		 * The test for adjunct partition is performed before the
		 * ANDCOND test. H_RESOURCE may be returned, so we need to
		 * check for that as well.
		 */
		BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);

		slot_offset++;
		slot_offset &= 0x7;
	}

	return -1;
}

static void pSeries_lpar_hptab_clear(void)
{
	unsigned long size_bytes = 1UL << ppc64_pft_size;
	unsigned long hpte_count = size_bytes >> 4;
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];
	long lpar_rc;
	unsigned long i, j;

	/*
	 * Read in batches of 4; invalidate only valid entries that are not
	 * in the VRMA. hpte_count will be a multiple of 4.
	 */
	for (i = 0; i < hpte_count; i += 4) {
		lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);
		if (lpar_rc != H_SUCCESS)
			continue;
		for (j = 0; j < 4; j++) {
			if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==
				HPTE_V_VRMA_MASK)
				continue;
			if (ptes[j].pteh & HPTE_V_VALID)
				plpar_pte_remove_raw(0, i + j, 0,
					&(ptes[j].pteh), &(ptes[j].ptel));
		}
	}

#ifdef __LITTLE_ENDIAN__
	/*
	 * Reset exceptions to big endian.
	 *
	 * FIXME this is a hack for kexec, we need to reset the exception
	 * endian before starting the new kernel and this is a convenient place
	 * to do it.
	 *
	 * This is also called on boot when a fadump happens. In that case we
	 * must not change the exception endian mode.
	 */
	if (firmware_has_feature(FW_FEATURE_SET_MODE) && !is_fadump_active()) {
		long rc;

		rc = pseries_big_endian_exceptions();
		/*
		 * At this point it is unlikely panic() will get anything
		 * out to the user, but at least this will stop us from
		 * continuing on further and creating an even more
		 * difficult to debug situation.
		 */
		if (rc)
			panic("Could not enable big endian exceptions");
	}
#endif
}
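
/*
 * Sketch, assuming the plpar_wrappers.h definitions: plpar_pte_read_4_raw()
 * used above issues a single H_READ with the H_READ_4 flag and copies the
 * four returned V/R doubleword pairs straight into ptes[], roughly:
 *
 *	rc = plpar_hcall9_raw(H_READ, retbuf, flags | H_READ_4, ptex);
 *	memcpy(ptes, retbuf, 8 * sizeof(unsigned long));
 *
 * hence the loop above steps the PTE index by 4.
 */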

/*
 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
 * the low 3 bits of flags happen to line up. So no transform is needed.
 * We can probably optimize here and assume the high bits of newpp are
 * already zero. For now I am paranoid.
 */
static long pSeries_lpar_hpte_updatepp(unsigned long slot,
				       unsigned long newpp,
				       unsigned long vpn,
				       int psize, int apsize,
				       int ssize, int local)
{
	unsigned long lpar_rc;
	unsigned long flags = (newpp & 7) | H_AVPN;
	unsigned long want_v;

	want_v = hpte_encode_avpn(vpn, psize, ssize);

	pr_devel("    update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
		 want_v, slot, flags, psize);

	lpar_rc = plpar_pte_protect(flags, slot, want_v);

	if (lpar_rc == H_NOT_FOUND) {
		pr_devel("not found !\n");
		return -1;
	}

	pr_devel("ok\n");

	BUG_ON(lpar_rc != H_SUCCESS);

	return 0;
}

static unsigned long pSeries_lpar_hpte_getword0(unsigned long slot)
{
	unsigned long dword0;
	unsigned long lpar_rc;
	unsigned long dummy_word1;
	unsigned long flags;

	/* Read 1 pte at a time */
	/* Do not need RPN to logical page translation */
	/* No cross CEC PFT access */
	flags = 0;

	lpar_rc = plpar_pte_read(flags, slot, &dword0, &dummy_word1);

	BUG_ON(lpar_rc != H_SUCCESS);

	return dword0;
}

static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize)
{
	unsigned long hash;
	unsigned long i;
	long slot;
	unsigned long want_v, hpte_v;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted entries are always in the primary group */
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hpte_v = pSeries_lpar_hpte_getword0(slot);

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* HPTE matches */
			return slot;
		++slot;
	}

	return -1;
}

static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
					     unsigned long ea,
					     int psize, int ssize)
{
	unsigned long vpn;
	unsigned long lpar_rc, slot, vsid, flags;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	BUG_ON(slot == -1);

	flags = newpp & 7;
	lpar_rc = plpar_pte_protect(flags, slot, 0);

	BUG_ON(lpar_rc != H_SUCCESS);
}

static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
					 int psize, int apsize,
					 int ssize, int local)
{
	unsigned long want_v;
	unsigned long lpar_rc;
	unsigned long dummy1, dummy2;

	pr_devel("    inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
		 slot, vpn, psize, local);

	want_v = hpte_encode_avpn(vpn, psize, ssize);
	lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2);
	if (lpar_rc == H_NOT_FOUND)
		return;

	BUG_ON(lpar_rc != H_SUCCESS);
}

/*
 * Limit iterations holding pSeries_lpar_tlbie_lock to 3. We also need
 * to make sure that we avoid bouncing the hypervisor tlbie lock.
 */
#define PPC64_HUGE_HPTE_BATCH 12
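
/*
 * Illustrative arithmetic, derived from the code below: one H_BULK_REMOVE
 * carries 8 parameters, i.e. 4 (control, AVPN) pairs, so a batch of
 * PPC64_HUGE_HPTE_BATCH == 12 HPTEs costs at most 12 / 4 == 3 hcalls per
 * hold of pSeries_lpar_tlbie_lock.
 */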

static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
					       unsigned long *vpn, int count,
					       int psize, int ssize)
{
	unsigned long param[8];
	int i = 0, pix = 0, rc;
	unsigned long flags = 0;
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	for (i = 0; i < count; i++) {

		if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
			pSeries_lpar_hpte_invalidate(slot[i], vpn[i], psize, 0,
						     ssize, 0);
		} else {
			param[pix] = HBR_REQUEST | HBR_AVPN | slot[i];
			param[pix+1] = hpte_encode_avpn(vpn[i], psize, ssize);
			pix += 2;
			if (pix == 8) {
				rc = plpar_hcall9(H_BULK_REMOVE, param,
						  param[0], param[1], param[2],
						  param[3], param[4], param[5],
						  param[6], param[7]);
				BUG_ON(rc != H_SUCCESS);
				pix = 0;
			}
		}
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}

static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
					     unsigned long addr,
					     unsigned char *hpte_slot_array,
					     int psize, int ssize)
{
	int i, index = 0;
	unsigned long s_addr = addr;
	unsigned int max_hpte_count, valid;
	unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long slot_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long shift, hidx, vpn = 0, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		slot_array[index] = slot;
		vpn_array[index] = vpn;
		if (index == PPC64_HUGE_HPTE_BATCH - 1) {
			/*
			 * Now do a bulk invalidate.
			 */
			__pSeries_lpar_hugepage_invalidate(slot_array,
							   vpn_array,
							   PPC64_HUGE_HPTE_BATCH,
							   psize, ssize);
			index = 0;
		} else
			index++;
	}
	if (index)
		__pSeries_lpar_hugepage_invalidate(slot_array, vpn_array,
						   index, psize, ssize);
}

static void pSeries_lpar_hpte_removebolted(unsigned long ea,
					   int psize, int ssize)
{
	unsigned long vpn;
	unsigned long slot, vsid;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	BUG_ON(slot == -1);
	/*
	 * lpar doesn't use the passed actual page size
	 */
	pSeries_lpar_hpte_invalidate(slot, vpn, psize, 0, ssize, 0);
}
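
/*
 * Example packing for H_BULK_REMOVE, derived from the loops above and below:
 *
 *	param[2*k]   = HBR_REQUEST | HBR_AVPN | slot;	// target entry k
 *	param[2*k+1] = hpte_encode_avpn(vpn, psize, ssize);
 *	...
 *	param[n]     = HBR_END;		// terminates a batch of < 4 pairs
 *
 * Per PAPR, the hypervisor writes per-entry status back into the control
 * doublewords (flagged with HBR_RESPONSE); the callers here rely on the
 * overall hcall return code instead.
 */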

/*
 * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
 * lock.
 */
static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long i, pix, rc;
	unsigned long flags = 0;
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
	unsigned long param[9];
	unsigned long hash, index, shift, hidx, slot;
	real_pte_t pte;
	int psize, ssize;

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	psize = batch->psize;
	ssize = batch->ssize;
	pix = 0;
	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];
		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
				/*
				 * lpar doesn't use the passed actual page size
				 */
				pSeries_lpar_hpte_invalidate(slot, vpn, psize,
							     0, ssize, local);
			} else {
				param[pix] = HBR_REQUEST | HBR_AVPN | slot;
				param[pix+1] = hpte_encode_avpn(vpn, psize,
								ssize);
				pix += 2;
				if (pix == 8) {
					rc = plpar_hcall9(H_BULK_REMOVE, param,
						param[0], param[1], param[2],
						param[3], param[4], param[5],
						param[6], param[7]);
					BUG_ON(rc != H_SUCCESS);
					pix = 0;
				}
			}
		} pte_iterate_hashed_end();
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}

static int __init disable_bulk_remove(char *str)
{
	if (strcmp(str, "off") == 0 &&
	    firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
		printk(KERN_INFO "Disabling BULK_REMOVE firmware feature\n");
		powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE;
	}
	return 1;
}

__setup("bulk_remove=", disable_bulk_remove);

void __init hpte_init_lpar(void)
{
	ppc_md.hpte_invalidate = pSeries_lpar_hpte_invalidate;
	ppc_md.hpte_updatepp = pSeries_lpar_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp;
	ppc_md.hpte_insert = pSeries_lpar_hpte_insert;
	ppc_md.hpte_remove = pSeries_lpar_hpte_remove;
	ppc_md.hpte_removebolted = pSeries_lpar_hpte_removebolted;
	ppc_md.flush_hash_range = pSeries_lpar_flush_hash_range;
	ppc_md.hpte_clear_all = pSeries_lpar_hptab_clear;
	ppc_md.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
}

#ifdef CONFIG_PPC_SMLPAR
#define CMO_FREE_HINT_DEFAULT 1
static int cmo_free_hint_flag = CMO_FREE_HINT_DEFAULT;

static int __init cmo_free_hint(char *str)
{
	char *parm;
	parm = strstrip(str);

	if (strcasecmp(parm, "no") == 0 || strcasecmp(parm, "off") == 0) {
		printk(KERN_INFO "cmo_free_hint: CMO free page hinting is not active.\n");
		cmo_free_hint_flag = 0;
		return 1;
	}

	cmo_free_hint_flag = 1;
	printk(KERN_INFO "cmo_free_hint: CMO free page hinting is active.\n");

	if (strcasecmp(parm, "yes") == 0 || strcasecmp(parm, "on") == 0)
		return 1;

	return 0;
}

__setup("cmo_free_hint=", cmo_free_hint);
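
/*
 * Usage note, derived from the __setup() strings above: both handlers parse
 * kernel command line options, e.g.
 *
 *	bulk_remove=off		# one H_REMOVE per HPTE instead of H_BULK_REMOVE
 *	cmo_free_hint=no	# don't mark freed pages unused via H_PAGE_INIT
 */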

static void pSeries_set_page_state(struct page *page, int order,
				   unsigned long state)
{
	int i, j;
	unsigned long cmo_page_sz, addr;

	cmo_page_sz = cmo_get_page_size();
	addr = __pa((unsigned long)page_address(page));

	for (i = 0; i < (1 << order); i++, addr += PAGE_SIZE) {
		for (j = 0; j < PAGE_SIZE; j += cmo_page_sz)
			plpar_hcall_norets(H_PAGE_INIT, state, addr + j, 0);
	}
}

void arch_free_page(struct page *page, int order)
{
	if (!cmo_free_hint_flag || !firmware_has_feature(FW_FEATURE_CMO))
		return;

	pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED);
}
EXPORT_SYMBOL(arch_free_page);

#endif

#ifdef CONFIG_TRACEPOINTS
#ifdef HAVE_JUMP_LABEL
struct static_key hcall_tracepoint_key = STATIC_KEY_INIT;

void hcall_tracepoint_regfunc(void)
{
	static_key_slow_inc(&hcall_tracepoint_key);
}

void hcall_tracepoint_unregfunc(void)
{
	static_key_slow_dec(&hcall_tracepoint_key);
}
#else
/*
 * We optimise our hcall path by placing hcall_tracepoint_refcount
 * directly in the TOC so we can check if the hcall tracepoints are
 * enabled via a single load.
 */

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
extern long hcall_tracepoint_refcount;

void hcall_tracepoint_regfunc(void)
{
	hcall_tracepoint_refcount++;
}

void hcall_tracepoint_unregfunc(void)
{
	hcall_tracepoint_refcount--;
}
#endif

/*
 * Since the tracing code might execute hcalls we need to guard against
 * recursion. One example of this are spinlocks calling H_YIELD on
 * shared processor partitions.
 */
static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);
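
/*
 * Note, derived from the hooks below: entry and exit both bump
 * hcall_trace_depth around the tracepoint call, so any hcall issued *by*
 * the tracing code sees a non-zero depth and bails out instead of
 * recursing.
 */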

void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
{
	unsigned long flags;
	unsigned int *depth;

	/*
	 * We cannot call tracepoints inside RCU idle regions which
	 * means we must not trace H_CEDE.
	 */
	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = &__get_cpu_var(hcall_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	preempt_disable();
	trace_hcall_entry(opcode, args);
	(*depth)--;

out:
	local_irq_restore(flags);
}

void __trace_hcall_exit(long opcode, unsigned long retval,
			unsigned long *retbuf)
{
	unsigned long flags;
	unsigned int *depth;

	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = &__get_cpu_var(hcall_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	trace_hcall_exit(opcode, retval, retbuf);
	preempt_enable();
	(*depth)--;

out:
	local_irq_restore(flags);
}
#endif

/**
 * h_get_mpp() - H_GET_MPP hcall returns info in 7 parms
 */
int h_get_mpp(struct hvcall_mpp_data *mpp_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	rc = plpar_hcall9(H_GET_MPP, retbuf);

	mpp_data->entitled_mem = retbuf[0];
	mpp_data->mapped_mem = retbuf[1];

	mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
	mpp_data->pool_num = retbuf[2] & 0xffff;

	mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
	mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
	mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffffUL;

	mpp_data->pool_size = retbuf[4];
	mpp_data->loan_request = retbuf[5];
	mpp_data->backing_mem = retbuf[6];

	return rc;
}
EXPORT_SYMBOL(h_get_mpp);

int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 };

	rc = plpar_hcall9(H_GET_MPP_X, retbuf);

	mpp_x_data->coalesced_bytes = retbuf[0];
	mpp_x_data->pool_coalesced_bytes = retbuf[1];
	mpp_x_data->pool_purr_cycles = retbuf[2];
	mpp_x_data->pool_spurr_cycles = retbuf[3];

	return rc;
}
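
/*
 * Illustrative use of h_get_mpp(), as a consumer such as the lparcfg code
 * might read the Memory Partitioning and Prioritization data:
 *
 *	struct hvcall_mpp_data mpp;
 *
 *	if (h_get_mpp(&mpp) == H_SUCCESS)
 *		pr_info("entitled=%lu mapped=%lu\n",
 *			mpp.entitled_mem, mpp.mapped_mem);
 */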