1 /*
2  * pSeries_lpar.c
3  * Copyright (C) 2001 Todd Inglett, IBM Corporation
4  *
5  * pSeries LPAR support.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software
19  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
20  */
21 
22 /* Enables debugging of low-level hash table routines - careful! */
23 #undef DEBUG
24 
25 #include <linux/kernel.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/console.h>
28 #include <linux/export.h>
29 #include <linux/jump_label.h>
30 #include <linux/delay.h>
31 #include <linux/stop_machine.h>
32 #include <asm/processor.h>
33 #include <asm/mmu.h>
34 #include <asm/page.h>
35 #include <asm/pgtable.h>
36 #include <asm/machdep.h>
37 #include <asm/mmu_context.h>
38 #include <asm/iommu.h>
39 #include <asm/tlbflush.h>
40 #include <asm/tlb.h>
41 #include <asm/prom.h>
42 #include <asm/cputable.h>
43 #include <asm/udbg.h>
44 #include <asm/smp.h>
45 #include <asm/trace.h>
46 #include <asm/firmware.h>
47 #include <asm/plpar_wrappers.h>
48 #include <asm/kexec.h>
49 #include <asm/fadump.h>
50 #include <asm/asm-prototypes.h>
51 
52 #include "pseries.h"
53 
54 /* Flag bits for H_BULK_REMOVE */
55 #define HBR_REQUEST	0x4000000000000000UL
56 #define HBR_RESPONSE	0x8000000000000000UL
57 #define HBR_END		0xc000000000000000UL
58 #define HBR_AVPN	0x0200000000000000UL
59 #define HBR_ANDCOND	0x0100000000000000UL
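/*
 * H_BULK_REMOVE takes a list of two-longword tuples: a control word
 * carrying HBR_REQUEST, per-entry flags such as HBR_AVPN and the HPTE
 * index, followed by the AVPN to match.  A partially filled list is
 * terminated with HBR_END; see the bulk invalidate paths below.
 */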
60 
61 
62 /* in hvCall.S */
63 EXPORT_SYMBOL(plpar_hcall);
64 EXPORT_SYMBOL(plpar_hcall9);
65 EXPORT_SYMBOL(plpar_hcall_norets);
66 
67 void vpa_init(int cpu)
68 {
69 	int hwcpu = get_hard_smp_processor_id(cpu);
70 	unsigned long addr;
71 	long ret;
72 	struct paca_struct *pp;
73 	struct dtl_entry *dtl;
74 
75 	/*
76 	 * The spec says it "may be problematic" if CPU x registers the VPA of
77 	 * CPU y. We should never do that, but wail if we ever do.
78 	 */
79 	WARN_ON(cpu != smp_processor_id());
80 
81 	if (cpu_has_feature(CPU_FTR_ALTIVEC))
82 		lppaca_of(cpu).vmxregs_in_use = 1;
83 
84 	if (cpu_has_feature(CPU_FTR_ARCH_207S))
85 		lppaca_of(cpu).ebb_regs_in_use = 1;
86 
87 	addr = __pa(&lppaca_of(cpu));
88 	ret = register_vpa(hwcpu, addr);
89 
90 	if (ret) {
91 		pr_err("WARNING: VPA registration for cpu %d (hw %d) of area "
92 		       "%lx failed with %ld\n", cpu, hwcpu, addr, ret);
93 		return;
94 	}
95 
96 #ifdef CONFIG_PPC_BOOK3S_64
97 	/*
	 * PAPR calls this feature SLB-Buffer, but firmware never reports
	 * it.  All SPLPAR partitions support the SLB shadow buffer.
100 	 */
101 	if (!radix_enabled() && firmware_has_feature(FW_FEATURE_SPLPAR)) {
102 		addr = __pa(paca_ptrs[cpu]->slb_shadow_ptr);
103 		ret = register_slb_shadow(hwcpu, addr);
104 		if (ret)
105 			pr_err("WARNING: SLB shadow buffer registration for "
106 			       "cpu %d (hw %d) of area %lx failed with %ld\n",
107 			       cpu, hwcpu, addr, ret);
108 	}
109 #endif /* CONFIG_PPC_BOOK3S_64 */
110 
111 	/*
112 	 * Register dispatch trace log, if one has been allocated.
113 	 */
114 	pp = paca_ptrs[cpu];
115 	dtl = pp->dispatch_log;
116 	if (dtl) {
117 		pp->dtl_ridx = 0;
118 		pp->dtl_curr = dtl;
119 		lppaca_of(cpu).dtl_idx = 0;
120 
121 		/* hypervisor reads buffer length from this field */
122 		dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
123 		ret = register_dtl(hwcpu, __pa(dtl));
124 		if (ret)
			pr_err("WARNING: DTL registration of cpu %d (hw %d) "
			       "failed with %ld\n", cpu, hwcpu, ret);
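		/* 2 enables logging of preempt/dispatch events only */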
128 		lppaca_of(cpu).dtl_enable_mask = 2;
129 	}
130 }
131 
132 #ifdef CONFIG_PPC_BOOK3S_64
133 
134 static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
135 				     unsigned long vpn, unsigned long pa,
136 				     unsigned long rflags, unsigned long vflags,
137 				     int psize, int apsize, int ssize)
138 {
139 	unsigned long lpar_rc;
140 	unsigned long flags;
141 	unsigned long slot;
142 	unsigned long hpte_v, hpte_r;
143 
144 	if (!(vflags & HPTE_V_BOLTED))
145 		pr_devel("hpte_insert(group=%lx, vpn=%016lx, "
146 			 "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n",
147 			 hpte_group, vpn,  pa, rflags, vflags, psize);
148 
149 	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
150 	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
151 
152 	if (!(vflags & HPTE_V_BOLTED))
153 		pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
154 
155 	/* Now fill in the actual HPTE */
156 	/* Set CEC cookie to 0         */
157 	/* Zero page = 0               */
158 	/* I-cache Invalidate = 0      */
159 	/* I-cache synchronize = 0     */
160 	/* Exact = 0                   */
161 	flags = 0;
162 
163 	if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
164 		flags |= H_COALESCE_CAND;
165 
166 	lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot);
167 	if (unlikely(lpar_rc == H_PTEG_FULL)) {
168 		if (!(vflags & HPTE_V_BOLTED))
169 			pr_devel(" full\n");
170 		return -1;
171 	}
172 
173 	/*
174 	 * Since we try and ioremap PHBs we don't own, the pte insert
175 	 * will fail. However we must catch the failure in hash_page
176 	 * or we will loop forever, so return -2 in this case.
177 	 */
178 	if (unlikely(lpar_rc != H_SUCCESS)) {
179 		if (!(vflags & HPTE_V_BOLTED))
180 			pr_devel(" lpar err %ld\n", lpar_rc);
181 		return -2;
182 	}
183 	if (!(vflags & HPTE_V_BOLTED))
184 		pr_devel(" -> slot: %lu\n", slot & 7);
185 
	/* Because of iSeries, we have to pass down the secondary
	 * bucket bit here as well: the low three bits of the return
	 * value are the slot within the group, bit 3 the secondary hash.
	 */
189 	return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3);
190 }
191 
192 static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock);
193 
194 static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
195 {
196 	unsigned long slot_offset;
197 	unsigned long lpar_rc;
198 	int i;
199 	unsigned long dummy1, dummy2;
200 
201 	/* pick a random slot to start at */
202 	slot_offset = mftb() & 0x7;
203 
204 	for (i = 0; i < HPTES_PER_GROUP; i++) {
205 
		/*
		 * Don't remove a bolted entry: with H_ANDCOND the HPTE is
		 * removed only if ANDing it with the passed value yields
		 * zero, and (0x1UL << 4) is HPTE_V_BOLTED.
		 */
		lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset,
					   (0x1UL << 4), &dummy1, &dummy2);
209 		if (lpar_rc == H_SUCCESS)
210 			return i;
211 
212 		/*
213 		 * The test for adjunct partition is performed before the
214 		 * ANDCOND test.  H_RESOURCE may be returned, so we need to
215 		 * check for that as well.
216 		 */
217 		BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);
218 
219 		slot_offset++;
220 		slot_offset &= 0x7;
221 	}
222 
223 	return -1;
224 }
225 
226 static void manual_hpte_clear_all(void)
227 {
228 	unsigned long size_bytes = 1UL << ppc64_pft_size;
	unsigned long hpte_count = size_bytes >> 4;	/* 16 bytes per HPTE */
230 	struct {
231 		unsigned long pteh;
232 		unsigned long ptel;
233 	} ptes[4];
234 	long lpar_rc;
235 	unsigned long i, j;
236 
	/*
	 * Read in batches of 4; invalidate only valid entries that are
	 * not in the VRMA.  hpte_count will be a multiple of 4.
	 */
241 	for (i = 0; i < hpte_count; i += 4) {
242 		lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);
243 		if (lpar_rc != H_SUCCESS)
244 			continue;
		for (j = 0; j < 4; j++) {
246 			if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==
247 				HPTE_V_VRMA_MASK)
248 				continue;
249 			if (ptes[j].pteh & HPTE_V_VALID)
250 				plpar_pte_remove_raw(0, i + j, 0,
251 					&(ptes[j].pteh), &(ptes[j].ptel));
252 		}
253 	}
254 }
255 
256 static int hcall_hpte_clear_all(void)
257 {
258 	int rc;
259 
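	/*
	 * H_CLEAR_HPT zeroes the entire hash table; it returns
	 * H_CONTINUE when it needs to be called again to finish.
	 */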
260 	do {
261 		rc = plpar_hcall_norets(H_CLEAR_HPT);
262 	} while (rc == H_CONTINUE);
263 
264 	return rc;
265 }
266 
267 static void pseries_hpte_clear_all(void)
268 {
269 	int rc;
270 
271 	rc = hcall_hpte_clear_all();
272 	if (rc != H_SUCCESS)
273 		manual_hpte_clear_all();
274 
275 #ifdef __LITTLE_ENDIAN__
276 	/*
277 	 * Reset exceptions to big endian.
278 	 *
279 	 * FIXME this is a hack for kexec, we need to reset the exception
280 	 * endian before starting the new kernel and this is a convenient place
281 	 * to do it.
282 	 *
283 	 * This is also called on boot when a fadump happens. In that case we
284 	 * must not change the exception endian mode.
285 	 */
286 	if (firmware_has_feature(FW_FEATURE_SET_MODE) && !is_fadump_active())
287 		pseries_big_endian_exceptions();
288 #endif
289 }
290 
291 /*
292  * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
293  * the low 3 bits of flags happen to line up.  So no transform is needed.
294  * We can probably optimize here and assume the high bits of newpp are
295  * already zero.  For now I am paranoid.
296  */
297 static long pSeries_lpar_hpte_updatepp(unsigned long slot,
298 				       unsigned long newpp,
299 				       unsigned long vpn,
300 				       int psize, int apsize,
301 				       int ssize, unsigned long inv_flags)
302 {
303 	unsigned long lpar_rc;
304 	unsigned long flags;
305 	unsigned long want_v;
306 
307 	want_v = hpte_encode_avpn(vpn, psize, ssize);
308 
309 	flags = (newpp & 7) | H_AVPN;
310 	if (mmu_has_feature(MMU_FTR_KERNEL_RO))
311 		/* Move pp0 into bit 8 (IBM 55) */
312 		flags |= (newpp & HPTE_R_PP0) >> 55;
313 
314 	pr_devel("    update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
315 		 want_v, slot, flags, psize);
316 
317 	lpar_rc = plpar_pte_protect(flags, slot, want_v);
318 
319 	if (lpar_rc == H_NOT_FOUND) {
320 		pr_devel("not found !\n");
321 		return -1;
322 	}
323 
324 	pr_devel("ok\n");
325 
326 	BUG_ON(lpar_rc != H_SUCCESS);
327 
328 	return 0;
329 }
330 
331 static long __pSeries_lpar_hpte_find(unsigned long want_v, unsigned long hpte_group)
332 {
333 	long lpar_rc;
334 	unsigned long i, j;
335 	struct {
336 		unsigned long pteh;
337 		unsigned long ptel;
338 	} ptes[4];
339 
340 	for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) {
341 
342 		lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes);
343 		if (lpar_rc != H_SUCCESS)
344 			continue;
345 
346 		for (j = 0; j < 4; j++) {
347 			if (HPTE_V_COMPARE(ptes[j].pteh, want_v) &&
348 			    (ptes[j].pteh & HPTE_V_VALID))
349 				return i + j;
350 		}
351 	}
352 
353 	return -1;
354 }
355 
356 static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize)
357 {
358 	long slot;
359 	unsigned long hash;
360 	unsigned long want_v;
361 	unsigned long hpte_group;
362 
363 	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
364 	want_v = hpte_encode_avpn(vpn, psize, ssize);
365 
366 	/* Bolted entries are always in the primary group */
367 	hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
368 	slot = __pSeries_lpar_hpte_find(want_v, hpte_group);
369 	if (slot < 0)
370 		return -1;
371 	return hpte_group + slot;
372 }
373 
374 static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
375 					     unsigned long ea,
376 					     int psize, int ssize)
377 {
378 	unsigned long vpn;
379 	unsigned long lpar_rc, slot, vsid, flags;
380 
381 	vsid = get_kernel_vsid(ea, ssize);
382 	vpn = hpt_vpn(ea, vsid, ssize);
383 
384 	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
385 	BUG_ON(slot == -1);
386 
387 	flags = newpp & 7;
388 	if (mmu_has_feature(MMU_FTR_KERNEL_RO))
389 		/* Move pp0 into bit 8 (IBM 55) */
390 		flags |= (newpp & HPTE_R_PP0) >> 55;
391 
392 	lpar_rc = plpar_pte_protect(flags, slot, 0);
393 
394 	BUG_ON(lpar_rc != H_SUCCESS);
395 }
396 
397 static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
398 					 int psize, int apsize,
399 					 int ssize, int local)
400 {
401 	unsigned long want_v;
402 	unsigned long lpar_rc;
403 	unsigned long dummy1, dummy2;
404 
405 	pr_devel("    inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
406 		 slot, vpn, psize, local);
407 
408 	want_v = hpte_encode_avpn(vpn, psize, ssize);
409 	lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2);
410 	if (lpar_rc == H_NOT_FOUND)
411 		return;
412 
413 	BUG_ON(lpar_rc != H_SUCCESS);
414 }
415 
416 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * Batch at most 12 HPTEs per hold of pSeries_lpar_tlbie_lock: at four
 * (control, AVPN) tuples per H_BULK_REMOVE that is three hcalls per
 * lock hold, which avoids bouncing the hypervisor tlbie lock.
 */
421 #define PPC64_HUGE_HPTE_BATCH 12
422 
423 static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
424 					     unsigned long *vpn, int count,
425 					     int psize, int ssize)
426 {
427 	unsigned long param[PLPAR_HCALL9_BUFSIZE];
428 	int i = 0, pix = 0, rc;
429 	unsigned long flags = 0;
430 	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
431 
432 	if (lock_tlbie)
433 		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);
434 
435 	for (i = 0; i < count; i++) {
436 
437 		if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
438 			pSeries_lpar_hpte_invalidate(slot[i], vpn[i], psize, 0,
439 						     ssize, 0);
440 		} else {
441 			param[pix] = HBR_REQUEST | HBR_AVPN | slot[i];
442 			param[pix+1] = hpte_encode_avpn(vpn[i], psize, ssize);
443 			pix += 2;
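			/*
			 * H_BULK_REMOVE takes at most four tuples (eight
			 * parameter words) per call, so flush a full batch.
			 */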
444 			if (pix == 8) {
445 				rc = plpar_hcall9(H_BULK_REMOVE, param,
446 						  param[0], param[1], param[2],
447 						  param[3], param[4], param[5],
448 						  param[6], param[7]);
449 				BUG_ON(rc != H_SUCCESS);
450 				pix = 0;
451 			}
452 		}
453 	}
454 	if (pix) {
455 		param[pix] = HBR_END;
456 		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
457 				  param[2], param[3], param[4], param[5],
458 				  param[6], param[7]);
459 		BUG_ON(rc != H_SUCCESS);
460 	}
461 
462 	if (lock_tlbie)
463 		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
464 }
465 
466 static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
467 					     unsigned long addr,
468 					     unsigned char *hpte_slot_array,
469 					     int psize, int ssize, int local)
470 {
471 	int i, index = 0;
472 	unsigned long s_addr = addr;
473 	unsigned int max_hpte_count, valid;
474 	unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH];
475 	unsigned long slot_array[PPC64_HUGE_HPTE_BATCH];
476 	unsigned long shift, hidx, vpn = 0, hash, slot;
477 
478 	shift = mmu_psize_defs[psize].shift;
479 	max_hpte_count = 1U << (PMD_SHIFT - shift);
480 
481 	for (i = 0; i < max_hpte_count; i++) {
482 		valid = hpte_valid(hpte_slot_array, i);
483 		if (!valid)
484 			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);
486 
487 		/* get the vpn */
488 		addr = s_addr + (i * (1ul << shift));
489 		vpn = hpt_vpn(addr, vsid, ssize);
490 		hash = hpt_hash(vpn, shift, ssize);
491 		if (hidx & _PTEIDX_SECONDARY)
492 			hash = ~hash;
493 
494 		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
495 		slot += hidx & _PTEIDX_GROUP_IX;
496 
497 		slot_array[index] = slot;
498 		vpn_array[index] = vpn;
499 		if (index == PPC64_HUGE_HPTE_BATCH - 1) {
500 			/*
			 * Now do a bulk invalidate
502 			 */
503 			__pSeries_lpar_hugepage_invalidate(slot_array,
504 							   vpn_array,
505 							   PPC64_HUGE_HPTE_BATCH,
506 							   psize, ssize);
507 			index = 0;
508 		} else
509 			index++;
510 	}
511 	if (index)
512 		__pSeries_lpar_hugepage_invalidate(slot_array, vpn_array,
513 						   index, psize, ssize);
514 }
515 #else
516 static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
517 					     unsigned long addr,
518 					     unsigned char *hpte_slot_array,
519 					     int psize, int ssize, int local)
520 {
521 	WARN(1, "%s called without THP support\n", __func__);
522 }
523 #endif
524 
525 static int pSeries_lpar_hpte_removebolted(unsigned long ea,
526 					  int psize, int ssize)
527 {
528 	unsigned long vpn;
529 	unsigned long slot, vsid;
530 
531 	vsid = get_kernel_vsid(ea, ssize);
532 	vpn = hpt_vpn(ea, vsid, ssize);
533 
534 	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
535 	if (slot == -1)
536 		return -ENOENT;
537 
538 	/*
539 	 * lpar doesn't use the passed actual page size
540 	 */
541 	pSeries_lpar_hpte_invalidate(slot, vpn, psize, 0, ssize, 0);
542 	return 0;
543 }
544 
545 /*
546  * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
547  * lock.
548  */
549 static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
550 {
551 	unsigned long vpn;
552 	unsigned long i, pix, rc;
553 	unsigned long flags = 0;
554 	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
555 	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
556 	unsigned long param[PLPAR_HCALL9_BUFSIZE];
557 	unsigned long hash, index, shift, hidx, slot;
558 	real_pte_t pte;
559 	int psize, ssize;
560 
561 	if (lock_tlbie)
562 		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);
563 
564 	psize = batch->psize;
565 	ssize = batch->ssize;
566 	pix = 0;
567 	for (i = 0; i < number; i++) {
568 		vpn = batch->vpn[i];
569 		pte = batch->pte[i];
570 		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
571 			hash = hpt_hash(vpn, shift, ssize);
572 			hidx = __rpte_to_hidx(pte, index);
573 			if (hidx & _PTEIDX_SECONDARY)
574 				hash = ~hash;
575 			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
576 			slot += hidx & _PTEIDX_GROUP_IX;
577 			if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
578 				/*
579 				 * lpar doesn't use the passed actual page size
580 				 */
581 				pSeries_lpar_hpte_invalidate(slot, vpn, psize,
582 							     0, ssize, local);
583 			} else {
584 				param[pix] = HBR_REQUEST | HBR_AVPN | slot;
585 				param[pix+1] = hpte_encode_avpn(vpn, psize,
586 								ssize);
587 				pix += 2;
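				/* four tuples queued: the hcall is full */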
588 				if (pix == 8) {
589 					rc = plpar_hcall9(H_BULK_REMOVE, param,
590 						param[0], param[1], param[2],
591 						param[3], param[4], param[5],
592 						param[6], param[7]);
593 					BUG_ON(rc != H_SUCCESS);
594 					pix = 0;
595 				}
596 			}
597 		} pte_iterate_hashed_end();
598 	}
599 	if (pix) {
600 		param[pix] = HBR_END;
601 		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
602 				  param[2], param[3], param[4], param[5],
603 				  param[6], param[7]);
604 		BUG_ON(rc != H_SUCCESS);
605 	}
606 
607 	if (lock_tlbie)
608 		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
609 }
610 
611 static int __init disable_bulk_remove(char *str)
612 {
	if (strcmp(str, "off") == 0 &&
	    firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
		printk(KERN_INFO "Disabling BULK_REMOVE firmware feature\n");
		powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE;
	}
618 	return 1;
619 }
620 
621 __setup("bulk_remove=", disable_bulk_remove);
622 
623 #define HPT_RESIZE_TIMEOUT	10000 /* ms */
624 
625 struct hpt_resize_state {
626 	unsigned long shift;
627 	int commit_rc;
628 };
629 
630 static int pseries_lpar_resize_hpt_commit(void *data)
631 {
632 	struct hpt_resize_state *state = data;
633 
634 	state->commit_rc = plpar_resize_hpt_commit(0, state->shift);
635 	if (state->commit_rc != H_SUCCESS)
636 		return -EIO;
637 
638 	/* Hypervisor has transitioned the HTAB, update our globals */
639 	ppc64_pft_size = state->shift;
640 	htab_size_bytes = 1UL << ppc64_pft_size;
	htab_hash_mask = (htab_size_bytes >> 7) - 1;	/* 128 bytes per PTEG */
642 
643 	return 0;
644 }
645 
/* Must be called in process context; it may sleep */
647 static int pseries_lpar_resize_hpt(unsigned long shift)
648 {
649 	struct hpt_resize_state state = {
650 		.shift = shift,
651 		.commit_rc = H_FUNCTION,
652 	};
653 	unsigned int delay, total_delay = 0;
654 	int rc;
655 	ktime_t t0, t1, t2;
656 
657 	might_sleep();
658 
659 	if (!firmware_has_feature(FW_FEATURE_HPT_RESIZE))
660 		return -ENODEV;
661 
662 	printk(KERN_INFO "lpar: Attempting to resize HPT to shift %lu\n",
663 	       shift);
664 
665 	t0 = ktime_get();
666 
667 	rc = plpar_resize_hpt_prepare(0, shift);
668 	while (H_IS_LONG_BUSY(rc)) {
669 		delay = get_longbusy_msecs(rc);
670 		total_delay += delay;
671 		if (total_delay > HPT_RESIZE_TIMEOUT) {
672 			/* prepare with shift==0 cancels an in-progress resize */
673 			rc = plpar_resize_hpt_prepare(0, 0);
674 			if (rc != H_SUCCESS)
675 				printk(KERN_WARNING
676 				       "lpar: Unexpected error %d cancelling timed out HPT resize\n",
677 				       rc);
678 			return -ETIMEDOUT;
679 		}
680 		msleep(delay);
681 		rc = plpar_resize_hpt_prepare(0, shift);
	}
683 
684 	switch (rc) {
685 	case H_SUCCESS:
686 		/* Continue on */
687 		break;
688 
689 	case H_PARAMETER:
690 		return -EINVAL;
691 	case H_RESOURCE:
692 		return -EPERM;
693 	default:
694 		printk(KERN_WARNING
695 		       "lpar: Unexpected error %d from H_RESIZE_HPT_PREPARE\n",
696 		       rc);
697 		return -EIO;
698 	}
699 
700 	t1 = ktime_get();
701 
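	/*
	 * Do the commit under stop_machine() so that no other CPU can
	 * touch the hash table while the hypervisor switches us over
	 * to the resized one.
	 */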
702 	rc = stop_machine(pseries_lpar_resize_hpt_commit, &state, NULL);
703 
704 	t2 = ktime_get();
705 
706 	if (rc != 0) {
707 		switch (state.commit_rc) {
708 		case H_PTEG_FULL:
709 			printk(KERN_WARNING
710 			       "lpar: Hash collision while resizing HPT\n");
711 			return -ENOSPC;
712 
713 		default:
714 			printk(KERN_WARNING
715 			       "lpar: Unexpected error %d from H_RESIZE_HPT_COMMIT\n",
716 			       state.commit_rc);
717 			return -EIO;
		}
719 	}
720 
721 	printk(KERN_INFO
722 	       "lpar: HPT resize to shift %lu complete (%lld ms / %lld ms)\n",
723 	       shift, (long long) ktime_ms_delta(t1, t0),
724 	       (long long) ktime_ms_delta(t2, t1));
725 
726 	return 0;
727 }
728 
729 static int pseries_lpar_register_process_table(unsigned long base,
730 			unsigned long page_size, unsigned long table_size)
731 {
732 	long rc;
733 	unsigned long flags = 0;
734 
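	/*
	 * Build H_REGISTER_PROC_TBL flags: PROC_TABLE_NEW registers a
	 * new table, and we request either radix with guest-issued
	 * tlbie (GTSE) or an HPT with SLB-based segment translation.
	 */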
735 	if (table_size)
736 		flags |= PROC_TABLE_NEW;
737 	if (radix_enabled())
738 		flags |= PROC_TABLE_RADIX | PROC_TABLE_GTSE;
739 	else
740 		flags |= PROC_TABLE_HPT_SLB;
741 	for (;;) {
742 		rc = plpar_hcall_norets(H_REGISTER_PROC_TBL, flags, base,
743 					page_size, table_size);
744 		if (!H_IS_LONG_BUSY(rc))
745 			break;
746 		mdelay(get_longbusy_msecs(rc));
747 	}
748 	if (rc != H_SUCCESS) {
749 		pr_err("Failed to register process table (rc=%ld)\n", rc);
750 		BUG();
751 	}
752 	return rc;
753 }
754 
755 void __init hpte_init_pseries(void)
756 {
757 	mmu_hash_ops.hpte_invalidate	 = pSeries_lpar_hpte_invalidate;
758 	mmu_hash_ops.hpte_updatepp	 = pSeries_lpar_hpte_updatepp;
759 	mmu_hash_ops.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp;
760 	mmu_hash_ops.hpte_insert	 = pSeries_lpar_hpte_insert;
761 	mmu_hash_ops.hpte_remove	 = pSeries_lpar_hpte_remove;
762 	mmu_hash_ops.hpte_removebolted   = pSeries_lpar_hpte_removebolted;
763 	mmu_hash_ops.flush_hash_range	 = pSeries_lpar_flush_hash_range;
764 	mmu_hash_ops.hpte_clear_all      = pseries_hpte_clear_all;
765 	mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
766 	register_process_table		 = pseries_lpar_register_process_table;
767 
768 	if (firmware_has_feature(FW_FEATURE_HPT_RESIZE))
769 		mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt;
770 }
771 
772 void radix_init_pseries(void)
773 {
774 	pr_info("Using radix MMU under hypervisor\n");
775 	register_process_table = pseries_lpar_register_process_table;
776 }
777 
778 #ifdef CONFIG_PPC_SMLPAR
779 #define CMO_FREE_HINT_DEFAULT 1
780 static int cmo_free_hint_flag = CMO_FREE_HINT_DEFAULT;
781 
782 static int __init cmo_free_hint(char *str)
783 {
784 	char *parm;
785 	parm = strstrip(str);
786 
787 	if (strcasecmp(parm, "no") == 0 || strcasecmp(parm, "off") == 0) {
788 		printk(KERN_INFO "cmo_free_hint: CMO free page hinting is not active.\n");
789 		cmo_free_hint_flag = 0;
790 		return 1;
791 	}
792 
793 	cmo_free_hint_flag = 1;
794 	printk(KERN_INFO "cmo_free_hint: CMO free page hinting is active.\n");
795 
796 	if (strcasecmp(parm, "yes") == 0 || strcasecmp(parm, "on") == 0)
797 		return 1;
798 
799 	return 0;
800 }
801 
802 __setup("cmo_free_hint=", cmo_free_hint);
803 
804 static void pSeries_set_page_state(struct page *page, int order,
805 				   unsigned long state)
806 {
807 	int i, j;
808 	unsigned long cmo_page_sz, addr;
809 
810 	cmo_page_sz = cmo_get_page_size();
811 	addr = __pa((unsigned long)page_address(page));
812 
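	/*
	 * The hypervisor's CMO page size may be smaller than PAGE_SIZE,
	 * so hint each CMO-sized chunk of every page in the block.
	 */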
813 	for (i = 0; i < (1 << order); i++, addr += PAGE_SIZE) {
814 		for (j = 0; j < PAGE_SIZE; j += cmo_page_sz)
815 			plpar_hcall_norets(H_PAGE_INIT, state, addr + j, 0);
816 	}
817 }
818 
819 void arch_free_page(struct page *page, int order)
820 {
821 	if (radix_enabled())
822 		return;
823 	if (!cmo_free_hint_flag || !firmware_has_feature(FW_FEATURE_CMO))
824 		return;
825 
826 	pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED);
827 }
828 EXPORT_SYMBOL(arch_free_page);
829 
830 #endif /* CONFIG_PPC_SMLPAR */
831 #endif /* CONFIG_PPC_BOOK3S_64 */
832 
833 #ifdef CONFIG_TRACEPOINTS
834 #ifdef HAVE_JUMP_LABEL
835 struct static_key hcall_tracepoint_key = STATIC_KEY_INIT;
836 
837 int hcall_tracepoint_regfunc(void)
838 {
839 	static_key_slow_inc(&hcall_tracepoint_key);
840 	return 0;
841 }
842 
843 void hcall_tracepoint_unregfunc(void)
844 {
845 	static_key_slow_dec(&hcall_tracepoint_key);
846 }
847 #else
848 /*
849  * We optimise our hcall path by placing hcall_tracepoint_refcount
850  * directly in the TOC so we can check if the hcall tracepoints are
851  * enabled via a single load.
852  */
853 
854 /* NB: reg/unreg are called while guarded with the tracepoints_mutex */
855 extern long hcall_tracepoint_refcount;
856 
857 int hcall_tracepoint_regfunc(void)
858 {
859 	hcall_tracepoint_refcount++;
860 	return 0;
861 }
862 
863 void hcall_tracepoint_unregfunc(void)
864 {
865 	hcall_tracepoint_refcount--;
866 }
867 #endif
868 
869 /*
870  * Since the tracing code might execute hcalls we need to guard against
871  * recursion. One example of this are spinlocks calling H_YIELD on
872  * shared processor partitions.
873  */
874 static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);
875 
876 
877 void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
878 {
879 	unsigned long flags;
880 	unsigned int *depth;
881 
882 	/*
883 	 * We cannot call tracepoints inside RCU idle regions which
884 	 * means we must not trace H_CEDE.
885 	 */
886 	if (opcode == H_CEDE)
887 		return;
888 
889 	local_irq_save(flags);
890 
891 	depth = this_cpu_ptr(&hcall_trace_depth);
892 
893 	if (*depth)
894 		goto out;
895 
896 	(*depth)++;
897 	preempt_disable();
898 	trace_hcall_entry(opcode, args);
899 	(*depth)--;
900 
901 out:
902 	local_irq_restore(flags);
903 }
904 
905 void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf)
906 {
907 	unsigned long flags;
908 	unsigned int *depth;
909 
910 	if (opcode == H_CEDE)
911 		return;
912 
913 	local_irq_save(flags);
914 
915 	depth = this_cpu_ptr(&hcall_trace_depth);
916 
917 	if (*depth)
918 		goto out;
919 
920 	(*depth)++;
921 	trace_hcall_exit(opcode, retval, retbuf);
922 	preempt_enable();
923 	(*depth)--;
924 
925 out:
926 	local_irq_restore(flags);
927 }
928 #endif
929 
/**
 * h_get_mpp() - Read partition memory entitlement data.
 * @mpp_data: filled in from the seven return parameters of H_GET_MPP.
 */
934 int h_get_mpp(struct hvcall_mpp_data *mpp_data)
935 {
936 	int rc;
937 	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
938 
939 	rc = plpar_hcall9(H_GET_MPP, retbuf);
940 
941 	mpp_data->entitled_mem = retbuf[0];
942 	mpp_data->mapped_mem = retbuf[1];
943 
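	/*
	 * retbuf[2] packs the group number above the pool number in its
	 * low four bytes; retbuf[3] carries the two weights in its top
	 * two bytes above a 48-bit unallocated entitlement.
	 */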
944 	mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
945 	mpp_data->pool_num = retbuf[2] & 0xffff;
946 
947 	mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
948 	mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
949 	mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffffUL;
950 
951 	mpp_data->pool_size = retbuf[4];
952 	mpp_data->loan_request = retbuf[5];
953 	mpp_data->backing_mem = retbuf[6];
954 
955 	return rc;
956 }
957 EXPORT_SYMBOL(h_get_mpp);
958 
959 int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data)
960 {
961 	int rc;
962 	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 };
963 
964 	rc = plpar_hcall9(H_GET_MPP_X, retbuf);
965 
966 	mpp_x_data->coalesced_bytes = retbuf[0];
967 	mpp_x_data->pool_coalesced_bytes = retbuf[1];
968 	mpp_x_data->pool_purr_cycles = retbuf[2];
969 	mpp_x_data->pool_spurr_cycles = retbuf[3];
970 
971 	return rc;
972 }
973 
974 static unsigned long vsid_unscramble(unsigned long vsid, int ssize)
975 {
976 	unsigned long protovsid;
977 	unsigned long va_bits = VA_BITS;
978 	unsigned long modinv, vsid_modulus;
979 	unsigned long max_mod_inv, tmp_modinv;
980 
981 	if (!mmu_has_feature(MMU_FTR_68_BIT_VA))
982 		va_bits = 65;
983 
984 	if (ssize == MMU_SEGSIZE_256M) {
985 		modinv = VSID_MULINV_256M;
986 		vsid_modulus = ((1UL << (va_bits - SID_SHIFT)) - 1);
987 	} else {
988 		modinv = VSID_MULINV_1T;
989 		vsid_modulus = ((1UL << (va_bits - SID_SHIFT_1T)) - 1);
990 	}
991 
992 	/*
993 	 * vsid outside our range.
994 	 */
995 	if (vsid >= vsid_modulus)
996 		return 0;
997 
998 	/*
	 * If modinv is the modular multiplicative inverse of (x % vsid_modulus)
1000 	 * and vsid = (protovsid * x) % vsid_modulus, then we say:
1001 	 *   protovsid = (vsid * modinv) % vsid_modulus
1002 	 */
1003 
	/* Check if (vsid * modinv) would overflow (63 bits) */
1005 	max_mod_inv = 0x7fffffffffffffffull / vsid;
1006 	if (modinv < max_mod_inv)
1007 		return (vsid * modinv) % vsid_modulus;
1008 
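	/*
	 * modinv is too large to multiply out directly, so split it as
	 * tmp_modinv * max_mod_inv + remainder and reduce each partial
	 * product modulo vsid_modulus; no step then overflows 64 bits.
	 */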
	tmp_modinv = modinv / max_mod_inv;
1010 	modinv %= max_mod_inv;
1011 
1012 	protovsid = (((vsid * max_mod_inv) % vsid_modulus) * tmp_modinv) % vsid_modulus;
1013 	protovsid = (protovsid + vsid * modinv) % vsid_modulus;
1014 
1015 	return protovsid;
1016 }
1017 
1018 static int __init reserve_vrma_context_id(void)
1019 {
1020 	unsigned long protovsid;
1021 
1022 	/*
1023 	 * Reserve context ids which map to reserved virtual addresses. For now
1024 	 * we only reserve the context id which maps to the VRMA VSID. We ignore
1025 	 * the addresses in "ibm,adjunct-virtual-addresses" because we don't
1026 	 * enable adjunct support via the "ibm,client-architecture-support"
1027 	 * interface.
1028 	 */
1029 	protovsid = vsid_unscramble(VRMA_VSID, MMU_SEGSIZE_1T);
1030 	hash__reserve_context_id(protovsid >> ESID_BITS_1T);
1031 	return 0;
1032 }
1033 machine_device_initcall(pseries, reserve_vrma_context_id);
1034