1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * pSeries_lpar.c
4  * Copyright (C) 2001 Todd Inglett, IBM Corporation
5  *
6  * pSeries LPAR support.
7  */
8 
9 /* Enables debugging of low-level hash table routines - careful! */
10 #undef DEBUG
11 #define pr_fmt(fmt) "lpar: " fmt
12 
13 #include <linux/kernel.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/console.h>
16 #include <linux/export.h>
17 #include <linux/jump_label.h>
18 #include <linux/delay.h>
19 #include <linux/stop_machine.h>
20 #include <linux/spinlock.h>
21 #include <linux/cpuhotplug.h>
22 #include <linux/workqueue.h>
23 #include <linux/proc_fs.h>
24 #include <linux/pgtable.h>
25 #include <asm/processor.h>
26 #include <asm/mmu.h>
27 #include <asm/page.h>
28 #include <asm/machdep.h>
29 #include <asm/mmu_context.h>
30 #include <asm/iommu.h>
31 #include <asm/tlb.h>
32 #include <asm/prom.h>
33 #include <asm/cputable.h>
34 #include <asm/udbg.h>
35 #include <asm/smp.h>
36 #include <asm/trace.h>
37 #include <asm/firmware.h>
38 #include <asm/plpar_wrappers.h>
39 #include <asm/kexec.h>
40 #include <asm/fadump.h>
41 #include <asm/asm-prototypes.h>
42 #include <asm/debugfs.h>
43 #include <asm/dtl.h>
44 
45 #include "pseries.h"
46 
47 /* Flag bits for H_BULK_REMOVE */
48 #define HBR_REQUEST	0x4000000000000000UL
49 #define HBR_RESPONSE	0x8000000000000000UL
50 #define HBR_END		0xc000000000000000UL
51 #define HBR_AVPN	0x0200000000000000UL
52 #define HBR_ANDCOND	0x0100000000000000UL
53 
54 
55 /* in hvCall.S */
56 EXPORT_SYMBOL(plpar_hcall);
57 EXPORT_SYMBOL(plpar_hcall9);
58 EXPORT_SYMBOL(plpar_hcall_norets);
59 
60 /*
61  * H_BLOCK_REMOVE supported block size for this page size in segment who's base
62  * page size is that page size.
63  *
64  * The first index is the segment base page size, the second one is the actual
65  * page size.
66  */
67 static int hblkrm_size[MMU_PAGE_COUNT][MMU_PAGE_COUNT] __ro_after_init;
68 
69 /*
70  * Due to the involved complexity, and that the current hypervisor is only
71  * returning this value or 0, we are limiting the support of the H_BLOCK_REMOVE
72  * buffer size to 8 size block.
73  */
74 #define HBLKRM_SUPPORTED_BLOCK_SIZE 8
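
/*
 * Illustrative example (values depend on what the hypervisor reports):
 * hblkrm_size[MMU_PAGE_64K][MMU_PAGE_64K] == 8 would mean that a single
 * H_BLOCK_REMOVE call can invalidate up to 8 64K HPTEs lying within the same
 * naturally aligned 8-page virtual address block of a 64K base page segment.
 */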
75 
76 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
77 static u8 dtl_mask = DTL_LOG_PREEMPT;
78 #else
79 static u8 dtl_mask;
80 #endif
81 
82 void alloc_dtl_buffers(unsigned long *time_limit)
83 {
84 	int cpu;
85 	struct paca_struct *pp;
86 	struct dtl_entry *dtl;
87 
88 	for_each_possible_cpu(cpu) {
89 		pp = paca_ptrs[cpu];
90 		if (pp->dispatch_log)
91 			continue;
92 		dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL);
93 		if (!dtl) {
94 			pr_warn("Failed to allocate dispatch trace log for cpu %d\n",
95 				cpu);
96 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
97 			pr_warn("Stolen time statistics will be unreliable\n");
98 #endif
99 			break;
100 		}
101 
102 		pp->dtl_ridx = 0;
103 		pp->dispatch_log = dtl;
104 		pp->dispatch_log_end = dtl + N_DISPATCH_LOG;
105 		pp->dtl_curr = dtl;
106 
107 		if (time_limit && time_after(jiffies, *time_limit)) {
108 			cond_resched();
109 			*time_limit = jiffies + HZ;
110 		}
111 	}
112 }
113 
114 void register_dtl_buffer(int cpu)
115 {
116 	long ret;
117 	struct paca_struct *pp;
118 	struct dtl_entry *dtl;
119 	int hwcpu = get_hard_smp_processor_id(cpu);
120 
121 	pp = paca_ptrs[cpu];
122 	dtl = pp->dispatch_log;
123 	if (dtl && dtl_mask) {
124 		pp->dtl_ridx = 0;
125 		pp->dtl_curr = dtl;
126 		lppaca_of(cpu).dtl_idx = 0;
127 
128 		/* hypervisor reads buffer length from this field */
129 		dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
130 		ret = register_dtl(hwcpu, __pa(dtl));
131 		if (ret)
132 			pr_err("WARNING: DTL registration of cpu %d (hw %d) failed with %ld\n",
133 			       cpu, hwcpu, ret);
134 
135 		lppaca_of(cpu).dtl_enable_mask = dtl_mask;
136 	}
137 }
138 
139 #ifdef CONFIG_PPC_SPLPAR
140 struct dtl_worker {
141 	struct delayed_work work;
142 	int cpu;
143 };
144 
145 struct vcpu_dispatch_data {
146 	int last_disp_cpu;
147 
148 	int total_disp;
149 
150 	int same_cpu_disp;
151 	int same_chip_disp;
152 	int diff_chip_disp;
153 	int far_chip_disp;
154 
155 	int numa_home_disp;
156 	int numa_remote_disp;
157 	int numa_far_disp;
158 };
159 
160 /*
161  * This represents the number of cpus in the hypervisor. Since there is no
162  * architected way to discover the number of processors in the host, we
163  * provision for dealing with NR_CPUS. This is currently 2048 by default, and
164  * is sufficient for our purposes. This will need to be tweaked if
165  * CONFIG_NR_CPUS is changed.
166  */
167 #define NR_CPUS_H	NR_CPUS
168 
169 DEFINE_RWLOCK(dtl_access_lock);
170 static DEFINE_PER_CPU(struct vcpu_dispatch_data, vcpu_disp_data);
171 static DEFINE_PER_CPU(u64, dtl_entry_ridx);
172 static DEFINE_PER_CPU(struct dtl_worker, dtl_workers);
173 static enum cpuhp_state dtl_worker_state;
174 static DEFINE_MUTEX(dtl_enable_mutex);
175 static int vcpudispatch_stats_on __read_mostly;
176 static int vcpudispatch_stats_freq = 50;
177 static __be32 *vcpu_associativity, *pcpu_associativity;
178 
179 
180 static void free_dtl_buffers(unsigned long *time_limit)
181 {
182 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
183 	int cpu;
184 	struct paca_struct *pp;
185 
186 	for_each_possible_cpu(cpu) {
187 		pp = paca_ptrs[cpu];
188 		if (!pp->dispatch_log)
189 			continue;
190 		kmem_cache_free(dtl_cache, pp->dispatch_log);
191 		pp->dtl_ridx = 0;
		pp->dispatch_log = NULL;
		pp->dispatch_log_end = NULL;
		pp->dtl_curr = NULL;
195 
196 		if (time_limit && time_after(jiffies, *time_limit)) {
197 			cond_resched();
198 			*time_limit = jiffies + HZ;
199 		}
200 	}
201 #endif
202 }
203 
204 static int init_cpu_associativity(void)
205 {
206 	vcpu_associativity = kcalloc(num_possible_cpus() / threads_per_core,
207 			VPHN_ASSOC_BUFSIZE * sizeof(__be32), GFP_KERNEL);
208 	pcpu_associativity = kcalloc(NR_CPUS_H / threads_per_core,
209 			VPHN_ASSOC_BUFSIZE * sizeof(__be32), GFP_KERNEL);
210 
211 	if (!vcpu_associativity || !pcpu_associativity) {
212 		pr_err("error allocating memory for associativity information\n");
213 		return -ENOMEM;
214 	}
215 
216 	return 0;
217 }
218 
219 static void destroy_cpu_associativity(void)
220 {
221 	kfree(vcpu_associativity);
222 	kfree(pcpu_associativity);
	vcpu_associativity = pcpu_associativity = NULL;
224 }
225 
226 static __be32 *__get_cpu_associativity(int cpu, __be32 *cpu_assoc, int flag)
227 {
228 	__be32 *assoc;
229 	int rc = 0;
230 
231 	assoc = &cpu_assoc[(int)(cpu / threads_per_core) * VPHN_ASSOC_BUFSIZE];
232 	if (!assoc[0]) {
233 		rc = hcall_vphn(cpu, flag, &assoc[0]);
234 		if (rc)
235 			return NULL;
236 	}
237 
238 	return assoc;
239 }
240 
241 static __be32 *get_pcpu_associativity(int cpu)
242 {
243 	return __get_cpu_associativity(cpu, pcpu_associativity, VPHN_FLAG_PCPU);
244 }
245 
246 static __be32 *get_vcpu_associativity(int cpu)
247 {
248 	return __get_cpu_associativity(cpu, vcpu_associativity, VPHN_FLAG_VCPU);
249 }
250 
251 static int cpu_relative_dispatch_distance(int last_disp_cpu, int cur_disp_cpu)
252 {
253 	__be32 *last_disp_cpu_assoc, *cur_disp_cpu_assoc;
254 
255 	if (last_disp_cpu >= NR_CPUS_H || cur_disp_cpu >= NR_CPUS_H)
256 		return -EINVAL;
257 
258 	last_disp_cpu_assoc = get_pcpu_associativity(last_disp_cpu);
259 	cur_disp_cpu_assoc = get_pcpu_associativity(cur_disp_cpu);
260 
261 	if (!last_disp_cpu_assoc || !cur_disp_cpu_assoc)
262 		return -EIO;
263 
264 	return cpu_distance(last_disp_cpu_assoc, cur_disp_cpu_assoc);
265 }
266 
267 static int cpu_home_node_dispatch_distance(int disp_cpu)
268 {
269 	__be32 *disp_cpu_assoc, *vcpu_assoc;
270 	int vcpu_id = smp_processor_id();
271 
272 	if (disp_cpu >= NR_CPUS_H) {
273 		pr_debug_ratelimited("vcpu dispatch cpu %d > %d\n",
274 						disp_cpu, NR_CPUS_H);
275 		return -EINVAL;
276 	}
277 
278 	disp_cpu_assoc = get_pcpu_associativity(disp_cpu);
279 	vcpu_assoc = get_vcpu_associativity(vcpu_id);
280 
281 	if (!disp_cpu_assoc || !vcpu_assoc)
282 		return -EIO;
283 
284 	return cpu_distance(disp_cpu_assoc, vcpu_assoc);
285 }
286 
287 static void update_vcpu_disp_stat(int disp_cpu)
288 {
289 	struct vcpu_dispatch_data *disp;
290 	int distance;
291 
292 	disp = this_cpu_ptr(&vcpu_disp_data);
293 	if (disp->last_disp_cpu == -1) {
294 		disp->last_disp_cpu = disp_cpu;
295 		return;
296 	}
297 
298 	disp->total_disp++;
299 
300 	if (disp->last_disp_cpu == disp_cpu ||
301 		(cpu_first_thread_sibling(disp->last_disp_cpu) ==
302 					cpu_first_thread_sibling(disp_cpu)))
303 		disp->same_cpu_disp++;
304 	else {
305 		distance = cpu_relative_dispatch_distance(disp->last_disp_cpu,
306 								disp_cpu);
307 		if (distance < 0)
308 			pr_debug_ratelimited("vcpudispatch_stats: cpu %d: error determining associativity\n",
309 					smp_processor_id());
310 		else {
311 			switch (distance) {
312 			case 0:
313 				disp->same_chip_disp++;
314 				break;
315 			case 1:
316 				disp->diff_chip_disp++;
317 				break;
318 			case 2:
319 				disp->far_chip_disp++;
320 				break;
321 			default:
322 				pr_debug_ratelimited("vcpudispatch_stats: cpu %d (%d -> %d): unexpected relative dispatch distance %d\n",
323 						 smp_processor_id(),
324 						 disp->last_disp_cpu,
325 						 disp_cpu,
326 						 distance);
327 			}
328 		}
329 	}
330 
331 	distance = cpu_home_node_dispatch_distance(disp_cpu);
332 	if (distance < 0)
333 		pr_debug_ratelimited("vcpudispatch_stats: cpu %d: error determining associativity\n",
334 				smp_processor_id());
335 	else {
336 		switch (distance) {
337 		case 0:
338 			disp->numa_home_disp++;
339 			break;
340 		case 1:
341 			disp->numa_remote_disp++;
342 			break;
343 		case 2:
344 			disp->numa_far_disp++;
345 			break;
346 		default:
347 			pr_debug_ratelimited("vcpudispatch_stats: cpu %d on %d: unexpected numa dispatch distance %d\n",
348 						 smp_processor_id(),
349 						 disp_cpu,
350 						 distance);
351 		}
352 	}
353 
354 	disp->last_disp_cpu = disp_cpu;
355 }
356 
357 static void process_dtl_buffer(struct work_struct *work)
358 {
359 	struct dtl_entry dtle;
360 	u64 i = __this_cpu_read(dtl_entry_ridx);
361 	struct dtl_entry *dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
362 	struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
363 	struct lppaca *vpa = local_paca->lppaca_ptr;
364 	struct dtl_worker *d = container_of(work, struct dtl_worker, work.work);
365 
366 	if (!local_paca->dispatch_log)
367 		return;
368 
	/* if we have been migrated away, we cancel ourselves */
370 	if (d->cpu != smp_processor_id()) {
371 		pr_debug("vcpudispatch_stats: cpu %d worker migrated -- canceling worker\n",
372 						smp_processor_id());
373 		return;
374 	}
375 
376 	if (i == be64_to_cpu(vpa->dtl_idx))
377 		goto out;
378 
379 	while (i < be64_to_cpu(vpa->dtl_idx)) {
380 		dtle = *dtl;
381 		barrier();
382 		if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) {
383 			/* buffer has overflowed */
384 			pr_debug_ratelimited("vcpudispatch_stats: cpu %d lost %lld DTL samples\n",
385 				d->cpu,
386 				be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG - i);
387 			i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG;
388 			dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
389 			continue;
390 		}
391 		update_vcpu_disp_stat(be16_to_cpu(dtle.processor_id));
392 		++i;
393 		++dtl;
394 		if (dtl == dtl_end)
395 			dtl = local_paca->dispatch_log;
396 	}
397 
398 	__this_cpu_write(dtl_entry_ridx, i);
399 
400 out:
401 	schedule_delayed_work_on(d->cpu, to_delayed_work(work),
402 					HZ / vcpudispatch_stats_freq);
403 }
404 
405 static int dtl_worker_online(unsigned int cpu)
406 {
407 	struct dtl_worker *d = &per_cpu(dtl_workers, cpu);
408 
409 	memset(d, 0, sizeof(*d));
410 	INIT_DELAYED_WORK(&d->work, process_dtl_buffer);
411 	d->cpu = cpu;
412 
413 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
414 	per_cpu(dtl_entry_ridx, cpu) = 0;
415 	register_dtl_buffer(cpu);
416 #else
417 	per_cpu(dtl_entry_ridx, cpu) = be64_to_cpu(lppaca_of(cpu).dtl_idx);
418 #endif
419 
420 	schedule_delayed_work_on(cpu, &d->work, HZ / vcpudispatch_stats_freq);
421 	return 0;
422 }
423 
424 static int dtl_worker_offline(unsigned int cpu)
425 {
426 	struct dtl_worker *d = &per_cpu(dtl_workers, cpu);
427 
428 	cancel_delayed_work_sync(&d->work);
429 
430 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
431 	unregister_dtl(get_hard_smp_processor_id(cpu));
432 #endif
433 
434 	return 0;
435 }
436 
437 static void set_global_dtl_mask(u8 mask)
438 {
439 	int cpu;
440 
441 	dtl_mask = mask;
442 	for_each_present_cpu(cpu)
443 		lppaca_of(cpu).dtl_enable_mask = dtl_mask;
444 }
445 
446 static void reset_global_dtl_mask(void)
447 {
448 	int cpu;
449 
450 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
451 	dtl_mask = DTL_LOG_PREEMPT;
452 #else
453 	dtl_mask = 0;
454 #endif
455 	for_each_present_cpu(cpu)
456 		lppaca_of(cpu).dtl_enable_mask = dtl_mask;
457 }
458 
459 static int dtl_worker_enable(unsigned long *time_limit)
460 {
461 	int rc = 0, state;
462 
463 	if (!write_trylock(&dtl_access_lock)) {
464 		rc = -EBUSY;
465 		goto out;
466 	}
467 
468 	set_global_dtl_mask(DTL_LOG_ALL);
469 
	/* Set up the DTL buffers and register them */
471 	alloc_dtl_buffers(time_limit);
472 
473 	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/dtl:online",
474 					dtl_worker_online, dtl_worker_offline);
475 	if (state < 0) {
476 		pr_err("vcpudispatch_stats: unable to setup workqueue for DTL processing\n");
477 		free_dtl_buffers(time_limit);
478 		reset_global_dtl_mask();
479 		write_unlock(&dtl_access_lock);
480 		rc = -EINVAL;
481 		goto out;
482 	}
483 	dtl_worker_state = state;
484 
485 out:
486 	return rc;
487 }
488 
489 static void dtl_worker_disable(unsigned long *time_limit)
490 {
491 	cpuhp_remove_state(dtl_worker_state);
492 	free_dtl_buffers(time_limit);
493 	reset_global_dtl_mask();
494 	write_unlock(&dtl_access_lock);
495 }
496 
497 static ssize_t vcpudispatch_stats_write(struct file *file, const char __user *p,
498 		size_t count, loff_t *ppos)
499 {
500 	unsigned long time_limit = jiffies + HZ;
501 	struct vcpu_dispatch_data *disp;
502 	int rc, cmd, cpu;
503 	char buf[16];
504 
505 	if (count > 15)
506 		return -EINVAL;
507 
508 	if (copy_from_user(buf, p, count))
509 		return -EFAULT;
510 
511 	buf[count] = 0;
512 	rc = kstrtoint(buf, 0, &cmd);
513 	if (rc || cmd < 0 || cmd > 1) {
514 		pr_err("vcpudispatch_stats: please use 0 to disable or 1 to enable dispatch statistics\n");
515 		return rc ? rc : -EINVAL;
516 	}
517 
518 	mutex_lock(&dtl_enable_mutex);
519 
520 	if ((cmd == 0 && !vcpudispatch_stats_on) ||
521 			(cmd == 1 && vcpudispatch_stats_on))
522 		goto out;
523 
524 	if (cmd) {
525 		rc = init_cpu_associativity();
526 		if (rc)
527 			goto out;
528 
529 		for_each_possible_cpu(cpu) {
530 			disp = per_cpu_ptr(&vcpu_disp_data, cpu);
531 			memset(disp, 0, sizeof(*disp));
532 			disp->last_disp_cpu = -1;
533 		}
534 
535 		rc = dtl_worker_enable(&time_limit);
536 		if (rc) {
537 			destroy_cpu_associativity();
538 			goto out;
539 		}
540 	} else {
541 		dtl_worker_disable(&time_limit);
542 		destroy_cpu_associativity();
543 	}
544 
545 	vcpudispatch_stats_on = cmd;
546 
547 out:
548 	mutex_unlock(&dtl_enable_mutex);
549 	if (rc)
550 		return rc;
551 	return count;
552 }
553 
554 static int vcpudispatch_stats_display(struct seq_file *p, void *v)
555 {
556 	int cpu;
557 	struct vcpu_dispatch_data *disp;
558 
559 	if (!vcpudispatch_stats_on) {
560 		seq_puts(p, "off\n");
561 		return 0;
562 	}
563 
564 	for_each_online_cpu(cpu) {
565 		disp = per_cpu_ptr(&vcpu_disp_data, cpu);
566 		seq_printf(p, "cpu%d", cpu);
567 		seq_put_decimal_ull(p, " ", disp->total_disp);
568 		seq_put_decimal_ull(p, " ", disp->same_cpu_disp);
569 		seq_put_decimal_ull(p, " ", disp->same_chip_disp);
570 		seq_put_decimal_ull(p, " ", disp->diff_chip_disp);
571 		seq_put_decimal_ull(p, " ", disp->far_chip_disp);
572 		seq_put_decimal_ull(p, " ", disp->numa_home_disp);
573 		seq_put_decimal_ull(p, " ", disp->numa_remote_disp);
574 		seq_put_decimal_ull(p, " ", disp->numa_far_disp);
575 		seq_puts(p, "\n");
576 	}
577 
578 	return 0;
579 }
580 
581 static int vcpudispatch_stats_open(struct inode *inode, struct file *file)
582 {
583 	return single_open(file, vcpudispatch_stats_display, NULL);
584 }
585 
586 static const struct proc_ops vcpudispatch_stats_proc_ops = {
587 	.proc_open	= vcpudispatch_stats_open,
588 	.proc_read	= seq_read,
589 	.proc_write	= vcpudispatch_stats_write,
590 	.proc_lseek	= seq_lseek,
591 	.proc_release	= single_release,
592 };
593 
594 static ssize_t vcpudispatch_stats_freq_write(struct file *file,
595 		const char __user *p, size_t count, loff_t *ppos)
596 {
597 	int rc, freq;
598 	char buf[16];
599 
600 	if (count > 15)
601 		return -EINVAL;
602 
603 	if (copy_from_user(buf, p, count))
604 		return -EFAULT;
605 
606 	buf[count] = 0;
607 	rc = kstrtoint(buf, 0, &freq);
608 	if (rc || freq < 1 || freq > HZ) {
609 		pr_err("vcpudispatch_stats_freq: please specify a frequency between 1 and %d\n",
610 				HZ);
611 		return rc ? rc : -EINVAL;
612 	}
613 
614 	vcpudispatch_stats_freq = freq;
615 
616 	return count;
617 }
618 
619 static int vcpudispatch_stats_freq_display(struct seq_file *p, void *v)
620 {
621 	seq_printf(p, "%d\n", vcpudispatch_stats_freq);
622 	return 0;
623 }
624 
625 static int vcpudispatch_stats_freq_open(struct inode *inode, struct file *file)
626 {
627 	return single_open(file, vcpudispatch_stats_freq_display, NULL);
628 }
629 
630 static const struct proc_ops vcpudispatch_stats_freq_proc_ops = {
631 	.proc_open	= vcpudispatch_stats_freq_open,
632 	.proc_read	= seq_read,
633 	.proc_write	= vcpudispatch_stats_freq_write,
634 	.proc_lseek	= seq_lseek,
635 	.proc_release	= single_release,
636 };
637 
638 static int __init vcpudispatch_stats_procfs_init(void)
639 {
640 	/*
641 	 * Avoid smp_processor_id while preemptible. All CPUs should have
642 	 * the same value for lppaca_shared_proc.
643 	 */
644 	preempt_disable();
645 	if (!lppaca_shared_proc(get_lppaca())) {
646 		preempt_enable();
647 		return 0;
648 	}
649 	preempt_enable();
650 
651 	if (!proc_create("powerpc/vcpudispatch_stats", 0600, NULL,
652 					&vcpudispatch_stats_proc_ops))
653 		pr_err("vcpudispatch_stats: error creating procfs file\n");
654 	else if (!proc_create("powerpc/vcpudispatch_stats_freq", 0600, NULL,
655 					&vcpudispatch_stats_freq_proc_ops))
656 		pr_err("vcpudispatch_stats_freq: error creating procfs file\n");
657 
658 	return 0;
659 }
660 
661 machine_device_initcall(pseries, vcpudispatch_stats_procfs_init);
662 #endif /* CONFIG_PPC_SPLPAR */
663 
664 void vpa_init(int cpu)
665 {
666 	int hwcpu = get_hard_smp_processor_id(cpu);
667 	unsigned long addr;
668 	long ret;
669 
670 	/*
671 	 * The spec says it "may be problematic" if CPU x registers the VPA of
672 	 * CPU y. We should never do that, but wail if we ever do.
673 	 */
674 	WARN_ON(cpu != smp_processor_id());
675 
676 	if (cpu_has_feature(CPU_FTR_ALTIVEC))
677 		lppaca_of(cpu).vmxregs_in_use = 1;
678 
679 	if (cpu_has_feature(CPU_FTR_ARCH_207S))
680 		lppaca_of(cpu).ebb_regs_in_use = 1;
681 
682 	addr = __pa(&lppaca_of(cpu));
683 	ret = register_vpa(hwcpu, addr);
684 
685 	if (ret) {
686 		pr_err("WARNING: VPA registration for cpu %d (hw %d) of area "
687 		       "%lx failed with %ld\n", cpu, hwcpu, addr, ret);
688 		return;
689 	}
690 
691 #ifdef CONFIG_PPC_BOOK3S_64
692 	/*
693 	 * PAPR says this feature is SLB-Buffer but firmware never
	 * reports that.  All SPLPARs support the SLB shadow buffer.
695 	 */
696 	if (!radix_enabled() && firmware_has_feature(FW_FEATURE_SPLPAR)) {
697 		addr = __pa(paca_ptrs[cpu]->slb_shadow_ptr);
698 		ret = register_slb_shadow(hwcpu, addr);
699 		if (ret)
700 			pr_err("WARNING: SLB shadow buffer registration for "
701 			       "cpu %d (hw %d) of area %lx failed with %ld\n",
702 			       cpu, hwcpu, addr, ret);
703 	}
704 #endif /* CONFIG_PPC_BOOK3S_64 */
705 
706 	/*
707 	 * Register dispatch trace log, if one has been allocated.
708 	 */
709 	register_dtl_buffer(cpu);
710 }
711 
712 #ifdef CONFIG_PPC_BOOK3S_64
713 
714 static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
715 				     unsigned long vpn, unsigned long pa,
716 				     unsigned long rflags, unsigned long vflags,
717 				     int psize, int apsize, int ssize)
718 {
719 	unsigned long lpar_rc;
720 	unsigned long flags;
721 	unsigned long slot;
722 	unsigned long hpte_v, hpte_r;
723 
724 	if (!(vflags & HPTE_V_BOLTED))
725 		pr_devel("hpte_insert(group=%lx, vpn=%016lx, "
726 			 "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n",
727 			 hpte_group, vpn,  pa, rflags, vflags, psize);
728 
729 	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
730 	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
731 
732 	if (!(vflags & HPTE_V_BOLTED))
733 		pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
734 
735 	/* Now fill in the actual HPTE */
736 	/* Set CEC cookie to 0         */
737 	/* Zero page = 0               */
738 	/* I-cache Invalidate = 0      */
739 	/* I-cache synchronize = 0     */
740 	/* Exact = 0                   */
741 	flags = 0;
742 
743 	if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
744 		flags |= H_COALESCE_CAND;
745 
746 	lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot);
747 	if (unlikely(lpar_rc == H_PTEG_FULL)) {
748 		pr_devel("Hash table group is full\n");
749 		return -1;
750 	}
751 
752 	/*
	 * Since we try to ioremap PHBs we don't own, the pte insert
754 	 * will fail. However we must catch the failure in hash_page
755 	 * or we will loop forever, so return -2 in this case.
756 	 */
757 	if (unlikely(lpar_rc != H_SUCCESS)) {
758 		pr_err("Failed hash pte insert with error %ld\n", lpar_rc);
759 		return -2;
760 	}
761 	if (!(vflags & HPTE_V_BOLTED))
762 		pr_devel(" -> slot: %lu\n", slot & 7);
763 
764 	/* Because of iSeries, we have to pass down the secondary
765 	 * bucket bit here as well
766 	 */
767 	return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3);
768 }
769 
770 static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock);
771 
772 static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
773 {
774 	unsigned long slot_offset;
775 	unsigned long lpar_rc;
776 	int i;
777 	unsigned long dummy1, dummy2;
778 
779 	/* pick a random slot to start at */
780 	slot_offset = mftb() & 0x7;
781 
782 	for (i = 0; i < HPTES_PER_GROUP; i++) {
783 
784 		/* don't remove a bolted entry */
785 		lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset,
786 					   HPTE_V_BOLTED, &dummy1, &dummy2);
787 		if (lpar_rc == H_SUCCESS)
788 			return i;
789 
790 		/*
791 		 * The test for adjunct partition is performed before the
792 		 * ANDCOND test.  H_RESOURCE may be returned, so we need to
793 		 * check for that as well.
794 		 */
795 		BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);
796 
797 		slot_offset++;
798 		slot_offset &= 0x7;
799 	}
800 
801 	return -1;
802 }
803 
804 static void manual_hpte_clear_all(void)
805 {
806 	unsigned long size_bytes = 1UL << ppc64_pft_size;
807 	unsigned long hpte_count = size_bytes >> 4;
808 	struct {
809 		unsigned long pteh;
810 		unsigned long ptel;
811 	} ptes[4];
812 	long lpar_rc;
813 	unsigned long i, j;
814 
	/*
	 * Read in batches of 4,
	 * invalidate only valid entries not in the VRMA.
	 * hpte_count will be a multiple of 4.
	 */
819 	for (i = 0; i < hpte_count; i += 4) {
820 		lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);
821 		if (lpar_rc != H_SUCCESS) {
822 			pr_info("Failed to read hash page table at %ld err %ld\n",
823 				i, lpar_rc);
824 			continue;
825 		}
		for (j = 0; j < 4; j++) {
827 			if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==
828 				HPTE_V_VRMA_MASK)
829 				continue;
830 			if (ptes[j].pteh & HPTE_V_VALID)
831 				plpar_pte_remove_raw(0, i + j, 0,
832 					&(ptes[j].pteh), &(ptes[j].ptel));
833 		}
834 	}
835 }
836 
837 static int hcall_hpte_clear_all(void)
838 {
839 	int rc;
840 
841 	do {
842 		rc = plpar_hcall_norets(H_CLEAR_HPT);
843 	} while (rc == H_CONTINUE);
844 
845 	return rc;
846 }
847 
848 static void pseries_hpte_clear_all(void)
849 {
850 	int rc;
851 
852 	rc = hcall_hpte_clear_all();
853 	if (rc != H_SUCCESS)
854 		manual_hpte_clear_all();
855 
856 #ifdef __LITTLE_ENDIAN__
857 	/*
858 	 * Reset exceptions to big endian.
859 	 *
860 	 * FIXME this is a hack for kexec, we need to reset the exception
861 	 * endian before starting the new kernel and this is a convenient place
862 	 * to do it.
863 	 *
864 	 * This is also called on boot when a fadump happens. In that case we
865 	 * must not change the exception endian mode.
866 	 */
867 	if (firmware_has_feature(FW_FEATURE_SET_MODE) && !is_fadump_active())
868 		pseries_big_endian_exceptions();
869 #endif
870 }
871 
872 /*
873  * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
874  * the low 3 bits of flags happen to line up.  So no transform is needed.
875  * We can probably optimize here and assume the high bits of newpp are
876  * already zero.  For now I am paranoid.
877  */
878 static long pSeries_lpar_hpte_updatepp(unsigned long slot,
879 				       unsigned long newpp,
880 				       unsigned long vpn,
881 				       int psize, int apsize,
882 				       int ssize, unsigned long inv_flags)
883 {
884 	unsigned long lpar_rc;
885 	unsigned long flags;
886 	unsigned long want_v;
887 
888 	want_v = hpte_encode_avpn(vpn, psize, ssize);
889 
890 	flags = (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO)) | H_AVPN;
891 	flags |= (newpp & HPTE_R_KEY_HI) >> 48;
892 	if (mmu_has_feature(MMU_FTR_KERNEL_RO))
893 		/* Move pp0 into bit 8 (IBM 55) */
894 		flags |= (newpp & HPTE_R_PP0) >> 55;
895 
896 	pr_devel("    update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
897 		 want_v, slot, flags, psize);
898 
899 	lpar_rc = plpar_pte_protect(flags, slot, want_v);
900 
901 	if (lpar_rc == H_NOT_FOUND) {
902 		pr_devel("not found !\n");
903 		return -1;
904 	}
905 
906 	pr_devel("ok\n");
907 
908 	BUG_ON(lpar_rc != H_SUCCESS);
909 
910 	return 0;
911 }
912 
913 static long __pSeries_lpar_hpte_find(unsigned long want_v, unsigned long hpte_group)
914 {
915 	long lpar_rc;
916 	unsigned long i, j;
917 	struct {
918 		unsigned long pteh;
919 		unsigned long ptel;
920 	} ptes[4];
921 
922 	for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) {
923 
924 		lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes);
925 		if (lpar_rc != H_SUCCESS) {
926 			pr_info("Failed to read hash page table at %ld err %ld\n",
927 				hpte_group, lpar_rc);
928 			continue;
929 		}
930 
931 		for (j = 0; j < 4; j++) {
932 			if (HPTE_V_COMPARE(ptes[j].pteh, want_v) &&
933 			    (ptes[j].pteh & HPTE_V_VALID))
934 				return i + j;
935 		}
936 	}
937 
938 	return -1;
939 }
940 
941 static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize)
942 {
943 	long slot;
944 	unsigned long hash;
945 	unsigned long want_v;
946 	unsigned long hpte_group;
947 
948 	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
949 	want_v = hpte_encode_avpn(vpn, psize, ssize);
950 
951 	/*
952 	 * We try to keep bolted entries always in primary hash
953 	 * But in some case we can find them in secondary too.
954 	 */
955 	hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
956 	slot = __pSeries_lpar_hpte_find(want_v, hpte_group);
957 	if (slot < 0) {
958 		/* Try in secondary */
959 		hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
960 		slot = __pSeries_lpar_hpte_find(want_v, hpte_group);
961 		if (slot < 0)
962 			return -1;
963 	}
964 	return hpte_group + slot;
965 }
966 
967 static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
968 					     unsigned long ea,
969 					     int psize, int ssize)
970 {
971 	unsigned long vpn;
972 	unsigned long lpar_rc, slot, vsid, flags;
973 
974 	vsid = get_kernel_vsid(ea, ssize);
975 	vpn = hpt_vpn(ea, vsid, ssize);
976 
977 	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
978 	BUG_ON(slot == -1);
979 
980 	flags = newpp & (HPTE_R_PP | HPTE_R_N);
981 	if (mmu_has_feature(MMU_FTR_KERNEL_RO))
982 		/* Move pp0 into bit 8 (IBM 55) */
983 		flags |= (newpp & HPTE_R_PP0) >> 55;
984 
985 	flags |= ((newpp & HPTE_R_KEY_HI) >> 48) | (newpp & HPTE_R_KEY_LO);
986 
987 	lpar_rc = plpar_pte_protect(flags, slot, 0);
988 
989 	BUG_ON(lpar_rc != H_SUCCESS);
990 }
991 
992 static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
993 					 int psize, int apsize,
994 					 int ssize, int local)
995 {
996 	unsigned long want_v;
997 	unsigned long lpar_rc;
998 	unsigned long dummy1, dummy2;
999 
1000 	pr_devel("    inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
1001 		 slot, vpn, psize, local);
1002 
1003 	want_v = hpte_encode_avpn(vpn, psize, ssize);
1004 	lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2);
1005 	if (lpar_rc == H_NOT_FOUND)
1006 		return;
1007 
1008 	BUG_ON(lpar_rc != H_SUCCESS);
1009 }
1010 
1011 
1012 /*
1013  * As defined in the PAPR's section 14.5.4.1.8
1014  * The control mask doesn't include the returned reference and change bit from
1015  * the processed PTE.
1016  */
1017 #define HBLKR_AVPN		0x0100000000000000UL
1018 #define HBLKR_CTRL_MASK		0xf800000000000000UL
1019 #define HBLKR_CTRL_SUCCESS	0x8000000000000000UL
1020 #define HBLKR_CTRL_ERRNOTFOUND	0x8800000000000000UL
1021 #define HBLKR_CTRL_ERRBUSY	0xa000000000000000UL
1022 
1023 /*
1024  * Returned true if we are supporting this block size for the specified segment
1025  * base page size and actual page size.
1026  *
1027  * Currently, we only support 8 size block.
1028  */
1029 static inline bool is_supported_hlbkrm(int bpsize, int psize)
1030 {
1031 	return (hblkrm_size[bpsize][psize] == HBLKRM_SUPPORTED_BLOCK_SIZE);
1032 }
1033 
1034 /**
1035  * H_BLOCK_REMOVE caller.
1036  * @idx should point to the latest @param entry set with a PTEX.
1037  * If PTE cannot be processed because another CPUs has already locked that
1038  * group, those entries are put back in @param starting at index 1.
1039  * If entries has to be retried and @retry_busy is set to true, these entries
1040  * are retried until success. If @retry_busy is set to false, the returned
1041  * is the number of entries yet to process.
1042  */
1043 static unsigned long call_block_remove(unsigned long idx, unsigned long *param,
1044 				       bool retry_busy)
1045 {
1046 	unsigned long i, rc, new_idx;
1047 	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
1048 
1049 	if (idx < 2) {
1050 		pr_warn("Unexpected empty call to H_BLOCK_REMOVE");
1051 		return 0;
1052 	}
1053 again:
1054 	new_idx = 0;
1055 	if (idx > PLPAR_HCALL9_BUFSIZE) {
1056 		pr_err("Too many PTEs (%lu) for H_BLOCK_REMOVE", idx);
1057 		idx = PLPAR_HCALL9_BUFSIZE;
1058 	} else if (idx < PLPAR_HCALL9_BUFSIZE)
1059 		param[idx] = HBR_END;
1060 
1061 	rc = plpar_hcall9(H_BLOCK_REMOVE, retbuf,
1062 			  param[0], /* AVA */
1063 			  param[1],  param[2],  param[3],  param[4], /* TS0-7 */
1064 			  param[5],  param[6],  param[7],  param[8]);
1065 	if (rc == H_SUCCESS)
1066 		return 0;
1067 
1068 	BUG_ON(rc != H_PARTIAL);
1069 
1070 	/* Check that the unprocessed entries were 'not found' or 'busy' */
1071 	for (i = 0; i < idx-1; i++) {
1072 		unsigned long ctrl = retbuf[i] & HBLKR_CTRL_MASK;
1073 
1074 		if (ctrl == HBLKR_CTRL_ERRBUSY) {
1075 			param[++new_idx] = param[i+1];
1076 			continue;
1077 		}
1078 
1079 		BUG_ON(ctrl != HBLKR_CTRL_SUCCESS
1080 		       && ctrl != HBLKR_CTRL_ERRNOTFOUND);
1081 	}
1082 
1083 	/*
1084 	 * If there were entries found busy, retry these entries if requested,
	 * or if all the entries have to be retried.
1086 	 */
1087 	if (new_idx && (retry_busy || new_idx == (PLPAR_HCALL9_BUFSIZE-1))) {
1088 		idx = new_idx + 1;
1089 		goto again;
1090 	}
1091 
1092 	return new_idx;
1093 }
1094 
1095 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1096 /*
1097  * Limit iterations holding pSeries_lpar_tlbie_lock to 3. We also need
1098  * to make sure that we avoid bouncing the hypervisor tlbie lock.
1099  */
1100 #define PPC64_HUGE_HPTE_BATCH 12
1101 
1102 static void hugepage_block_invalidate(unsigned long *slot, unsigned long *vpn,
1103 				      int count, int psize, int ssize)
1104 {
1105 	unsigned long param[PLPAR_HCALL9_BUFSIZE];
1106 	unsigned long shift, current_vpgb, vpgb;
1107 	int i, pix = 0;
1108 
1109 	shift = mmu_psize_defs[psize].shift;
1110 
1111 	for (i = 0; i < count; i++) {
1112 		/*
		 * Shifting 3 more bits to the right to get an
		 * 8-page aligned virtual address.
1115 		 */
1116 		vpgb = (vpn[i] >> (shift - VPN_SHIFT + 3));
1117 		if (!pix || vpgb != current_vpgb) {
1118 			/*
			 * Need to start a new 8-page block, flush
1120 			 * the current one if needed.
1121 			 */
1122 			if (pix)
1123 				(void)call_block_remove(pix, param, true);
1124 			current_vpgb = vpgb;
1125 			param[0] = hpte_encode_avpn(vpn[i], psize, ssize);
1126 			pix = 1;
1127 		}
1128 
1129 		param[pix++] = HBR_REQUEST | HBLKR_AVPN | slot[i];
1130 		if (pix == PLPAR_HCALL9_BUFSIZE) {
1131 			pix = call_block_remove(pix, param, false);
1132 			/*
1133 			 * pix = 0 means that all the entries were
1134 			 * removed, we can start a new block.
1135 			 * Otherwise, this means that there are entries
1136 			 * to retry, and pix points to latest one, so
1137 			 * we should increment it and try to continue
1138 			 * the same block.
1139 			 */
1140 			if (pix)
1141 				pix++;
1142 		}
1143 	}
1144 	if (pix)
1145 		(void)call_block_remove(pix, param, true);
1146 }
1147 
1148 static void hugepage_bulk_invalidate(unsigned long *slot, unsigned long *vpn,
1149 				     int count, int psize, int ssize)
1150 {
1151 	unsigned long param[PLPAR_HCALL9_BUFSIZE];
1152 	int i = 0, pix = 0, rc;
1153 
1154 	for (i = 0; i < count; i++) {
1155 
1156 		if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
1157 			pSeries_lpar_hpte_invalidate(slot[i], vpn[i], psize, 0,
1158 						     ssize, 0);
1159 		} else {
1160 			param[pix] = HBR_REQUEST | HBR_AVPN | slot[i];
1161 			param[pix+1] = hpte_encode_avpn(vpn[i], psize, ssize);
1162 			pix += 2;
1163 			if (pix == 8) {
1164 				rc = plpar_hcall9(H_BULK_REMOVE, param,
1165 						  param[0], param[1], param[2],
1166 						  param[3], param[4], param[5],
1167 						  param[6], param[7]);
1168 				BUG_ON(rc != H_SUCCESS);
1169 				pix = 0;
1170 			}
1171 		}
1172 	}
1173 	if (pix) {
1174 		param[pix] = HBR_END;
1175 		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
1176 				  param[2], param[3], param[4], param[5],
1177 				  param[6], param[7]);
1178 		BUG_ON(rc != H_SUCCESS);
1179 	}
1180 }
1181 
1182 static inline void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
1183 						      unsigned long *vpn,
1184 						      int count, int psize,
1185 						      int ssize)
1186 {
1187 	unsigned long flags = 0;
1188 	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
1189 
1190 	if (lock_tlbie)
1191 		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);
1192 
1193 	/* Assuming THP size is 16M */
1194 	if (is_supported_hlbkrm(psize, MMU_PAGE_16M))
1195 		hugepage_block_invalidate(slot, vpn, count, psize, ssize);
1196 	else
1197 		hugepage_bulk_invalidate(slot, vpn, count, psize, ssize);
1198 
1199 	if (lock_tlbie)
1200 		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
1201 }
1202 
1203 static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
1204 					     unsigned long addr,
1205 					     unsigned char *hpte_slot_array,
1206 					     int psize, int ssize, int local)
1207 {
1208 	int i, index = 0;
1209 	unsigned long s_addr = addr;
1210 	unsigned int max_hpte_count, valid;
1211 	unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH];
1212 	unsigned long slot_array[PPC64_HUGE_HPTE_BATCH];
1213 	unsigned long shift, hidx, vpn = 0, hash, slot;
1214 
1215 	shift = mmu_psize_defs[psize].shift;
1216 	max_hpte_count = 1U << (PMD_SHIFT - shift);
1217 
1218 	for (i = 0; i < max_hpte_count; i++) {
1219 		valid = hpte_valid(hpte_slot_array, i);
1220 		if (!valid)
1221 			continue;
1222 		hidx =  hpte_hash_index(hpte_slot_array, i);
1223 
1224 		/* get the vpn */
1225 		addr = s_addr + (i * (1ul << shift));
1226 		vpn = hpt_vpn(addr, vsid, ssize);
1227 		hash = hpt_hash(vpn, shift, ssize);
1228 		if (hidx & _PTEIDX_SECONDARY)
1229 			hash = ~hash;
1230 
1231 		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
1232 		slot += hidx & _PTEIDX_GROUP_IX;
1233 
1234 		slot_array[index] = slot;
1235 		vpn_array[index] = vpn;
1236 		if (index == PPC64_HUGE_HPTE_BATCH - 1) {
1237 			/*
			 * Now do a bulk invalidate
1239 			 */
1240 			__pSeries_lpar_hugepage_invalidate(slot_array,
1241 							   vpn_array,
1242 							   PPC64_HUGE_HPTE_BATCH,
1243 							   psize, ssize);
1244 			index = 0;
1245 		} else
1246 			index++;
1247 	}
1248 	if (index)
1249 		__pSeries_lpar_hugepage_invalidate(slot_array, vpn_array,
1250 						   index, psize, ssize);
1251 }
1252 #else
1253 static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
1254 					     unsigned long addr,
1255 					     unsigned char *hpte_slot_array,
1256 					     int psize, int ssize, int local)
1257 {
1258 	WARN(1, "%s called without THP support\n", __func__);
1259 }
1260 #endif
1261 
1262 static int pSeries_lpar_hpte_removebolted(unsigned long ea,
1263 					  int psize, int ssize)
1264 {
1265 	unsigned long vpn;
1266 	unsigned long slot, vsid;
1267 
1268 	vsid = get_kernel_vsid(ea, ssize);
1269 	vpn = hpt_vpn(ea, vsid, ssize);
1270 
1271 	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
1272 	if (slot == -1)
1273 		return -ENOENT;
1274 
1275 	/*
1276 	 * lpar doesn't use the passed actual page size
1277 	 */
1278 	pSeries_lpar_hpte_invalidate(slot, vpn, psize, 0, ssize, 0);
1279 	return 0;
1280 }
1281 
1282 
1283 static inline unsigned long compute_slot(real_pte_t pte,
1284 					 unsigned long vpn,
1285 					 unsigned long index,
1286 					 unsigned long shift,
1287 					 int ssize)
1288 {
1289 	unsigned long slot, hash, hidx;
1290 
1291 	hash = hpt_hash(vpn, shift, ssize);
1292 	hidx = __rpte_to_hidx(pte, index);
1293 	if (hidx & _PTEIDX_SECONDARY)
1294 		hash = ~hash;
1295 	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
1296 	slot += hidx & _PTEIDX_GROUP_IX;
1297 	return slot;
1298 }
1299 
1300 /**
 * The hcall H_BLOCK_REMOVE implies that the virtual pages to be processed are
1302  * "all within the same naturally aligned 8 page virtual address block".
1303  */
1304 static void do_block_remove(unsigned long number, struct ppc64_tlb_batch *batch,
1305 			    unsigned long *param)
1306 {
1307 	unsigned long vpn;
1308 	unsigned long i, pix = 0;
1309 	unsigned long index, shift, slot, current_vpgb, vpgb;
1310 	real_pte_t pte;
1311 	int psize, ssize;
1312 
1313 	psize = batch->psize;
1314 	ssize = batch->ssize;
1315 
1316 	for (i = 0; i < number; i++) {
1317 		vpn = batch->vpn[i];
1318 		pte = batch->pte[i];
1319 		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
1320 			/*
			 * Shifting 3 more bits to the right to get an
			 * 8-page aligned virtual address.
1323 			 */
1324 			vpgb = (vpn >> (shift - VPN_SHIFT + 3));
1325 			if (!pix || vpgb != current_vpgb) {
1326 				/*
				 * Need to start a new 8-page block, flush
1328 				 * the current one if needed.
1329 				 */
1330 				if (pix)
1331 					(void)call_block_remove(pix, param,
1332 								true);
1333 				current_vpgb = vpgb;
1334 				param[0] = hpte_encode_avpn(vpn, psize,
1335 							    ssize);
1336 				pix = 1;
1337 			}
1338 
1339 			slot = compute_slot(pte, vpn, index, shift, ssize);
1340 			param[pix++] = HBR_REQUEST | HBLKR_AVPN | slot;
1341 
1342 			if (pix == PLPAR_HCALL9_BUFSIZE) {
1343 				pix = call_block_remove(pix, param, false);
1344 				/*
1345 				 * pix = 0 means that all the entries were
1346 				 * removed, we can start a new block.
1347 				 * Otherwise, this means that there are entries
1348 				 * to retry, and pix points to latest one, so
1349 				 * we should increment it and try to continue
1350 				 * the same block.
1351 				 */
1352 				if (pix)
1353 					pix++;
1354 			}
1355 		} pte_iterate_hashed_end();
1356 	}
1357 
1358 	if (pix)
1359 		(void)call_block_remove(pix, param, true);
1360 }
1361 
1362 /*
1363  * TLB Block Invalidate Characteristics
1364  *
 * These characteristics define the size of the block the hcall H_BLOCK_REMOVE
 * is able to process for each (segment base page size, actual page size) pair.
 *
 * The ibm,get-system-parameter RTAS call returns a buffer with the
 * following layout:
1370  *
1371  * [ 2 bytes size of the RTAS buffer (excluding these 2 bytes) ]
1372  * -----------------
1373  * TLB Block Invalidate Specifiers:
1374  * [ 1 byte LOG base 2 of the TLB invalidate block size being specified ]
1375  * [ 1 byte Number of page sizes (N) that are supported for the specified
1376  *          TLB invalidate block size ]
1377  * [ 1 byte Encoded segment base page size and actual page size
1378  *          MSB=0 means 4k segment base page size and actual page size
1379  *          MSB=1 the penc value in mmu_psize_def ]
1380  * ...
1381  * -----------------
1382  * Next TLB Block Invalidate Specifiers...
1383  * -----------------
1384  * [ 0 ]
1385  */
1386 static inline void set_hblkrm_bloc_size(int bpsize, int psize,
1387 					unsigned int block_size)
1388 {
1389 	if (block_size > hblkrm_size[bpsize][psize])
1390 		hblkrm_size[bpsize][psize] = block_size;
1391 }
1392 
1393 /*
1394  * Decode the Encoded segment base page size and actual page size.
1395  * PAPR specifies:
1396  *   - bit 7 is the L bit
1397  *   - bits 0-5 are the penc value
 * If the L bit is 0, this means 4K segment base page size and actual page
 * size; otherwise the penc value should be read.
1400  */
1401 #define HBLKRM_L_MASK		0x80
1402 #define HBLKRM_PENC_MASK	0x3f
1403 static inline void __init check_lp_set_hblkrm(unsigned int lp,
1404 					      unsigned int block_size)
1405 {
1406 	unsigned int bpsize, psize;
1407 
1408 	/* First, check the L bit, if not set, this means 4K */
1409 	if ((lp & HBLKRM_L_MASK) == 0) {
1410 		set_hblkrm_bloc_size(MMU_PAGE_4K, MMU_PAGE_4K, block_size);
1411 		return;
1412 	}
1413 
1414 	lp &= HBLKRM_PENC_MASK;
1415 	for (bpsize = 0; bpsize < MMU_PAGE_COUNT; bpsize++) {
1416 		struct mmu_psize_def *def = &mmu_psize_defs[bpsize];
1417 
1418 		for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
1419 			if (def->penc[psize] == lp) {
1420 				set_hblkrm_bloc_size(bpsize, psize, block_size);
1421 				return;
1422 			}
1423 		}
1424 	}
1425 }
1426 
1427 #define SPLPAR_TLB_BIC_TOKEN		50
1428 
1429 /*
1430  * The size of the TLB Block Invalidate Characteristics is variable. But at the
1431  * maximum it will be the number of possible page sizes *2 + 10 bytes.
1432  * Currently MMU_PAGE_COUNT is 16, which means 42 bytes. Use a cache line size
1433  * (128 bytes) for the buffer to get plenty of space.
1434  */
1435 #define SPLPAR_TLB_BIC_MAXLENGTH	128
1436 
1437 void __init pseries_lpar_read_hblkrm_characteristics(void)
1438 {
1439 	unsigned char local_buffer[SPLPAR_TLB_BIC_MAXLENGTH];
1440 	int call_status, len, idx, bpsize;
1441 
1442 	if (!firmware_has_feature(FW_FEATURE_BLOCK_REMOVE))
1443 		return;
1444 
1445 	spin_lock(&rtas_data_buf_lock);
1446 	memset(rtas_data_buf, 0, RTAS_DATA_BUF_SIZE);
1447 	call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
1448 				NULL,
1449 				SPLPAR_TLB_BIC_TOKEN,
1450 				__pa(rtas_data_buf),
1451 				RTAS_DATA_BUF_SIZE);
1452 	memcpy(local_buffer, rtas_data_buf, SPLPAR_TLB_BIC_MAXLENGTH);
1453 	local_buffer[SPLPAR_TLB_BIC_MAXLENGTH - 1] = '\0';
1454 	spin_unlock(&rtas_data_buf_lock);
1455 
1456 	if (call_status != 0) {
1457 		pr_warn("%s %s Error calling get-system-parameter (0x%x)\n",
1458 			__FILE__, __func__, call_status);
1459 		return;
1460 	}
1461 
1462 	/*
1463 	 * The first two (2) bytes of the data in the buffer are the length of
1464 	 * the returned data, not counting these first two (2) bytes.
1465 	 */
1466 	len = be16_to_cpu(*((u16 *)local_buffer)) + 2;
1467 	if (len > SPLPAR_TLB_BIC_MAXLENGTH) {
1468 		pr_warn("%s too large returned buffer %d", __func__, len);
1469 		return;
1470 	}
1471 
1472 	idx = 2;
1473 	while (idx < len) {
1474 		u8 block_shift = local_buffer[idx++];
1475 		u32 block_size;
1476 		unsigned int npsize;
1477 
1478 		if (!block_shift)
1479 			break;
1480 
1481 		block_size = 1 << block_shift;
1482 
1483 		for (npsize = local_buffer[idx++];
1484 		     npsize > 0 && idx < len; npsize--)
1485 			check_lp_set_hblkrm((unsigned int) local_buffer[idx++],
1486 					    block_size);
1487 	}
1488 
1489 	for (bpsize = 0; bpsize < MMU_PAGE_COUNT; bpsize++)
1490 		for (idx = 0; idx < MMU_PAGE_COUNT; idx++)
1491 			if (hblkrm_size[bpsize][idx])
1492 				pr_info("H_BLOCK_REMOVE supports base psize:%d psize:%d block size:%d",
1493 					bpsize, idx, hblkrm_size[bpsize][idx]);
1494 }
1495 
1496 /*
1497  * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
1498  * lock.
1499  */
1500 static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
1501 {
1502 	unsigned long vpn;
1503 	unsigned long i, pix, rc;
1504 	unsigned long flags = 0;
1505 	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
1506 	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
1507 	unsigned long param[PLPAR_HCALL9_BUFSIZE];
1508 	unsigned long index, shift, slot;
1509 	real_pte_t pte;
1510 	int psize, ssize;
1511 
1512 	if (lock_tlbie)
1513 		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);
1514 
1515 	if (is_supported_hlbkrm(batch->psize, batch->psize)) {
1516 		do_block_remove(number, batch, param);
1517 		goto out;
1518 	}
1519 
1520 	psize = batch->psize;
1521 	ssize = batch->ssize;
1522 	pix = 0;
1523 	for (i = 0; i < number; i++) {
1524 		vpn = batch->vpn[i];
1525 		pte = batch->pte[i];
1526 		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
1527 			slot = compute_slot(pte, vpn, index, shift, ssize);
1528 			if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
1529 				/*
1530 				 * lpar doesn't use the passed actual page size
1531 				 */
1532 				pSeries_lpar_hpte_invalidate(slot, vpn, psize,
1533 							     0, ssize, local);
1534 			} else {
1535 				param[pix] = HBR_REQUEST | HBR_AVPN | slot;
1536 				param[pix+1] = hpte_encode_avpn(vpn, psize,
1537 								ssize);
1538 				pix += 2;
1539 				if (pix == 8) {
1540 					rc = plpar_hcall9(H_BULK_REMOVE, param,
1541 						param[0], param[1], param[2],
1542 						param[3], param[4], param[5],
1543 						param[6], param[7]);
1544 					BUG_ON(rc != H_SUCCESS);
1545 					pix = 0;
1546 				}
1547 			}
1548 		} pte_iterate_hashed_end();
1549 	}
1550 	if (pix) {
1551 		param[pix] = HBR_END;
1552 		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
1553 				  param[2], param[3], param[4], param[5],
1554 				  param[6], param[7]);
1555 		BUG_ON(rc != H_SUCCESS);
1556 	}
1557 
1558 out:
1559 	if (lock_tlbie)
1560 		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
1561 }
1562 
1563 static int __init disable_bulk_remove(char *str)
1564 {
1565 	if (strcmp(str, "off") == 0 &&
1566 	    firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
1567 		pr_info("Disabling BULK_REMOVE firmware feature");
1568 		powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE;
1569 	}
1570 	return 1;
1571 }
1572 
1573 __setup("bulk_remove=", disable_bulk_remove);
1574 
1575 #define HPT_RESIZE_TIMEOUT	10000 /* ms */
1576 
1577 struct hpt_resize_state {
1578 	unsigned long shift;
1579 	int commit_rc;
1580 };
1581 
1582 static int pseries_lpar_resize_hpt_commit(void *data)
1583 {
1584 	struct hpt_resize_state *state = data;
1585 
1586 	state->commit_rc = plpar_resize_hpt_commit(0, state->shift);
1587 	if (state->commit_rc != H_SUCCESS)
1588 		return -EIO;
1589 
1590 	/* Hypervisor has transitioned the HTAB, update our globals */
1591 	ppc64_pft_size = state->shift;
1592 	htab_size_bytes = 1UL << ppc64_pft_size;
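	/* A PTEG is 8 HPTEs of 16 bytes each, i.e. 128 bytes, hence the >> 7 */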
1593 	htab_hash_mask = (htab_size_bytes >> 7) - 1;
1594 
1595 	return 0;
1596 }
1597 
1598 /*
1599  * Must be called in process context. The caller must hold the
1600  * cpus_lock.
1601  */
1602 static int pseries_lpar_resize_hpt(unsigned long shift)
1603 {
1604 	struct hpt_resize_state state = {
1605 		.shift = shift,
1606 		.commit_rc = H_FUNCTION,
1607 	};
1608 	unsigned int delay, total_delay = 0;
1609 	int rc;
1610 	ktime_t t0, t1, t2;
1611 
1612 	might_sleep();
1613 
1614 	if (!firmware_has_feature(FW_FEATURE_HPT_RESIZE))
1615 		return -ENODEV;
1616 
1617 	pr_info("Attempting to resize HPT to shift %lu\n", shift);
1618 
1619 	t0 = ktime_get();
1620 
1621 	rc = plpar_resize_hpt_prepare(0, shift);
1622 	while (H_IS_LONG_BUSY(rc)) {
1623 		delay = get_longbusy_msecs(rc);
1624 		total_delay += delay;
1625 		if (total_delay > HPT_RESIZE_TIMEOUT) {
1626 			/* prepare with shift==0 cancels an in-progress resize */
1627 			rc = plpar_resize_hpt_prepare(0, 0);
1628 			if (rc != H_SUCCESS)
1629 				pr_warn("Unexpected error %d cancelling timed out HPT resize\n",
1630 				       rc);
1631 			return -ETIMEDOUT;
1632 		}
1633 		msleep(delay);
1634 		rc = plpar_resize_hpt_prepare(0, shift);
1635 	}
1636 
1637 	switch (rc) {
1638 	case H_SUCCESS:
1639 		/* Continue on */
1640 		break;
1641 
1642 	case H_PARAMETER:
1643 		pr_warn("Invalid argument from H_RESIZE_HPT_PREPARE\n");
1644 		return -EINVAL;
1645 	case H_RESOURCE:
1646 		pr_warn("Operation not permitted from H_RESIZE_HPT_PREPARE\n");
1647 		return -EPERM;
1648 	default:
1649 		pr_warn("Unexpected error %d from H_RESIZE_HPT_PREPARE\n", rc);
1650 		return -EIO;
1651 	}
1652 
1653 	t1 = ktime_get();
1654 
1655 	rc = stop_machine_cpuslocked(pseries_lpar_resize_hpt_commit,
1656 				     &state, NULL);
1657 
1658 	t2 = ktime_get();
1659 
1660 	if (rc != 0) {
1661 		switch (state.commit_rc) {
1662 		case H_PTEG_FULL:
1663 			return -ENOSPC;
1664 
1665 		default:
1666 			pr_warn("Unexpected error %d from H_RESIZE_HPT_COMMIT\n",
1667 				state.commit_rc);
1668 			return -EIO;
1669 		};
1670 	}
1671 
1672 	pr_info("HPT resize to shift %lu complete (%lld ms / %lld ms)\n",
1673 		shift, (long long) ktime_ms_delta(t1, t0),
1674 		(long long) ktime_ms_delta(t2, t1));
1675 
1676 	return 0;
1677 }
1678 
1679 static int pseries_lpar_register_process_table(unsigned long base,
1680 			unsigned long page_size, unsigned long table_size)
1681 {
1682 	long rc;
1683 	unsigned long flags = 0;
1684 
1685 	if (table_size)
1686 		flags |= PROC_TABLE_NEW;
1687 	if (radix_enabled()) {
1688 		flags |= PROC_TABLE_RADIX;
1689 		if (mmu_has_feature(MMU_FTR_GTSE))
1690 			flags |= PROC_TABLE_GTSE;
1691 	} else
1692 		flags |= PROC_TABLE_HPT_SLB;
1693 	for (;;) {
1694 		rc = plpar_hcall_norets(H_REGISTER_PROC_TBL, flags, base,
1695 					page_size, table_size);
1696 		if (!H_IS_LONG_BUSY(rc))
1697 			break;
1698 		mdelay(get_longbusy_msecs(rc));
1699 	}
1700 	if (rc != H_SUCCESS) {
1701 		pr_err("Failed to register process table (rc=%ld)\n", rc);
1702 		BUG();
1703 	}
1704 	return rc;
1705 }
1706 
1707 void __init hpte_init_pseries(void)
1708 {
1709 	mmu_hash_ops.hpte_invalidate	 = pSeries_lpar_hpte_invalidate;
1710 	mmu_hash_ops.hpte_updatepp	 = pSeries_lpar_hpte_updatepp;
1711 	mmu_hash_ops.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp;
1712 	mmu_hash_ops.hpte_insert	 = pSeries_lpar_hpte_insert;
1713 	mmu_hash_ops.hpte_remove	 = pSeries_lpar_hpte_remove;
1714 	mmu_hash_ops.hpte_removebolted   = pSeries_lpar_hpte_removebolted;
1715 	mmu_hash_ops.flush_hash_range	 = pSeries_lpar_flush_hash_range;
1716 	mmu_hash_ops.hpte_clear_all      = pseries_hpte_clear_all;
1717 	mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
1718 
1719 	if (firmware_has_feature(FW_FEATURE_HPT_RESIZE))
1720 		mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt;
1721 
1722 	/*
1723 	 * On POWER9, we need to do a H_REGISTER_PROC_TBL hcall
1724 	 * to inform the hypervisor that we wish to use the HPT.
1725 	 */
1726 	if (cpu_has_feature(CPU_FTR_ARCH_300))
1727 		pseries_lpar_register_process_table(0, 0, 0);
1728 }
1729 
1730 #ifdef CONFIG_PPC_RADIX_MMU
1731 void radix_init_pseries(void)
1732 {
1733 	pr_info("Using radix MMU under hypervisor\n");
1734 
1735 	pseries_lpar_register_process_table(__pa(process_tb),
1736 						0, PRTB_SIZE_SHIFT - 12);
1737 }
1738 #endif
1739 
1740 #ifdef CONFIG_PPC_SMLPAR
1741 #define CMO_FREE_HINT_DEFAULT 1
1742 static int cmo_free_hint_flag = CMO_FREE_HINT_DEFAULT;
1743 
1744 static int __init cmo_free_hint(char *str)
1745 {
1746 	char *parm;
1747 	parm = strstrip(str);
1748 
1749 	if (strcasecmp(parm, "no") == 0 || strcasecmp(parm, "off") == 0) {
1750 		pr_info("%s: CMO free page hinting is not active.\n", __func__);
1751 		cmo_free_hint_flag = 0;
1752 		return 1;
1753 	}
1754 
1755 	cmo_free_hint_flag = 1;
1756 	pr_info("%s: CMO free page hinting is active.\n", __func__);
1757 
1758 	if (strcasecmp(parm, "yes") == 0 || strcasecmp(parm, "on") == 0)
1759 		return 1;
1760 
1761 	return 0;
1762 }
1763 
1764 __setup("cmo_free_hint=", cmo_free_hint);
1765 
1766 static void pSeries_set_page_state(struct page *page, int order,
1767 				   unsigned long state)
1768 {
1769 	int i, j;
1770 	unsigned long cmo_page_sz, addr;
1771 
1772 	cmo_page_sz = cmo_get_page_size();
1773 	addr = __pa((unsigned long)page_address(page));
1774 
1775 	for (i = 0; i < (1 << order); i++, addr += PAGE_SIZE) {
1776 		for (j = 0; j < PAGE_SIZE; j += cmo_page_sz)
1777 			plpar_hcall_norets(H_PAGE_INIT, state, addr + j, 0);
1778 	}
1779 }
1780 
1781 void arch_free_page(struct page *page, int order)
1782 {
1783 	if (radix_enabled())
1784 		return;
1785 	if (!cmo_free_hint_flag || !firmware_has_feature(FW_FEATURE_CMO))
1786 		return;
1787 
1788 	pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED);
1789 }
1790 EXPORT_SYMBOL(arch_free_page);
1791 
1792 #endif /* CONFIG_PPC_SMLPAR */
1793 #endif /* CONFIG_PPC_BOOK3S_64 */
1794 
1795 #ifdef CONFIG_TRACEPOINTS
1796 #ifdef CONFIG_JUMP_LABEL
1797 struct static_key hcall_tracepoint_key = STATIC_KEY_INIT;
1798 
1799 int hcall_tracepoint_regfunc(void)
1800 {
1801 	static_key_slow_inc(&hcall_tracepoint_key);
1802 	return 0;
1803 }
1804 
1805 void hcall_tracepoint_unregfunc(void)
1806 {
1807 	static_key_slow_dec(&hcall_tracepoint_key);
1808 }
1809 #else
1810 /*
1811  * We optimise our hcall path by placing hcall_tracepoint_refcount
1812  * directly in the TOC so we can check if the hcall tracepoints are
1813  * enabled via a single load.
1814  */
1815 
1816 /* NB: reg/unreg are called while guarded with the tracepoints_mutex */
1817 extern long hcall_tracepoint_refcount;
1818 
1819 int hcall_tracepoint_regfunc(void)
1820 {
1821 	hcall_tracepoint_refcount++;
1822 	return 0;
1823 }
1824 
1825 void hcall_tracepoint_unregfunc(void)
1826 {
1827 	hcall_tracepoint_refcount--;
1828 }
1829 #endif
1830 
1831 /*
1832  * Since the tracing code might execute hcalls we need to guard against
1833  * recursion. One example of this are spinlocks calling H_YIELD on
1834  * shared processor partitions.
1835  */
1836 static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);
1837 
1838 
1839 void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
1840 {
1841 	unsigned long flags;
1842 	unsigned int *depth;
1843 
1844 	/*
1845 	 * We cannot call tracepoints inside RCU idle regions which
1846 	 * means we must not trace H_CEDE.
1847 	 */
1848 	if (opcode == H_CEDE)
1849 		return;
1850 
1851 	local_irq_save(flags);
1852 
1853 	depth = this_cpu_ptr(&hcall_trace_depth);
1854 
1855 	if (*depth)
1856 		goto out;
1857 
1858 	(*depth)++;
1859 	preempt_disable();
1860 	trace_hcall_entry(opcode, args);
1861 	(*depth)--;
1862 
1863 out:
1864 	local_irq_restore(flags);
1865 }
1866 
1867 void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf)
1868 {
1869 	unsigned long flags;
1870 	unsigned int *depth;
1871 
1872 	if (opcode == H_CEDE)
1873 		return;
1874 
1875 	local_irq_save(flags);
1876 
1877 	depth = this_cpu_ptr(&hcall_trace_depth);
1878 
1879 	if (*depth)
1880 		goto out;
1881 
1882 	(*depth)++;
1883 	trace_hcall_exit(opcode, retval, retbuf);
1884 	preempt_enable();
1885 	(*depth)--;
1886 
1887 out:
1888 	local_irq_restore(flags);
1889 }
1890 #endif
1891 
1892 /**
1893  * h_get_mpp
1894  * H_GET_MPP hcall returns info in 7 parms
1895  */
1896 int h_get_mpp(struct hvcall_mpp_data *mpp_data)
1897 {
1898 	int rc;
1899 	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
1900 
1901 	rc = plpar_hcall9(H_GET_MPP, retbuf);
1902 
1903 	mpp_data->entitled_mem = retbuf[0];
1904 	mpp_data->mapped_mem = retbuf[1];
1905 
1906 	mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
1907 	mpp_data->pool_num = retbuf[2] & 0xffff;
1908 
1909 	mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
1910 	mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
1911 	mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffffUL;
1912 
1913 	mpp_data->pool_size = retbuf[4];
1914 	mpp_data->loan_request = retbuf[5];
1915 	mpp_data->backing_mem = retbuf[6];
1916 
1917 	return rc;
1918 }
1919 EXPORT_SYMBOL(h_get_mpp);
1920 
1921 int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data)
1922 {
1923 	int rc;
1924 	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 };
1925 
1926 	rc = plpar_hcall9(H_GET_MPP_X, retbuf);
1927 
1928 	mpp_x_data->coalesced_bytes = retbuf[0];
1929 	mpp_x_data->pool_coalesced_bytes = retbuf[1];
1930 	mpp_x_data->pool_purr_cycles = retbuf[2];
1931 	mpp_x_data->pool_spurr_cycles = retbuf[3];
1932 
1933 	return rc;
1934 }
1935 
1936 static unsigned long vsid_unscramble(unsigned long vsid, int ssize)
1937 {
1938 	unsigned long protovsid;
1939 	unsigned long va_bits = VA_BITS;
1940 	unsigned long modinv, vsid_modulus;
1941 	unsigned long max_mod_inv, tmp_modinv;
1942 
1943 	if (!mmu_has_feature(MMU_FTR_68_BIT_VA))
1944 		va_bits = 65;
1945 
1946 	if (ssize == MMU_SEGSIZE_256M) {
1947 		modinv = VSID_MULINV_256M;
1948 		vsid_modulus = ((1UL << (va_bits - SID_SHIFT)) - 1);
1949 	} else {
1950 		modinv = VSID_MULINV_1T;
1951 		vsid_modulus = ((1UL << (va_bits - SID_SHIFT_1T)) - 1);
1952 	}
1953 
1954 	/*
1955 	 * vsid outside our range.
1956 	 */
1957 	if (vsid >= vsid_modulus)
1958 		return 0;
1959 
1960 	/*
	 * If modinv is the modular multiplicative inverse of (x % vsid_modulus)
1962 	 * and vsid = (protovsid * x) % vsid_modulus, then we say:
1963 	 *   protovsid = (vsid * modinv) % vsid_modulus
1964 	 */
1965 
1966 	/* Check if (vsid * modinv) overflow (63 bits) */
1967 	max_mod_inv = 0x7fffffffffffffffull / vsid;
1968 	if (modinv < max_mod_inv)
1969 		return (vsid * modinv) % vsid_modulus;
1970 
1971 	tmp_modinv = modinv/max_mod_inv;
1972 	modinv %= max_mod_inv;
1973 
1974 	protovsid = (((vsid * max_mod_inv) % vsid_modulus) * tmp_modinv) % vsid_modulus;
1975 	protovsid = (protovsid + vsid * modinv) % vsid_modulus;
1976 
1977 	return protovsid;
1978 }
1979 
1980 static int __init reserve_vrma_context_id(void)
1981 {
1982 	unsigned long protovsid;
1983 
1984 	/*
1985 	 * Reserve context ids which map to reserved virtual addresses. For now
1986 	 * we only reserve the context id which maps to the VRMA VSID. We ignore
1987 	 * the addresses in "ibm,adjunct-virtual-addresses" because we don't
1988 	 * enable adjunct support via the "ibm,client-architecture-support"
1989 	 * interface.
1990 	 */
1991 	protovsid = vsid_unscramble(VRMA_VSID, MMU_SEGSIZE_1T);
1992 	hash__reserve_context_id(protovsid >> ESID_BITS_1T);
1993 	return 0;
1994 }
1995 machine_device_initcall(pseries, reserve_vrma_context_id);
1996 
1997 #ifdef CONFIG_DEBUG_FS
1998 /* debugfs file interface for vpa data */
1999 static ssize_t vpa_file_read(struct file *filp, char __user *buf, size_t len,
2000 			      loff_t *pos)
2001 {
2002 	int cpu = (long)filp->private_data;
2003 	struct lppaca *lppaca = &lppaca_of(cpu);
2004 
2005 	return simple_read_from_buffer(buf, len, pos, lppaca,
2006 				sizeof(struct lppaca));
2007 }
2008 
2009 static const struct file_operations vpa_fops = {
2010 	.open		= simple_open,
2011 	.read		= vpa_file_read,
2012 	.llseek		= default_llseek,
2013 };
2014 
2015 static int __init vpa_debugfs_init(void)
2016 {
2017 	char name[16];
2018 	long i;
2019 	struct dentry *vpa_dir;
2020 
2021 	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
2022 		return 0;
2023 
2024 	vpa_dir = debugfs_create_dir("vpa", powerpc_debugfs_root);
2025 
	/* set up the per-cpu vpa file */
2027 	for_each_possible_cpu(i) {
2028 		sprintf(name, "cpu-%ld", i);
2029 		debugfs_create_file(name, 0400, vpa_dir, (void *)i, &vpa_fops);
2030 	}
2031 
2032 	return 0;
2033 }
2034 machine_arch_initcall(pseries, vpa_debugfs_init);
2035 #endif /* CONFIG_DEBUG_FS */
2036