// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corporation, 2018
 * Authors Suraj Jitindar Singh <sjitindarsingh@gmail.com>
 *	   Paul Mackerras <paulus@ozlabs.org>
 *
 * Description: KVM functions specific to running nested KVM-HV guests
 * on Book3S processors (specifically POWER9 and later).
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/llist.h>
#include <linux/pgtable.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu.h>
#include <asm/pgalloc.h>
#include <asm/pte-walk.h>
#include <asm/reg.h>

static struct patb_entry *pseries_partition_tb;

static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp);
static void kvmhv_free_memslot_nest_rmap(struct kvm_memory_slot *free);

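/*
 * Gather the current hypervisor-privileged register state of a vcpu
 * into an hv_guest_state structure.
 */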
void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	hr->pcr = vc->pcr | PCR_MASK;
	hr->dpdes = vc->dpdes;
	hr->hfscr = vcpu->arch.hfscr;
	hr->tb_offset = vc->tb_offset;
	hr->dawr0 = vcpu->arch.dawr;
	hr->dawrx0 = vcpu->arch.dawrx;
	hr->ciabr = vcpu->arch.ciabr;
	hr->purr = vcpu->arch.purr;
	hr->spurr = vcpu->arch.spurr;
	hr->ic = vcpu->arch.ic;
	hr->vtb = vc->vtb;
	hr->srr0 = vcpu->arch.shregs.srr0;
	hr->srr1 = vcpu->arch.shregs.srr1;
	hr->sprg[0] = vcpu->arch.shregs.sprg0;
	hr->sprg[1] = vcpu->arch.shregs.sprg1;
	hr->sprg[2] = vcpu->arch.shregs.sprg2;
	hr->sprg[3] = vcpu->arch.shregs.sprg3;
	hr->pidr = vcpu->arch.pid;
	hr->cfar = vcpu->arch.cfar;
	hr->ppr = vcpu->arch.ppr;
}

static void byteswap_pt_regs(struct pt_regs *regs)
{
	unsigned long *addr = (unsigned long *) regs;

	for (; addr < ((unsigned long *) (regs + 1)); addr++)
		*addr = swab64(*addr);
}

static void byteswap_hv_regs(struct hv_guest_state *hr)
{
	hr->version = swab64(hr->version);
	hr->lpid = swab32(hr->lpid);
	hr->vcpu_token = swab32(hr->vcpu_token);
	hr->lpcr = swab64(hr->lpcr);
	hr->pcr = swab64(hr->pcr) | PCR_MASK;
	hr->amor = swab64(hr->amor);
	hr->dpdes = swab64(hr->dpdes);
	hr->hfscr = swab64(hr->hfscr);
	hr->tb_offset = swab64(hr->tb_offset);
	hr->dawr0 = swab64(hr->dawr0);
	hr->dawrx0 = swab64(hr->dawrx0);
	hr->ciabr = swab64(hr->ciabr);
	hr->hdec_expiry = swab64(hr->hdec_expiry);
	hr->purr = swab64(hr->purr);
	hr->spurr = swab64(hr->spurr);
	hr->ic = swab64(hr->ic);
	hr->vtb = swab64(hr->vtb);
	hr->hdar = swab64(hr->hdar);
	hr->hdsisr = swab64(hr->hdsisr);
	hr->heir = swab64(hr->heir);
	hr->asdr = swab64(hr->asdr);
	hr->srr0 = swab64(hr->srr0);
	hr->srr1 = swab64(hr->srr1);
	hr->sprg[0] = swab64(hr->sprg[0]);
	hr->sprg[1] = swab64(hr->sprg[1]);
	hr->sprg[2] = swab64(hr->sprg[2]);
	hr->sprg[3] = swab64(hr->sprg[3]);
	hr->pidr = swab64(hr->pidr);
	hr->cfar = swab64(hr->cfar);
	hr->ppr = swab64(hr->ppr);
}

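/*
 * Capture the HV register state to be returned to L1 after running an
 * L2 guest, including the fault registers specific to the interrupt
 * that ended the run.
 */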
static void save_hv_return_state(struct kvm_vcpu *vcpu, int trap,
				 struct hv_guest_state *hr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	hr->dpdes = vc->dpdes;
	hr->hfscr = vcpu->arch.hfscr;
	hr->purr = vcpu->arch.purr;
	hr->spurr = vcpu->arch.spurr;
	hr->ic = vcpu->arch.ic;
	hr->vtb = vc->vtb;
	hr->srr0 = vcpu->arch.shregs.srr0;
	hr->srr1 = vcpu->arch.shregs.srr1;
	hr->sprg[0] = vcpu->arch.shregs.sprg0;
	hr->sprg[1] = vcpu->arch.shregs.sprg1;
	hr->sprg[2] = vcpu->arch.shregs.sprg2;
	hr->sprg[3] = vcpu->arch.shregs.sprg3;
	hr->pidr = vcpu->arch.pid;
	hr->cfar = vcpu->arch.cfar;
	hr->ppr = vcpu->arch.ppr;
	switch (trap) {
	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
		hr->hdar = vcpu->arch.fault_dar;
		hr->hdsisr = vcpu->arch.fault_dsisr;
		hr->asdr = vcpu->arch.fault_gpa;
		break;
	case BOOK3S_INTERRUPT_H_INST_STORAGE:
		hr->asdr = vcpu->arch.fault_gpa;
		break;
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		hr->heir = vcpu->arch.emul_inst;
		break;
	}
}

static void sanitise_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
{
	/*
	 * Don't let L1 enable features for L2 which we've disabled for L1,
	 * but preserve the interrupt cause field.
	 */
	hr->hfscr &= (HFSCR_INTR_CAUSE | vcpu->arch.hfscr);

	/* Don't let data address watchpoint match in hypervisor state */
	hr->dawrx0 &= ~DAWRX_HYP;

	/* Don't let completed instruction address breakpt match in HV state */
	if ((hr->ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
		hr->ciabr &= ~CIABR_PRIV;
}

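/* Load HV register state from an hv_guest_state into the vcpu/vcore. */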
static void restore_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	vc->pcr = hr->pcr | PCR_MASK;
	vc->dpdes = hr->dpdes;
	vcpu->arch.hfscr = hr->hfscr;
	vcpu->arch.dawr = hr->dawr0;
	vcpu->arch.dawrx = hr->dawrx0;
	vcpu->arch.ciabr = hr->ciabr;
	vcpu->arch.purr = hr->purr;
	vcpu->arch.spurr = hr->spurr;
	vcpu->arch.ic = hr->ic;
	vc->vtb = hr->vtb;
	vcpu->arch.shregs.srr0 = hr->srr0;
	vcpu->arch.shregs.srr1 = hr->srr1;
	vcpu->arch.shregs.sprg0 = hr->sprg[0];
	vcpu->arch.shregs.sprg1 = hr->sprg[1];
	vcpu->arch.shregs.sprg2 = hr->sprg[2];
	vcpu->arch.shregs.sprg3 = hr->sprg[3];
	vcpu->arch.pid = hr->pidr;
	vcpu->arch.cfar = hr->cfar;
	vcpu->arch.ppr = hr->ppr;
}

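/*
 * Copy the register state returned from running a nested guest back
 * into the vcpu, including the exit/fault information registers
 * (HDAR, HDSISR, ASDR, HEIR).
 */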
void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
				   struct hv_guest_state *hr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	vc->dpdes = hr->dpdes;
	vcpu->arch.hfscr = hr->hfscr;
	vcpu->arch.purr = hr->purr;
	vcpu->arch.spurr = hr->spurr;
	vcpu->arch.ic = hr->ic;
	vc->vtb = hr->vtb;
	vcpu->arch.fault_dar = hr->hdar;
	vcpu->arch.fault_dsisr = hr->hdsisr;
	vcpu->arch.fault_gpa = hr->asdr;
	vcpu->arch.emul_inst = hr->heir;
	vcpu->arch.shregs.srr0 = hr->srr0;
	vcpu->arch.shregs.srr1 = hr->srr1;
	vcpu->arch.shregs.sprg0 = hr->sprg[0];
	vcpu->arch.shregs.sprg1 = hr->sprg[1];
	vcpu->arch.shregs.sprg2 = hr->sprg[2];
	vcpu->arch.shregs.sprg3 = hr->sprg[3];
	vcpu->arch.pid = hr->pidr;
	vcpu->arch.cfar = hr->cfar;
	vcpu->arch.ppr = hr->ppr;
}

static void kvmhv_nested_mmio_needed(struct kvm_vcpu *vcpu, u64 regs_ptr)
{
	/* No need to reflect the page fault to L1, we've handled it */
	vcpu->arch.trap = 0;

	/*
	 * Since the L2 gprs have already been written back into L1 memory when
	 * we complete the mmio, store the L1 memory location of the L2 gpr
	 * being loaded into by the mmio so that the loaded value can be
	 * written there in kvmppc_complete_mmio_load()
	 */
	if (((vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) == KVM_MMIO_REG_GPR)
	    && (vcpu->mmio_is_write == 0)) {
		vcpu->arch.nested_io_gpr = (gpa_t) regs_ptr +
					   offsetof(struct pt_regs,
						    gpr[vcpu->arch.io_gpr]);
		vcpu->arch.io_gpr = KVM_MMIO_REG_NESTED_GPR;
	}
}

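/*
 * Handle the H_ENTER_NESTED hcall from L1.
 * r4 = L1 guest real address of the hv_guest_state for the L2 vcpu
 * r5 = L1 guest real address of the pt_regs for the L2 vcpu
 * Runs the L2 vcpu until it exits for a reason that L1 (or L0) needs
 * to handle, then returns the updated state to L1.
 */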
long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
{
	long int err, r;
	struct kvm_nested_guest *l2;
	struct pt_regs l2_regs, saved_l1_regs;
	struct hv_guest_state l2_hv, saved_l1_hv;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	u64 hv_ptr, regs_ptr;
	u64 hdec_exp;
	s64 delta_purr, delta_spurr, delta_ic, delta_vtb;
	u64 mask;
	unsigned long lpcr;

	if (vcpu->kvm->arch.l1_ptcr == 0)
		return H_NOT_AVAILABLE;

	/* copy parameters in */
	hv_ptr = kvmppc_get_gpr(vcpu, 4);
	regs_ptr = kvmppc_get_gpr(vcpu, 5);
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	err = kvm_vcpu_read_guest(vcpu, hv_ptr, &l2_hv,
				  sizeof(struct hv_guest_state)) ||
		kvm_vcpu_read_guest(vcpu, regs_ptr, &l2_regs,
				    sizeof(struct pt_regs));
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	if (err)
		return H_PARAMETER;

	if (kvmppc_need_byteswap(vcpu))
		byteswap_hv_regs(&l2_hv);
	if (l2_hv.version != HV_GUEST_STATE_VERSION)
		return H_P2;

	if (kvmppc_need_byteswap(vcpu))
		byteswap_pt_regs(&l2_regs);
	if (l2_hv.vcpu_token >= NR_CPUS)
		return H_PARAMETER;

	/* translate lpid */
	l2 = kvmhv_get_nested(vcpu->kvm, l2_hv.lpid, true);
	if (!l2)
		return H_PARAMETER;
	if (!l2->l1_gr_to_hr) {
		mutex_lock(&l2->tlb_lock);
		kvmhv_update_ptbl_cache(l2);
		mutex_unlock(&l2->tlb_lock);
	}

	/* save l1 values of things */
	vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
	saved_l1_regs = vcpu->arch.regs;
	kvmhv_save_hv_regs(vcpu, &saved_l1_hv);

	/* convert TB values/offsets to host (L0) values */
	hdec_exp = l2_hv.hdec_expiry - vc->tb_offset;
	vc->tb_offset += l2_hv.tb_offset;

	/* set L1 state to L2 state */
	vcpu->arch.nested = l2;
	vcpu->arch.nested_vcpu_id = l2_hv.vcpu_token;
	vcpu->arch.regs = l2_regs;
	vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
	mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD |
		LPCR_LPES | LPCR_MER;
	lpcr = (vc->lpcr & ~mask) | (l2_hv.lpcr & mask);
	sanitise_hv_regs(vcpu, &l2_hv);
	restore_hv_regs(vcpu, &l2_hv);

	vcpu->arch.ret = RESUME_GUEST;
	vcpu->arch.trap = 0;
	do {
		if (mftb() >= hdec_exp) {
			vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER;
			r = RESUME_HOST;
			break;
		}
		r = kvmhv_run_single_vcpu(vcpu, hdec_exp, lpcr);
	} while (is_kvmppc_resume_guest(r));

	/* save L2 state for return */
	l2_regs = vcpu->arch.regs;
	l2_regs.msr = vcpu->arch.shregs.msr;
	delta_purr = vcpu->arch.purr - l2_hv.purr;
	delta_spurr = vcpu->arch.spurr - l2_hv.spurr;
	delta_ic = vcpu->arch.ic - l2_hv.ic;
	delta_vtb = vc->vtb - l2_hv.vtb;
	save_hv_return_state(vcpu, vcpu->arch.trap, &l2_hv);

	/* restore L1 state */
	vcpu->arch.nested = NULL;
	vcpu->arch.regs = saved_l1_regs;
	vcpu->arch.shregs.msr = saved_l1_regs.msr & ~MSR_TS_MASK;
	/* set L1 MSR TS field according to L2 transaction state */
	if (l2_regs.msr & MSR_TS_MASK)
		vcpu->arch.shregs.msr |= MSR_TS_S;
	vc->tb_offset = saved_l1_hv.tb_offset;
	restore_hv_regs(vcpu, &saved_l1_hv);
	vcpu->arch.purr += delta_purr;
	vcpu->arch.spurr += delta_spurr;
	vcpu->arch.ic += delta_ic;
	vc->vtb += delta_vtb;

	kvmhv_put_nested(l2);

	/* copy l2_hv_state and regs back to guest */
	if (kvmppc_need_byteswap(vcpu)) {
		byteswap_hv_regs(&l2_hv);
		byteswap_pt_regs(&l2_regs);
	}
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	err = kvm_vcpu_write_guest(vcpu, hv_ptr, &l2_hv,
				   sizeof(struct hv_guest_state)) ||
		kvm_vcpu_write_guest(vcpu, regs_ptr, &l2_regs,
				   sizeof(struct pt_regs));
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	if (err)
		return H_AUTHORITY;

	if (r == -EINTR)
		return H_INTERRUPT;

	if (vcpu->mmio_needed) {
		kvmhv_nested_mmio_needed(vcpu, regs_ptr);
		return H_TOO_HARD;
	}

	return vcpu->arch.trap;
}

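/*
 * When running as a nested hypervisor under pseries, allocate the
 * partition table that we register with the parent (L0) hypervisor
 * via H_SET_PARTITION_TABLE.
 */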
long kvmhv_nested_init(void)
{
	long int ptb_order;
	unsigned long ptcr;
	long rc;

	if (!kvmhv_on_pseries())
		return 0;
	if (!radix_enabled())
		return -ENODEV;

	/* find log base 2 of KVMPPC_NR_LPIDS, rounding up */
	ptb_order = __ilog2(KVMPPC_NR_LPIDS - 1) + 1;
	if (ptb_order < 8)
		ptb_order = 8;
	pseries_partition_tb = kmalloc(sizeof(struct patb_entry) << ptb_order,
				       GFP_KERNEL);
	if (!pseries_partition_tb) {
		pr_err("kvm-hv: failed to allocate nested partition table\n");
		return -ENOMEM;
	}

	ptcr = __pa(pseries_partition_tb) | (ptb_order - 8);
	rc = plpar_hcall_norets(H_SET_PARTITION_TABLE, ptcr);
	if (rc != H_SUCCESS) {
		pr_err("kvm-hv: Parent hypervisor does not support nesting (rc=%ld)\n",
		       rc);
		kfree(pseries_partition_tb);
		pseries_partition_tb = NULL;
		return -ENODEV;
	}

	return 0;
}

void kvmhv_nested_exit(void)
{
	/*
	 * N.B. the kvmhv_on_pseries() test is there because it enables
	 * the compiler to remove the call to plpar_hcall_norets()
	 * when CONFIG_PPC_PSERIES=n.
	 */
	if (kvmhv_on_pseries() && pseries_partition_tb) {
		plpar_hcall_norets(H_SET_PARTITION_TABLE, 0);
		kfree(pseries_partition_tb);
		pseries_partition_tb = NULL;
	}
}

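/*
 * Invalidate all translations cached for an LPID, either directly
 * when we own the MMU, or via an H_TLB_INVALIDATE hcall to the
 * parent hypervisor.
 */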
static void kvmhv_flush_lpid(unsigned int lpid)
{
	long rc;

	if (!kvmhv_on_pseries()) {
		radix__flush_all_lpid(lpid);
		return;
	}

	rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(2, 0, 1),
				lpid, TLBIEL_INVAL_SET_LPID);
	if (rc)
		pr_err("KVM: TLB LPID invalidation hcall failed, rc=%ld\n", rc);
}

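/*
 * Set a partition table entry, either in the real partition table
 * (when not running under a hypervisor) or in the shadow table that
 * was registered with the parent hypervisor.
 */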
void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1)
{
	if (!kvmhv_on_pseries()) {
		mmu_partition_table_set_entry(lpid, dw0, dw1, true);
		return;
	}

	pseries_partition_tb[lpid].patb0 = cpu_to_be64(dw0);
	pseries_partition_tb[lpid].patb1 = cpu_to_be64(dw1);
	/* L0 will do the necessary barriers */
	kvmhv_flush_lpid(lpid);
}

static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp)
{
	unsigned long dw0;

	dw0 = PATB_HR | radix__get_tree_size() |
		__pa(gp->shadow_pgtable) | RADIX_PGD_INDEX_SIZE;
	kvmhv_set_ptbl_entry(gp->shadow_lpid, dw0, gp->process_table);
}

void kvmhv_vm_nested_init(struct kvm *kvm)
{
	kvm->arch.max_nested_lpid = -1;
}

/*
 * Handle the H_SET_PARTITION_TABLE hcall.
 * r4 = guest real address of partition table + log_2(size) - 12
 * (formatted as for the PTCR).
 */
long kvmhv_set_partition_table(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long ptcr = kvmppc_get_gpr(vcpu, 4);
	int srcu_idx;
	long ret = H_SUCCESS;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	/*
	 * Limit the partition table to 4096 entries (because that's what
	 * hardware supports), and check the base address.
	 */
	if ((ptcr & PRTS_MASK) > 12 - 8 ||
	    !kvm_is_visible_gfn(vcpu->kvm, (ptcr & PRTB_MASK) >> PAGE_SHIFT))
		ret = H_PARAMETER;
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	if (ret == H_SUCCESS)
		kvm->arch.l1_ptcr = ptcr;
	return ret;
}

/*
 * Handle the H_COPY_TOFROM_GUEST hcall.
 * r4 = L1 lpid of nested guest
 * r5 = pid
 * r6 = eaddr to access
 * r7 = to buffer (L1 gpa)
 * r8 = from buffer (L1 gpa)
 * r9 = n bytes to copy
 */
long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu)
{
	struct kvm_nested_guest *gp;
	int l1_lpid = kvmppc_get_gpr(vcpu, 4);
	int pid = kvmppc_get_gpr(vcpu, 5);
	gva_t eaddr = kvmppc_get_gpr(vcpu, 6);
	gpa_t gp_to = (gpa_t) kvmppc_get_gpr(vcpu, 7);
	gpa_t gp_from = (gpa_t) kvmppc_get_gpr(vcpu, 8);
	void *buf;
	unsigned long n = kvmppc_get_gpr(vcpu, 9);
	bool is_load = !!gp_to;
	long rc;

	if (gp_to && gp_from) /* One must be NULL to determine the direction */
		return H_PARAMETER;

	if (eaddr & (0xFFFUL << 52))
		return H_PARAMETER;

	buf = kzalloc(n, GFP_KERNEL);
	if (!buf)
		return H_NO_MEM;

	gp = kvmhv_get_nested(vcpu->kvm, l1_lpid, false);
	if (!gp) {
		rc = H_PARAMETER;
		goto out_free;
	}

	mutex_lock(&gp->tlb_lock);

	if (is_load) {
		/* Load from the nested guest into our buffer */
		rc = __kvmhv_copy_tofrom_guest_radix(gp->shadow_lpid, pid,
						     eaddr, buf, NULL, n);
		if (rc)
			goto not_found;

		/* Write what was loaded into our buffer back to the L1 guest */
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
		rc = kvm_vcpu_write_guest(vcpu, gp_to, buf, n);
		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		if (rc)
			goto not_found;
	} else {
		/* Load the data to be stored from the L1 guest into our buf */
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
		rc = kvm_vcpu_read_guest(vcpu, gp_from, buf, n);
		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		if (rc)
			goto not_found;

		/* Store from our buffer into the nested guest */
		rc = __kvmhv_copy_tofrom_guest_radix(gp->shadow_lpid, pid,
						     eaddr, NULL, buf, n);
		if (rc)
			goto not_found;
	}

out_unlock:
	mutex_unlock(&gp->tlb_lock);
	kvmhv_put_nested(gp);
out_free:
	kfree(buf);
	return rc;
not_found:
	rc = H_NOT_FOUND;
	goto out_unlock;
}

/*
 * Reload the partition table entry for a guest.
 * Caller must hold gp->tlb_lock.
 */
static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp)
{
	int ret;
	struct patb_entry ptbl_entry;
	unsigned long ptbl_addr;
	struct kvm *kvm = gp->l1_host;

	ret = -EFAULT;
	ptbl_addr = (kvm->arch.l1_ptcr & PRTB_MASK) + (gp->l1_lpid << 4);
	if (gp->l1_lpid < (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 8))) {
		int srcu_idx = srcu_read_lock(&kvm->srcu);
		ret = kvm_read_guest(kvm, ptbl_addr,
				     &ptbl_entry, sizeof(ptbl_entry));
		srcu_read_unlock(&kvm->srcu, srcu_idx);
	}
	if (ret) {
		gp->l1_gr_to_hr = 0;
		gp->process_table = 0;
	} else {
		gp->l1_gr_to_hr = be64_to_cpu(ptbl_entry.patb0);
		gp->process_table = be64_to_cpu(ptbl_entry.patb1);
	}
	kvmhv_set_nested_ptbl(gp);
}

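/*
 * Allocate and initialise the state for a nested guest, including its
 * shadow page table and a shadow LPID allocated from our LPID space.
 */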
struct kvm_nested_guest *kvmhv_alloc_nested(struct kvm *kvm, unsigned int lpid)
{
	struct kvm_nested_guest *gp;
	long shadow_lpid;

	gp = kzalloc(sizeof(*gp), GFP_KERNEL);
	if (!gp)
		return NULL;
	gp->l1_host = kvm;
	gp->l1_lpid = lpid;
	mutex_init(&gp->tlb_lock);
	gp->shadow_pgtable = pgd_alloc(kvm->mm);
	if (!gp->shadow_pgtable)
		goto out_free;
	shadow_lpid = kvmppc_alloc_lpid();
	if (shadow_lpid < 0)
		goto out_free2;
	gp->shadow_lpid = shadow_lpid;
	gp->radix = 1;

	memset(gp->prev_cpu, -1, sizeof(gp->prev_cpu));

	return gp;

 out_free2:
	pgd_free(kvm->mm, gp->shadow_pgtable);
 out_free:
	kfree(gp);
	return NULL;
}

/*
 * Free up any resources allocated for a nested guest.
 */
static void kvmhv_release_nested(struct kvm_nested_guest *gp)
{
	struct kvm *kvm = gp->l1_host;

	if (gp->shadow_pgtable) {
		/*
		 * No vcpu is using this struct and no call to
		 * kvmhv_get_nested can find this struct,
		 * so we don't need to hold kvm->mmu_lock.
		 */
		kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
					  gp->shadow_lpid);
		pgd_free(kvm->mm, gp->shadow_pgtable);
	}
	kvmhv_set_ptbl_entry(gp->shadow_lpid, 0, 0);
	kvmppc_free_lpid(gp->shadow_lpid);
	kfree(gp);
}

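/*
 * Remove a nested guest from the l1_lpid -> guest map and release it
 * if that was the last reference.
 */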
static void kvmhv_remove_nested(struct kvm_nested_guest *gp)
{
	struct kvm *kvm = gp->l1_host;
	int lpid = gp->l1_lpid;
	long ref;

	spin_lock(&kvm->mmu_lock);
	if (gp == kvm->arch.nested_guests[lpid]) {
		kvm->arch.nested_guests[lpid] = NULL;
		if (lpid == kvm->arch.max_nested_lpid) {
			while (--lpid >= 0 && !kvm->arch.nested_guests[lpid])
				;
			kvm->arch.max_nested_lpid = lpid;
		}
		--gp->refcnt;
	}
	ref = gp->refcnt;
	spin_unlock(&kvm->mmu_lock);
	if (ref == 0)
		kvmhv_release_nested(gp);
}

/*
 * Free up all nested resources allocated for this guest.
 * This is called with no vcpus of the guest running, when
 * switching the guest to HPT mode or when destroying the
 * guest.
 */
void kvmhv_release_all_nested(struct kvm *kvm)
{
	int i;
	struct kvm_nested_guest *gp;
	struct kvm_nested_guest *freelist = NULL;
	struct kvm_memory_slot *memslot;
	int srcu_idx;

	spin_lock(&kvm->mmu_lock);
	for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
		gp = kvm->arch.nested_guests[i];
		if (!gp)
			continue;
		kvm->arch.nested_guests[i] = NULL;
		if (--gp->refcnt == 0) {
			gp->next = freelist;
			freelist = gp;
		}
	}
	kvm->arch.max_nested_lpid = -1;
	spin_unlock(&kvm->mmu_lock);
	while ((gp = freelist) != NULL) {
		freelist = gp->next;
		kvmhv_release_nested(gp);
	}

	srcu_idx = srcu_read_lock(&kvm->srcu);
	kvm_for_each_memslot(memslot, kvm_memslots(kvm))
		kvmhv_free_memslot_nest_rmap(memslot);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
}

/* caller must hold gp->tlb_lock */
static void kvmhv_flush_nested(struct kvm_nested_guest *gp)
{
	struct kvm *kvm = gp->l1_host;

	spin_lock(&kvm->mmu_lock);
	kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, gp->shadow_lpid);
	spin_unlock(&kvm->mmu_lock);
	kvmhv_flush_lpid(gp->shadow_lpid);
	kvmhv_update_ptbl_cache(gp);
	if (gp->l1_gr_to_hr == 0)
		kvmhv_remove_nested(gp);
}

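/*
 * Look up the nested guest for an L1 lpid, taking a reference on it;
 * optionally create it if it doesn't already exist.  Returns NULL if
 * the lpid is out of range or on allocation failure.
 */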
struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
					  bool create)
{
	struct kvm_nested_guest *gp, *newgp;

	if (l1_lpid >= KVM_MAX_NESTED_GUESTS ||
	    l1_lpid >= (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4)))
		return NULL;

	spin_lock(&kvm->mmu_lock);
	gp = kvm->arch.nested_guests[l1_lpid];
	if (gp)
		++gp->refcnt;
	spin_unlock(&kvm->mmu_lock);

	if (gp || !create)
		return gp;

	newgp = kvmhv_alloc_nested(kvm, l1_lpid);
	if (!newgp)
		return NULL;
	spin_lock(&kvm->mmu_lock);
	if (kvm->arch.nested_guests[l1_lpid]) {
		/* someone else beat us to it */
		gp = kvm->arch.nested_guests[l1_lpid];
	} else {
		kvm->arch.nested_guests[l1_lpid] = newgp;
		++newgp->refcnt;
		gp = newgp;
		newgp = NULL;
		if (l1_lpid > kvm->arch.max_nested_lpid)
			kvm->arch.max_nested_lpid = l1_lpid;
	}
	++gp->refcnt;
	spin_unlock(&kvm->mmu_lock);

	if (newgp)
		kvmhv_release_nested(newgp);

	return gp;
}

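/* Drop a reference on a nested guest, freeing it when the count hits zero. */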
void kvmhv_put_nested(struct kvm_nested_guest *gp)
{
	struct kvm *kvm = gp->l1_host;
	long ref;

	spin_lock(&kvm->mmu_lock);
	ref = --gp->refcnt;
	spin_unlock(&kvm->mmu_lock);
	if (ref == 0)
		kvmhv_release_nested(gp);
}

static struct kvm_nested_guest *kvmhv_find_nested(struct kvm *kvm, int lpid)
{
	if (lpid > kvm->arch.max_nested_lpid)
		return NULL;
	return kvm->arch.nested_guests[lpid];
}

pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
				 unsigned long ea, unsigned *hshift)
{
	struct kvm_nested_guest *gp;
	pte_t *pte;

	gp = kvmhv_find_nested(kvm, lpid);
	if (!gp)
		return NULL;

	VM_WARN(!spin_is_locked(&kvm->mmu_lock),
		"%s called with kvm mmu_lock not held\n", __func__);
	pte = __find_linux_pte(gp->shadow_pgtable, ea, NULL, hshift);

	return pte;
}

static inline bool kvmhv_n_rmap_is_equal(u64 rmap_1, u64 rmap_2)
{
	return !((rmap_1 ^ rmap_2) & (RMAP_NESTED_LPID_MASK |
				       RMAP_NESTED_GPA_MASK));
}

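/*
 * Add an rmap entry for a nested-guest mapping of a host page.  A
 * single entry is stored inline in *rmapp; additional entries are
 * chained on a lock-less list.
 */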
void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
			    struct rmap_nested **n_rmap)
{
	struct llist_node *entry = ((struct llist_head *) rmapp)->first;
	struct rmap_nested *cursor;
	u64 rmap, new_rmap = (*n_rmap)->rmap;

	/* Are there any existing entries? */
	if (!(*rmapp)) {
		/* No -> use the rmap as a single entry */
		*rmapp = new_rmap | RMAP_NESTED_IS_SINGLE_ENTRY;
		return;
	}

	/* Do any entries match what we're trying to insert? */
	for_each_nest_rmap_safe(cursor, entry, &rmap) {
		if (kvmhv_n_rmap_is_equal(rmap, new_rmap))
			return;
	}

	/* Do we need to create a list or just add the new entry? */
	rmap = *rmapp;
	if (rmap & RMAP_NESTED_IS_SINGLE_ENTRY) /* Not previously a list */
		*rmapp = 0UL;
	llist_add(&((*n_rmap)->list), (struct llist_head *) rmapp);
	if (rmap & RMAP_NESTED_IS_SINGLE_ENTRY) /* Not previously a list */
		(*n_rmap)->list.next = (struct llist_node *) rmap;

	/* Set NULL so not freed by caller */
	*n_rmap = NULL;
}

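/*
 * Update the referenced/changed bits in the shadow pte described by a
 * single nest rmap entry, provided it still maps the expected host page.
 */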
static void kvmhv_update_nest_rmap_rc(struct kvm *kvm, u64 n_rmap,
				      unsigned long clr, unsigned long set,
				      unsigned long hpa, unsigned long mask)
{
	unsigned long gpa;
	unsigned int shift, lpid;
	pte_t *ptep;

	gpa = n_rmap & RMAP_NESTED_GPA_MASK;
	lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;

	/* Find the pte */
	ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
	/*
	 * If the pte is present and the pfn is still the same, update the pte.
	 * If the pfn has changed then this is a stale rmap entry, the nested
	 * gpa actually points somewhere else now, and there is nothing to do.
	 * XXX A future optimisation would be to remove the rmap entry here.
	 */
	if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa)) {
		__radix_pte_update(ptep, clr, set);
		kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);
	}
}

/*
 * For a given list of rmap entries, update the rc bits in all ptes in shadow
 * page tables for nested guests which are referenced by the rmap list.
 */
void kvmhv_update_nest_rmap_rc_list(struct kvm *kvm, unsigned long *rmapp,
				    unsigned long clr, unsigned long set,
				    unsigned long hpa, unsigned long nbytes)
{
	struct llist_node *entry = ((struct llist_head *) rmapp)->first;
	struct rmap_nested *cursor;
	unsigned long rmap, mask;

	if ((clr | set) & ~(_PAGE_DIRTY | _PAGE_ACCESSED))
		return;

	mask = PTE_RPN_MASK & ~(nbytes - 1);
	hpa &= mask;

	for_each_nest_rmap_safe(cursor, entry, &rmap)
		kvmhv_update_nest_rmap_rc(kvm, rmap, clr, set, hpa, mask);
}

static void kvmhv_remove_nest_rmap(struct kvm *kvm, u64 n_rmap,
				   unsigned long hpa, unsigned long mask)
{
	struct kvm_nested_guest *gp;
	unsigned long gpa;
	unsigned int shift, lpid;
	pte_t *ptep;

	gpa = n_rmap & RMAP_NESTED_GPA_MASK;
	lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
	gp = kvmhv_find_nested(kvm, lpid);
	if (!gp)
		return;

	/* Find and invalidate the pte */
	ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
	/* Don't spuriously invalidate ptes if the pfn has changed */
	if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa))
		kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
}

static void kvmhv_remove_nest_rmap_list(struct kvm *kvm, unsigned long *rmapp,
					unsigned long hpa, unsigned long mask)
{
	struct llist_node *entry = llist_del_all((struct llist_head *) rmapp);
	struct rmap_nested *cursor;
	unsigned long rmap;

	for_each_nest_rmap_safe(cursor, entry, &rmap) {
		kvmhv_remove_nest_rmap(kvm, rmap, hpa, mask);
		kfree(cursor);
	}
}

/* called with kvm->mmu_lock held */
void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
				  const struct kvm_memory_slot *memslot,
				  unsigned long gpa, unsigned long hpa,
				  unsigned long nbytes)
{
	unsigned long gfn, end_gfn;
	unsigned long addr_mask;

	if (!memslot)
		return;
	gfn = (gpa >> PAGE_SHIFT) - memslot->base_gfn;
	end_gfn = gfn + (nbytes >> PAGE_SHIFT);

	addr_mask = PTE_RPN_MASK & ~(nbytes - 1);
	hpa &= addr_mask;

	for (; gfn < end_gfn; gfn++) {
		unsigned long *rmap = &memslot->arch.rmap[gfn];
		kvmhv_remove_nest_rmap_list(kvm, rmap, hpa, addr_mask);
	}
}

static void kvmhv_free_memslot_nest_rmap(struct kvm_memory_slot *free)
{
	unsigned long page;

	for (page = 0; page < free->npages; page++) {
		unsigned long rmap, *rmapp = &free->arch.rmap[page];
		struct rmap_nested *cursor;
		struct llist_node *entry;

		entry = llist_del_all((struct llist_head *) rmapp);
		for_each_nest_rmap_safe(cursor, entry, &rmap)
			kfree(cursor);
	}
}

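/*
 * Remove the shadow pte (if any) for a nested-guest real address and
 * report the page-size shift of the mapping that was removed.
 */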
static bool kvmhv_invalidate_shadow_pte(struct kvm_vcpu *vcpu,
					struct kvm_nested_guest *gp,
					long gpa, int *shift_ret)
{
	struct kvm *kvm = vcpu->kvm;
	bool ret = false;
	pte_t *ptep;
	int shift;

	spin_lock(&kvm->mmu_lock);
	ptep = find_kvm_nested_guest_pte(kvm, gp->l1_lpid, gpa, &shift);
	if (!shift)
		shift = PAGE_SHIFT;
	if (ptep && pte_present(*ptep)) {
		kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
		ret = true;
	}
	spin_unlock(&kvm->mmu_lock);

	if (shift_ret)
		*shift_ret = shift;
	return ret;
}

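/* Decode the fields of a tlbie instruction and its rS/rB operands. */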
static inline int get_ric(unsigned int instr)
{
	return (instr >> 18) & 0x3;
}

static inline int get_prs(unsigned int instr)
{
	return (instr >> 17) & 0x1;
}

static inline int get_r(unsigned int instr)
{
	return (instr >> 16) & 0x1;
}

static inline int get_lpid(unsigned long r_val)
{
	return r_val & 0xffffffff;
}

static inline int get_is(unsigned long r_val)
{
	return (r_val >> 10) & 0x3;
}

static inline int get_ap(unsigned long r_val)
{
	return (r_val >> 5) & 0x7;
}

static inline long get_epn(unsigned long r_val)
{
	return r_val >> 12;
}

static int kvmhv_emulate_tlbie_tlb_addr(struct kvm_vcpu *vcpu, int lpid,
					int ap, long epn)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_nested_guest *gp;
	long npages;
	int shift, shadow_shift;
	unsigned long addr;

	shift = ap_to_shift(ap);
	addr = epn << 12;
	if (shift < 0)
		/* Invalid ap encoding */
		return -EINVAL;

	addr &= ~((1UL << shift) - 1);
	npages = 1UL << (shift - PAGE_SHIFT);

	gp = kvmhv_get_nested(kvm, lpid, false);
	if (!gp) /* No such guest -> nothing to do */
		return 0;
	mutex_lock(&gp->tlb_lock);

	/* There may be more than one host page backing this single guest pte */
	do {
		kvmhv_invalidate_shadow_pte(vcpu, gp, addr, &shadow_shift);

		npages -= 1UL << (shadow_shift - PAGE_SHIFT);
		addr += 1UL << shadow_shift;
	} while (npages > 0);

	mutex_unlock(&gp->tlb_lock);
	kvmhv_put_nested(gp);
	return 0;
}

static void kvmhv_emulate_tlbie_lpid(struct kvm_vcpu *vcpu,
				     struct kvm_nested_guest *gp, int ric)
{
	struct kvm *kvm = vcpu->kvm;

	mutex_lock(&gp->tlb_lock);
	switch (ric) {
	case 0:
		/* Invalidate TLB */
		spin_lock(&kvm->mmu_lock);
		kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
					  gp->shadow_lpid);
		kvmhv_flush_lpid(gp->shadow_lpid);
		spin_unlock(&kvm->mmu_lock);
		break;
	case 1:
		/*
		 * Invalidate PWC
		 * We don't cache this -> nothing to do
		 */
		break;
	case 2:
		/* Invalidate TLB, PWC and caching of partition table entries */
		kvmhv_flush_nested(gp);
		break;
	default:
		break;
	}
	mutex_unlock(&gp->tlb_lock);
}

static void kvmhv_emulate_tlbie_all_lpid(struct kvm_vcpu *vcpu, int ric)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_nested_guest *gp;
	int i;

	spin_lock(&kvm->mmu_lock);
	for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
		gp = kvm->arch.nested_guests[i];
		if (gp) {
			spin_unlock(&kvm->mmu_lock);
			kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
			spin_lock(&kvm->mmu_lock);
		}
	}
	spin_unlock(&kvm->mmu_lock);
}

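/*
 * Emulate a hypervisor-privileged (partition-scoped) tlbie on behalf
 * of L1, applying the invalidation to the relevant shadow page tables.
 */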
static int kvmhv_emulate_priv_tlbie(struct kvm_vcpu *vcpu, unsigned int instr,
				    unsigned long rsval, unsigned long rbval)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_nested_guest *gp;
	int r, ric, prs, is, ap;
	int lpid;
	long epn;
	int ret = 0;

	ric = get_ric(instr);
	prs = get_prs(instr);
	r = get_r(instr);
	lpid = get_lpid(rsval);
	is = get_is(rbval);

	/*
	 * These cases are invalid and are not handled:
	 * r   != 1 -> Only radix supported
	 * prs == 1 -> Not HV privileged
	 * ric == 3 -> No cluster bombs for radix
	 * is  == 1 -> Partition scoped translations not associated with pid
	 * (!is) && (ric == 1 || ric == 2) -> Not supported by ISA
	 */
	if ((!r) || (prs) || (ric == 3) || (is == 1) ||
	    ((!is) && (ric == 1 || ric == 2)))
		return -EINVAL;

	switch (is) {
	case 0:
		/*
		 * We know ric == 0
		 * Invalidate TLB for a given target address
		 */
		epn = get_epn(rbval);
		ap = get_ap(rbval);
		ret = kvmhv_emulate_tlbie_tlb_addr(vcpu, lpid, ap, epn);
		break;
	case 2:
		/* Invalidate matching LPID */
		gp = kvmhv_get_nested(kvm, lpid, false);
		if (gp) {
			kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
			kvmhv_put_nested(gp);
		}
		break;
	case 3:
		/* Invalidate ALL LPIDs */
		kvmhv_emulate_tlbie_all_lpid(vcpu, ric);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/*
 * This handles the H_TLB_INVALIDATE hcall.
 * Parameters are (r4) tlbie instruction code, (r5) rS contents,
 * (r6) rB contents.
 */
long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu)
{
	int ret;

	ret = kvmhv_emulate_priv_tlbie(vcpu, kvmppc_get_gpr(vcpu, 4),
			kvmppc_get_gpr(vcpu, 5), kvmppc_get_gpr(vcpu, 6));
	if (ret)
		return H_PARAMETER;
	return H_SUCCESS;
}

/* Used to convert a nested guest real address to an L1 guest real address */
static int kvmhv_translate_addr_nested(struct kvm_vcpu *vcpu,
				       struct kvm_nested_guest *gp,
				       unsigned long n_gpa, unsigned long dsisr,
				       struct kvmppc_pte *gpte_p)
{
	u64 fault_addr, flags = dsisr & DSISR_ISSTORE;
	int ret;

	ret = kvmppc_mmu_walk_radix_tree(vcpu, n_gpa, gpte_p, gp->l1_gr_to_hr,
					 &fault_addr);

	if (ret) {
		/* We didn't find a pte */
		if (ret == -EINVAL) {
			/* Unsupported mmu config */
			flags |= DSISR_UNSUPP_MMU;
		} else if (ret == -ENOENT) {
			/* No translation found */
			flags |= DSISR_NOHPTE;
		} else if (ret == -EFAULT) {
			/* Couldn't access L1 real address */
			flags |= DSISR_PRTABLE_FAULT;
			vcpu->arch.fault_gpa = fault_addr;
		} else {
			/* Unknown error */
			return ret;
		}
		goto forward_to_l1;
	} else {
		/* We found a pte -> check permissions */
		if (dsisr & DSISR_ISSTORE) {
			/* Can we write? */
			if (!gpte_p->may_write) {
				flags |= DSISR_PROTFAULT;
				goto forward_to_l1;
			}
		} else if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) {
			/* Can we execute? */
			if (!gpte_p->may_execute) {
				flags |= SRR1_ISI_N_G_OR_CIP;
				goto forward_to_l1;
			}
		} else {
			/* Can we read? */
			if (!gpte_p->may_read && !gpte_p->may_write) {
				flags |= DSISR_PROTFAULT;
				goto forward_to_l1;
			}
		}
	}

	return 0;

forward_to_l1:
	vcpu->arch.fault_dsisr = flags;
	if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) {
		vcpu->arch.shregs.msr &= SRR1_MSR_BITS;
		vcpu->arch.shregs.msr |= flags;
	}
	return RESUME_HOST;
}

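/*
 * Set the referenced/changed bits for a nested-guest fault, in both
 * our pte for the L1 guest memory and the shadow pte for the nested
 * guest.
 */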
static long kvmhv_handle_nested_set_rc(struct kvm_vcpu *vcpu,
				       struct kvm_nested_guest *gp,
				       unsigned long n_gpa,
				       struct kvmppc_pte gpte,
				       unsigned long dsisr)
{
	struct kvm *kvm = vcpu->kvm;
	bool writing = !!(dsisr & DSISR_ISSTORE);
	u64 pgflags;
	long ret;

	/* Are the rc bits set in the L1 partition scoped pte? */
	pgflags = _PAGE_ACCESSED;
	if (writing)
		pgflags |= _PAGE_DIRTY;
	if (pgflags & ~gpte.rc)
		return RESUME_HOST;

	spin_lock(&kvm->mmu_lock);
	/* Set the rc bit in the pte of our (L0) pgtable for the L1 guest */
	ret = kvmppc_hv_handle_set_rc(kvm, false, writing,
				      gpte.raddr, kvm->arch.lpid);
	if (!ret) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Set the rc bit in the pte of the shadow_pgtable for the nested guest */
	ret = kvmppc_hv_handle_set_rc(kvm, true, writing,
				      n_gpa, gp->l1_lpid);
	if (!ret)
		ret = -EINVAL;
	else
		ret = 0;

out_unlock:
	spin_unlock(&kvm->mmu_lock);
	return ret;
}

static inline int kvmppc_radix_level_to_shift(int level)
{
	switch (level) {
	case 2:
		return PUD_SHIFT;
	case 1:
		return PMD_SHIFT;
	default:
		return PAGE_SHIFT;
	}
}

static inline int kvmppc_radix_shift_to_level(int shift)
{
	if (shift == PUD_SHIFT)
		return 2;
	if (shift == PMD_SHIFT)
		return 1;
	if (shift == PAGE_SHIFT)
		return 0;
	WARN_ON_ONCE(1);
	return 0;
}

/* called with gp->tlb_lock held */
static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
					  struct kvm_nested_guest *gp)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_memory_slot *memslot;
	struct rmap_nested *n_rmap;
	struct kvmppc_pte gpte;
	pte_t pte, *pte_p;
	unsigned long mmu_seq;
	unsigned long dsisr = vcpu->arch.fault_dsisr;
	unsigned long ea = vcpu->arch.fault_dar;
	unsigned long *rmapp;
	unsigned long n_gpa, gpa, gfn, perm = 0UL;
	unsigned int shift, l1_shift, level;
	bool writing = !!(dsisr & DSISR_ISSTORE);
	bool kvm_ro = false;
	long int ret;

	if (!gp->l1_gr_to_hr) {
		kvmhv_update_ptbl_cache(gp);
		if (!gp->l1_gr_to_hr)
			return RESUME_HOST;
	}

	/* Convert the nested guest real address into an L1 guest real address */

	n_gpa = vcpu->arch.fault_gpa & ~0xF000000000000FFFULL;
	if (!(dsisr & DSISR_PRTABLE_FAULT))
		n_gpa |= ea & 0xFFF;
	ret = kvmhv_translate_addr_nested(vcpu, gp, n_gpa, dsisr, &gpte);

	/*
	 * If the hardware found a translation but we don't now have a usable
	 * translation in the l1 partition-scoped tree, remove the shadow pte
	 * and let the guest retry.
	 */
	if (ret == RESUME_HOST &&
	    (dsisr & (DSISR_PROTFAULT | DSISR_BADACCESS | DSISR_NOEXEC_OR_G |
		      DSISR_BAD_COPYPASTE)))
		goto inval;
	if (ret)
		return ret;

	/* Failed to set the reference/change bits */
	if (dsisr & DSISR_SET_RC) {
		ret = kvmhv_handle_nested_set_rc(vcpu, gp, n_gpa, gpte, dsisr);
		if (ret == RESUME_HOST)
			return ret;
		if (ret)
			goto inval;
		dsisr &= ~DSISR_SET_RC;
		if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE |
			       DSISR_PROTFAULT)))
			return RESUME_GUEST;
	}

	/*
	 * We took an HISI or HDSI while we were running a nested guest which
	 * means we have no partition scoped translation for that. This means
	 * we need to insert a pte for the mapping into our shadow_pgtable.
	 */

	l1_shift = gpte.page_shift;
	if (l1_shift < PAGE_SHIFT) {
		/* We don't support l1 using a page size smaller than our own */
		pr_err("KVM: L1 guest page shift (%d) less than our own (%d)\n",
			l1_shift, PAGE_SHIFT);
		return -EINVAL;
	}
	gpa = gpte.raddr;
	gfn = gpa >> PAGE_SHIFT;

	/* 1. Get the corresponding host memslot */

	memslot = gfn_to_memslot(kvm, gfn);
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
		if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS)) {
			/* unusual error -> reflect to the guest as a DSI */
			kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
			return RESUME_GUEST;
		}

		/* passthrough of emulated MMIO case */
		return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing);
	}
	if (memslot->flags & KVM_MEM_READONLY) {
		if (writing) {
			/* Give the guest a DSI */
			kvmppc_core_queue_data_storage(vcpu, ea,
					DSISR_ISSTORE | DSISR_PROTFAULT);
			return RESUME_GUEST;
		}
		kvm_ro = true;
	}

	/* 2. Find the host pte for this L1 guest real address */

	/* Used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	/* See if can find translation in our partition scoped tables for L1 */
	pte = __pte(0);
	spin_lock(&kvm->mmu_lock);
	pte_p = find_kvm_secondary_pte(kvm, gpa, &shift);
	if (!shift)
		shift = PAGE_SHIFT;
	if (pte_p)
		pte = *pte_p;
	spin_unlock(&kvm->mmu_lock);

	if (!pte_present(pte) || (writing && !(pte_val(pte) & _PAGE_WRITE))) {
		/* No suitable pte found -> try to insert a mapping */
		ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot,
					writing, kvm_ro, &pte, &level);
		if (ret == -EAGAIN)
			return RESUME_GUEST;
		else if (ret)
			return ret;
		shift = kvmppc_radix_level_to_shift(level);
	}
	/* Align gfn to the start of the page */
	gfn = (gpa & ~((1UL << shift) - 1)) >> PAGE_SHIFT;

	/* 3. Compute the pte we need to insert for nest_gpa -> host r_addr */

	/* The permissions are the combination of the host and l1 guest ptes */
	perm |= gpte.may_read ? 0UL : _PAGE_READ;
	perm |= gpte.may_write ? 0UL : _PAGE_WRITE;
	perm |= gpte.may_execute ? 0UL : _PAGE_EXEC;
	/* Only set accessed/dirty (rc) bits if set in host and l1 guest ptes */
	perm |= (gpte.rc & _PAGE_ACCESSED) ? 0UL : _PAGE_ACCESSED;
	perm |= ((gpte.rc & _PAGE_DIRTY) && writing) ? 0UL : _PAGE_DIRTY;
	pte = __pte(pte_val(pte) & ~perm);

	/* What size pte can we insert? */
	if (shift > l1_shift) {
		u64 mask;
		unsigned int actual_shift = PAGE_SHIFT;
		if (PMD_SHIFT < l1_shift)
			actual_shift = PMD_SHIFT;
		mask = (1UL << shift) - (1UL << actual_shift);
		pte = __pte(pte_val(pte) | (gpa & mask));
		shift = actual_shift;
	}
	level = kvmppc_radix_shift_to_level(shift);
	n_gpa &= ~((1UL << shift) - 1);

	/* 4. Insert the pte into our shadow_pgtable */

	n_rmap = kzalloc(sizeof(*n_rmap), GFP_KERNEL);
	if (!n_rmap)
		return RESUME_GUEST; /* Let the guest try again */
	n_rmap->rmap = (n_gpa & RMAP_NESTED_GPA_MASK) |
		(((unsigned long) gp->l1_lpid) << RMAP_NESTED_LPID_SHIFT);
	rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
	ret = kvmppc_create_pte(kvm, gp->shadow_pgtable, pte, n_gpa, level,
				mmu_seq, gp->shadow_lpid, rmapp, &n_rmap);
	kfree(n_rmap);
	if (ret == -EAGAIN)
		ret = RESUME_GUEST;	/* Let the guest try again */

	return ret;

 inval:
	kvmhv_invalidate_shadow_pte(vcpu, gp, n_gpa, NULL);
	return RESUME_GUEST;
}

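/* Handle a page fault taken while running a nested guest. */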
long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu)
{
	struct kvm_nested_guest *gp = vcpu->arch.nested;
	long int ret;

	mutex_lock(&gp->tlb_lock);
	ret = __kvmhv_nested_page_fault(vcpu, gp);
	mutex_unlock(&gp->tlb_lock);
	return ret;
}

int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid)
{
	int ret = -1;

	spin_lock(&kvm->mmu_lock);
	while (++lpid <= kvm->arch.max_nested_lpid) {
		if (kvm->arch.nested_guests[lpid]) {
			ret = lpid;
			break;
		}
	}
	spin_unlock(&kvm->mmu_lock);
	return ret;
}