// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corporation, 2018
 * Authors Suraj Jitindar Singh <sjitindarsingh@gmail.com>
 *	   Paul Mackerras <paulus@ozlabs.org>
 *
 * Description: KVM functions specific to running nested KVM-HV guests
 * on Book3S processors (specifically POWER9 and later).
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/llist.h>
#include <linux/pgtable.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu.h>
#include <asm/pgalloc.h>
#include <asm/pte-walk.h>
#include <asm/reg.h>

static struct patb_entry *pseries_partition_tb;

static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp);
static void kvmhv_free_memslot_nest_rmap(struct kvm_memory_slot *free);

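/*
 * Save the hypervisor-privileged state of a vcpu and its vcore into
 * an hv_guest_state structure, in the layout used by the
 * H_ENTER_NESTED hcall.
 */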
void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	hr->pcr = vc->pcr | PCR_MASK;
	hr->dpdes = vc->dpdes;
	hr->hfscr = vcpu->arch.hfscr;
	hr->tb_offset = vc->tb_offset;
	hr->dawr0 = vcpu->arch.dawr;
	hr->dawrx0 = vcpu->arch.dawrx;
	hr->ciabr = vcpu->arch.ciabr;
	hr->purr = vcpu->arch.purr;
	hr->spurr = vcpu->arch.spurr;
	hr->ic = vcpu->arch.ic;
	hr->vtb = vc->vtb;
	hr->srr0 = vcpu->arch.shregs.srr0;
	hr->srr1 = vcpu->arch.shregs.srr1;
	hr->sprg[0] = vcpu->arch.shregs.sprg0;
	hr->sprg[1] = vcpu->arch.shregs.sprg1;
	hr->sprg[2] = vcpu->arch.shregs.sprg2;
	hr->sprg[3] = vcpu->arch.shregs.sprg3;
	hr->pidr = vcpu->arch.pid;
	hr->cfar = vcpu->arch.cfar;
	hr->ppr = vcpu->arch.ppr;
}

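/*
 * The hv_guest_state and pt_regs structures passed to H_ENTER_NESTED
 * live in L1 guest memory, so they must be byte-swapped if the guest
 * runs with the opposite endianness to the hypervisor.
 */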
static void byteswap_pt_regs(struct pt_regs *regs)
{
	unsigned long *addr = (unsigned long *) regs;

	for (; addr < ((unsigned long *) (regs + 1)); addr++)
		*addr = swab64(*addr);
}

static void byteswap_hv_regs(struct hv_guest_state *hr)
{
	hr->version = swab64(hr->version);
	hr->lpid = swab32(hr->lpid);
	hr->vcpu_token = swab32(hr->vcpu_token);
	hr->lpcr = swab64(hr->lpcr);
	hr->pcr = swab64(hr->pcr) | PCR_MASK;
	hr->amor = swab64(hr->amor);
	hr->dpdes = swab64(hr->dpdes);
	hr->hfscr = swab64(hr->hfscr);
	hr->tb_offset = swab64(hr->tb_offset);
	hr->dawr0 = swab64(hr->dawr0);
	hr->dawrx0 = swab64(hr->dawrx0);
	hr->ciabr = swab64(hr->ciabr);
	hr->hdec_expiry = swab64(hr->hdec_expiry);
	hr->purr = swab64(hr->purr);
	hr->spurr = swab64(hr->spurr);
	hr->ic = swab64(hr->ic);
	hr->vtb = swab64(hr->vtb);
	hr->hdar = swab64(hr->hdar);
	hr->hdsisr = swab64(hr->hdsisr);
	hr->heir = swab64(hr->heir);
	hr->asdr = swab64(hr->asdr);
	hr->srr0 = swab64(hr->srr0);
	hr->srr1 = swab64(hr->srr1);
	hr->sprg[0] = swab64(hr->sprg[0]);
	hr->sprg[1] = swab64(hr->sprg[1]);
	hr->sprg[2] = swab64(hr->sprg[2]);
	hr->sprg[3] = swab64(hr->sprg[3]);
	hr->pidr = swab64(hr->pidr);
	hr->cfar = swab64(hr->cfar);
	hr->ppr = swab64(hr->ppr);
}

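/*
 * Collect the state to be returned to L1 after running an L2 guest,
 * including the fault/emulation registers relevant to the interrupt
 * (trap) which caused the exit.
 */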
static void save_hv_return_state(struct kvm_vcpu *vcpu, int trap,
				 struct hv_guest_state *hr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	hr->dpdes = vc->dpdes;
	hr->hfscr = vcpu->arch.hfscr;
	hr->purr = vcpu->arch.purr;
	hr->spurr = vcpu->arch.spurr;
	hr->ic = vcpu->arch.ic;
	hr->vtb = vc->vtb;
	hr->srr0 = vcpu->arch.shregs.srr0;
	hr->srr1 = vcpu->arch.shregs.srr1;
	hr->sprg[0] = vcpu->arch.shregs.sprg0;
	hr->sprg[1] = vcpu->arch.shregs.sprg1;
	hr->sprg[2] = vcpu->arch.shregs.sprg2;
	hr->sprg[3] = vcpu->arch.shregs.sprg3;
	hr->pidr = vcpu->arch.pid;
	hr->cfar = vcpu->arch.cfar;
	hr->ppr = vcpu->arch.ppr;
	switch (trap) {
	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
		hr->hdar = vcpu->arch.fault_dar;
		hr->hdsisr = vcpu->arch.fault_dsisr;
		hr->asdr = vcpu->arch.fault_gpa;
		break;
	case BOOK3S_INTERRUPT_H_INST_STORAGE:
		hr->asdr = vcpu->arch.fault_gpa;
		break;
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		hr->heir = vcpu->arch.emul_inst;
		break;
	}
}

static void sanitise_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
{
	/*
	 * Don't let L1 enable features for L2 which we've disabled for L1,
	 * but preserve the interrupt cause field.
	 */
	hr->hfscr &= (HFSCR_INTR_CAUSE | vcpu->arch.hfscr);

	/* Don't let data address watchpoint match in hypervisor state */
	hr->dawrx0 &= ~DAWRX_HYP;

	/* Don't let completed instruction address breakpt match in HV state */
	if ((hr->ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
		hr->ciabr &= ~CIABR_PRIV;
}

static void restore_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	vc->pcr = hr->pcr | PCR_MASK;
	vc->dpdes = hr->dpdes;
	vcpu->arch.hfscr = hr->hfscr;
	vcpu->arch.dawr = hr->dawr0;
	vcpu->arch.dawrx = hr->dawrx0;
	vcpu->arch.ciabr = hr->ciabr;
	vcpu->arch.purr = hr->purr;
	vcpu->arch.spurr = hr->spurr;
	vcpu->arch.ic = hr->ic;
	vc->vtb = hr->vtb;
	vcpu->arch.shregs.srr0 = hr->srr0;
	vcpu->arch.shregs.srr1 = hr->srr1;
	vcpu->arch.shregs.sprg0 = hr->sprg[0];
	vcpu->arch.shregs.sprg1 = hr->sprg[1];
	vcpu->arch.shregs.sprg2 = hr->sprg[2];
	vcpu->arch.shregs.sprg3 = hr->sprg[3];
	vcpu->arch.pid = hr->pidr;
	vcpu->arch.cfar = hr->cfar;
	vcpu->arch.ppr = hr->ppr;
}

void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
				   struct hv_guest_state *hr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	vc->dpdes = hr->dpdes;
	vcpu->arch.hfscr = hr->hfscr;
	vcpu->arch.purr = hr->purr;
	vcpu->arch.spurr = hr->spurr;
	vcpu->arch.ic = hr->ic;
	vc->vtb = hr->vtb;
	vcpu->arch.fault_dar = hr->hdar;
	vcpu->arch.fault_dsisr = hr->hdsisr;
	vcpu->arch.fault_gpa = hr->asdr;
	vcpu->arch.emul_inst = hr->heir;
	vcpu->arch.shregs.srr0 = hr->srr0;
	vcpu->arch.shregs.srr1 = hr->srr1;
	vcpu->arch.shregs.sprg0 = hr->sprg[0];
	vcpu->arch.shregs.sprg1 = hr->sprg[1];
	vcpu->arch.shregs.sprg2 = hr->sprg[2];
	vcpu->arch.shregs.sprg3 = hr->sprg[3];
	vcpu->arch.pid = hr->pidr;
	vcpu->arch.cfar = hr->cfar;
	vcpu->arch.ppr = hr->ppr;
}

static void kvmhv_nested_mmio_needed(struct kvm_vcpu *vcpu, u64 regs_ptr)
{
	/* No need to reflect the page fault to L1, we've handled it */
	vcpu->arch.trap = 0;

	/*
	 * Since the L2 gprs have already been written back into L1 memory when
	 * we complete the mmio, store the L1 memory location of the L2 gpr
	 * being loaded into by the mmio so that the loaded value can be
	 * written there in kvmppc_complete_mmio_load()
	 */
	if (((vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) == KVM_MMIO_REG_GPR)
	    && (vcpu->mmio_is_write == 0)) {
		vcpu->arch.nested_io_gpr = (gpa_t) regs_ptr +
					   offsetof(struct pt_regs,
						    gpr[vcpu->arch.io_gpr]);
		vcpu->arch.io_gpr = KVM_MMIO_REG_NESTED_GPR;
	}
}

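/*
 * Handle the H_ENTER_NESTED hcall from the L1 guest:
 * r4 = L1 guest real address of the hv_guest_state for the L2 vcpu
 * r5 = L1 guest real address of the pt_regs for the L2 vcpu
 * Returns the interrupt vector which caused the exit from the L2
 * guest, or an H_xxx error code if the parameters are unusable.
 */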
long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
{
	long int err, r;
	struct kvm_nested_guest *l2;
	struct pt_regs l2_regs, saved_l1_regs;
	struct hv_guest_state l2_hv, saved_l1_hv;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	u64 hv_ptr, regs_ptr;
	u64 hdec_exp;
	s64 delta_purr, delta_spurr, delta_ic, delta_vtb;
	u64 mask;
	unsigned long lpcr;

	if (vcpu->kvm->arch.l1_ptcr == 0)
		return H_NOT_AVAILABLE;

	/* copy parameters in */
	hv_ptr = kvmppc_get_gpr(vcpu, 4);
	err = kvm_vcpu_read_guest(vcpu, hv_ptr, &l2_hv,
				  sizeof(struct hv_guest_state));
	if (err)
		return H_PARAMETER;
	if (kvmppc_need_byteswap(vcpu))
		byteswap_hv_regs(&l2_hv);
	if (l2_hv.version != HV_GUEST_STATE_VERSION)
		return H_P2;

	regs_ptr = kvmppc_get_gpr(vcpu, 5);
	err = kvm_vcpu_read_guest(vcpu, regs_ptr, &l2_regs,
				  sizeof(struct pt_regs));
	if (err)
		return H_PARAMETER;
	if (kvmppc_need_byteswap(vcpu))
		byteswap_pt_regs(&l2_regs);
	if (l2_hv.vcpu_token >= NR_CPUS)
		return H_PARAMETER;

	/* translate lpid */
	l2 = kvmhv_get_nested(vcpu->kvm, l2_hv.lpid, true);
	if (!l2)
		return H_PARAMETER;
	if (!l2->l1_gr_to_hr) {
		mutex_lock(&l2->tlb_lock);
		kvmhv_update_ptbl_cache(l2);
		mutex_unlock(&l2->tlb_lock);
	}

	/* save l1 values of things */
	vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
	saved_l1_regs = vcpu->arch.regs;
	kvmhv_save_hv_regs(vcpu, &saved_l1_hv);

	/* convert TB values/offsets to host (L0) values */
	hdec_exp = l2_hv.hdec_expiry - vc->tb_offset;
	vc->tb_offset += l2_hv.tb_offset;

	/* set L1 state to L2 state */
	vcpu->arch.nested = l2;
	vcpu->arch.nested_vcpu_id = l2_hv.vcpu_token;
	vcpu->arch.regs = l2_regs;
	vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
	mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD |
		LPCR_LPES | LPCR_MER;
	lpcr = (vc->lpcr & ~mask) | (l2_hv.lpcr & mask);
	sanitise_hv_regs(vcpu, &l2_hv);
	restore_hv_regs(vcpu, &l2_hv);

	vcpu->arch.ret = RESUME_GUEST;
	vcpu->arch.trap = 0;
	do {
		if (mftb() >= hdec_exp) {
			vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER;
			r = RESUME_HOST;
			break;
		}
		r = kvmhv_run_single_vcpu(vcpu, hdec_exp, lpcr);
	} while (is_kvmppc_resume_guest(r));

	/* save L2 state for return */
	l2_regs = vcpu->arch.regs;
	l2_regs.msr = vcpu->arch.shregs.msr;
	delta_purr = vcpu->arch.purr - l2_hv.purr;
	delta_spurr = vcpu->arch.spurr - l2_hv.spurr;
	delta_ic = vcpu->arch.ic - l2_hv.ic;
	delta_vtb = vc->vtb - l2_hv.vtb;
	save_hv_return_state(vcpu, vcpu->arch.trap, &l2_hv);

	/* restore L1 state */
	vcpu->arch.nested = NULL;
	vcpu->arch.regs = saved_l1_regs;
	vcpu->arch.shregs.msr = saved_l1_regs.msr & ~MSR_TS_MASK;
	/* set L1 MSR TS field according to L2 transaction state */
	if (l2_regs.msr & MSR_TS_MASK)
		vcpu->arch.shregs.msr |= MSR_TS_S;
	vc->tb_offset = saved_l1_hv.tb_offset;
	restore_hv_regs(vcpu, &saved_l1_hv);
	vcpu->arch.purr += delta_purr;
	vcpu->arch.spurr += delta_spurr;
	vcpu->arch.ic += delta_ic;
	vc->vtb += delta_vtb;

	kvmhv_put_nested(l2);

	/* copy l2_hv_state and regs back to guest */
	if (kvmppc_need_byteswap(vcpu)) {
		byteswap_hv_regs(&l2_hv);
		byteswap_pt_regs(&l2_regs);
	}
	err = kvm_vcpu_write_guest(vcpu, hv_ptr, &l2_hv,
				   sizeof(struct hv_guest_state));
	if (err)
		return H_AUTHORITY;
	err = kvm_vcpu_write_guest(vcpu, regs_ptr, &l2_regs,
				   sizeof(struct pt_regs));
	if (err)
		return H_AUTHORITY;

	if (r == -EINTR)
		return H_INTERRUPT;

	if (vcpu->mmio_needed) {
		kvmhv_nested_mmio_needed(vcpu, regs_ptr);
		return H_TOO_HARD;
	}

	return vcpu->arch.trap;
}

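/*
 * Set up to run nested guests when we are ourselves running under a
 * hypervisor (i.e. on pseries): allocate a partition table for our
 * nested LPIDs and register it with the L0 hypervisor.
 */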
long kvmhv_nested_init(void)
{
	long int ptb_order;
	unsigned long ptcr;
	long rc;

	if (!kvmhv_on_pseries())
		return 0;
	if (!radix_enabled())
		return -ENODEV;

	/* find log base 2 of KVMPPC_NR_LPIDS, rounding up */
	ptb_order = __ilog2(KVMPPC_NR_LPIDS - 1) + 1;
	if (ptb_order < 8)
		ptb_order = 8;
	pseries_partition_tb = kmalloc(sizeof(struct patb_entry) << ptb_order,
				       GFP_KERNEL);
	if (!pseries_partition_tb) {
		pr_err("kvm-hv: failed to allocate nested partition table\n");
		return -ENOMEM;
	}

	ptcr = __pa(pseries_partition_tb) | (ptb_order - 8);
	rc = plpar_hcall_norets(H_SET_PARTITION_TABLE, ptcr);
	if (rc != H_SUCCESS) {
		pr_err("kvm-hv: Parent hypervisor does not support nesting (rc=%ld)\n",
		       rc);
		kfree(pseries_partition_tb);
		pseries_partition_tb = NULL;
		return -ENODEV;
	}

	return 0;
}

void kvmhv_nested_exit(void)
{
	/*
	 * N.B. the kvmhv_on_pseries() test is there because it enables
	 * the compiler to remove the call to plpar_hcall_norets()
	 * when CONFIG_PPC_PSERIES=n.
	 */
	if (kvmhv_on_pseries() && pseries_partition_tb) {
		plpar_hcall_norets(H_SET_PARTITION_TABLE, 0);
		kfree(pseries_partition_tb);
		pseries_partition_tb = NULL;
	}
}

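/*
 * Invalidate all translations cached in the hardware for an LPID,
 * either directly or, when running under a hypervisor, by asking L0
 * to do so via H_TLB_INVALIDATE.
 */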
static void kvmhv_flush_lpid(unsigned int lpid)
{
	long rc;

	if (!kvmhv_on_pseries()) {
		radix__flush_all_lpid(lpid);
		return;
	}

	rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(2, 0, 1),
				lpid, TLBIEL_INVAL_SET_LPID);
	if (rc)
		pr_err("KVM: TLB LPID invalidation hcall failed, rc=%ld\n", rc);
}

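/*
 * Set a partition table entry for an LPID, either directly in the
 * hardware-owned table or in the table registered with the L0
 * hypervisor, then flush any translations cached for that LPID.
 */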
void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1)
{
	if (!kvmhv_on_pseries()) {
		mmu_partition_table_set_entry(lpid, dw0, dw1, true);
		return;
	}

	pseries_partition_tb[lpid].patb0 = cpu_to_be64(dw0);
	pseries_partition_tb[lpid].patb1 = cpu_to_be64(dw1);
	/* L0 will do the necessary barriers */
	kvmhv_flush_lpid(lpid);
}

static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp)
{
	unsigned long dw0;

	dw0 = PATB_HR | radix__get_tree_size() |
		__pa(gp->shadow_pgtable) | RADIX_PGD_INDEX_SIZE;
	kvmhv_set_ptbl_entry(gp->shadow_lpid, dw0, gp->process_table);
}

void kvmhv_vm_nested_init(struct kvm *kvm)
{
	kvm->arch.max_nested_lpid = -1;
}

/*
 * Handle the H_SET_PARTITION_TABLE hcall.
 * r4 = guest real address of partition table + log_2(size) - 12
 * (formatted as for the PTCR).
 */
long kvmhv_set_partition_table(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long ptcr = kvmppc_get_gpr(vcpu, 4);
	int srcu_idx;
	long ret = H_SUCCESS;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	/*
	 * Limit the partition table to 4096 entries (because that's what
	 * hardware supports), and check the base address.
	 */
	if ((ptcr & PRTS_MASK) > 12 - 8 ||
	    !kvm_is_visible_gfn(vcpu->kvm, (ptcr & PRTB_MASK) >> PAGE_SHIFT))
		ret = H_PARAMETER;
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	if (ret == H_SUCCESS)
		kvm->arch.l1_ptcr = ptcr;
	return ret;
}

/*
 * Handle the H_COPY_TOFROM_GUEST hcall.
 * r4 = L1 lpid of nested guest
 * r5 = pid
 * r6 = eaddr to access
 * r7 = to buffer (L1 gpa)
 * r8 = from buffer (L1 gpa)
 * r9 = n bytes to copy
 */
long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu)
{
	struct kvm_nested_guest *gp;
	int l1_lpid = kvmppc_get_gpr(vcpu, 4);
	int pid = kvmppc_get_gpr(vcpu, 5);
	gva_t eaddr = kvmppc_get_gpr(vcpu, 6);
	gpa_t gp_to = (gpa_t) kvmppc_get_gpr(vcpu, 7);
	gpa_t gp_from = (gpa_t) kvmppc_get_gpr(vcpu, 8);
	void *buf;
	unsigned long n = kvmppc_get_gpr(vcpu, 9);
	bool is_load = !!gp_to;
	long rc;

	if (gp_to && gp_from) /* One must be NULL to determine the direction */
		return H_PARAMETER;

	if (eaddr & (0xFFFUL << 52))
		return H_PARAMETER;

	buf = kzalloc(n, GFP_KERNEL);
	if (!buf)
		return H_NO_MEM;

	gp = kvmhv_get_nested(vcpu->kvm, l1_lpid, false);
	if (!gp) {
		rc = H_PARAMETER;
		goto out_free;
	}

	mutex_lock(&gp->tlb_lock);

	if (is_load) {
		/* Load from the nested guest into our buffer */
		rc = __kvmhv_copy_tofrom_guest_radix(gp->shadow_lpid, pid,
						     eaddr, buf, NULL, n);
		if (rc)
			goto not_found;

		/* Write what was loaded into our buffer back to the L1 guest */
		rc = kvm_vcpu_write_guest(vcpu, gp_to, buf, n);
		if (rc)
			goto not_found;
	} else {
		/* Load the data to be stored from the L1 guest into our buf */
		rc = kvm_vcpu_read_guest(vcpu, gp_from, buf, n);
		if (rc)
			goto not_found;

		/* Store from our buffer into the nested guest */
		rc = __kvmhv_copy_tofrom_guest_radix(gp->shadow_lpid, pid,
						     eaddr, NULL, buf, n);
		if (rc)
			goto not_found;
	}

out_unlock:
	mutex_unlock(&gp->tlb_lock);
	kvmhv_put_nested(gp);
out_free:
	kfree(buf);
	return rc;
not_found:
	rc = H_NOT_FOUND;
	goto out_unlock;
}

/*
 * Reload the partition table entry for a guest.
 * Caller must hold gp->tlb_lock.
 */
static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp)
{
	int ret;
	struct patb_entry ptbl_entry;
	unsigned long ptbl_addr;
	struct kvm *kvm = gp->l1_host;

	ret = -EFAULT;
	ptbl_addr = (kvm->arch.l1_ptcr & PRTB_MASK) + (gp->l1_lpid << 4);
	if (gp->l1_lpid < (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 8)))
		ret = kvm_read_guest(kvm, ptbl_addr,
				     &ptbl_entry, sizeof(ptbl_entry));
	if (ret) {
		gp->l1_gr_to_hr = 0;
		gp->process_table = 0;
	} else {
		gp->l1_gr_to_hr = be64_to_cpu(ptbl_entry.patb0);
		gp->process_table = be64_to_cpu(ptbl_entry.patb1);
	}
	kvmhv_set_nested_ptbl(gp);
}

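/*
 * Allocate and initialise the state for a nested guest, keyed by the
 * LPID which L1 uses for it.  This includes a shadow page table and
 * a separate (shadow) LPID for use on the real hardware.
 */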
struct kvm_nested_guest *kvmhv_alloc_nested(struct kvm *kvm, unsigned int lpid)
{
	struct kvm_nested_guest *gp;
	long shadow_lpid;

	gp = kzalloc(sizeof(*gp), GFP_KERNEL);
	if (!gp)
		return NULL;
	gp->l1_host = kvm;
	gp->l1_lpid = lpid;
	mutex_init(&gp->tlb_lock);
	gp->shadow_pgtable = pgd_alloc(kvm->mm);
	if (!gp->shadow_pgtable)
		goto out_free;
	shadow_lpid = kvmppc_alloc_lpid();
	if (shadow_lpid < 0)
		goto out_free2;
	gp->shadow_lpid = shadow_lpid;
	gp->radix = 1;

	memset(gp->prev_cpu, -1, sizeof(gp->prev_cpu));

	return gp;

 out_free2:
	pgd_free(kvm->mm, gp->shadow_pgtable);
 out_free:
	kfree(gp);
	return NULL;
}

/*
 * Free up any resources allocated for a nested guest.
 */
static void kvmhv_release_nested(struct kvm_nested_guest *gp)
{
	struct kvm *kvm = gp->l1_host;

	if (gp->shadow_pgtable) {
		/*
		 * No vcpu is using this struct and no call to
		 * kvmhv_get_nested can find this struct,
		 * so we don't need to hold kvm->mmu_lock.
		 */
		kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
					  gp->shadow_lpid);
		pgd_free(kvm->mm, gp->shadow_pgtable);
	}
	kvmhv_set_ptbl_entry(gp->shadow_lpid, 0, 0);
	kvmppc_free_lpid(gp->shadow_lpid);
	kfree(gp);
}

static void kvmhv_remove_nested(struct kvm_nested_guest *gp)
{
	struct kvm *kvm = gp->l1_host;
	int lpid = gp->l1_lpid;
	long ref;

	spin_lock(&kvm->mmu_lock);
	if (gp == kvm->arch.nested_guests[lpid]) {
		kvm->arch.nested_guests[lpid] = NULL;
		if (lpid == kvm->arch.max_nested_lpid) {
			while (--lpid >= 0 && !kvm->arch.nested_guests[lpid])
				;
			kvm->arch.max_nested_lpid = lpid;
		}
		--gp->refcnt;
	}
	ref = gp->refcnt;
	spin_unlock(&kvm->mmu_lock);
	if (ref == 0)
		kvmhv_release_nested(gp);
}

/*
 * Free up all nested resources allocated for this guest.
 * This is called with no vcpus of the guest running, when
 * switching the guest to HPT mode or when destroying the
 * guest.
 */
void kvmhv_release_all_nested(struct kvm *kvm)
{
	int i;
	struct kvm_nested_guest *gp;
	struct kvm_nested_guest *freelist = NULL;
	struct kvm_memory_slot *memslot;
	int srcu_idx;

	spin_lock(&kvm->mmu_lock);
	for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
		gp = kvm->arch.nested_guests[i];
		if (!gp)
			continue;
		kvm->arch.nested_guests[i] = NULL;
		if (--gp->refcnt == 0) {
			gp->next = freelist;
			freelist = gp;
		}
	}
	kvm->arch.max_nested_lpid = -1;
	spin_unlock(&kvm->mmu_lock);
	while ((gp = freelist) != NULL) {
		freelist = gp->next;
		kvmhv_release_nested(gp);
	}

	srcu_idx = srcu_read_lock(&kvm->srcu);
	kvm_for_each_memslot(memslot, kvm_memslots(kvm))
		kvmhv_free_memslot_nest_rmap(memslot);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
}

/* caller must hold gp->tlb_lock */
static void kvmhv_flush_nested(struct kvm_nested_guest *gp)
{
	struct kvm *kvm = gp->l1_host;

	spin_lock(&kvm->mmu_lock);
	kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, gp->shadow_lpid);
	spin_unlock(&kvm->mmu_lock);
	kvmhv_flush_lpid(gp->shadow_lpid);
	kvmhv_update_ptbl_cache(gp);
	if (gp->l1_gr_to_hr == 0)
		kvmhv_remove_nested(gp);
}

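/*
 * Look up the nested guest struct for a given L1 LPID, optionally
 * creating it if it doesn't exist yet.  A reference is taken on the
 * returned struct; drop it with kvmhv_put_nested().
 */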
struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
					  bool create)
{
	struct kvm_nested_guest *gp, *newgp;

	if (l1_lpid >= KVM_MAX_NESTED_GUESTS ||
	    l1_lpid >= (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4)))
		return NULL;

	spin_lock(&kvm->mmu_lock);
	gp = kvm->arch.nested_guests[l1_lpid];
	if (gp)
		++gp->refcnt;
	spin_unlock(&kvm->mmu_lock);

	if (gp || !create)
		return gp;

	newgp = kvmhv_alloc_nested(kvm, l1_lpid);
	if (!newgp)
		return NULL;
	spin_lock(&kvm->mmu_lock);
	if (kvm->arch.nested_guests[l1_lpid]) {
		/* someone else beat us to it */
		gp = kvm->arch.nested_guests[l1_lpid];
	} else {
		kvm->arch.nested_guests[l1_lpid] = newgp;
		++newgp->refcnt;
		gp = newgp;
		newgp = NULL;
		if (l1_lpid > kvm->arch.max_nested_lpid)
			kvm->arch.max_nested_lpid = l1_lpid;
	}
	++gp->refcnt;
	spin_unlock(&kvm->mmu_lock);

	if (newgp)
		kvmhv_release_nested(newgp);

	return gp;
}

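/* Drop a reference on a nested guest, freeing it on the last put */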
void kvmhv_put_nested(struct kvm_nested_guest *gp)
{
	struct kvm *kvm = gp->l1_host;
	long ref;

	spin_lock(&kvm->mmu_lock);
	ref = --gp->refcnt;
	spin_unlock(&kvm->mmu_lock);
	if (ref == 0)
		kvmhv_release_nested(gp);
}

static struct kvm_nested_guest *kvmhv_find_nested(struct kvm *kvm, int lpid)
{
	if (lpid > kvm->arch.max_nested_lpid)
		return NULL;
	return kvm->arch.nested_guests[lpid];
}

pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
				 unsigned long ea, unsigned *hshift)
{
	struct kvm_nested_guest *gp;
	pte_t *pte;

	gp = kvmhv_find_nested(kvm, lpid);
	if (!gp)
		return NULL;

	VM_WARN(!spin_is_locked(&kvm->mmu_lock),
		"%s called with kvm mmu_lock not held\n", __func__);
	pte = __find_linux_pte(gp->shadow_pgtable, ea, NULL, hshift);

	return pte;
}

static inline bool kvmhv_n_rmap_is_equal(u64 rmap_1, u64 rmap_2)
{
	return !((rmap_1 ^ rmap_2) & (RMAP_NESTED_LPID_MASK |
				       RMAP_NESTED_GPA_MASK));
}

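/*
 * Insert a reverse-map entry (nested LPID + nested guest address) on
 * the rmap list for a host page.  A lone entry is stored inline in
 * *rmapp; a second distinct entry converts it into an llist.  On
 * success *n_rmap is set to NULL so the caller won't free it.
 */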
void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
			    struct rmap_nested **n_rmap)
{
	struct llist_node *entry = ((struct llist_head *) rmapp)->first;
	struct rmap_nested *cursor;
	u64 rmap, new_rmap = (*n_rmap)->rmap;

	/* Are there any existing entries? */
	if (!(*rmapp)) {
		/* No -> use the rmap as a single entry */
		*rmapp = new_rmap | RMAP_NESTED_IS_SINGLE_ENTRY;
		return;
	}

	/* Do any entries match what we're trying to insert? */
	for_each_nest_rmap_safe(cursor, entry, &rmap) {
		if (kvmhv_n_rmap_is_equal(rmap, new_rmap))
			return;
	}

	/* Do we need to create a list or just add the new entry? */
	rmap = *rmapp;
	if (rmap & RMAP_NESTED_IS_SINGLE_ENTRY) /* Not previously a list */
		*rmapp = 0UL;
	llist_add(&((*n_rmap)->list), (struct llist_head *) rmapp);
	if (rmap & RMAP_NESTED_IS_SINGLE_ENTRY) /* Not previously a list */
		(*n_rmap)->list.next = (struct llist_node *) rmap;

	/* Set NULL so not freed by caller */
	*n_rmap = NULL;
}

static void kvmhv_update_nest_rmap_rc(struct kvm *kvm, u64 n_rmap,
				      unsigned long clr, unsigned long set,
				      unsigned long hpa, unsigned long mask)
{
	unsigned long gpa;
	unsigned int shift, lpid;
	pte_t *ptep;

	gpa = n_rmap & RMAP_NESTED_GPA_MASK;
	lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;

	/* Find the pte */
	ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
	/*
	 * If the pte is present and the pfn is still the same, update the pte.
	 * If the pfn has changed then this is a stale rmap entry, the nested
	 * gpa actually points somewhere else now, and there is nothing to do.
	 * XXX A future optimisation would be to remove the rmap entry here.
	 */
	if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa)) {
		__radix_pte_update(ptep, clr, set);
		kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);
	}
}

/*
 * For a given list of rmap entries, update the rc bits in all ptes in shadow
 * page tables for nested guests which are referenced by the rmap list.
 */
void kvmhv_update_nest_rmap_rc_list(struct kvm *kvm, unsigned long *rmapp,
				    unsigned long clr, unsigned long set,
				    unsigned long hpa, unsigned long nbytes)
{
	struct llist_node *entry = ((struct llist_head *) rmapp)->first;
	struct rmap_nested *cursor;
	unsigned long rmap, mask;

	if ((clr | set) & ~(_PAGE_DIRTY | _PAGE_ACCESSED))
		return;

	mask = PTE_RPN_MASK & ~(nbytes - 1);
	hpa &= mask;

	for_each_nest_rmap_safe(cursor, entry, &rmap)
		kvmhv_update_nest_rmap_rc(kvm, rmap, clr, set, hpa, mask);
}

static void kvmhv_remove_nest_rmap(struct kvm *kvm, u64 n_rmap,
				   unsigned long hpa, unsigned long mask)
{
	struct kvm_nested_guest *gp;
	unsigned long gpa;
	unsigned int shift, lpid;
	pte_t *ptep;

	gpa = n_rmap & RMAP_NESTED_GPA_MASK;
	lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
	gp = kvmhv_find_nested(kvm, lpid);
	if (!gp)
		return;

	/* Find and invalidate the pte */
	ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
	/* Don't spuriously invalidate ptes if the pfn has changed */
	if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa))
		kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
}

static void kvmhv_remove_nest_rmap_list(struct kvm *kvm, unsigned long *rmapp,
					unsigned long hpa, unsigned long mask)
{
	struct llist_node *entry = llist_del_all((struct llist_head *) rmapp);
	struct rmap_nested *cursor;
	unsigned long rmap;

	for_each_nest_rmap_safe(cursor, entry, &rmap) {
		kvmhv_remove_nest_rmap(kvm, rmap, hpa, mask);
		kfree(cursor);
	}
}

/* called with kvm->mmu_lock held */
void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
				  const struct kvm_memory_slot *memslot,
				  unsigned long gpa, unsigned long hpa,
				  unsigned long nbytes)
{
	unsigned long gfn, end_gfn;
	unsigned long addr_mask;

	if (!memslot)
		return;
	gfn = (gpa >> PAGE_SHIFT) - memslot->base_gfn;
	end_gfn = gfn + (nbytes >> PAGE_SHIFT);

	addr_mask = PTE_RPN_MASK & ~(nbytes - 1);
	hpa &= addr_mask;

	for (; gfn < end_gfn; gfn++) {
		unsigned long *rmap = &memslot->arch.rmap[gfn];

		kvmhv_remove_nest_rmap_list(kvm, rmap, hpa, addr_mask);
	}
}

static void kvmhv_free_memslot_nest_rmap(struct kvm_memory_slot *free)
{
	unsigned long page;

	for (page = 0; page < free->npages; page++) {
		unsigned long rmap, *rmapp = &free->arch.rmap[page];
		struct rmap_nested *cursor;
		struct llist_node *entry;

		entry = llist_del_all((struct llist_head *) rmapp);
		for_each_nest_rmap_safe(cursor, entry, &rmap)
			kfree(cursor);
	}
}

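/*
 * Remove the shadow pte, if any, mapping the given address for a
 * nested guest.  Returns whether a pte was present, and stores the
 * page shift of the (potential) mapping via *shift_ret.
 */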
static bool kvmhv_invalidate_shadow_pte(struct kvm_vcpu *vcpu,
					struct kvm_nested_guest *gp,
					long gpa, int *shift_ret)
{
	struct kvm *kvm = vcpu->kvm;
	bool ret = false;
	pte_t *ptep;
	int shift;

	spin_lock(&kvm->mmu_lock);
	ptep = find_kvm_nested_guest_pte(kvm, gp->l1_lpid, gpa, &shift);
	if (!shift)
		shift = PAGE_SHIFT;
	if (ptep && pte_present(*ptep)) {
		kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
		ret = true;
	}
	spin_unlock(&kvm->mmu_lock);

	if (shift_ret)
		*shift_ret = shift;
	return ret;
}

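/*
 * Accessors for the fields of a tlbie instruction image and of its
 * RS and RB register operands, used when emulating a tlbie issued by
 * the L1 hypervisor.
 */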
static inline int get_ric(unsigned int instr)
{
	return (instr >> 18) & 0x3;
}

static inline int get_prs(unsigned int instr)
{
	return (instr >> 17) & 0x1;
}

static inline int get_r(unsigned int instr)
{
	return (instr >> 16) & 0x1;
}

static inline int get_lpid(unsigned long r_val)
{
	return r_val & 0xffffffff;
}

static inline int get_is(unsigned long r_val)
{
	return (r_val >> 10) & 0x3;
}

static inline int get_ap(unsigned long r_val)
{
	return (r_val >> 5) & 0x7;
}

static inline long get_epn(unsigned long r_val)
{
	return r_val >> 12;
}

static int kvmhv_emulate_tlbie_tlb_addr(struct kvm_vcpu *vcpu, int lpid,
					int ap, long epn)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_nested_guest *gp;
	long npages;
	int shift, shadow_shift;
	unsigned long addr;

	shift = ap_to_shift(ap);
	addr = epn << 12;
	if (shift < 0)
		/* Invalid ap encoding */
		return -EINVAL;

	addr &= ~((1UL << shift) - 1);
	npages = 1UL << (shift - PAGE_SHIFT);

	gp = kvmhv_get_nested(kvm, lpid, false);
	if (!gp) /* No such guest -> nothing to do */
		return 0;
	mutex_lock(&gp->tlb_lock);

	/* There may be more than one host page backing this single guest pte */
	do {
		kvmhv_invalidate_shadow_pte(vcpu, gp, addr, &shadow_shift);

		npages -= 1UL << (shadow_shift - PAGE_SHIFT);
		addr += 1UL << shadow_shift;
	} while (npages > 0);

	mutex_unlock(&gp->tlb_lock);
	kvmhv_put_nested(gp);
	return 0;
}

static void kvmhv_emulate_tlbie_lpid(struct kvm_vcpu *vcpu,
				     struct kvm_nested_guest *gp, int ric)
{
	struct kvm *kvm = vcpu->kvm;

	mutex_lock(&gp->tlb_lock);
	switch (ric) {
	case 0:
		/* Invalidate TLB */
		spin_lock(&kvm->mmu_lock);
		kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
					  gp->shadow_lpid);
		kvmhv_flush_lpid(gp->shadow_lpid);
		spin_unlock(&kvm->mmu_lock);
		break;
	case 1:
		/*
		 * Invalidate PWC
		 * We don't cache this -> nothing to do
		 */
		break;
	case 2:
		/* Invalidate TLB, PWC and caching of partition table entries */
		kvmhv_flush_nested(gp);
		break;
	default:
		break;
	}
	mutex_unlock(&gp->tlb_lock);
}

static void kvmhv_emulate_tlbie_all_lpid(struct kvm_vcpu *vcpu, int ric)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_nested_guest *gp;
	int i;

	spin_lock(&kvm->mmu_lock);
	for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
		gp = kvm->arch.nested_guests[i];
		if (gp) {
			spin_unlock(&kvm->mmu_lock);
			kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
			spin_lock(&kvm->mmu_lock);
		}
	}
	spin_unlock(&kvm->mmu_lock);
}

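/*
 * Emulate a hypervisor-privileged tlbie executed by L1, given the
 * instruction image and the contents of its RS and RB operands.
 */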
static int kvmhv_emulate_priv_tlbie(struct kvm_vcpu *vcpu, unsigned int instr,
				    unsigned long rsval, unsigned long rbval)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_nested_guest *gp;
	int r, ric, prs, is, ap;
	int lpid;
	long epn;
	int ret = 0;

	ric = get_ric(instr);
	prs = get_prs(instr);
	r = get_r(instr);
	lpid = get_lpid(rsval);
	is = get_is(rbval);

	/*
	 * These cases are invalid and are not handled:
	 * r   != 1 -> Only radix supported
	 * prs == 1 -> Not HV privileged
	 * ric == 3 -> No cluster bombs for radix
	 * is  == 1 -> Partition scoped translations not associated with pid
	 * (!is) && (ric == 1 || ric == 2) -> Not supported by ISA
	 */
	if ((!r) || (prs) || (ric == 3) || (is == 1) ||
	    ((!is) && (ric == 1 || ric == 2)))
		return -EINVAL;

	switch (is) {
	case 0:
		/*
		 * We know ric == 0
		 * Invalidate TLB for a given target address
		 */
		epn = get_epn(rbval);
		ap = get_ap(rbval);
		ret = kvmhv_emulate_tlbie_tlb_addr(vcpu, lpid, ap, epn);
		break;
	case 2:
		/* Invalidate matching LPID */
		gp = kvmhv_get_nested(kvm, lpid, false);
		if (gp) {
			kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
			kvmhv_put_nested(gp);
		}
		break;
	case 3:
		/* Invalidate ALL LPIDs */
		kvmhv_emulate_tlbie_all_lpid(vcpu, ric);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/*
 * This handles the H_TLB_INVALIDATE hcall.
 * Parameters are (r4) tlbie instruction code, (r5) rS contents,
 * (r6) rB contents.
 */
long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu)
{
	int ret;

	ret = kvmhv_emulate_priv_tlbie(vcpu, kvmppc_get_gpr(vcpu, 4),
			kvmppc_get_gpr(vcpu, 5), kvmppc_get_gpr(vcpu, 6));
	if (ret)
		return H_PARAMETER;
	return H_SUCCESS;
}

/* Used to convert a nested guest real address to a L1 guest real address */
static int kvmhv_translate_addr_nested(struct kvm_vcpu *vcpu,
				       struct kvm_nested_guest *gp,
				       unsigned long n_gpa, unsigned long dsisr,
				       struct kvmppc_pte *gpte_p)
{
	u64 fault_addr, flags = dsisr & DSISR_ISSTORE;
	int ret;

	ret = kvmppc_mmu_walk_radix_tree(vcpu, n_gpa, gpte_p, gp->l1_gr_to_hr,
					 &fault_addr);

	if (ret) {
		/* We didn't find a pte */
		if (ret == -EINVAL) {
			/* Unsupported mmu config */
			flags |= DSISR_UNSUPP_MMU;
		} else if (ret == -ENOENT) {
			/* No translation found */
			flags |= DSISR_NOHPTE;
		} else if (ret == -EFAULT) {
			/* Couldn't access L1 real address */
			flags |= DSISR_PRTABLE_FAULT;
			vcpu->arch.fault_gpa = fault_addr;
		} else {
			/* Unknown error */
			return ret;
		}
		goto forward_to_l1;
	} else {
		/* We found a pte -> check permissions */
		if (dsisr & DSISR_ISSTORE) {
			/* Can we write? */
			if (!gpte_p->may_write) {
				flags |= DSISR_PROTFAULT;
				goto forward_to_l1;
			}
		} else if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) {
			/* Can we execute? */
			if (!gpte_p->may_execute) {
				flags |= SRR1_ISI_N_G_OR_CIP;
				goto forward_to_l1;
			}
		} else {
			/* Can we read? */
			if (!gpte_p->may_read && !gpte_p->may_write) {
				flags |= DSISR_PROTFAULT;
				goto forward_to_l1;
			}
		}
	}

	return 0;

forward_to_l1:
	vcpu->arch.fault_dsisr = flags;
	if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) {
		vcpu->arch.shregs.msr &= SRR1_MSR_BITS;
		vcpu->arch.shregs.msr |= flags;
	}
	return RESUME_HOST;
}

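/*
 * Handle a fault taken because the reference/change bits were not
 * set: provided L1's own partition-scoped pte already has the bits
 * set, set them both in our pte mapping the L1 page and in the
 * shadow pte for the nested guest; otherwise let L1 handle it.
 */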
static long kvmhv_handle_nested_set_rc(struct kvm_vcpu *vcpu,
				       struct kvm_nested_guest *gp,
				       unsigned long n_gpa,
				       struct kvmppc_pte gpte,
				       unsigned long dsisr)
{
	struct kvm *kvm = vcpu->kvm;
	bool writing = !!(dsisr & DSISR_ISSTORE);
	u64 pgflags;
	long ret;

	/* Are the rc bits set in the L1 partition scoped pte? */
	pgflags = _PAGE_ACCESSED;
	if (writing)
		pgflags |= _PAGE_DIRTY;
	if (pgflags & ~gpte.rc)
		return RESUME_HOST;

	spin_lock(&kvm->mmu_lock);
	/* Set the rc bit in the pte of our (L0) pgtable for the L1 guest */
	ret = kvmppc_hv_handle_set_rc(kvm, false, writing,
				      gpte.raddr, kvm->arch.lpid);
	if (!ret) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Set the rc bit in the pte of the shadow_pgtable for the nested guest */
	ret = kvmppc_hv_handle_set_rc(kvm, true, writing,
				      n_gpa, gp->l1_lpid);
	if (!ret)
		ret = -EINVAL;
	else
		ret = 0;

out_unlock:
	spin_unlock(&kvm->mmu_lock);
	return ret;
}

static inline int kvmppc_radix_level_to_shift(int level)
{
	switch (level) {
	case 2:
		return PUD_SHIFT;
	case 1:
		return PMD_SHIFT;
	default:
		return PAGE_SHIFT;
	}
}

static inline int kvmppc_radix_shift_to_level(int shift)
{
	if (shift == PUD_SHIFT)
		return 2;
	if (shift == PMD_SHIFT)
		return 1;
	if (shift == PAGE_SHIFT)
		return 0;
	WARN_ON_ONCE(1);
	return 0;
}

/* called with gp->tlb_lock held */
static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
					  struct kvm_nested_guest *gp)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_memory_slot *memslot;
	struct rmap_nested *n_rmap;
	struct kvmppc_pte gpte;
	pte_t pte, *pte_p;
	unsigned long mmu_seq;
	unsigned long dsisr = vcpu->arch.fault_dsisr;
	unsigned long ea = vcpu->arch.fault_dar;
	unsigned long *rmapp;
	unsigned long n_gpa, gpa, gfn, perm = 0UL;
	unsigned int shift, l1_shift, level;
	bool writing = !!(dsisr & DSISR_ISSTORE);
	bool kvm_ro = false;
	long int ret;

	if (!gp->l1_gr_to_hr) {
		kvmhv_update_ptbl_cache(gp);
		if (!gp->l1_gr_to_hr)
			return RESUME_HOST;
	}

	/* Convert the nested guest real address into a L1 guest real address */

	n_gpa = vcpu->arch.fault_gpa & ~0xF000000000000FFFULL;
	if (!(dsisr & DSISR_PRTABLE_FAULT))
		n_gpa |= ea & 0xFFF;
	ret = kvmhv_translate_addr_nested(vcpu, gp, n_gpa, dsisr, &gpte);

	/*
	 * If the hardware found a translation but we don't now have a usable
	 * translation in the l1 partition-scoped tree, remove the shadow pte
	 * and let the guest retry.
	 */
	if (ret == RESUME_HOST &&
	    (dsisr & (DSISR_PROTFAULT | DSISR_BADACCESS | DSISR_NOEXEC_OR_G |
		      DSISR_BAD_COPYPASTE)))
		goto inval;
	if (ret)
		return ret;

	/* Failed to set the reference/change bits */
	if (dsisr & DSISR_SET_RC) {
		ret = kvmhv_handle_nested_set_rc(vcpu, gp, n_gpa, gpte, dsisr);
		if (ret == RESUME_HOST)
			return ret;
		if (ret)
			goto inval;
		dsisr &= ~DSISR_SET_RC;
		if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE |
			       DSISR_PROTFAULT)))
			return RESUME_GUEST;
	}

	/*
	 * We took an HISI or HDSI while we were running a nested guest which
	 * means we have no partition scoped translation for that. This means
	 * we need to insert a pte for the mapping into our shadow_pgtable.
	 */

	l1_shift = gpte.page_shift;
	if (l1_shift < PAGE_SHIFT) {
		/* We don't support l1 using a page size smaller than our own */
		pr_err("KVM: L1 guest page shift (%d) less than our own (%d)\n",
			l1_shift, PAGE_SHIFT);
		return -EINVAL;
	}
	gpa = gpte.raddr;
	gfn = gpa >> PAGE_SHIFT;

	/* 1. Get the corresponding host memslot */

	memslot = gfn_to_memslot(kvm, gfn);
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
		if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS)) {
			/* unusual error -> reflect to the guest as a DSI */
			kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
			return RESUME_GUEST;
		}

		/* passthrough of emulated MMIO case */
		return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing);
	}
	if (memslot->flags & KVM_MEM_READONLY) {
		if (writing) {
			/* Give the guest a DSI */
			kvmppc_core_queue_data_storage(vcpu, ea,
					DSISR_ISSTORE | DSISR_PROTFAULT);
			return RESUME_GUEST;
		}
		kvm_ro = true;
	}

	/* 2. Find the host pte for this L1 guest real address */

	/* Used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	/* Try to find a translation in our partition scoped tables for L1 */
	pte = __pte(0);
	spin_lock(&kvm->mmu_lock);
	pte_p = find_kvm_secondary_pte(kvm, gpa, &shift);
	if (!shift)
		shift = PAGE_SHIFT;
	if (pte_p)
		pte = *pte_p;
	spin_unlock(&kvm->mmu_lock);

	if (!pte_present(pte) || (writing && !(pte_val(pte) & _PAGE_WRITE))) {
		/* No suitable pte found -> try to insert a mapping */
		ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot,
					writing, kvm_ro, &pte, &level);
		if (ret == -EAGAIN)
			return RESUME_GUEST;
		else if (ret)
			return ret;
		shift = kvmppc_radix_level_to_shift(level);
	}
	/* Align gfn to the start of the page */
	gfn = (gpa & ~((1UL << shift) - 1)) >> PAGE_SHIFT;

	/* 3. Compute the pte we need to insert for nest_gpa -> host r_addr */

	/* The permissions are the combination of the host and L1 guest ptes */
	perm |= gpte.may_read ? 0UL : _PAGE_READ;
	perm |= gpte.may_write ? 0UL : _PAGE_WRITE;
	perm |= gpte.may_execute ? 0UL : _PAGE_EXEC;
	/* Only set accessed/dirty (rc) bits if set in host and l1 guest ptes */
	perm |= (gpte.rc & _PAGE_ACCESSED) ? 0UL : _PAGE_ACCESSED;
	perm |= ((gpte.rc & _PAGE_DIRTY) && writing) ? 0UL : _PAGE_DIRTY;
	pte = __pte(pte_val(pte) & ~perm);

	/* What size pte can we insert? */
	if (shift > l1_shift) {
		u64 mask;
		unsigned int actual_shift = PAGE_SHIFT;

		if (PMD_SHIFT < l1_shift)
			actual_shift = PMD_SHIFT;
		mask = (1UL << shift) - (1UL << actual_shift);
		pte = __pte(pte_val(pte) | (gpa & mask));
		shift = actual_shift;
	}
	level = kvmppc_radix_shift_to_level(shift);
	n_gpa &= ~((1UL << shift) - 1);

	/* 4. Insert the pte into our shadow_pgtable */

	n_rmap = kzalloc(sizeof(*n_rmap), GFP_KERNEL);
	if (!n_rmap)
		return RESUME_GUEST; /* Let the guest try again */
	n_rmap->rmap = (n_gpa & RMAP_NESTED_GPA_MASK) |
		(((unsigned long) gp->l1_lpid) << RMAP_NESTED_LPID_SHIFT);
	rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
	ret = kvmppc_create_pte(kvm, gp->shadow_pgtable, pte, n_gpa, level,
				mmu_seq, gp->shadow_lpid, rmapp, &n_rmap);
	kfree(n_rmap);
	if (ret == -EAGAIN)
		ret = RESUME_GUEST;	/* Let the guest try again */

	return ret;

 inval:
	kvmhv_invalidate_shadow_pte(vcpu, gp, n_gpa, NULL);
	return RESUME_GUEST;
}

long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu)
{
	struct kvm_nested_guest *gp = vcpu->arch.nested;
	long int ret;

	mutex_lock(&gp->tlb_lock);
	ret = __kvmhv_nested_page_fault(vcpu, gp);
	mutex_unlock(&gp->tlb_lock);
	return ret;
}

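/*
 * Return the next LPID (greater than the one given) for which a
 * nested guest currently exists, or -1 if there are none.
 */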
int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid)
{
	int ret = -1;

	spin_lock(&kvm->mmu_lock);
	while (++lpid <= kvm->arch.max_nested_lpid) {
		if (kvm->arch.nested_guests[lpid]) {
			ret = lpid;
			break;
		}
	}
	spin_unlock(&kvm->mmu_lock);
	return ret;
}