// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corporation, 2018
 * Authors Suraj Jitindar Singh <sjitindarsingh@gmail.com>
 *	   Paul Mackerras <paulus@ozlabs.org>
 *
 * Description: KVM functions specific to running nested KVM-HV guests
 * on Book3S processors (specifically POWER9 and later).
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/llist.h>
#include <linux/pgtable.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu.h>
#include <asm/pgalloc.h>
#include <asm/pte-walk.h>
#include <asm/reg.h>
#include <asm/plpar_wrappers.h>
#include <asm/firmware.h>

static struct patb_entry *pseries_partition_tb;

static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp);
static void kvmhv_free_memslot_nest_rmap(struct kvm_memory_slot *free);

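/*
 * Save the hypervisor-privileged register state of a vcpu; used by
 * H_ENTER_NESTED below to preserve the L1 state while the vcpu runs
 * as an L2.
 */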
void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	hr->pcr = vc->pcr | PCR_MASK;
	hr->dpdes = vcpu->arch.doorbell_request;
	hr->hfscr = vcpu->arch.hfscr;
	hr->tb_offset = vc->tb_offset;
	hr->dawr0 = vcpu->arch.dawr0;
	hr->dawrx0 = vcpu->arch.dawrx0;
	hr->ciabr = vcpu->arch.ciabr;
	hr->purr = vcpu->arch.purr;
	hr->spurr = vcpu->arch.spurr;
	hr->ic = vcpu->arch.ic;
	hr->vtb = vc->vtb;
	hr->srr0 = vcpu->arch.shregs.srr0;
	hr->srr1 = vcpu->arch.shregs.srr1;
	hr->sprg[0] = vcpu->arch.shregs.sprg0;
	hr->sprg[1] = vcpu->arch.shregs.sprg1;
	hr->sprg[2] = vcpu->arch.shregs.sprg2;
	hr->sprg[3] = vcpu->arch.shregs.sprg3;
	hr->pidr = vcpu->arch.pid;
	hr->cfar = vcpu->arch.cfar;
	hr->ppr = vcpu->arch.ppr;
	hr->dawr1 = vcpu->arch.dawr1;
	hr->dawrx1 = vcpu->arch.dawrx1;
}

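/*
 * The L1 hypervisor may run with a different endianness from the host,
 * in which case the hv_guest_state and pt_regs images copied to and
 * from L1 memory need byteswapping; kvmppc_need_byteswap() tells us
 * whether that is the case for this vcpu.
 */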
/* Use noinline_for_stack due to https://bugs.llvm.org/show_bug.cgi?id=49610 */
static noinline_for_stack void byteswap_pt_regs(struct pt_regs *regs)
{
	unsigned long *addr = (unsigned long *) regs;

	for (; addr < ((unsigned long *) (regs + 1)); addr++)
		*addr = swab64(*addr);
}

static void byteswap_hv_regs(struct hv_guest_state *hr)
{
	hr->version = swab64(hr->version);
	hr->lpid = swab32(hr->lpid);
	hr->vcpu_token = swab32(hr->vcpu_token);
	hr->lpcr = swab64(hr->lpcr);
	hr->pcr = swab64(hr->pcr) | PCR_MASK;
	hr->amor = swab64(hr->amor);
	hr->dpdes = swab64(hr->dpdes);
	hr->hfscr = swab64(hr->hfscr);
	hr->tb_offset = swab64(hr->tb_offset);
	hr->dawr0 = swab64(hr->dawr0);
	hr->dawrx0 = swab64(hr->dawrx0);
	hr->ciabr = swab64(hr->ciabr);
	hr->hdec_expiry = swab64(hr->hdec_expiry);
	hr->purr = swab64(hr->purr);
	hr->spurr = swab64(hr->spurr);
	hr->ic = swab64(hr->ic);
	hr->vtb = swab64(hr->vtb);
	hr->hdar = swab64(hr->hdar);
	hr->hdsisr = swab64(hr->hdsisr);
	hr->heir = swab64(hr->heir);
	hr->asdr = swab64(hr->asdr);
	hr->srr0 = swab64(hr->srr0);
	hr->srr1 = swab64(hr->srr1);
	hr->sprg[0] = swab64(hr->sprg[0]);
	hr->sprg[1] = swab64(hr->sprg[1]);
	hr->sprg[2] = swab64(hr->sprg[2]);
	hr->sprg[3] = swab64(hr->sprg[3]);
	hr->pidr = swab64(hr->pidr);
	hr->cfar = swab64(hr->cfar);
	hr->ppr = swab64(hr->ppr);
	hr->dawr1 = swab64(hr->dawr1);
	hr->dawrx1 = swab64(hr->dawrx1);
}

static void save_hv_return_state(struct kvm_vcpu *vcpu,
				 struct hv_guest_state *hr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	hr->dpdes = vcpu->arch.doorbell_request;
	hr->purr = vcpu->arch.purr;
	hr->spurr = vcpu->arch.spurr;
	hr->ic = vcpu->arch.ic;
	hr->vtb = vc->vtb;
	hr->srr0 = vcpu->arch.shregs.srr0;
	hr->srr1 = vcpu->arch.shregs.srr1;
	hr->sprg[0] = vcpu->arch.shregs.sprg0;
	hr->sprg[1] = vcpu->arch.shregs.sprg1;
	hr->sprg[2] = vcpu->arch.shregs.sprg2;
	hr->sprg[3] = vcpu->arch.shregs.sprg3;
	hr->pidr = vcpu->arch.pid;
	hr->cfar = vcpu->arch.cfar;
	hr->ppr = vcpu->arch.ppr;
	switch (vcpu->arch.trap) {
	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
		hr->hdar = vcpu->arch.fault_dar;
		hr->hdsisr = vcpu->arch.fault_dsisr;
		hr->asdr = vcpu->arch.fault_gpa;
		break;
	case BOOK3S_INTERRUPT_H_INST_STORAGE:
		hr->asdr = vcpu->arch.fault_gpa;
		break;
	case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
		hr->hfscr = ((~HFSCR_INTR_CAUSE & hr->hfscr) |
			     (HFSCR_INTR_CAUSE & vcpu->arch.hfscr));
		break;
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		hr->heir = vcpu->arch.emul_inst;
		break;
	}
}

static void restore_hv_regs(struct kvm_vcpu *vcpu, const struct hv_guest_state *hr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	vc->pcr = hr->pcr | PCR_MASK;
	vcpu->arch.doorbell_request = hr->dpdes;
	vcpu->arch.hfscr = hr->hfscr;
	vcpu->arch.dawr0 = hr->dawr0;
	vcpu->arch.dawrx0 = hr->dawrx0;
	vcpu->arch.ciabr = hr->ciabr;
	vcpu->arch.purr = hr->purr;
	vcpu->arch.spurr = hr->spurr;
	vcpu->arch.ic = hr->ic;
	vc->vtb = hr->vtb;
	vcpu->arch.shregs.srr0 = hr->srr0;
	vcpu->arch.shregs.srr1 = hr->srr1;
	vcpu->arch.shregs.sprg0 = hr->sprg[0];
	vcpu->arch.shregs.sprg1 = hr->sprg[1];
	vcpu->arch.shregs.sprg2 = hr->sprg[2];
	vcpu->arch.shregs.sprg3 = hr->sprg[3];
	vcpu->arch.pid = hr->pidr;
	vcpu->arch.cfar = hr->cfar;
	vcpu->arch.ppr = hr->ppr;
	vcpu->arch.dawr1 = hr->dawr1;
	vcpu->arch.dawrx1 = hr->dawrx1;
}

void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
				   struct hv_guest_state *hr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	/*
	 * This L2 vCPU might have received a doorbell while H_ENTER_NESTED
	 * was being handled.  Make sure we preserve the doorbell if it was
	 * either:
	 * a) sent after H_ENTER_NESTED was called on this vCPU
	 *    (arch.doorbell_request would be 1), or
	 * b) not handled, and L2 exited for some other reason
	 *    (hr->dpdes would be 1).
	 */
	vcpu->arch.doorbell_request = vcpu->arch.doorbell_request | hr->dpdes;
	vcpu->arch.hfscr = hr->hfscr;
	vcpu->arch.purr = hr->purr;
	vcpu->arch.spurr = hr->spurr;
	vcpu->arch.ic = hr->ic;
	vc->vtb = hr->vtb;
	vcpu->arch.fault_dar = hr->hdar;
	vcpu->arch.fault_dsisr = hr->hdsisr;
	vcpu->arch.fault_gpa = hr->asdr;
	vcpu->arch.emul_inst = hr->heir;
	vcpu->arch.shregs.srr0 = hr->srr0;
	vcpu->arch.shregs.srr1 = hr->srr1;
	vcpu->arch.shregs.sprg0 = hr->sprg[0];
	vcpu->arch.shregs.sprg1 = hr->sprg[1];
	vcpu->arch.shregs.sprg2 = hr->sprg[2];
	vcpu->arch.shregs.sprg3 = hr->sprg[3];
	vcpu->arch.pid = hr->pidr;
	vcpu->arch.cfar = hr->cfar;
	vcpu->arch.ppr = hr->ppr;
}

static void kvmhv_nested_mmio_needed(struct kvm_vcpu *vcpu, u64 regs_ptr)
{
	/* No need to reflect the page fault to L1, we've handled it */
	vcpu->arch.trap = 0;

	/*
	 * Since the L2 gprs have already been written back into L1 memory when
	 * we complete the mmio, store the L1 memory location of the L2 gpr
	 * that the mmio is loading into, so that the loaded value can be
	 * written there in kvmppc_complete_mmio_load().
	 */
	if (((vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) == KVM_MMIO_REG_GPR)
	    && (vcpu->mmio_is_write == 0)) {
		vcpu->arch.nested_io_gpr = (gpa_t) regs_ptr +
					   offsetof(struct pt_regs,
						    gpr[vcpu->arch.io_gpr]);
		vcpu->arch.io_gpr = KVM_MMIO_REG_NESTED_GPR;
	}
}

static int kvmhv_read_guest_state_and_regs(struct kvm_vcpu *vcpu,
					   struct hv_guest_state *l2_hv,
					   struct pt_regs *l2_regs,
					   u64 hv_ptr, u64 regs_ptr)
{
	int size;

	if (kvm_vcpu_read_guest(vcpu, hv_ptr, &l2_hv->version,
				sizeof(l2_hv->version)))
		return -1;

	if (kvmppc_need_byteswap(vcpu))
		l2_hv->version = swab64(l2_hv->version);

	size = hv_guest_state_size(l2_hv->version);
	if (size < 0)
		return -1;

	return kvm_vcpu_read_guest(vcpu, hv_ptr, l2_hv, size) ||
		kvm_vcpu_read_guest(vcpu, regs_ptr, l2_regs,
				    sizeof(struct pt_regs));
}

static int kvmhv_write_guest_state_and_regs(struct kvm_vcpu *vcpu,
					    struct hv_guest_state *l2_hv,
					    struct pt_regs *l2_regs,
					    u64 hv_ptr, u64 regs_ptr)
{
	int size;

	size = hv_guest_state_size(l2_hv->version);
	if (size < 0)
		return -1;

	return kvm_vcpu_write_guest(vcpu, hv_ptr, l2_hv, size) ||
		kvm_vcpu_write_guest(vcpu, regs_ptr, l2_regs,
				     sizeof(struct pt_regs));
}

static void load_l2_hv_regs(struct kvm_vcpu *vcpu,
			    const struct hv_guest_state *l2_hv,
			    const struct hv_guest_state *l1_hv, u64 *lpcr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	u64 mask;

	restore_hv_regs(vcpu, l2_hv);

	/*
	 * Don't let L1 change LPCR bits for the L2 except these:
	 */
	mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD | LPCR_MER;

	/*
	 * Additional filtering is required depending on hardware
	 * and configuration.
	 */
	*lpcr = kvmppc_filter_lpcr_hv(vcpu->kvm,
				      (vc->lpcr & ~mask) | (*lpcr & mask));

	/*
	 * Don't let L1 enable features for L2 which we don't allow for L1,
	 * but preserve the interrupt cause field.
	 */
	vcpu->arch.hfscr = l2_hv->hfscr & (HFSCR_INTR_CAUSE | vcpu->arch.hfscr_permitted);

	/* Don't let data address watchpoint match in hypervisor state */
	vcpu->arch.dawrx0 = l2_hv->dawrx0 & ~DAWRX_HYP;
	vcpu->arch.dawrx1 = l2_hv->dawrx1 & ~DAWRX_HYP;

	/* Don't let completed instruction address breakpt match in HV state */
	if ((l2_hv->ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
		vcpu->arch.ciabr = l2_hv->ciabr & ~CIABR_PRIV;
}

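/*
 * Handle the H_ENTER_NESTED hcall from L1.
 * r4 = L1 guest real address of an hv_guest_state for the L2 vcpu
 * r5 = L1 guest real address of a pt_regs image with the L2 GPRs
 *
 * The L2 state is copied in and validated, the L1 state is saved, and
 * the vcpu then runs as the L2 until it takes an exit that has to be
 * handled by L1.  The L2 state is then copied back out to L1 memory and
 * the L1 state restored; the return value is the trap number for L1 to
 * handle, or an H_* status if the hcall itself failed.
 */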
long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
{
	long int err, r;
	struct kvm_nested_guest *l2;
	struct pt_regs l2_regs, saved_l1_regs;
	struct hv_guest_state l2_hv = {0}, saved_l1_hv;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	u64 hv_ptr, regs_ptr;
	u64 hdec_exp, lpcr;
	s64 delta_purr, delta_spurr, delta_ic, delta_vtb;

	if (vcpu->kvm->arch.l1_ptcr == 0)
		return H_NOT_AVAILABLE;

	if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr))
		return H_BAD_MODE;

	/* copy parameters in */
	hv_ptr = kvmppc_get_gpr(vcpu, 4);
	regs_ptr = kvmppc_get_gpr(vcpu, 5);
	kvm_vcpu_srcu_read_lock(vcpu);
	err = kvmhv_read_guest_state_and_regs(vcpu, &l2_hv, &l2_regs,
					      hv_ptr, regs_ptr);
	kvm_vcpu_srcu_read_unlock(vcpu);
	if (err)
		return H_PARAMETER;

	if (kvmppc_need_byteswap(vcpu))
		byteswap_hv_regs(&l2_hv);
	if (l2_hv.version > HV_GUEST_STATE_VERSION)
		return H_P2;

	if (kvmppc_need_byteswap(vcpu))
		byteswap_pt_regs(&l2_regs);
	if (l2_hv.vcpu_token >= NR_CPUS)
		return H_PARAMETER;

	/*
	 * L1 must have set up a suspended state to enter the L2 in a
	 * transactional state, and only in that case. These have to be
	 * filtered out here to prevent causing a TM Bad Thing in the
	 * host HRFID. We could synthesize a TM Bad Thing back to the L1
	 * here but there doesn't seem like much point.
	 */
	if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr)) {
		if (!MSR_TM_ACTIVE(l2_regs.msr))
			return H_BAD_MODE;
	} else {
		if (l2_regs.msr & MSR_TS_MASK)
			return H_BAD_MODE;
		if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_TS_MASK))
			return H_BAD_MODE;
	}

	/* translate lpid */
	l2 = kvmhv_get_nested(vcpu->kvm, l2_hv.lpid, true);
	if (!l2)
		return H_PARAMETER;
	if (!l2->l1_gr_to_hr) {
		mutex_lock(&l2->tlb_lock);
		kvmhv_update_ptbl_cache(l2);
		mutex_unlock(&l2->tlb_lock);
	}

	/* save l1 values of things */
	vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
	saved_l1_regs = vcpu->arch.regs;
	kvmhv_save_hv_regs(vcpu, &saved_l1_hv);

	/* convert TB values/offsets to host (L0) values */
	hdec_exp = l2_hv.hdec_expiry - vc->tb_offset;
	vc->tb_offset += l2_hv.tb_offset;
	vcpu->arch.dec_expires += l2_hv.tb_offset;

	/* set L1 state to L2 state */
	vcpu->arch.nested = l2;
	vcpu->arch.nested_vcpu_id = l2_hv.vcpu_token;
	vcpu->arch.nested_hfscr = l2_hv.hfscr;
	vcpu->arch.regs = l2_regs;

	/* Guest must always run with ME enabled, HV disabled. */
	vcpu->arch.shregs.msr = (vcpu->arch.regs.msr | MSR_ME) & ~MSR_HV;

	lpcr = l2_hv.lpcr;
	load_l2_hv_regs(vcpu, &l2_hv, &saved_l1_hv, &lpcr);

	vcpu->arch.ret = RESUME_GUEST;
	vcpu->arch.trap = 0;
	do {
		r = kvmhv_run_single_vcpu(vcpu, hdec_exp, lpcr);
	} while (is_kvmppc_resume_guest(r));

	/* save L2 state for return */
	l2_regs = vcpu->arch.regs;
	l2_regs.msr = vcpu->arch.shregs.msr;
	delta_purr = vcpu->arch.purr - l2_hv.purr;
	delta_spurr = vcpu->arch.spurr - l2_hv.spurr;
	delta_ic = vcpu->arch.ic - l2_hv.ic;
	delta_vtb = vc->vtb - l2_hv.vtb;
	save_hv_return_state(vcpu, &l2_hv);

	/* restore L1 state */
	vcpu->arch.nested = NULL;
	vcpu->arch.regs = saved_l1_regs;
	vcpu->arch.shregs.msr = saved_l1_regs.msr & ~MSR_TS_MASK;
	/* set L1 MSR TS field according to L2 transaction state */
	if (l2_regs.msr & MSR_TS_MASK)
		vcpu->arch.shregs.msr |= MSR_TS_S;
	vc->tb_offset = saved_l1_hv.tb_offset;
	/* XXX: is this always the same delta as saved_l1_hv.tb_offset? */
	vcpu->arch.dec_expires -= l2_hv.tb_offset;
	restore_hv_regs(vcpu, &saved_l1_hv);
	vcpu->arch.purr += delta_purr;
	vcpu->arch.spurr += delta_spurr;
	vcpu->arch.ic += delta_ic;
	vc->vtb += delta_vtb;

	kvmhv_put_nested(l2);

	/* copy l2_hv_state and regs back to guest */
	if (kvmppc_need_byteswap(vcpu)) {
		byteswap_hv_regs(&l2_hv);
		byteswap_pt_regs(&l2_regs);
	}
	kvm_vcpu_srcu_read_lock(vcpu);
	err = kvmhv_write_guest_state_and_regs(vcpu, &l2_hv, &l2_regs,
					       hv_ptr, regs_ptr);
	kvm_vcpu_srcu_read_unlock(vcpu);
	if (err)
		return H_AUTHORITY;

	if (r == -EINTR)
		return H_INTERRUPT;

	if (vcpu->mmio_needed) {
		kvmhv_nested_mmio_needed(vcpu, regs_ptr);
		return H_TOO_HARD;
	}

	return vcpu->arch.trap;
}

long kvmhv_nested_init(void)
{
	long int ptb_order;
	unsigned long ptcr;
	long rc;

	if (!kvmhv_on_pseries())
		return 0;
	if (!radix_enabled())
		return -ENODEV;

	/* Partition table entry is 1<<4 bytes in size, hence the 4. */
	ptb_order = KVM_MAX_NESTED_GUESTS_SHIFT + 4;
	/* Minimum partition table size is 1<<12 bytes */
	if (ptb_order < 12)
		ptb_order = 12;
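	/*
	 * ptb_order is log2 of the partition table size in bytes: each
	 * entry is 1 << 4 bytes, so the minimum order of 12 gives a 4kB
	 * table of 1 << (12 - 4) = 256 entries.  H_SET_PARTITION_TABLE
	 * below encodes the size as ptb_order - 12 in the low bits of
	 * the PTCR value.
	 */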
	pseries_partition_tb = kmalloc(sizeof(struct patb_entry) << ptb_order,
				       GFP_KERNEL);
	if (!pseries_partition_tb) {
		pr_err("kvm-hv: failed to allocate nested partition table\n");
		return -ENOMEM;
	}

	ptcr = __pa(pseries_partition_tb) | (ptb_order - 12);
	rc = plpar_hcall_norets(H_SET_PARTITION_TABLE, ptcr);
	if (rc != H_SUCCESS) {
		pr_err("kvm-hv: Parent hypervisor does not support nesting (rc=%ld)\n",
		       rc);
		kfree(pseries_partition_tb);
		pseries_partition_tb = NULL;
		return -ENODEV;
	}

	return 0;
}

void kvmhv_nested_exit(void)
{
	/*
	 * N.B. the kvmhv_on_pseries() test is there because it enables
	 * the compiler to remove the call to plpar_hcall_norets()
	 * when CONFIG_PPC_PSERIES=n.
	 */
	if (kvmhv_on_pseries() && pseries_partition_tb) {
		plpar_hcall_norets(H_SET_PARTITION_TABLE, 0);
		kfree(pseries_partition_tb);
		pseries_partition_tb = NULL;
	}
}

static void kvmhv_flush_lpid(unsigned int lpid)
{
	long rc;

	if (!kvmhv_on_pseries()) {
		radix__flush_all_lpid(lpid);
		return;
	}

	if (!firmware_has_feature(FW_FEATURE_RPT_INVALIDATE))
		rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(2, 0, 1),
					lpid, TLBIEL_INVAL_SET_LPID);
	else
		rc = pseries_rpt_invalidate(lpid, H_RPTI_TARGET_CMMU,
					    H_RPTI_TYPE_NESTED |
					    H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC |
					    H_RPTI_TYPE_PAT,
					    H_RPTI_PAGE_ALL, 0, -1UL);
	if (rc)
		pr_err("KVM: TLB LPID invalidation hcall failed, rc=%ld\n", rc);
}

void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1)
{
	if (!kvmhv_on_pseries()) {
		mmu_partition_table_set_entry(lpid, dw0, dw1, true);
		return;
	}

	pseries_partition_tb[lpid].patb0 = cpu_to_be64(dw0);
	pseries_partition_tb[lpid].patb1 = cpu_to_be64(dw1);
	/* L0 will do the necessary barriers */
	kvmhv_flush_lpid(lpid);
}

static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp)
{
	unsigned long dw0;

	dw0 = PATB_HR | radix__get_tree_size() |
		__pa(gp->shadow_pgtable) | RADIX_PGD_INDEX_SIZE;
	kvmhv_set_ptbl_entry(gp->shadow_lpid, dw0, gp->process_table);
}

/*
 * Handle the H_SET_PARTITION_TABLE hcall.
 * r4 = guest real address of partition table + log_2(size) - 12
 * (formatted as for the PTCR).
 */
long kvmhv_set_partition_table(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long ptcr = kvmppc_get_gpr(vcpu, 4);
	int srcu_idx;
	long ret = H_SUCCESS;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	/* Check partition size and base address. */
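	/*
	 * The PRTS field of the PTCR encodes log2(table size in bytes) - 12,
	 * and each entry is 16 bytes, so the table holds
	 * 1 << (PRTS + 12 - 4) entries; reject a table with more entries
	 * than we support.
	 */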
	if ((ptcr & PRTS_MASK) + 12 - 4 > KVM_MAX_NESTED_GUESTS_SHIFT ||
	    !kvm_is_visible_gfn(vcpu->kvm, (ptcr & PRTB_MASK) >> PAGE_SHIFT))
		ret = H_PARAMETER;
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	if (ret == H_SUCCESS)
		kvm->arch.l1_ptcr = ptcr;

	return ret;
}

/*
 * Handle the H_COPY_TOFROM_GUEST hcall.
 * r4 = L1 lpid of nested guest
 * r5 = pid
 * r6 = eaddr to access
 * r7 = to buffer (L1 gpa)
 * r8 = from buffer (L1 gpa)
 * r9 = n bytes to copy
 */
long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu)
{
	struct kvm_nested_guest *gp;
	int l1_lpid = kvmppc_get_gpr(vcpu, 4);
	int pid = kvmppc_get_gpr(vcpu, 5);
	gva_t eaddr = kvmppc_get_gpr(vcpu, 6);
	gpa_t gp_to = (gpa_t) kvmppc_get_gpr(vcpu, 7);
	gpa_t gp_from = (gpa_t) kvmppc_get_gpr(vcpu, 8);
	void *buf;
	unsigned long n = kvmppc_get_gpr(vcpu, 9);
	bool is_load = !!gp_to;
	long rc;

	if (gp_to && gp_from) /* One must be NULL to determine the direction */
		return H_PARAMETER;

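	/*
	 * The top 12 bits of the effective address must be zero (i.e. a
	 * quadrant-0 address of at most 52 bits).
	 */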
	if (eaddr & (0xFFFUL << 52))
		return H_PARAMETER;

	buf = kzalloc(n, GFP_KERNEL | __GFP_NOWARN);
	if (!buf)
		return H_NO_MEM;

	gp = kvmhv_get_nested(vcpu->kvm, l1_lpid, false);
	if (!gp) {
		rc = H_PARAMETER;
		goto out_free;
	}

	mutex_lock(&gp->tlb_lock);

	if (is_load) {
		/* Load from the nested guest into our buffer */
		rc = __kvmhv_copy_tofrom_guest_radix(gp->shadow_lpid, pid,
						     eaddr, buf, NULL, n);
		if (rc)
			goto not_found;

		/* Write what was loaded into our buffer back to the L1 guest */
		kvm_vcpu_srcu_read_lock(vcpu);
		rc = kvm_vcpu_write_guest(vcpu, gp_to, buf, n);
		kvm_vcpu_srcu_read_unlock(vcpu);
		if (rc)
			goto not_found;
	} else {
		/* Load the data to be stored from the L1 guest into our buf */
		kvm_vcpu_srcu_read_lock(vcpu);
		rc = kvm_vcpu_read_guest(vcpu, gp_from, buf, n);
		kvm_vcpu_srcu_read_unlock(vcpu);
		if (rc)
			goto not_found;

		/* Store from our buffer into the nested guest */
		rc = __kvmhv_copy_tofrom_guest_radix(gp->shadow_lpid, pid,
						     eaddr, NULL, buf, n);
		if (rc)
			goto not_found;
	}

out_unlock:
	mutex_unlock(&gp->tlb_lock);
	kvmhv_put_nested(gp);
out_free:
	kfree(buf);
	return rc;
not_found:
	rc = H_NOT_FOUND;
	goto out_unlock;
}

/*
 * Reload the partition table entry for a guest.
 * Caller must hold gp->tlb_lock.
 */
static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp)
{
	int ret;
	struct patb_entry ptbl_entry;
	unsigned long ptbl_addr;
	struct kvm *kvm = gp->l1_host;

	ret = -EFAULT;
	ptbl_addr = (kvm->arch.l1_ptcr & PRTB_MASK) + (gp->l1_lpid << 4);
	if (gp->l1_lpid < (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4))) {
		int srcu_idx = srcu_read_lock(&kvm->srcu);
		ret = kvm_read_guest(kvm, ptbl_addr,
				     &ptbl_entry, sizeof(ptbl_entry));
		srcu_read_unlock(&kvm->srcu, srcu_idx);
	}
	if (ret) {
		gp->l1_gr_to_hr = 0;
		gp->process_table = 0;
	} else {
		gp->l1_gr_to_hr = be64_to_cpu(ptbl_entry.patb0);
		gp->process_table = be64_to_cpu(ptbl_entry.patb1);
	}
	kvmhv_set_nested_ptbl(gp);
}

void kvmhv_vm_nested_init(struct kvm *kvm)
{
	idr_init(&kvm->arch.kvm_nested_guest_idr);
}

static struct kvm_nested_guest *__find_nested(struct kvm *kvm, int lpid)
{
	return idr_find(&kvm->arch.kvm_nested_guest_idr, lpid);
}

static bool __prealloc_nested(struct kvm *kvm, int lpid)
{
	if (idr_alloc(&kvm->arch.kvm_nested_guest_idr,
		      NULL, lpid, lpid + 1, GFP_KERNEL) != lpid)
		return false;
	return true;
}

static void __add_nested(struct kvm *kvm, int lpid, struct kvm_nested_guest *gp)
{
	if (idr_replace(&kvm->arch.kvm_nested_guest_idr, gp, lpid))
		WARN_ON(1);
}

static void __remove_nested(struct kvm *kvm, int lpid)
{
	idr_remove(&kvm->arch.kvm_nested_guest_idr, lpid);
}

static struct kvm_nested_guest *kvmhv_alloc_nested(struct kvm *kvm, unsigned int lpid)
{
	struct kvm_nested_guest *gp;
	long shadow_lpid;

	gp = kzalloc(sizeof(*gp), GFP_KERNEL);
	if (!gp)
		return NULL;
	gp->l1_host = kvm;
	gp->l1_lpid = lpid;
	mutex_init(&gp->tlb_lock);
	gp->shadow_pgtable = pgd_alloc(kvm->mm);
	if (!gp->shadow_pgtable)
		goto out_free;
	shadow_lpid = kvmppc_alloc_lpid();
	if (shadow_lpid < 0)
		goto out_free2;
	gp->shadow_lpid = shadow_lpid;
	gp->radix = 1;

	memset(gp->prev_cpu, -1, sizeof(gp->prev_cpu));

	return gp;

out_free2:
	pgd_free(kvm->mm, gp->shadow_pgtable);
out_free:
	kfree(gp);
	return NULL;
}

/*
 * Free up any resources allocated for a nested guest.
 */
static void kvmhv_release_nested(struct kvm_nested_guest *gp)
{
	struct kvm *kvm = gp->l1_host;

	if (gp->shadow_pgtable) {
		/*
		 * No vcpu is using this struct and no call to
		 * kvmhv_get_nested can find this struct,
		 * so we don't need to hold kvm->mmu_lock.
		 */
		kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
					  gp->shadow_lpid);
		pgd_free(kvm->mm, gp->shadow_pgtable);
	}
	kvmhv_set_ptbl_entry(gp->shadow_lpid, 0, 0);
	kvmppc_free_lpid(gp->shadow_lpid);
	kfree(gp);
}

static void kvmhv_remove_nested(struct kvm_nested_guest *gp)
{
	struct kvm *kvm = gp->l1_host;
	int lpid = gp->l1_lpid;
	long ref;

	spin_lock(&kvm->mmu_lock);
	if (gp == __find_nested(kvm, lpid)) {
		__remove_nested(kvm, lpid);
		--gp->refcnt;
	}
	ref = gp->refcnt;
	spin_unlock(&kvm->mmu_lock);
	if (ref == 0)
		kvmhv_release_nested(gp);
}

/*
 * Free up all nested resources allocated for this guest.
 * This is called with no vcpus of the guest running, when
 * switching the guest to HPT mode or when destroying the
 * guest.
 */
void kvmhv_release_all_nested(struct kvm *kvm)
{
	int lpid;
	struct kvm_nested_guest *gp;
	struct kvm_nested_guest *freelist = NULL;
	struct kvm_memory_slot *memslot;
	int srcu_idx, bkt;

	spin_lock(&kvm->mmu_lock);
	idr_for_each_entry(&kvm->arch.kvm_nested_guest_idr, gp, lpid) {
		__remove_nested(kvm, lpid);
		if (--gp->refcnt == 0) {
			gp->next = freelist;
			freelist = gp;
		}
	}
	idr_destroy(&kvm->arch.kvm_nested_guest_idr);
	/* idr is empty and may be reused at this point */
	spin_unlock(&kvm->mmu_lock);
	while ((gp = freelist) != NULL) {
		freelist = gp->next;
		kvmhv_release_nested(gp);
	}

	srcu_idx = srcu_read_lock(&kvm->srcu);
	kvm_for_each_memslot(memslot, bkt, kvm_memslots(kvm))
		kvmhv_free_memslot_nest_rmap(memslot);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
}

/* caller must hold gp->tlb_lock */
static void kvmhv_flush_nested(struct kvm_nested_guest *gp)
{
	struct kvm *kvm = gp->l1_host;

	spin_lock(&kvm->mmu_lock);
	kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, gp->shadow_lpid);
	spin_unlock(&kvm->mmu_lock);
	kvmhv_flush_lpid(gp->shadow_lpid);
	kvmhv_update_ptbl_cache(gp);
	if (gp->l1_gr_to_hr == 0)
		kvmhv_remove_nested(gp);
}

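/*
 * Look up, and if create is true possibly allocate, the nested guest
 * struct for a given L1 lpid.  A successful return has incremented the
 * guest's refcount; the caller must drop it with kvmhv_put_nested().
 */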
struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
					  bool create)
{
	struct kvm_nested_guest *gp, *newgp;

	if (l1_lpid >= (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4)))
		return NULL;

	spin_lock(&kvm->mmu_lock);
	gp = __find_nested(kvm, l1_lpid);
	if (gp)
		++gp->refcnt;
	spin_unlock(&kvm->mmu_lock);

	if (gp || !create)
		return gp;

	newgp = kvmhv_alloc_nested(kvm, l1_lpid);
	if (!newgp)
		return NULL;

	if (!__prealloc_nested(kvm, l1_lpid)) {
		kvmhv_release_nested(newgp);
		return NULL;
	}

	spin_lock(&kvm->mmu_lock);
	gp = __find_nested(kvm, l1_lpid);
	if (!gp) {
		__add_nested(kvm, l1_lpid, newgp);
		++newgp->refcnt;
		gp = newgp;
		newgp = NULL;
	}
	++gp->refcnt;
	spin_unlock(&kvm->mmu_lock);

	if (newgp)
		kvmhv_release_nested(newgp);

	return gp;
}

void kvmhv_put_nested(struct kvm_nested_guest *gp)
{
	struct kvm *kvm = gp->l1_host;
	long ref;

	spin_lock(&kvm->mmu_lock);
	ref = --gp->refcnt;
	spin_unlock(&kvm->mmu_lock);
	if (ref == 0)
		kvmhv_release_nested(gp);
}

pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
				 unsigned long ea, unsigned *hshift)
{
	struct kvm_nested_guest *gp;
	pte_t *pte;

	gp = __find_nested(kvm, lpid);
	if (!gp)
		return NULL;

	VM_WARN(!spin_is_locked(&kvm->mmu_lock),
		"%s called with kvm mmu_lock not held\n", __func__);
	pte = __find_linux_pte(gp->shadow_pgtable, ea, NULL, hshift);

	return pte;
}

static inline bool kvmhv_n_rmap_is_equal(u64 rmap_1, u64 rmap_2)
{
	return !((rmap_1 ^ rmap_2) & (RMAP_NESTED_LPID_MASK |
				      RMAP_NESTED_GPA_MASK));
}

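/*
 * An rmap entry for a nested guest encodes the L1 lpid and the nested
 * guest physical address.  To avoid allocating a list node in the
 * common case of a single entry, the entry is stored directly in
 * *rmapp with RMAP_NESTED_IS_SINGLE_ENTRY set; only when a second
 * entry is added does *rmapp become the head of an llist of struct
 * rmap_nested, with the old inline entry carried as the final pseudo
 * node, which for_each_nest_rmap_safe() knows how to decode.
 */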
void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
			    struct rmap_nested **n_rmap)
{
	struct llist_node *entry = ((struct llist_head *) rmapp)->first;
	struct rmap_nested *cursor;
	u64 rmap, new_rmap = (*n_rmap)->rmap;

	/* Are there any existing entries? */
	if (!(*rmapp)) {
		/* No -> use the rmap as a single entry */
		*rmapp = new_rmap | RMAP_NESTED_IS_SINGLE_ENTRY;
		return;
	}

	/* Do any entries match what we're trying to insert? */
	for_each_nest_rmap_safe(cursor, entry, &rmap) {
		if (kvmhv_n_rmap_is_equal(rmap, new_rmap))
			return;
	}

	/* Do we need to create a list or just add the new entry? */
	rmap = *rmapp;
	if (rmap & RMAP_NESTED_IS_SINGLE_ENTRY) /* Not previously a list */
		*rmapp = 0UL;
	llist_add(&((*n_rmap)->list), (struct llist_head *) rmapp);
	if (rmap & RMAP_NESTED_IS_SINGLE_ENTRY) /* Not previously a list */
		(*n_rmap)->list.next = (struct llist_node *) rmap;

	/* Set NULL so not freed by caller */
	*n_rmap = NULL;
}

static void kvmhv_update_nest_rmap_rc(struct kvm *kvm, u64 n_rmap,
				      unsigned long clr, unsigned long set,
				      unsigned long hpa, unsigned long mask)
{
	unsigned long gpa;
	unsigned int shift, lpid;
	pte_t *ptep;

	gpa = n_rmap & RMAP_NESTED_GPA_MASK;
	lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;

	/* Find the pte */
	ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
	/*
	 * If the pte is present and the pfn is still the same, update the pte.
	 * If the pfn has changed then this is a stale rmap entry, the nested
	 * gpa actually points somewhere else now, and there is nothing to do.
	 * XXX A future optimisation would be to remove the rmap entry here.
	 */
	if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa)) {
		__radix_pte_update(ptep, clr, set);
		kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);
	}
}

/*
 * For a given list of rmap entries, update the rc bits in all ptes in shadow
 * page tables for nested guests which are referenced by the rmap list.
 */
void kvmhv_update_nest_rmap_rc_list(struct kvm *kvm, unsigned long *rmapp,
				    unsigned long clr, unsigned long set,
				    unsigned long hpa, unsigned long nbytes)
{
	struct llist_node *entry = ((struct llist_head *) rmapp)->first;
	struct rmap_nested *cursor;
	unsigned long rmap, mask;

	if ((clr | set) & ~(_PAGE_DIRTY | _PAGE_ACCESSED))
		return;

	mask = PTE_RPN_MASK & ~(nbytes - 1);
	hpa &= mask;

	for_each_nest_rmap_safe(cursor, entry, &rmap)
		kvmhv_update_nest_rmap_rc(kvm, rmap, clr, set, hpa, mask);
}

static void kvmhv_remove_nest_rmap(struct kvm *kvm, u64 n_rmap,
				   unsigned long hpa, unsigned long mask)
{
	struct kvm_nested_guest *gp;
	unsigned long gpa;
	unsigned int shift, lpid;
	pte_t *ptep;

	gpa = n_rmap & RMAP_NESTED_GPA_MASK;
	lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
	gp = __find_nested(kvm, lpid);
	if (!gp)
		return;

	/* Find and invalidate the pte */
	ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
	/* Don't spuriously invalidate ptes if the pfn has changed */
	if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa))
		kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
}

static void kvmhv_remove_nest_rmap_list(struct kvm *kvm, unsigned long *rmapp,
					unsigned long hpa, unsigned long mask)
{
	struct llist_node *entry = llist_del_all((struct llist_head *) rmapp);
	struct rmap_nested *cursor;
	unsigned long rmap;

	for_each_nest_rmap_safe(cursor, entry, &rmap) {
		kvmhv_remove_nest_rmap(kvm, rmap, hpa, mask);
		kfree(cursor);
	}
}

/* called with kvm->mmu_lock held */
void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
				  const struct kvm_memory_slot *memslot,
				  unsigned long gpa, unsigned long hpa,
				  unsigned long nbytes)
{
	unsigned long gfn, end_gfn;
	unsigned long addr_mask;

	if (!memslot)
		return;
	gfn = (gpa >> PAGE_SHIFT) - memslot->base_gfn;
	end_gfn = gfn + (nbytes >> PAGE_SHIFT);

	addr_mask = PTE_RPN_MASK & ~(nbytes - 1);
	hpa &= addr_mask;

	for (; gfn < end_gfn; gfn++) {
		unsigned long *rmap = &memslot->arch.rmap[gfn];
		kvmhv_remove_nest_rmap_list(kvm, rmap, hpa, addr_mask);
	}
}

static void kvmhv_free_memslot_nest_rmap(struct kvm_memory_slot *free)
{
	unsigned long page;

	for (page = 0; page < free->npages; page++) {
		unsigned long rmap, *rmapp = &free->arch.rmap[page];
		struct rmap_nested *cursor;
		struct llist_node *entry;

		entry = llist_del_all((struct llist_head *) rmapp);
		for_each_nest_rmap_safe(cursor, entry, &rmap)
			kfree(cursor);
	}
}

static bool kvmhv_invalidate_shadow_pte(struct kvm_vcpu *vcpu,
					struct kvm_nested_guest *gp,
					long gpa, int *shift_ret)
{
	struct kvm *kvm = vcpu->kvm;
	bool ret = false;
	pte_t *ptep;
	int shift;

	spin_lock(&kvm->mmu_lock);
	ptep = find_kvm_nested_guest_pte(kvm, gp->l1_lpid, gpa, &shift);
	if (!shift)
		shift = PAGE_SHIFT;
	if (ptep && pte_present(*ptep)) {
		kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
		ret = true;
	}
	spin_unlock(&kvm->mmu_lock);

	if (shift_ret)
		*shift_ret = shift;
	return ret;
}

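/*
 * Field accessors for the tlbie instruction image and the RS/RB operand
 * values passed to the H_TLB_INVALIDATE hcall: RIC is instruction bits
 * 18-19, PRS bit 17 and R bit 16; RS carries the LPID in its low 32
 * bits; RB carries IS in bits 10-11, AP in bits 5-7 and the EPN from
 * bit 12 up.
 */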
static inline int get_ric(unsigned int instr)
{
	return (instr >> 18) & 0x3;
}

static inline int get_prs(unsigned int instr)
{
	return (instr >> 17) & 0x1;
}

static inline int get_r(unsigned int instr)
{
	return (instr >> 16) & 0x1;
}

static inline int get_lpid(unsigned long r_val)
{
	return r_val & 0xffffffff;
}

static inline int get_is(unsigned long r_val)
{
	return (r_val >> 10) & 0x3;
}

static inline int get_ap(unsigned long r_val)
{
	return (r_val >> 5) & 0x7;
}

static inline long get_epn(unsigned long r_val)
{
	return r_val >> 12;
}

static int kvmhv_emulate_tlbie_tlb_addr(struct kvm_vcpu *vcpu, int lpid,
					int ap, long epn)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_nested_guest *gp;
	long npages;
	int shift, shadow_shift;
	unsigned long addr;

	shift = ap_to_shift(ap);
	addr = epn << 12;
	if (shift < 0)
		/* Invalid ap encoding */
		return -EINVAL;

	addr &= ~((1UL << shift) - 1);
	npages = 1UL << (shift - PAGE_SHIFT);

	gp = kvmhv_get_nested(kvm, lpid, false);
	if (!gp) /* No such guest -> nothing to do */
		return 0;
	mutex_lock(&gp->tlb_lock);

	/* There may be more than one host page backing this single guest pte */
	do {
		kvmhv_invalidate_shadow_pte(vcpu, gp, addr, &shadow_shift);

		npages -= 1UL << (shadow_shift - PAGE_SHIFT);
		addr += 1UL << shadow_shift;
	} while (npages > 0);

	mutex_unlock(&gp->tlb_lock);
	kvmhv_put_nested(gp);
	return 0;
}

static void kvmhv_emulate_tlbie_lpid(struct kvm_vcpu *vcpu,
				     struct kvm_nested_guest *gp, int ric)
{
	struct kvm *kvm = vcpu->kvm;

	mutex_lock(&gp->tlb_lock);
	switch (ric) {
	case 0:
		/* Invalidate TLB */
		spin_lock(&kvm->mmu_lock);
		kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
					  gp->shadow_lpid);
		kvmhv_flush_lpid(gp->shadow_lpid);
		spin_unlock(&kvm->mmu_lock);
		break;
	case 1:
		/*
		 * Invalidate PWC
		 * We don't cache this -> nothing to do
		 */
		break;
	case 2:
		/* Invalidate TLB, PWC and caching of partition table entries */
		kvmhv_flush_nested(gp);
		break;
	default:
		break;
	}
	mutex_unlock(&gp->tlb_lock);
}

static void kvmhv_emulate_tlbie_all_lpid(struct kvm_vcpu *vcpu, int ric)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_nested_guest *gp;
	int lpid;

	spin_lock(&kvm->mmu_lock);
	idr_for_each_entry(&kvm->arch.kvm_nested_guest_idr, gp, lpid) {
		spin_unlock(&kvm->mmu_lock);
		kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
		spin_lock(&kvm->mmu_lock);
	}
	spin_unlock(&kvm->mmu_lock);
}

static int kvmhv_emulate_priv_tlbie(struct kvm_vcpu *vcpu, unsigned int instr,
				    unsigned long rsval, unsigned long rbval)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_nested_guest *gp;
	int r, ric, prs, is, ap;
	int lpid;
	long epn;
	int ret = 0;

	ric = get_ric(instr);
	prs = get_prs(instr);
	r = get_r(instr);
	lpid = get_lpid(rsval);
	is = get_is(rbval);

	/*
	 * These cases are invalid and are not handled:
	 * r != 1 -> Only radix supported
	 * prs == 1 -> Not HV privileged
	 * ric == 3 -> No cluster bombs for radix
	 * is == 1 -> Partition scoped translations not associated with pid
	 * (!is) && (ric == 1 || ric == 2) -> Not supported by ISA
	 */
	if ((!r) || (prs) || (ric == 3) || (is == 1) ||
	    ((!is) && (ric == 1 || ric == 2)))
		return -EINVAL;

	switch (is) {
	case 0:
		/*
		 * We know ric == 0
		 * Invalidate TLB for a given target address
		 */
		epn = get_epn(rbval);
		ap = get_ap(rbval);
		ret = kvmhv_emulate_tlbie_tlb_addr(vcpu, lpid, ap, epn);
		break;
	case 2:
		/* Invalidate matching LPID */
		gp = kvmhv_get_nested(kvm, lpid, false);
		if (gp) {
			kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
			kvmhv_put_nested(gp);
		}
		break;
	case 3:
		/* Invalidate ALL LPIDs */
		kvmhv_emulate_tlbie_all_lpid(vcpu, ric);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/*
 * This handles the H_TLB_INVALIDATE hcall.
 * Parameters are (r4) tlbie instruction code, (r5) rS contents,
 * (r6) rB contents.
 */
long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu)
{
	int ret;

	ret = kvmhv_emulate_priv_tlbie(vcpu, kvmppc_get_gpr(vcpu, 4),
				       kvmppc_get_gpr(vcpu, 5),
				       kvmppc_get_gpr(vcpu, 6));
	if (ret)
		return H_PARAMETER;
	return H_SUCCESS;
}

static long do_tlb_invalidate_nested_all(struct kvm_vcpu *vcpu,
					 unsigned long lpid, unsigned long ric)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_nested_guest *gp;

	gp = kvmhv_get_nested(kvm, lpid, false);
	if (gp) {
		kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
		kvmhv_put_nested(gp);
	}
	return H_SUCCESS;
}

/*
 * Number of pages above which we invalidate the entire LPID rather than
 * flush individual pages.
 */
static unsigned long tlb_range_flush_page_ceiling __read_mostly = 33;

static long do_tlb_invalidate_nested_tlb(struct kvm_vcpu *vcpu,
					 unsigned long lpid,
					 unsigned long pg_sizes,
					 unsigned long start,
					 unsigned long end)
{
	int ret = H_P4;
	unsigned long addr, nr_pages;
	struct mmu_psize_def *def;
	unsigned long psize, ap, page_size;
	bool flush_lpid;

	for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
		def = &mmu_psize_defs[psize];
		if (!(pg_sizes & def->h_rpt_pgsize))
			continue;

		nr_pages = (end - start) >> def->shift;
		flush_lpid = nr_pages > tlb_range_flush_page_ceiling;
		if (flush_lpid)
			return do_tlb_invalidate_nested_all(vcpu, lpid,
							    RIC_FLUSH_TLB);
		addr = start;
		ap = mmu_get_ap(psize);
		page_size = 1UL << def->shift;
		do {
			ret = kvmhv_emulate_tlbie_tlb_addr(vcpu, lpid, ap,
							   get_epn(addr));
			if (ret)
				return H_P4;
			addr += page_size;
		} while (addr < end);
	}
	return ret;
}

/*
 * Performs partition-scoped invalidations for nested guests
 * as part of the H_RPT_INVALIDATE hcall.
 */
long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
			     unsigned long type, unsigned long pg_sizes,
			     unsigned long start, unsigned long end)
{
	/*
	 * If the L2 lpid isn't valid, we need to return H_PARAMETER.
	 *
	 * However, nested KVM issues an L2 lpid flush call when creating
	 * partition table entries for L2, even before the corresponding
	 * shadow lpid has been created in the HV (which happens in the
	 * H_ENTER_NESTED call). Since we can't differentiate this case
	 * from the invalid case, we ignore such flush requests and return
	 * success.
	 */
	if (!__find_nested(vcpu->kvm, lpid))
		return H_SUCCESS;

	/*
	 * A flush all request can be handled by a full lpid flush only.
	 */
	if ((type & H_RPTI_TYPE_NESTED_ALL) == H_RPTI_TYPE_NESTED_ALL)
		return do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_ALL);

	/*
	 * We don't need to handle a PWC flush like the process table here,
	 * because the intermediate partition-scoped table in the nested
	 * guest doesn't really have a PWC. The only level where we have a
	 * PWC is in L0, and for a nested invalidate at L0 we always do
	 * kvmhv_flush_lpid(), which does radix__flush_all_lpid(). A range
	 * invalidate at any level does not remove the higher-level page
	 * tables, hence no PWC invalidate is needed.
	 *
	 * if (type & H_RPTI_TYPE_PWC) {
	 *	ret = do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_PWC);
	 *	if (ret)
	 *		return H_P4;
	 * }
	 */

	if (start == 0 && end == -1)
		return do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_TLB);

	if (type & H_RPTI_TYPE_TLB)
		return do_tlb_invalidate_nested_tlb(vcpu, lpid, pg_sizes,
						    start, end);
	return H_SUCCESS;
}

/* Used to convert a nested guest real address to an L1 guest real address */
static int kvmhv_translate_addr_nested(struct kvm_vcpu *vcpu,
				       struct kvm_nested_guest *gp,
				       unsigned long n_gpa, unsigned long dsisr,
				       struct kvmppc_pte *gpte_p)
{
	u64 fault_addr, flags = dsisr & DSISR_ISSTORE;
	int ret;

	ret = kvmppc_mmu_walk_radix_tree(vcpu, n_gpa, gpte_p, gp->l1_gr_to_hr,
					 &fault_addr);

	if (ret) {
		/* We didn't find a pte */
		if (ret == -EINVAL) {
			/* Unsupported mmu config */
			flags |= DSISR_UNSUPP_MMU;
		} else if (ret == -ENOENT) {
			/* No translation found */
			flags |= DSISR_NOHPTE;
		} else if (ret == -EFAULT) {
			/* Couldn't access L1 real address */
			flags |= DSISR_PRTABLE_FAULT;
			vcpu->arch.fault_gpa = fault_addr;
		} else {
			/* Unknown error */
			return ret;
		}
		goto forward_to_l1;
	} else {
		/* We found a pte -> check permissions */
		if (dsisr & DSISR_ISSTORE) {
			/* Can we write? */
			if (!gpte_p->may_write) {
				flags |= DSISR_PROTFAULT;
				goto forward_to_l1;
			}
		} else if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) {
			/* Can we execute? */
			if (!gpte_p->may_execute) {
				flags |= SRR1_ISI_N_G_OR_CIP;
				goto forward_to_l1;
			}
		} else {
			/* Can we read? */
			if (!gpte_p->may_read && !gpte_p->may_write) {
				flags |= DSISR_PROTFAULT;
				goto forward_to_l1;
			}
		}
	}

	return 0;

forward_to_l1:
	vcpu->arch.fault_dsisr = flags;
	if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) {
		vcpu->arch.shregs.msr &= SRR1_MSR_BITS;
		vcpu->arch.shregs.msr |= flags;
	}
	return RESUME_HOST;
}

static long kvmhv_handle_nested_set_rc(struct kvm_vcpu *vcpu,
				       struct kvm_nested_guest *gp,
				       unsigned long n_gpa,
				       struct kvmppc_pte gpte,
				       unsigned long dsisr)
{
	struct kvm *kvm = vcpu->kvm;
	bool writing = !!(dsisr & DSISR_ISSTORE);
	u64 pgflags;
	long ret;

	/* Are the rc bits set in the L1 partition scoped pte? */
	pgflags = _PAGE_ACCESSED;
	if (writing)
		pgflags |= _PAGE_DIRTY;
	if (pgflags & ~gpte.rc)
		return RESUME_HOST;

	spin_lock(&kvm->mmu_lock);
	/* Set the rc bit in the pte of our (L0) pgtable for the L1 guest */
	ret = kvmppc_hv_handle_set_rc(kvm, false, writing,
				      gpte.raddr, kvm->arch.lpid);
	if (!ret) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Set the rc bit in the pte of the shadow_pgtable for the nested guest */
	ret = kvmppc_hv_handle_set_rc(kvm, true, writing,
				      n_gpa, gp->l1_lpid);
	if (!ret)
		ret = -EINVAL;
	else
		ret = 0;

out_unlock:
	spin_unlock(&kvm->mmu_lock);
	return ret;
}

static inline int kvmppc_radix_level_to_shift(int level)
{
	switch (level) {
	case 2:
		return PUD_SHIFT;
	case 1:
		return PMD_SHIFT;
	default:
		return PAGE_SHIFT;
	}
}

static inline int kvmppc_radix_shift_to_level(int shift)
{
	if (shift == PUD_SHIFT)
		return 2;
	if (shift == PMD_SHIFT)
		return 1;
	if (shift == PAGE_SHIFT)
		return 0;
	WARN_ON_ONCE(1);
	return 0;
}

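/*
 * Handle a page fault taken while running a nested guest: translate
 * the faulting nested guest real address via the L1 partition-scoped
 * tree, find or instantiate the corresponding host mapping, combine
 * the L1 and host permissions, and insert the resulting pte into the
 * shadow page table for the nested guest.
 */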
/* called with gp->tlb_lock held */
static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
					  struct kvm_nested_guest *gp)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_memory_slot *memslot;
	struct rmap_nested *n_rmap;
	struct kvmppc_pte gpte;
	pte_t pte, *pte_p;
	unsigned long mmu_seq;
	unsigned long dsisr = vcpu->arch.fault_dsisr;
	unsigned long ea = vcpu->arch.fault_dar;
	unsigned long *rmapp;
	unsigned long n_gpa, gpa, gfn, perm = 0UL;
	unsigned int shift, l1_shift, level;
	bool writing = !!(dsisr & DSISR_ISSTORE);
	bool kvm_ro = false;
	long int ret;

	if (!gp->l1_gr_to_hr) {
		kvmhv_update_ptbl_cache(gp);
		if (!gp->l1_gr_to_hr)
			return RESUME_HOST;
	}

	/* Convert the nested guest real address into an L1 guest real address */

	n_gpa = vcpu->arch.fault_gpa & ~0xF000000000000FFFULL;
	if (!(dsisr & DSISR_PRTABLE_FAULT))
		n_gpa |= ea & 0xFFF;
	ret = kvmhv_translate_addr_nested(vcpu, gp, n_gpa, dsisr, &gpte);

	/*
	 * If the hardware found a translation but we don't now have a usable
	 * translation in the l1 partition-scoped tree, remove the shadow pte
	 * and let the guest retry.
	 */
	if (ret == RESUME_HOST &&
	    (dsisr & (DSISR_PROTFAULT | DSISR_BADACCESS | DSISR_NOEXEC_OR_G |
		      DSISR_BAD_COPYPASTE)))
		goto inval;
	if (ret)
		return ret;

	/* Failed to set the reference/change bits */
	if (dsisr & DSISR_SET_RC) {
		ret = kvmhv_handle_nested_set_rc(vcpu, gp, n_gpa, gpte, dsisr);
		if (ret == RESUME_HOST)
			return ret;
		if (ret)
			goto inval;
		dsisr &= ~DSISR_SET_RC;
		if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE |
			       DSISR_PROTFAULT)))
			return RESUME_GUEST;
	}

	/*
	 * We took an HISI or HDSI while we were running a nested guest,
	 * which means we have no partition scoped translation for that
	 * address, so we need to insert a pte for the mapping into our
	 * shadow_pgtable.
	 */

	l1_shift = gpte.page_shift;
	if (l1_shift < PAGE_SHIFT) {
		/* We don't support l1 using a page size smaller than our own */
		pr_err("KVM: L1 guest page shift (%d) less than our own (%d)\n",
		       l1_shift, PAGE_SHIFT);
		return -EINVAL;
	}
	gpa = gpte.raddr;
	gfn = gpa >> PAGE_SHIFT;

	/* 1. Get the corresponding host memslot */

	memslot = gfn_to_memslot(kvm, gfn);
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
		if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS)) {
			/* unusual error -> reflect to the guest as a DSI */
			kvmppc_core_queue_data_storage(vcpu,
					kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
					ea, dsisr);
			return RESUME_GUEST;
		}

		/* passthrough of emulated MMIO case */
		return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing);
	}
	if (memslot->flags & KVM_MEM_READONLY) {
		if (writing) {
			/* Give the guest a DSI */
			kvmppc_core_queue_data_storage(vcpu,
					kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
					ea, DSISR_ISSTORE | DSISR_PROTFAULT);
			return RESUME_GUEST;
		}
		kvm_ro = true;
	}

	/* 2. Find the host pte for this L1 guest real address */

	/* Used to check for invalidations in progress */
	mmu_seq = kvm->mmu_invalidate_seq;
	smp_rmb();

	/* See if we can find a translation in our partition scoped tables for L1 */
	pte = __pte(0);
	spin_lock(&kvm->mmu_lock);
	pte_p = find_kvm_secondary_pte(kvm, gpa, &shift);
	if (!shift)
		shift = PAGE_SHIFT;
	if (pte_p)
		pte = *pte_p;
	spin_unlock(&kvm->mmu_lock);

	if (!pte_present(pte) || (writing && !(pte_val(pte) & _PAGE_WRITE))) {
		/* No suitable pte found -> try to insert a mapping */
		ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot,
						     writing, kvm_ro,
						     &pte, &level);
		if (ret == -EAGAIN)
			return RESUME_GUEST;
		else if (ret)
			return ret;
		shift = kvmppc_radix_level_to_shift(level);
	}
	/* Align gfn to the start of the page */
	gfn = (gpa & ~((1UL << shift) - 1)) >> PAGE_SHIFT;

	/* 3. Compute the pte we need to insert for nest_gpa -> host r_addr */

	/* The permissions are the combination of the host and l1 guest ptes */
	perm |= gpte.may_read ? 0UL : _PAGE_READ;
	perm |= gpte.may_write ? 0UL : _PAGE_WRITE;
	perm |= gpte.may_execute ? 0UL : _PAGE_EXEC;
	/* Only set accessed/dirty (rc) bits if set in host and l1 guest ptes */
	perm |= (gpte.rc & _PAGE_ACCESSED) ? 0UL : _PAGE_ACCESSED;
	perm |= ((gpte.rc & _PAGE_DIRTY) && writing) ? 0UL : _PAGE_DIRTY;
	pte = __pte(pte_val(pte) & ~perm);

	/* What size pte can we insert? */
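	/*
	 * The shadow pte must not map more than the L1 pte does: if the
	 * host mapping is larger than the L1 page, drop to a 2M page
	 * when that is still smaller than the L1 page size, otherwise
	 * to 4k, and fold the now-significant offset bits of gpa into
	 * the pte.
	 */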
	if (shift > l1_shift) {
		u64 mask;
		unsigned int actual_shift = PAGE_SHIFT;
		if (PMD_SHIFT < l1_shift)
			actual_shift = PMD_SHIFT;
		mask = (1UL << shift) - (1UL << actual_shift);
		pte = __pte(pte_val(pte) | (gpa & mask));
		shift = actual_shift;
	}
	level = kvmppc_radix_shift_to_level(shift);
	n_gpa &= ~((1UL << shift) - 1);

	/* 4. Insert the pte into our shadow_pgtable */

	n_rmap = kzalloc(sizeof(*n_rmap), GFP_KERNEL);
	if (!n_rmap)
		return RESUME_GUEST; /* Let the guest try again */
	n_rmap->rmap = (n_gpa & RMAP_NESTED_GPA_MASK) |
		(((unsigned long) gp->l1_lpid) << RMAP_NESTED_LPID_SHIFT);
	rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
	ret = kvmppc_create_pte(kvm, gp->shadow_pgtable, pte, n_gpa, level,
				mmu_seq, gp->shadow_lpid, rmapp, &n_rmap);
	kfree(n_rmap);
	if (ret == -EAGAIN)
		ret = RESUME_GUEST; /* Let the guest try again */

	return ret;

inval:
	kvmhv_invalidate_shadow_pte(vcpu, gp, n_gpa, NULL);
	return RESUME_GUEST;
}

long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu)
{
	struct kvm_nested_guest *gp = vcpu->arch.nested;
	long int ret;

	mutex_lock(&gp->tlb_lock);
	ret = __kvmhv_nested_page_fault(vcpu, gp);
	mutex_unlock(&gp->tlb_lock);
	return ret;
}

int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid)
{
	int ret = lpid + 1;

	spin_lock(&kvm->mmu_lock);
	if (!idr_get_next(&kvm->arch.kvm_nested_guest_idr, &ret))
		ret = -1;
	spin_unlock(&kvm->mmu_lock);

	return ret;
}