/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/mmu-hash64.h>
#include <asm/tm.h>

#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
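/* Byte offset of checkpointed (TM) GPR number "reg" within the vcpu
 * struct: the VCPU_GPR_TM array base plus reg * ULONG_SIZE */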

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)
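	/*
	 * Switch to real mode: build an MSR value with IR/DR clear for
	 * SRR1 and rfid to kvmppc_call_hv_entry below.  RI is cleared
	 * first because SRR0/SRR1 are live across the mtsrr0/mtsrr1/RFI
	 * window, so an interrupt taken there would not be recoverable.
	 */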
	mfmsr	r10
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI

kvmppc_call_hv_entry:
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from guest - restore host state and return to caller */

BEGIN_FTR_SECTION
	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	cmpwi	r4, 0
	beq	23f			/* skip if not */
BEGIN_FTR_SECTION
	ld	r3, HSTATE_MMCR(r13)
	andi.	r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r4, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, HSTATE_PMC(r13)
	lwz	r4, HSTATE_PMC + 4(r13)
	lwz	r5, HSTATE_PMC + 8(r13)
	lwz	r6, HSTATE_PMC + 12(r13)
	lwz	r8, HSTATE_PMC + 16(r13)
	lwz	r9, HSTATE_PMC + 20(r13)
BEGIN_FTR_SECTION
	lwz	r10, HSTATE_PMC + 24(r13)
	lwz	r11, HSTATE_PMC + 28(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
BEGIN_FTR_SECTION
	mtspr	SPRN_PMC7, r10
	mtspr	SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, HSTATE_MMCR(r13)
	ld	r4, HSTATE_MMCR + 8(r13)
	ld	r5, HSTATE_MMCR + 16(r13)
	ld	r6, HSTATE_MMCR + 24(r13)
	ld	r7, HSTATE_MMCR + 32(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_SIAR, r6
	mtspr	SPRN_SDAR, r7
BEGIN_FTR_SECTION
	ld	r8, HSTATE_MMCR + 40(r13)
	ld	r9, HSTATE_MMCR + 48(r13)
	mtspr	SPRN_MMCR2, r8
	mtspr	SPRN_SIER, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync
23:

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to absolute address 0x500 for
	 * external interrupts, or the machine_check_fwnmi label
	 * for machine checks (since firmware might have patched
	 * the vector area at 0x200).  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_hv_interrupts.S code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	ld	r7, HSTATE_HOST_MSR(r13)

	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
BEGIN_FTR_SECTION
	beq	11f
	cmpwi	cr2, r12, BOOK3S_INTERRUPT_HMI
	beq	cr2, 14f			/* HMI check */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* RFI into the highmem handler, or branch to interrupt handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	beqa	0x500			/* external interrupt (PPC970) */
	beq	cr1, 13f		/* machine check */
	RFI

	/* On POWER7, we have external interrupts set to use HSRR0/1 */
11:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0x500

13:	b	machine_check_fwnmi

14:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	b	hmi_exception_after_realmode

kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
1:	lwarx	r3, 0, r6
	or	r3, r3, r0
	stwcx.	r3, 0, r6
	bne	1b
	/* order napping_threads update vs testing entry_exit_count */
	isync
	li	r12, 0
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	cmpwi	r7, 0x100
	bge	kvm_novcpu_exit	/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)
	li	r3, 1
	stb	r3, HSTATE_HWTHREAD_REQ(r13)

	b	kvm_do_nap

kvm_novcpu_wakeup:
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	kvm_novcpu_exit

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
4:	lwarx	r7, 0, r6
	andc	r7, r7, r0
	stwcx.	r7, 0, r6
	bne	4b

	/* See if the wake reason means we need to exit */
	cmpdi	r3, 0
	bge	kvm_novcpu_exit

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	bne	kvmppc_got_guest

kvm_novcpu_exit:
	b	hdec_soon

/*
 * We come in here when wakened from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest
kvm_start_guest:

	/* Set runlatch bit the minute you wake up from nap */
	mfspr	r1, SPRN_CTRLF
	ori	r1, r1, 1
	mtspr	SPRN_CTRLT, r1

	ld	r2,PACATOC(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,NAPPING_CEDE
	beq	kvm_end_cede
	cmpwi	r0,NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup

	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
	cmpdi	r3, 0
	bge	kvm_no_guest

	/* get vcpu pointer, NULL if we have no vcpu to run */
	ld	r4,HSTATE_KVM_VCPU(r13)
	cmpdi	r4,0
	/* if we have no vcpu to run, go back to sleep */
	beq	kvm_no_guest

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR(r13)
	std	r6, HSTATE_DSCR(r13)

	bl	kvmppc_hv_entry

	/* Back from the guest, go back to nap */
	/* Clear our vcpu pointer so we don't come back in early */
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing
	 * the nap_count, because once the increment to nap_count is
	 * visible we could be given another vcpu.
	 */
	lwsync

	/* increment the nap count and then go to nap mode */
	ld	r4, HSTATE_KVM_VCORE(r13)
	addi	r4, r4, VCORE_NAP_COUNT
51:	lwarx	r3, 0, r4
	addi	r3, r3, 1
	stwcx.	r3, 0, r4
	bne	51b

kvm_no_guest:
	li	r0, KVM_HWTHREAD_IN_NAP
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
kvm_do_nap:
	/* Clear the runlatch bit before napping */
	mfspr	r2, SPRN_CTRLF
	clrrdi	r2, r2, 1
	mtspr	SPRN_CTRLT, r2

	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
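	/* The rlwimi sets PECE0 and clears PECE1, so only external
	 * interrupts (e.g. IPIs), not the decrementer, can wake us */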
	mtspr	SPRN_LPCR, r4
	isync
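	/*
	 * The store/ptesync/load/compare sequence below is the kernel's
	 * standard nap-entry idiom: it makes sure our stores have reached
	 * memory before the thread stops.  The cmpd of r0 with itself
	 * always compares equal, so the bne never loops.
	 */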
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer (or NULL)
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = TOC
	 * all other volatile GPRS = free
	 */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

	/* Clear out SLB */
	li	r6,0
	slbmte	r6,r6
	slbia
	ptesync

BEGIN_FTR_SECTION
	b	30f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	/*
	 * POWER7 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Increment entry count iff exit count is zero. */
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r9,r5,VCORE_ENTRY_EXIT
21:	lwarx	r3,0,r9
	cmpwi	r3,0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	addi	r3,r3,1
	stwcx.	r3,0,r9
	bne	21b

	/* Primary thread switches to guest partition. */
	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r6,HSTATE_PTID(r13)
	cmpwi	r6,0
	bne	20f
	ld	r6,KVM_SDR1(r9)
	lwz	r7,KVM_LPID(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync

	/* See if we need to flush the TLB */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	add	r6,r6,r9
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
	li	r0,1
	sld	r0,r0,r7
	ld	r7,0(r6)
	and.	r7,r7,r0
	beq	22f
23:	ldarx	r7,0,r6			/* if set, clear the bit */
	andc	r7,r7,r0
	stdcx.	r7,0,r6
	bne	23b
	/* Flush the TLB of any entries for this LPID */
	/* use arch 2.07S as a proxy for POWER8 */
BEGIN_FTR_SECTION
	li	r6,512			/* POWER8 has 512 sets */
FTR_SECTION_ELSE
	li	r6,128			/* POWER7 has 128 sets */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	mtctr	r6
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
28:	tlbiel	r7
	addi	r7,r7,0x1000
	bdnz	28b
	ptesync

	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	37f
	mftb	r6		/* current host timebase */
	add	r8,r8,r6
	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
	mftb	r7		/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	37f
	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)
	cmpdi	r7, 0
	beq	38f
	mtspr	SPRN_PCR, r7
38:

BEGIN_FTR_SECTION
	/* DPDES is shared between threads */
	ld	r8, VCORE_DPDES(r5)
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */
	b	10f

	/* Secondary threads wait for primary to have done partition switch */
20:	lbz	r0,VCORE_IN_GUEST(r5)
	cmpwi	r0,0
	beq	20b

	/* Set LPCR and RMOR. */
10:	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	ld	r8,KVM_RMOR(r9)
	mtspr	SPRN_RMOR,r8
	isync

	/* Check if HDEC expires soon */
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,512		/* 1 microsecond */
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	blt	hdec_soon
	b	31f

	/*
	 * PPC970 host -> guest partition switch code.
	 * We have to lock against concurrent tlbies,
	 * using native_tlbie_lock to lock against host tlbies
	 * and kvm->arch.tlbie_lock to lock against guest tlbies.
	 * We also have to invalidate the TLB since its
	 * entries aren't tagged with the LPID.
	 */
30:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */

	/* first take native_tlbie_lock */
	.section ".toc","aw"
toc_tlbie_lock:
	.tc	native_tlbie_lock[TC],native_tlbie_lock
	.previous
	ld	r3,toc_tlbie_lock@toc(r2)
#ifdef __BIG_ENDIAN__
	lwz	r8,PACA_LOCK_TOKEN(r13)
#else
	lwz	r8,PACAPACAINDEX(r13)
#endif
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r7,VCORE_LPCR(r5)	/* use vcore->lpcr to store HID4 */
	li	r0,0x18f
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	or	r0,r7,r0
	ptesync
	sync
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop native_tlbie_lock */

	/* invalidate the whole TLB */
	li	r0,256
	mtctr	r0
	li	r6,0
25:	tlbiel	r6
	addi	r6,r6,0x1000
	bdnz	25b
	ptesync

	/* Take the guest's tlbie_lock */
	addi	r3,r9,KVM_TLBIE_LOCK
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync
	ld	r6,KVM_SDR1(r9)
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */

	/* Set up HID4 with the guest's LPID etc. */
	sync
	mtspr	SPRN_HID4,r7
	isync

	/* drop the guest's tlbie_lock */
	li	r0,0
	stw	r0,0(r3)

	/* Check if HDEC expires soon */
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,10
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	blt	hdec_soon

	/* Enable HDEC interrupts */
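	/*
	 * HID0 updates on PPC970 follow the documented access sequence:
	 * sync before the mtspr, then read HID0 back several times so
	 * the new value is guaranteed to have taken effect.  The exit
	 * path does the same dance when clearing HDICE.
	 */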
	mfspr	r0,SPRN_HID0
	li	r3,1
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	sync
	mtspr	SPRN_HID0,r0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
31:
	/* Do we have a guest vcpu to run? */
	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest
kvmppc_got_guest:

	/* Load up guest SLB entries */
	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:
	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	li	r6, LPPACA_YIELDCOUNT
	LWZX_BE	r5, r3, r6
	addi	r5, r5, 1
	STWX_BE	r5, r3, r6
	li	r6, 1
	stb	r6, VCPU_VPA_DIRTY(r4)
25:

BEGIN_FTR_SECTION
	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

BEGIN_FTR_SECTION
	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6
 BEGIN_FTR_SECTION_NESTED(89)
	isync
 END_FTR_SECTION_NESTED(CPU_FTR_ARCH_206, CPU_FTR_ARCH_206, 89)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	b	skip_tm
END_FTR_SECTION_IFCLR(CPU_FTR_TM)

	/* Turn on TM/FP/VSX/VMX so we can restore them. */
	mfmsr	r5
	li	r6, MSR_TM >> 32
	sldi	r6, r6, 32
	or	r5, r5, r6
	ori	r5, r5, MSR_FP
	oris	r5, r5, (MSR_VEC | MSR_VSX)@h
	mtmsrd	r5

	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld	r5, VCPU_TFHAR(r4)
	ld	r6, VCPU_TFIAR(r4)
	ld	r7, VCPU_TEXASR(r4)
	mtspr	SPRN_TFHAR, r5
	mtspr	SPRN_TFIAR, r6
	mtspr	SPRN_TEXASR, r7

	ld	r5, VCPU_MSR(r4)
	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
	beq	skip_tm	/* TM not active in guest */

	/* Make sure the failure summary is set, otherwise we'll program check
	 * when we trechkpt.  It's possible that this might not have been set
	 * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
	 * host.
	 */
	oris	r7, r7, (TEXASR_FS)@h
	mtspr	SPRN_TEXASR, r7

	/*
	 * We need to load up the checkpointed state for the guest.
	 * We need to do this early as it will blow away any GPRs, VSRs and
	 * some SPRs.
	 */

	mr	r31, r4
	addi	r3, r31, VCPU_FPRS_TM
	bl	load_fp_state
	addi	r3, r31, VCPU_VRS_TM
	bl	load_vr_state
	mr	r4, r31
	lwz	r7, VCPU_VRSAVE_TM(r4)
	mtspr	SPRN_VRSAVE, r7

	ld	r5, VCPU_LR_TM(r4)
	lwz	r6, VCPU_CR_TM(r4)
	ld	r7, VCPU_CTR_TM(r4)
	ld	r8, VCPU_AMR_TM(r4)
	ld	r9, VCPU_TAR_TM(r4)
	mtlr	r5
	mtcr	r6
	mtctr	r7
	mtspr	SPRN_AMR, r8
	mtspr	SPRN_TAR, r9

	/*
	 * Load up PPR and DSCR values but don't put them in the actual SPRs
	 * till the last moment to avoid running with userspace PPR and DSCR for
	 * too long.
	 */
	ld	r29, VCPU_DSCR_TM(r4)
	ld	r30, VCPU_PPR_TM(r4)

	std	r2, PACATMSCRATCH(r13) /* Save TOC */

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

	/* Load GPRs r0-r28 */
	reg = 0
	.rept	29
	ld	reg, VCPU_GPRS_TM(reg)(r31)
	reg = reg + 1
	.endr

	mtspr	SPRN_DSCR, r29
	mtspr	SPRN_PPR, r30

	/* Load final GPRs */
	ld	29, VCPU_GPRS_TM(29)(r31)
	ld	30, VCPU_GPRS_TM(30)(r31)
	ld	31, VCPU_GPRS_TM(31)(r31)

	/* TM checkpointed state is now setup.  All GPRs are now volatile. */
	TRECHKPT

	/* Now let's get back the state we need. */
	HMT_MEDIUM
	GET_PACA(r13)
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATMSCRATCH(r13)

	/* Set the MSR RI since we have our registers back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1
skip_tm:
#endif

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
BEGIN_FTR_SECTION
	ld	r3, VCPU_MMCR(r4)
	andi.	r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r5, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
BEGIN_FTR_SECTION
	lwz	r10, VCPU_PMC + 24(r4)
	lwz	r11, VCPU_PMC + 28(r4)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
BEGIN_FTR_SECTION
	mtspr	SPRN_PMC7, r10
	mtspr	SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r7, VCPU_SIAR(r4)
	ld	r8, VCPU_SDAR(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_SIAR, r7
	mtspr	SPRN_SDAR, r8
BEGIN_FTR_SECTION
	ld	r5, VCPU_MMCR + 24(r4)
	ld	r6, VCPU_SIER(r4)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCR + 32(r4)
	mtspr	SPRN_MMCR2, r5
	mtspr	SPRN_SIER, r6
	mtspr	SPRN_SPMC1, r7
	mtspr	SPRN_SPMC2, r8
	mtspr	SPRN_MMCRS, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync

	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

BEGIN_FTR_SECTION
	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

BEGIN_FTR_SECTION
	/* Skip next section on POWER7 or PPC970 */
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
	mfmsr	r8
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r8

	/* Load up POWER8-specific registers */
	ld	r5, VCPU_IAMR(r4)
	lwz	r6, VCPU_PSPB(r4)
	ld	r7, VCPU_FSCR(r4)
	mtspr	SPRN_IAMR, r5
	mtspr	SPRN_PSPB, r6
	mtspr	SPRN_FSCR, r7
	ld	r5, VCPU_DAWR(r4)
	ld	r6, VCPU_DAWRX(r4)
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_TAR(r4)
	mtspr	SPRN_DAWR, r5
	mtspr	SPRN_DAWRX, r6
	mtspr	SPRN_CIABR, r7
	mtspr	SPRN_TAR, r8
	ld	r5, VCPU_IC(r4)
	ld	r6, VCPU_VTB(r4)
	mtspr	SPRN_IC, r5
	mtspr	SPRN_VTB, r6
	ld	r8, VCPU_EBBHR(r4)
	mtspr	SPRN_EBBHR, r8
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	ld	r7, VCPU_CSIGR(r4)
	ld	r8, VCPU_TACR(r4)
	mtspr	SPRN_EBBRR, r5
	mtspr	SPRN_BESCR, r6
	mtspr	SPRN_CSIGR, r7
	mtspr	SPRN_TACR, r8
	ld	r5, VCPU_TCSCR(r4)
	ld	r6, VCPU_ACOP(r4)
	lwz	r7, VCPU_GUEST_PID(r4)
	ld	r8, VCPU_WORT(r4)
	mtspr	SPRN_TCSCR, r5
	mtspr	SPRN_ACOP, r6
	mtspr	SPRN_PID, r7
	mtspr	SPRN_WORT, r8
8:

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET(r5)
	add	r8,r8,r6
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3
	stw	r3,VCPU_DEC(r4)

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

BEGIN_FTR_SECTION
	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	ld	r6, VCPU_CTR(r4)
	lwz	r7, VCPU_XER(r4)

	mtctr	r6
	mtxer	r7

kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

deliver_guest_interrupt:
	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME

	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
	rldicl	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
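	/* r0 = 1 if a level-triggered external interrupt is pending */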
	cmpdi	cr1, r0, 0
	andi.	r8, r11, MSR_EE
BEGIN_FTR_SECTION
	mfspr	r8, SPRN_LPCR
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
	mtspr	SPRN_LPCR, r8
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	beq	5f
	li	r0, BOOK3S_INTERRUPT_EXTERNAL
	bne	cr1, 12f
	mfspr	r0, SPRN_DEC
	cmpwi	r0, 0
	li	r0, BOOK3S_INTERRUPT_DECREMENTER
	bge	5f

12:	mtspr	SPRN_SRR0, r10
	mr	r10,r0
	mtspr	SPRN_SRR1, r11
	mr	r9, r4
	bl	kvmppc_msr_interrupt
5:

/*
 * Required state:
 * R4 = vcpu
 * R10: value for HSRR0
 * R11: value for HSRR1
 * R13 = PACA
 */
fast_guest_return:
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	/* Enter guest */

BEGIN_FTR_SECTION
	ld	r5, VCPU_CFAR(r4)
	mtspr	SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r0, VCPU_PPR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)

BEGIN_FTR_SECTION
	mtspr	SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)

	hrfid
	b	.

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * Register contents:
	 * R12		= interrupt vector
	 * R13		= PACA
	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	std	r9, HSTATE_SCRATCH2(r13)

	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_SCRATCH2(r13)
	beq	kvmppc_interrupt_pr
#endif
	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_SCRATCH2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	lwz	r4, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_GPR(R12)(r9)
	stw	r4, VCPU_CR(r9)
BEGIN_FTR_SECTION
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
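	/* interrupts delivered via HSRR0/1 arrive with bit 1 set in the
	 * vector number; clear the low bits so r12 holds the plain vector */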
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(R13)(r9)
	std	r4, VCPU_LR(r9)

	stw	r12,VCPU_TRAP(r9)

	/* Save HEIR (HV emulation assist reg) in last_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
BEGIN_FTR_SECTION
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
11:	stw	r3,VCPU_LAST_INST(r9)

	/* these are volatile across C function calls */
	mfctr	r3
	mfxer	r4
	std	r3, VCPU_CTR(r9)
	stw	r4, VCPU_XER(r9)

BEGIN_FTR_SECTION
	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	kvmppc_hdsi
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	beq	kvmppc_hisi
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,0
	bge	ignore_hdec
2:
	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Only handle external interrupts here on arch 206 and later */
BEGIN_FTR_SECTION
	b	ext_interrupt_to_host
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)

	/* External interrupt ? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+	ext_interrupt_to_host

	/* External interrupt, first check for host_ipi. If this is
	 * set, we know the host wants us out so let's do it now
	 */
	bl	kvmppc_read_intr
	cmpdi	r3, 0
	bgt	ext_interrupt_to_host

	/* Check if any CPU is heading out to the host, if so head out too */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	ext_interrupt_to_host

	/* Return to guest after delivering any pending interrupt */
	mr	r4, r9
	b	deliver_guest_interrupt

ext_interrupt_to_host:

guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
	/* Save more register state  */
	mfdar	r6
	mfdsisr	r7
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
BEGIN_FTR_SECTION
	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	6f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
mc_cont:

	/* Save guest CTRL register, set runlatch to 1 */
6:	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	ori	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
	li	r5,0
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
	stw	r5,VCPU_SLB_MAX(r9)

	/*
	 * Save the guest PURR/SPURR
	 */
BEGIN_FTR_SECTION
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	ld	r7,VCPU_PURR(r9)
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	subf	r5,r7,r5
	subf	r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	add	r3,r3,r5
	add	r4,r4,r6
	mtspr	SPRN_PURR,r3
	mtspr	SPRN_SPURR,r4
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)

	/* Save DEC */
	mfspr	r5,SPRN_DEC
	mftb	r6
	extsw	r5,r5
	add	r5,r5,r6
	/* r5 is a guest timebase value here, convert to host TB */
	ld	r3,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_TB_OFFSET(r3)
	subf	r5,r4,r5
	std	r5,VCPU_DEC_EXPIRES(r9)

BEGIN_FTR_SECTION
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Save POWER8-specific registers */
	mfspr	r5, SPRN_IAMR
	mfspr	r6, SPRN_PSPB
	mfspr	r7, SPRN_FSCR
	std	r5, VCPU_IAMR(r9)
	stw	r6, VCPU_PSPB(r9)
	std	r7, VCPU_FSCR(r9)
	mfspr	r5, SPRN_IC
	mfspr	r6, SPRN_VTB
	mfspr	r7, SPRN_TAR
	std	r5, VCPU_IC(r9)
	std	r6, VCPU_VTB(r9)
	std	r7, VCPU_TAR(r9)
	mfspr	r8, SPRN_EBBHR
	std	r8, VCPU_EBBHR(r9)
	mfspr	r5, SPRN_EBBRR
	mfspr	r6, SPRN_BESCR
	mfspr	r7, SPRN_CSIGR
	mfspr	r8, SPRN_TACR
	std	r5, VCPU_EBBRR(r9)
	std	r6, VCPU_BESCR(r9)
	std	r7, VCPU_CSIGR(r9)
	std	r8, VCPU_TACR(r9)
	mfspr	r5, SPRN_TCSCR
	mfspr	r6, SPRN_ACOP
	mfspr	r7, SPRN_PID
	mfspr	r8, SPRN_WORT
	std	r5, VCPU_TCSCR(r9)
	std	r6, VCPU_ACOP(r9)
	stw	r7, VCPU_GUEST_PID(r9)
	std	r8, VCPU_WORT(r9)
8:

	/* Save and reset AMR and UAMOR before turning on the MMU */
BEGIN_FTR_SECTION
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	li	r6,0
	mtspr	SPRN_AMR,r6
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Switch DSCR back to host value */
BEGIN_FTR_SECTION
	mfspr	r8, SPRN_DSCR
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
	mtspr	SPRN_DSCR, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	/* Save SPRGs */
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

	/* save FP state */
	mr	r3, r9
	bl	kvmppc_save_fp

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	b	2f
END_FTR_SECTION_IFCLR(CPU_FTR_TM)
	/* Turn on TM. */
	mfmsr	r8
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r8

	ld	r5, VCPU_MSR(r9)
	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
	beq	1f	/* TM not active in guest. */

	li	r3, TM_CAUSE_KVM_RESCHED

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

	/* All GPRs are volatile at this point. */
	TRECLAIM(R3)

	/* Temporarily store r13 and r9 so we have some regs to play with */
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r9, PACATMSCRATCH(r13)
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Get a few more GPRs free. */
	std	r29, VCPU_GPRS_TM(29)(r9)
	std	r30, VCPU_GPRS_TM(30)(r9)
	std	r31, VCPU_GPRS_TM(31)(r9)

	/* Save away PPR and DSCR soon so don't run with user values. */
	mfspr	r31, SPRN_PPR
	HMT_MEDIUM
	mfspr	r30, SPRN_DSCR
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29

	/* Save all but r9, r13 & r29-r31 */
	reg = 0
	.rept	29
	.if (reg != 9) && (reg != 13)
	std	reg, VCPU_GPRS_TM(reg)(r9)
	.endif
	reg = reg + 1
	.endr
	/* ... now save r13 */
	GET_SCRATCH0(r4)
	std	r4, VCPU_GPRS_TM(13)(r9)
	/* ... and save r9 */
	ld	r4, PACATMSCRATCH(r13)
	std	r4, VCPU_GPRS_TM(9)(r9)

	/* Reload stack pointer and TOC. */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	/* Set MSR RI now we have r1 and r13 back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1
	/* Save away checkpointed SPRs. */
	std	r31, VCPU_PPR_TM(r9)
	std	r30, VCPU_DSCR_TM(r9)
	mflr	r5
	mfcr	r6
	mfctr	r7
	mfspr	r8, SPRN_AMR
	mfspr	r10, SPRN_TAR
	std	r5, VCPU_LR_TM(r9)
	stw	r6, VCPU_CR_TM(r9)
	std	r7, VCPU_CTR_TM(r9)
	std	r8, VCPU_AMR_TM(r9)
	std	r10, VCPU_TAR_TM(r9)

	/* Restore r12 as trap number. */
	lwz	r12, VCPU_TRAP(r9)

	/* Save FP/VSX. */
	addi	r3, r9, VCPU_FPRS_TM
	bl	store_fp_state
	addi	r3, r9, VCPU_VRS_TM
	bl	store_vr_state
	mfspr	r6, SPRN_VRSAVE
	stw	r6, VCPU_VRSAVE_TM(r9)
1:
	/*
	 * We need to save these SPRs after the treclaim so that the software
	 * error code is recorded correctly in the TEXASR.  Also the user may
	 * change these outside of a transaction, so they must always be
	 * context switched.
	 */
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	mfspr	r7, SPRN_TEXASR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)
	std	r7, VCPU_TEXASR(r9)
2:
#endif

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi	r8, 0
	beq	25f
	li	r4, LPPACA_YIELDCOUNT
	LWZX_BE	r3, r8, r4
	addi	r3, r3, 1
	STWX_BE	r3, r8, r4
	li	r3, 1
	stb	r3, VCPU_VPA_DIRTY(r9)
25:
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
BEGIN_FTR_SECTION
	/*
	 * POWER8 seems to have a hardware bug where setting
	 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
	 * when some counters are already negative doesn't cause
	 * a performance monitor alert (and hence interrupt).
	 * The effect of this is that when saving the PMU state,
	 * if there is no PMU alert pending when we read MMCR0
	 * before freezing the counters, but one becomes pending
	 * before we read the counters, we lose it.
	 * To work around this, we need a way to freeze the counters
	 * before reading MMCR0.  Normally, freezing the counters
	 * is done by writing MMCR0 (to set MMCR0[FC]) which
	 * unavoidably writes MMCR0[PMAO] as well.  On POWER8,
	 * we can also freeze the counters using MMCR2, by writing
	 * 1s to all the counter freeze condition bits (there are
	 * 9 bits each for 6 counters).
	 */
	li	r3, -1			/* set all freeze bits */
	clrrdi	r3, r3, 10
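	/* r3 now has 1s in the 54 freeze-condition bit positions
	 * (9 bits for each of the 6 counters) and 0s in the low 10 bits */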
	mfspr	r10, SPRN_MMCR2
	mtspr	SPRN_MMCR2, r3
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
BEGIN_FTR_SECTION
	/* On P7, clear MMCRA in order to disable SDAR updates */
	li	r7, 0
	mtspr	SPRN_MMCRA, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	isync
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	mfspr	r7, SPRN_SIAR
	mfspr	r8, SPRN_SDAR
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
BEGIN_FTR_SECTION
	std	r10, VCPU_MMCR + 24(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
BEGIN_FTR_SECTION
	mfspr	r10, SPRN_PMC7
	mfspr	r11, SPRN_PMC8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	stw	r10, VCPU_PMC + 24(r9)
	stw	r11, VCPU_PMC + 28(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_SIER
	mfspr	r6, SPRN_SPMC1
	mfspr	r7, SPRN_SPMC2
	mfspr	r8, SPRN_MMCRS
	std	r5, VCPU_SIER(r9)
	stw	r6, VCPU_PMC + 24(r9)
	stw	r7, VCPU_PMC + 28(r9)
	std	r8, VCPU_MMCR + 32(r9)
	lis	r4, 0x8000
	mtspr	SPRN_MMCRS, r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
22:
	/* Clear out SLB */
	li	r5,0
	slbmte	r5,r5
	slbia
	ptesync

hdec_soon:			/* r12 = trap, r13 = paca */
BEGIN_FTR_SECTION
	b	32f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	/*
	 * POWER7 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
	/* Increment the threads-exiting-guest count in the 0xff00
	   bits of vcore->entry_exit_count */
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r6,r5,VCORE_ENTRY_EXIT
41:	lwarx	r3,0,r6
	addi	r0,r3,0x100
	stwcx.	r0,0,r6
	bne	41b
	isync		/* order stwcx. vs. reading napping_threads */

	/*
	 * At this point we have an interrupt that we have to pass
	 * up to the kernel or qemu; we can't handle it in real mode.
	 * Thus we have to do a partition switch, so we have to
	 * collect the other threads, if we are the first thread
	 * to take an interrupt.  To do this, we set the HDEC to 0,
	 * which causes an HDEC interrupt in all threads within 2ns
	 * because the HDEC register is shared between all 4 threads.
	 * However, we don't need to bother if this is an HDEC
	 * interrupt, since the other threads will already be on their
	 * way here in that case.
	 */
	cmpwi	r3,0x100	/* Are we the first here? */
	bge	43f
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	beq	40f
	li	r0,0
	mtspr	SPRN_HDEC,r0
40:
	/*
	 * Send an IPI to any napping threads, since an HDEC interrupt
	 * doesn't wake CPUs up from nap.
	 */
	lwz	r3,VCORE_NAPPING_THREADS(r5)
	lbz	r4,HSTATE_PTID(r13)
	li	r0,1
	sld	r0,r0,r4
	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */
	beq	43f
	/* Order entry/exit update vs. IPIs */
	sync
	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */
	subf	r6,r4,r13
42:	andi.	r0,r3,1
	beq	44f
	ld	r8,HSTATE_XICS_PHYS(r6)	/* get thread's XICS reg addr */
	li	r0,IPI_PRIORITY
	li	r7,XICS_MFRR
	stbcix	r0,r7,r8		/* trigger the IPI */
44:	srdi.	r3,r3,1
	addi	r6,r6,PACA_SIZE
	bne	42b

secondary_too_late:
	/* Secondary threads wait for primary to do partition switch */
43:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r3,HSTATE_PTID(r13)
	cmpwi	r3,0
	beq	15f
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	b	16f

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	srwi	r0,r3,8
	clrldi	r3,r3,56
	cmpw	r3,r0
	bne	15b
	isync

	/* Primary thread switches back to host partition */
	ld	r6,KVM_HOST_SDR1(r4)
	lwz	r7,KVM_HOST_LPID(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r8
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync

BEGIN_FTR_SECTION
	/* DPDES is shared between threads */
	mfspr	r7, SPRN_DPDES
	std	r7, VCORE_DPDES(r5)
	/* clear DPDES so we don't get guest doorbells in the host */
	li	r8, 0
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Subtract timebase offset from timebase */
	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	17f
	mftb	r6			/* current guest timebase */
	subf	r8,r8,r6
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	17f
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

	/* Reset PCR */
17:	ld	r0, VCORE_PCR(r5)
	cmpdi	r0, 0
	beq	18f
	li	r0, 0
	mtspr	SPRN_PCR, r0
18:
	/* Signal secondary CPUs to continue */
	stb	r0,VCORE_IN_GUEST(r5)
	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

16:	ld	r8,KVM_HOST_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync
	b	33f

	/*
	 * PPC970 guest -> host partition switch code.
	 * We have to lock against concurrent tlbies, and
	 * we have to flush the whole TLB.
	 */
32:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */

	/* Take the guest's tlbie_lock */
#ifdef __BIG_ENDIAN__
	lwz	r8,PACA_LOCK_TOKEN(r13)
#else
	lwz	r8,PACAPACAINDEX(r13)
#endif
	addi	r3,r4,KVM_TLBIE_LOCK
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r7,KVM_HOST_LPCR(r4)	/* use kvm->arch.host_lpcr for HID4 */
	li	r0,0x18f
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	or	r0,r7,r0
	ptesync
	sync
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop guest tlbie_lock */

	/* invalidate the whole TLB */
	li	r0,256
	mtctr	r0
	li	r6,0
25:	tlbiel	r6
	addi	r6,r6,0x1000
	bdnz	25b
	ptesync

	/* take native_tlbie_lock */
	ld	r3,toc_tlbie_lock@toc(2)
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r6,KVM_HOST_SDR1(r4)
	mtspr	SPRN_SDR1,r6		/* switch to host page table */

	/* Set up host HID4 value */
	sync
	mtspr	SPRN_HID4,r7
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop native_tlbie_lock */

	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

	/* Disable HDEC interrupts */
	mfspr	r0,SPRN_HID0
	li	r3,0
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	sync
	mtspr	SPRN_HID0,r0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0

	/* load host SLB entries */
33:	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr

	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	ld	r0, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	mtlr	r0
	blr

/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path.  In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
kvmppc_hdsi:
	mfspr	r4, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	beq	3f
	clrrdi	r0, r4, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	6f
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont
	cmpdi	r3, -2			/* MMIO emulation; need instr word */
	beq	2f

	/* Synthesize a DSI for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
	mr	r6, r3
1:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_DSISR, r6
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_DATA_STORAGE
	bl	kvmppc_msr_interrupt
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
	lwz	r8, VCPU_XER(r9)
	mtctr	r7
	mtxer	r8
	mr	r4, r9
	b	fast_guest_return

3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)
	b	4b

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	mfmsr	r3
	ori	r4, r3, MSR_DR		/* Enable paging for data */
	mtmsrd	r4
	lwz	r8, 0(r10)
	mtmsrd	r3

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_HOST_HV
	stb	r0, HSTATE_IN_GUEST(r13)
	b	guest_exit_cont

/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
kvmppc_hisi:
	andis.	r0, r11, SRR1_ISI_NOPT@h
	beq	1f
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	beq	3f
	clrrdi	r0, r10, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
4:
	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	mr	r4, r10
	mr	r6, r11
	li	r7, 0			/* instruction fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont

	/* Synthesize an ISI for the guest */
	mr	r11, r3
1:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_INST_STORAGE
	bl	kvmppc_msr_interrupt
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)
	b	4b

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 */
	.globl	hcall_try_real_mode
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	andi.	r0,r11,MSR_PR
	/* sc 1 from userspace - reflect to guest syscall */
	bne	sc_1_fast_return
	clrrdi	r3,r3,2
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	bge	guest_exit_cont
	/* See if this hcall is enabled for in-kernel handling */
	ld	r4, VCPU_KVM(r9)
	srdi	r0, r3, 8	/* r0 = (r3 / 4) >> 6 */
	sldi	r0, r0, 3	/* index into kvm->arch.enabled_hcalls[] */
	add	r4, r4, r0
	ld	r0, KVM_ENABLED_HCALLS(r4)
	rlwinm	r4, r3, 32-2, 0x3f	/* r4 = (r3 / 4) & 0x3f */
	srd	r0, r0, r4
	andi.	r0, r0, 1
	beq	guest_exit_cont
	/* Get pointer to handler, if any, and call it */
	LOAD_REG_ADDR(r4, hcall_real_table)
	lwax	r3,r3,r4
	cmpwi	r3,0
	beq	guest_exit_cont
	add	r12,r3,r4
	mtctr	r12
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	bctrl
	cmpdi	r3,H_TOO_HARD
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)
	ld	r10,VCPU_PC(r4)
	ld	r11,VCPU_MSR(r4)
	b	fast_guest_return

sc_1_fast_return:
	mtspr	SPRN_SRR0,r10
	mtspr	SPRN_SRR1,r11
	li	r10, BOOK3S_INTERRUPT_SYSCALL
	bl	kvmppc_msr_interrupt
	mr	r4,r9
	b	fast_guest_return

	/* We've attempted a real mode hcall, but it's been punted back
	 * to userspace.  We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path */
1958hcall_real_fallback:
1959	li	r12,BOOK3S_INTERRUPT_SYSCALL
1960	ld	r9, HSTATE_KVM_VCPU(r13)
1961
1962	b	guest_exit_cont
1963
1964	.globl	hcall_real_table
1965hcall_real_table:
1966	.long	0		/* 0 - unused */
1967	.long	DOTSYM(kvmppc_h_remove) - hcall_real_table
1968	.long	DOTSYM(kvmppc_h_enter) - hcall_real_table
1969	.long	DOTSYM(kvmppc_h_read) - hcall_real_table
1970	.long	0		/* 0x10 - H_CLEAR_MOD */
1971	.long	0		/* 0x14 - H_CLEAR_REF */
1972	.long	DOTSYM(kvmppc_h_protect) - hcall_real_table
1973	.long	DOTSYM(kvmppc_h_get_tce) - hcall_real_table
1974	.long	DOTSYM(kvmppc_h_put_tce) - hcall_real_table
1975	.long	0		/* 0x24 - H_SET_SPRG0 */
1976	.long	DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
1977	.long	0		/* 0x2c */
1978	.long	0		/* 0x30 */
1979	.long	0		/* 0x34 */
1980	.long	0		/* 0x38 */
1981	.long	0		/* 0x3c */
1982	.long	0		/* 0x40 */
1983	.long	0		/* 0x44 */
1984	.long	0		/* 0x48 */
1985	.long	0		/* 0x4c */
1986	.long	0		/* 0x50 */
1987	.long	0		/* 0x54 */
1988	.long	0		/* 0x58 */
1989	.long	0		/* 0x5c */
1990	.long	0		/* 0x60 */
1991#ifdef CONFIG_KVM_XICS
1992	.long	DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
1993	.long	DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
1994	.long	DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
1995	.long	0		/* 0x70 - H_IPOLL */
1996	.long	DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
1997#else
1998	.long	0		/* 0x64 - H_EOI */
1999	.long	0		/* 0x68 - H_CPPR */
2000	.long	0		/* 0x6c - H_IPI */
2001	.long	0		/* 0x70 - H_IPOLL */
2002	.long	0		/* 0x74 - H_XIRR */
2003#endif
2004	.long	0		/* 0x78 */
2005	.long	0		/* 0x7c */
2006	.long	0		/* 0x80 */
2007	.long	0		/* 0x84 */
2008	.long	0		/* 0x88 */
2009	.long	0		/* 0x8c */
2010	.long	0		/* 0x90 */
2011	.long	0		/* 0x94 */
2012	.long	0		/* 0x98 */
2013	.long	0		/* 0x9c */
2014	.long	0		/* 0xa0 */
2015	.long	0		/* 0xa4 */
2016	.long	0		/* 0xa8 */
2017	.long	0		/* 0xac */
2018	.long	0		/* 0xb0 */
2019	.long	0		/* 0xb4 */
2020	.long	0		/* 0xb8 */
2021	.long	0		/* 0xbc */
2022	.long	0		/* 0xc0 */
2023	.long	0		/* 0xc4 */
2024	.long	0		/* 0xc8 */
2025	.long	0		/* 0xcc */
2026	.long	0		/* 0xd0 */
2027	.long	0		/* 0xd4 */
2028	.long	0		/* 0xd8 */
2029	.long	0		/* 0xdc */
2030	.long	DOTSYM(kvmppc_h_cede) - hcall_real_table
2031	.long	0		/* 0xe4 */
2032	.long	0		/* 0xe8 */
2033	.long	0		/* 0xec */
2034	.long	0		/* 0xf0 */
2035	.long	0		/* 0xf4 */
2036	.long	0		/* 0xf8 */
2037	.long	0		/* 0xfc */
2038	.long	0		/* 0x100 */
2039	.long	0		/* 0x104 */
2040	.long	0		/* 0x108 */
2041	.long	0		/* 0x10c */
2042	.long	0		/* 0x110 */
2043	.long	0		/* 0x114 */
2044	.long	0		/* 0x118 */
2045	.long	0		/* 0x11c */
2046	.long	0		/* 0x120 */
2047	.long	DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
2048	.long	0		/* 0x128 */
2049	.long	0		/* 0x12c */
2050	.long	0		/* 0x130 */
2051	.long	DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
2052	.globl	hcall_real_table_end
2053hcall_real_table_end:
2054
2055ignore_hdec:
2056	mr	r4,r9
2057	b	fast_guest_return
2058
2059_GLOBAL(kvmppc_h_set_xdabr)
2060	andi.	r0, r5, DABRX_USER | DABRX_KERNEL
2061	beq	6f
2062	li	r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
2063	andc.	r0, r5, r0
2064	beq	3f
20656:	li	r3, H_PARAMETER
2066	blr
2067
2068_GLOBAL(kvmppc_h_set_dabr)
2069	li	r5, DABRX_USER | DABRX_KERNEL
20703:
2071BEGIN_FTR_SECTION
2072	b	2f
2073END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
2074	std	r4,VCPU_DABR(r3)
2075	stw	r5, VCPU_DABRX(r3)
2076	mtspr	SPRN_DABRX, r5
2077	/* Work around P7 bug where DABR can get corrupted on mtspr */
20781:	mtspr	SPRN_DABR,r4
2079	mfspr	r5, SPRN_DABR
2080	cmpd	r4, r5
2081	bne	1b
2082	isync
2083	li	r3,0
2084	blr
2085
2086	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
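	/*
	 * Mapping performed below: the DABR's data-read/data-write
	 * enables go to DAWRX_DR/DAWRX_DW, its translation bit to
	 * DAWRX_WT, and the DAWR gets the doubleword-aligned address
	 * (low 3 bits cleared).
	 */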
20872:	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
2088	rlwimi	r5, r4, 1, DAWRX_WT
2089	clrrdi	r4, r4, 3
2090	std	r4, VCPU_DAWR(r3)
2091	std	r5, VCPU_DAWRX(r3)
2092	mtspr	SPRN_DAWR, r4
2093	mtspr	SPRN_DAWRX, r5
2094	li	r3, 0
2095	blr
2096
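/*
 * H_CEDE: the vcpu gives up the processor until it is prodded or an
 * interrupt becomes pending.  Roughly, in C terms (illustrative
 * sketch only; field names are schematic, not the exact struct
 * layout):
 *
 *	vcpu->msr |= MSR_EE;
 *	vcpu->ceded = 1;
 *	smp_mb();			// order ceded vs. prodded
 *	if (vcpu->prodded)
 *		return kvm_cede_prodded();
 *	vcpu->trap = 0;			// hcall handled here
 *	vcpu->gpr[3] = H_SUCCESS;
 *	... then nap unless we are the last thread awake ...
 */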
2097_GLOBAL(kvmppc_h_cede)
2098	ori	r11,r11,MSR_EE
2099	std	r11,VCPU_MSR(r3)
2100	li	r0,1
2101	stb	r0,VCPU_CEDED(r3)
2102	sync			/* order setting ceded vs. testing prodded */
2103	lbz	r5,VCPU_PRODDED(r3)
2104	cmpwi	r5,0
2105	bne	kvm_cede_prodded
2106	li	r0,0		/* set trap to 0 to say hcall is handled */
2107	stw	r0,VCPU_TRAP(r3)
2108	li	r0,H_SUCCESS
2109	std	r0,VCPU_GPR(R3)(r3)
2110BEGIN_FTR_SECTION
2111	b	kvm_cede_exit	/* just send it up to host on 970 */
2112END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
2113
2114	/*
2115	 * Set our bit in the bitmask of napping threads unless all the
2116	 * other threads are already napping, in which case we send this
2117	 * up to the host.
2118	 */
2119	ld	r5,HSTATE_KVM_VCORE(r13)
2120	lbz	r6,HSTATE_PTID(r13)
2121	lwz	r8,VCORE_ENTRY_EXIT(r5)
2122	clrldi	r8,r8,56
2123	li	r0,1
2124	sld	r0,r0,r6
2125	addi	r6,r5,VCORE_NAPPING_THREADS
212631:	lwarx	r4,0,r6
2127	or	r4,r4,r0
2128	PPC_POPCNTW(R7,R4)
2129	cmpw	r7,r8
2130	bge	kvm_cede_exit
2131	stwcx.	r4,0,r6
2132	bne	31b
2133	/* order napping_threads update vs testing entry_exit_count */
2134	isync
2135	li	r0,NAPPING_CEDE
2136	stb	r0,HSTATE_NAPPING(r13)
2137	lwz	r7,VCORE_ENTRY_EXIT(r5)
2138	cmpwi	r7,0x100
2139	bge	33f		/* another thread already exiting */
2140
2141/*
2142 * Although not specifically required by the architecture, POWER7
2143 * preserves the following registers in nap mode, even if an SMT mode
2144 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
2145 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
2146 */
2147	/* Save non-volatile GPRs */
2148	std	r14, VCPU_GPR(R14)(r3)
2149	std	r15, VCPU_GPR(R15)(r3)
2150	std	r16, VCPU_GPR(R16)(r3)
2151	std	r17, VCPU_GPR(R17)(r3)
2152	std	r18, VCPU_GPR(R18)(r3)
2153	std	r19, VCPU_GPR(R19)(r3)
2154	std	r20, VCPU_GPR(R20)(r3)
2155	std	r21, VCPU_GPR(R21)(r3)
2156	std	r22, VCPU_GPR(R22)(r3)
2157	std	r23, VCPU_GPR(R23)(r3)
2158	std	r24, VCPU_GPR(R24)(r3)
2159	std	r25, VCPU_GPR(R25)(r3)
2160	std	r26, VCPU_GPR(R26)(r3)
2161	std	r27, VCPU_GPR(R27)(r3)
2162	std	r28, VCPU_GPR(R28)(r3)
2163	std	r29, VCPU_GPR(R29)(r3)
2164	std	r30, VCPU_GPR(R30)(r3)
2165	std	r31, VCPU_GPR(R31)(r3)
2166
2167	/* save FP state */
2168	bl	kvmppc_save_fp
2169
2170	/*
	 * Take a nap until a decrementer, external or doorbell interrupt
2172	 * occurs, with PECE1, PECE0 and PECEDP set in LPCR. Also clear the
2173	 * runlatch bit before napping.
2174	 */
2175	mfspr	r2, SPRN_CTRLF
2176	clrrdi	r2, r2, 1
2177	mtspr	SPRN_CTRLT, r2
2178
2179	li	r0,1
2180	stb	r0,HSTATE_HWTHREAD_REQ(r13)
2181	mfspr	r5,SPRN_LPCR
2182	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
2183BEGIN_FTR_SECTION
2184	oris	r5,r5,LPCR_PECEDP@h
2185END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
2186	mtspr	SPRN_LPCR,r5
2187	isync
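	/*
	 * Flush outstanding stores before napping: store to a scratch
	 * slot, ptesync, then load the value back (the usual pre-nap
	 * idiom).  The cmpd/bne pair below always compares equal and
	 * never loops; it exists only to keep the load ordered strictly
	 * before the nap instruction.
	 */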
2188	li	r0, 0
2189	std	r0, HSTATE_SCRATCH0(r13)
2190	ptesync
2191	ld	r0, HSTATE_SCRATCH0(r13)
21921:	cmpd	r0, r0
2193	bne	1b
2194	nap
2195	b	.
2196
219733:	mr	r4, r3
2198	li	r3, 0
2199	li	r12, 0
2200	b	34f
2201
2202kvm_end_cede:
2203	/* get vcpu pointer */
2204	ld	r4, HSTATE_KVM_VCPU(r13)
2205
2206	/* Woken by external or decrementer interrupt */
2207	ld	r1, HSTATE_HOST_R1(r13)
2208
2209	/* load up FP state */
2210	bl	kvmppc_load_fp
2211
2212	/* Load NV GPRS */
2213	ld	r14, VCPU_GPR(R14)(r4)
2214	ld	r15, VCPU_GPR(R15)(r4)
2215	ld	r16, VCPU_GPR(R16)(r4)
2216	ld	r17, VCPU_GPR(R17)(r4)
2217	ld	r18, VCPU_GPR(R18)(r4)
2218	ld	r19, VCPU_GPR(R19)(r4)
2219	ld	r20, VCPU_GPR(R20)(r4)
2220	ld	r21, VCPU_GPR(R21)(r4)
2221	ld	r22, VCPU_GPR(R22)(r4)
2222	ld	r23, VCPU_GPR(R23)(r4)
2223	ld	r24, VCPU_GPR(R24)(r4)
2224	ld	r25, VCPU_GPR(R25)(r4)
2225	ld	r26, VCPU_GPR(R26)(r4)
2226	ld	r27, VCPU_GPR(R27)(r4)
2227	ld	r28, VCPU_GPR(R28)(r4)
2228	ld	r29, VCPU_GPR(R29)(r4)
2229	ld	r30, VCPU_GPR(R30)(r4)
2230	ld	r31, VCPU_GPR(R31)(r4)
2231
2232	/* Check the wake reason in SRR1 to see why we got here */
2233	bl	kvmppc_check_wake_reason
2234
2235	/* clear our bit in vcore->napping_threads */
223634:	ld	r5,HSTATE_KVM_VCORE(r13)
2237	lbz	r7,HSTATE_PTID(r13)
2238	li	r0,1
2239	sld	r0,r0,r7
2240	addi	r6,r5,VCORE_NAPPING_THREADS
224132:	lwarx	r7,0,r6
2242	andc	r7,r7,r0
2243	stwcx.	r7,0,r6
2244	bne	32b
2245	li	r0,0
2246	stb	r0,HSTATE_NAPPING(r13)
2247
2248	/* See if the wake reason means we need to exit */
2249	stw	r12, VCPU_TRAP(r4)
2250	mr	r9, r4
2251	cmpdi	r3, 0
2252	bgt	guest_exit_cont
2253
2254	/* see if any other thread is already exiting */
2255	lwz	r0,VCORE_ENTRY_EXIT(r5)
2256	cmpwi	r0,0x100
2257	bge	guest_exit_cont
2258
2259	b	kvmppc_cede_reentry	/* if not go back to guest */
2260
2261	/* cede when already previously prodded case */
2262kvm_cede_prodded:
2263	li	r0,0
2264	stb	r0,VCPU_PRODDED(r3)
2265	sync			/* order testing prodded vs. clearing ceded */
2266	stb	r0,VCPU_CEDED(r3)
2267	li	r3,H_SUCCESS
2268	blr
2269
2270	/* we've ceded but we want to give control to the host */
2271kvm_cede_exit:
2272	b	hcall_real_fallback
2273
2274	/* Try to handle a machine check in real mode */
2275machine_check_realmode:
2276	mr	r3, r9		/* get vcpu pointer */
2277	bl	kvmppc_realmode_machine_check
2278	nop
2279	cmpdi	r3, 0		/* Did we handle MCE ? */
2280	ld	r9, HSTATE_KVM_VCPU(r13)
2281	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
2282	/*
2283	 * Deliver unhandled/fatal (e.g. UE) MCE errors to guest through
2284	 * machine check interrupt (set HSRR0 to 0x200). And for handled
2285	 * errors (no-fatal), just go back to guest execution with current
2286	 * HSRR0 instead of exiting guest. This new approach will inject
2287	 * machine check to guest for fatal error causing guest to crash.
2288	 *
2289	 * The old code used to return to host for unhandled errors which
2290	 * was causing guest to hang with soft lockups inside guest and
2291	 * makes it difficult to recover guest instance.
2292	 */
2293	ld	r10, VCPU_PC(r9)
2294	ld	r11, VCPU_MSR(r9)
2295	bne	2f	/* Continue guest execution. */
2296	/* If not, deliver a machine check.  SRR0/1 are already set */
2297	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
2298	ld	r11, VCPU_MSR(r9)
2299	bl	kvmppc_msr_interrupt
23002:	b	fast_interrupt_c_return
2301
2302/*
2303 * Check the reason we woke from nap, and take appropriate action.
2304 * Returns:
2305 *	0 if nothing needs to be done
2306 *	1 if something happened that needs to be handled by the host
2307 *	-1 if there was a guest wakeup (IPI)
2308 *
2309 * Also sets r12 to the interrupt vector for any interrupt that needs
2310 * to be handled now by the host (0x500 for external interrupt), or zero.
2311 */
2312kvmppc_check_wake_reason:
2313	mfspr	r6, SPRN_SRR1
2314BEGIN_FTR_SECTION
2315	rlwinm	r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
2316FTR_SECTION_ELSE
2317	rlwinm	r6, r6, 45-31, 0xe	/* P7 wake reason field is 3 bits */
2318ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
2319	cmpwi	r6, 8			/* was it an external interrupt? */
2320	li	r12, BOOK3S_INTERRUPT_EXTERNAL
2321	beq	kvmppc_read_intr	/* if so, see what it was */
2322	li	r3, 0
2323	li	r12, 0
2324	cmpwi	r6, 6			/* was it the decrementer? */
2325	beq	0f
2326BEGIN_FTR_SECTION
2327	cmpwi	r6, 5			/* privileged doorbell? */
2328	beq	0f
2329	cmpwi	r6, 3			/* hypervisor doorbell? */
2330	beq	3f
2331END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
2332	li	r3, 1			/* anything else, return 1 */
23330:	blr
2334
2335	/* hypervisor doorbell */
23363:	li	r12, BOOK3S_INTERRUPT_H_DOORBELL
2337	li	r3, 1
2338	blr
2339
2340/*
2341 * Determine what sort of external interrupt is pending (if any).
2342 * Returns:
2343 *	0 if no interrupt is pending
2344 *	1 if an interrupt is pending that needs to be handled by the host
2345 *	-1 if there was a guest wakeup IPI (which has now been cleared)
2346 */
2347kvmppc_read_intr:
2348	/* see if a host IPI is pending */
2349	li	r3, 1
2350	lbz	r0, HSTATE_HOST_IPI(r13)
2351	cmpwi	r0, 0
2352	bne	1f
2353
2354	/* Now read the interrupt from the ICP */
2355	ld	r6, HSTATE_XICS_PHYS(r13)
2356	li	r7, XICS_XIRR
2357	cmpdi	r6, 0
2358	beq-	1f
2359	lwzcix	r0, r6, r7
2360	/*
	 * Save XIRR for later. Since we get it in reverse endian on LE
2362	 * systems, save it byte reversed and fetch it back in host endian.
2363	 */
2364	li	r3, HSTATE_SAVED_XIRR
2365	STWX_BE	r0, r3, r13
2366#ifdef __LITTLE_ENDIAN__
2367	lwz	r3, HSTATE_SAVED_XIRR(r13)
2368#else
2369	mr	r3, r0
2370#endif
2371	rlwinm.	r3, r3, 0, 0xffffff
2372	sync
2373	beq	1f			/* if nothing pending in the ICP */
2374
	/* We found something in the ICP...
	 *
	 * If it's not an IPI, stash it in the PACA and return to
	 * the host; we don't (yet) handle directing real external
	 * interrupts directly to the guest.
	 */
2381	cmpwi	r3, XICS_IPI		/* if there is, is it an IPI? */
2382	bne	42f
2383
2384	/* It's an IPI, clear the MFRR and EOI it */
2385	li	r3, 0xff
2386	li	r8, XICS_MFRR
2387	stbcix	r3, r6, r8		/* clear the IPI */
2388	stwcix	r0, r6, r7		/* EOI it */
2389	sync
2390
2391	/* We need to re-check host IPI now in case it got set in the
2392	 * meantime. If it's clear, we bounce the interrupt to the
2393	 * guest
2394	 */
2395	lbz	r0, HSTATE_HOST_IPI(r13)
2396	cmpwi	r0, 0
2397	bne-	43f
2398
2399	/* OK, it's an IPI for us */
2400	li	r3, -1
24011:	blr
2402
240342:	/* It's not an IPI and it's for the host. We saved a copy of XIRR in
2404	 * the PACA earlier, it will be picked up by the host ICP driver
2405	 */
2406	li	r3, 1
2407	b	1b
2408
240943:	/* We raced with the host, we need to resend that IPI, bummer */
2410	li	r0, IPI_PRIORITY
2411	stbcix	r0, r6, r8		/* set the IPI */
2412	sync
2413	li	r3, 1
2414	b	1b
2415
2416/*
2417 * Save away FP, VMX and VSX registers.
2418 * r3 = vcpu pointer
2419 * N.B. r30 and r31 are volatile across this function,
2420 * thus it is not callable from C.
2421 */
2422kvmppc_save_fp:
2423	mflr	r30
2424	mr	r31,r3
2425	mfmsr	r5
2426	ori	r8,r5,MSR_FP
2427#ifdef CONFIG_ALTIVEC
2428BEGIN_FTR_SECTION
2429	oris	r8,r8,MSR_VEC@h
2430END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2431#endif
2432#ifdef CONFIG_VSX
2433BEGIN_FTR_SECTION
2434	oris	r8,r8,MSR_VSX@h
2435END_FTR_SECTION_IFSET(CPU_FTR_VSX)
2436#endif
2437	mtmsrd	r8
2438	isync
2439	addi	r3,r3,VCPU_FPRS
2440	bl	store_fp_state
2441#ifdef CONFIG_ALTIVEC
2442BEGIN_FTR_SECTION
2443	addi	r3,r31,VCPU_VRS
2444	bl	store_vr_state
2445END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2446#endif
2447	mfspr	r6,SPRN_VRSAVE
2448	stw	r6,VCPU_VRSAVE(r31)
2449	mtlr	r30
2450	blr
2451
2452/*
2453 * Load up FP, VMX and VSX registers
2454 * r4 = vcpu pointer
2455 * N.B. r30 and r31 are volatile across this function,
2456 * thus it is not callable from C.
2457 */
2458kvmppc_load_fp:
2459	mflr	r30
2460	mr	r31,r4
2461	mfmsr	r9
2462	ori	r8,r9,MSR_FP
2463#ifdef CONFIG_ALTIVEC
2464BEGIN_FTR_SECTION
2465	oris	r8,r8,MSR_VEC@h
2466END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2467#endif
2468#ifdef CONFIG_VSX
2469BEGIN_FTR_SECTION
2470	oris	r8,r8,MSR_VSX@h
2471END_FTR_SECTION_IFSET(CPU_FTR_VSX)
2472#endif
2473	mtmsrd	r8
2474	isync
2475	addi	r3,r4,VCPU_FPRS
2476	bl	load_fp_state
2477#ifdef CONFIG_ALTIVEC
2478BEGIN_FTR_SECTION
2479	addi	r3,r31,VCPU_VRS
2480	bl	load_vr_state
2481END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2482#endif
2483	lwz	r7,VCPU_VRSAVE(r31)
2484	mtspr	SPRN_VRSAVE,r7
2485	mtlr	r30
2486	mr	r4,r31
2487	blr
2488
2489/*
 * We come here if we get any exception or interrupt while we are
 * executing host real-mode code in guest MMU context.
2492 * For now just spin, but we should do something better.
2493 */
2494kvmppc_bad_host_intr:
2495	b	.
2496
2497/*
2498 * This mimics the MSR transition on IRQ delivery.  The new guest MSR is taken
2499 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
2500 *   r11 has the guest MSR value (in/out)
2501 *   r9 has a vcpu pointer (in)
2502 *   r0 is used as a scratch register
2503 */
2504kvmppc_msr_interrupt:
2505	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
2506	cmpwi	r0, 2 /* Check if we are in transactional state..  */
2507	ld	r11, VCPU_INTR_MSR(r9)
2508	bne	1f
2509	/* ... if transactional, change to suspended */
2510	li	r0, 1
25111:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
2512	blr
2513
2514/*
2515 * This works around a hardware bug on POWER8E processors, where
2516 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
2517 * performance monitor interrupt.  Instead, when we need to have
2518 * an interrupt pending, we have to arrange for a counter to overflow.
2519 */
2520kvmppc_fix_pmao:
2521	li	r3, 0
2522	mtspr	SPRN_MMCR2, r3
2523	lis	r3, (MMCR0_PMXE | MMCR0_FCECE)@h
2524	ori	r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
2525	mtspr	SPRN_MMCR0, r3
2526	lis	r3, 0x7fff
2527	ori	r3, r3, 0xffff
2528	mtspr	SPRN_PMC6, r3
2529	isync
2530	blr
2531