/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>

/*****************************************************************************
 *                                                                           *
 *        Real Mode handlers that need to be in the linear mapping           *
 *                                                                           *
 ****************************************************************************/

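/*
 * These stubs advance (H)SRR0 past the instruction that caused the
 * interrupt, restore r13 from the scratch SPR and return, so that
 * execution resumes at the instruction following the one that trapped.
 */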
	.globl	kvmppc_skip_interrupt
kvmppc_skip_interrupt:
	mfspr	r13,SPRN_SRR0
	addi	r13,r13,4
	mtspr	SPRN_SRR0,r13
	GET_SCRATCH0(r13)
	rfid
	b	.

	.globl	kvmppc_skip_Hinterrupt
kvmppc_skip_Hinterrupt:
	mfspr	r13,SPRN_HSRR0
	addi	r13,r13,4
	mtspr	SPRN_HSRR0,r13
	GET_SCRATCH0(r13)
	hrfid
	b	.

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL(kvmppc_hv_entry_trampoline)
	mfmsr	r10
	LOAD_REG_ADDR(r5, kvmppc_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI

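/* VCPU_GPR(n) is the offset of guest GPR n within the vcpu struct */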
#define ULONG_SIZE 		8
#define VCPU_GPR(n)		(VCPU_GPRS + (n * ULONG_SIZE))

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

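/* Byte offsets of the XIRR and QIRR registers within a hardware
 * thread's XICS interrupt presentation (ICP) area */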
#define XICS_XIRR		4
#define XICS_QIRR		0xc

/*
 * We come in here when wakened from nap mode on a secondary hw thread.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest
kvm_start_guest:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* We got here with an IPI; clear it */
	ld	r5, HSTATE_XICS_PHYS(r13)
	li	r0, 0xff
	li	r6, XICS_QIRR
	li	r7, XICS_XIRR
	lwzcix	r8, r5, r7		/* ack the interrupt */
	sync
	stbcix	r0, r5, r6		/* clear it */
	stwcix	r8, r5, r7		/* EOI it */

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * all other volatile GPRS = free
	 */
	mflr	r0
	std	r0, HSTATE_VMHANDLER(r13)

	ld	r14, VCPU_GPR(r14)(r4)
	ld	r15, VCPU_GPR(r15)(r4)
	ld	r16, VCPU_GPR(r16)(r4)
	ld	r17, VCPU_GPR(r17)(r4)
	ld	r18, VCPU_GPR(r18)(r4)
	ld	r19, VCPU_GPR(r19)(r4)
	ld	r20, VCPU_GPR(r20)(r4)
	ld	r21, VCPU_GPR(r21)(r4)
	ld	r22, VCPU_GPR(r22)(r4)
	ld	r23, VCPU_GPR(r23)(r4)
	ld	r24, VCPU_GPR(r24)(r4)
	ld	r25, VCPU_GPR(r25)(r4)
	ld	r26, VCPU_GPR(r26)(r4)
	ld	r27, VCPU_GPR(r27)(r4)
	ld	r28, VCPU_GPR(r28)(r4)
	ld	r29, VCPU_GPR(r29)(r4)
	ld	r30, VCPU_GPR(r30)(r4)
	ld	r31, VCPU_GPR(r31)(r4)

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
BEGIN_FTR_SECTION
	lwz	r10, VCPU_PMC + 24(r4)
	lwz	r11, VCPU_PMC + 28(r4)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
BEGIN_FTR_SECTION
	mtspr	SPRN_PMC7, r10
	mtspr	SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_MMCR0, r3
	isync

	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

BEGIN_FTR_SECTION
	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3
	stw	r3,VCPU_DEC(r4)

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	lwz	r5, LPPACA_YIELDCOUNT(r3)
	addi	r5, r5, 1
	stw	r5, LPPACA_YIELDCOUNT(r3)
25:
	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

	/* Set partition DABR */
	li	r5,3
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6

BEGIN_FTR_SECTION
	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Clear out SLB */
	li	r6,0
	slbmte	r6,r6
	slbia
	ptesync

BEGIN_FTR_SECTION
	b	30f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	/*
	 * POWER7 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Increment entry count iff exit count is zero. */
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r9,r5,VCORE_ENTRY_EXIT
21:	lwarx	r3,0,r9
	cmpwi	r3,0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	addi	r3,r3,1
	stwcx.	r3,0,r9
	bne	21b

	/* Primary thread switches to guest partition. */
	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */
	lwz	r6,VCPU_PTID(r4)
	cmpwi	r6,0
	bne	20f
	ld	r6,KVM_SDR1(r9)
	lwz	r7,KVM_LPID(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync
	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */
	b	10f

	/* Secondary threads wait for primary to have done partition switch */
20:	lbz	r0,VCORE_IN_GUEST(r5)
	cmpwi	r0,0
	beq	20b

	/* Set LPCR.  Set the MER bit if there is a pending external irq. */
10:	ld	r8,KVM_LPCR(r9)
	ld	r0,VCPU_PENDING_EXC(r4)
	li	r7,(1 << BOOK3S_IRQPRIO_EXTERNAL)
	oris	r7,r7,(1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
	and.	r0,r0,r7
	beq	11f
	ori	r8,r8,LPCR_MER
11:	mtspr	SPRN_LPCR,r8
	ld	r8,KVM_RMOR(r9)
	mtspr	SPRN_RMOR,r8
	isync

	/* Check if HDEC expires soon */
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,10
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	mr	r9,r4
	blt	hdec_soon

	/*
	 * Invalidate the TLB if we could possibly have stale TLB
	 * entries for this partition on this core due to the use
	 * of tlbiel.
	 * XXX maybe only need this on primary thread?
	 */
	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */
	lwz	r5,VCPU_VCPUID(r4)
	lhz	r6,PACAPACAINDEX(r13)
	rldimi	r6,r5,0,62		/* XXX map as if threads 1:1 p:v */
	lhz	r8,VCPU_LAST_CPU(r4)
	sldi	r7,r6,1			/* see if this is the same vcpu */
	add	r7,r7,r9		/* as last ran on this pcpu */
	lhz	r0,KVM_LAST_VCPU(r7)
	cmpw	r6,r8			/* on the same cpu core as last time? */
	bne	3f
	cmpw	r0,r5			/* same vcpu as this core last ran? */
	beq	1f
3:	sth	r6,VCPU_LAST_CPU(r4)	/* if not, invalidate partition TLB */
	sth	r5,KVM_LAST_VCPU(r7)
	li	r6,128
	mtctr	r6
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
2:	tlbiel	r7
	addi	r7,r7,0x1000
	bdnz	2b
	ptesync
1:

	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8
	b	31f

	/*
	 * PPC970 host -> guest partition switch code.
	 * We have to lock against concurrent tlbies,
	 * using native_tlbie_lock to lock against host tlbies
	 * and kvm->arch.tlbie_lock to lock against guest tlbies.
	 * We also have to invalidate the TLB since its
	 * entries aren't tagged with the LPID.
	 */
30:	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */

	/* first take native_tlbie_lock */
	.section ".toc","aw"
toc_tlbie_lock:
	.tc	native_tlbie_lock[TC],native_tlbie_lock
	.previous
	ld	r3,toc_tlbie_lock@toc(2)
	lwz	r8,PACA_LOCK_TOKEN(r13)
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r7,KVM_LPCR(r9)		/* use kvm->arch.lpcr to store HID4 */
	li	r0,0x18f
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	or	r0,r7,r0
	ptesync
	sync
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop native_tlbie_lock */

	/* invalidate the whole TLB */
	li	r0,256
	mtctr	r0
	li	r6,0
25:	tlbiel	r6
	addi	r6,r6,0x1000
	bdnz	25b
	ptesync

	/* Take the guest's tlbie_lock */
	addi	r3,r9,KVM_TLBIE_LOCK
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync
	ld	r6,KVM_SDR1(r9)
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */

	/* Set up HID4 with the guest's LPID etc. */
	sync
	mtspr	SPRN_HID4,r7
	isync

	/* drop the guest's tlbie_lock */
	li	r0,0
	stw	r0,0(r3)

	/* Check if HDEC expires soon */
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,10
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	mr	r9,r4
	blt	hdec_soon

	/* Enable HDEC interrupts */
	mfspr	r0,SPRN_HID0
	li	r3,1
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	sync
	mtspr	SPRN_HID0,r0
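	/* Read HID0 back several times so the update takes effect
	 * (the PPC970 requires this access sequence after mtspr HID0) */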
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0

	/* Load up guest SLB entries */
31:	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	ld	r6, VCPU_CTR(r4)
	lwz	r7, VCPU_XER(r4)

	mtctr	r6
	mtxer	r7

	/* Move SRR0 and SRR1 into the respective regs */
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

	ld	r10, VCPU_PC(r4)

	ld	r11, VCPU_MSR(r4)	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME

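/*
 * fast_guest_return: r10 holds the guest PC and r11 the guest MSR.
 * Load them into HSRR0/HSRR1, restore the remaining guest registers
 * and enter the guest with hrfid.
 */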
fast_guest_return:
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST
	stb	r9, HSTATE_IN_GUEST(r13)

	/* Enter guest */

	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r0, VCPU_GPR(r0)(r4)
	ld	r1, VCPU_GPR(r1)(r4)
	ld	r2, VCPU_GPR(r2)(r4)
	ld	r3, VCPU_GPR(r3)(r4)
	ld	r5, VCPU_GPR(r5)(r4)
	ld	r6, VCPU_GPR(r6)(r4)
	ld	r7, VCPU_GPR(r7)(r4)
	ld	r8, VCPU_GPR(r8)(r4)
	ld	r9, VCPU_GPR(r9)(r4)
	ld	r10, VCPU_GPR(r10)(r4)
	ld	r11, VCPU_GPR(r11)(r4)
	ld	r12, VCPU_GPR(r12)(r4)
	ld	r13, VCPU_GPR(r13)(r4)

	ld	r4, VCPU_GPR(r4)(r4)

	hrfid
	b	.

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt
kvmppc_interrupt:
	/*
	 * Register contents:
	 * R12		= interrupt vector
	 * R13		= PACA
	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	/* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
	std	r9, HSTATE_HOST_R2(r13)
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(r0)(r9)
	std	r1, VCPU_GPR(r1)(r9)
	std	r2, VCPU_GPR(r2)(r9)
	std	r3, VCPU_GPR(r3)(r9)
	std	r4, VCPU_GPR(r4)(r9)
	std	r5, VCPU_GPR(r5)(r9)
	std	r6, VCPU_GPR(r6)(r9)
	std	r7, VCPU_GPR(r7)(r9)
	std	r8, VCPU_GPR(r8)(r9)
	ld	r0, HSTATE_HOST_R2(r13)
	std	r0, VCPU_GPR(r9)(r9)
	std	r10, VCPU_GPR(r10)(r9)
	std	r11, VCPU_GPR(r11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	lwz	r4, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_GPR(r12)(r9)
	stw	r4, VCPU_CR(r9)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(r13)(r9)
	std	r4, VCPU_LR(r9)

	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	stw	r12,VCPU_TRAP(r9)

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,0
	bge	ignore_hdec
2:
	/* See if this is something we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode
hcall_real_cont:

	/* Check for mediated interrupts (could be done earlier really ...) */
BEGIN_FTR_SECTION
	cmpwi	r12,BOOK3S_INTERRUPT_EXTERNAL
	bne+	1f
	ld	r5,VCPU_KVM(r9)
	ld	r5,KVM_LPCR(r5)
	andi.	r0,r11,MSR_EE
	beq	1f
	andi.	r0,r5,LPCR_MER
	bne	bounce_ext_interrupt
1:
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Save DEC */
	mfspr	r5,SPRN_DEC
	mftb	r6
	extsw	r5,r5
	add	r5,r5,r6
	std	r5,VCPU_DEC_EXPIRES(r9)

	/* Save HEIR (HV emulation assist reg) in last_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,-1
BEGIN_FTR_SECTION
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
11:	stw	r3,VCPU_LAST_INST(r9)

	/* Save more register state  */
	mfxer	r5
	mfdar	r6
	mfdsisr	r7
	mfctr	r8

	stw	r5, VCPU_XER(r9)
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
	std	r8, VCPU_CTR(r9)
	/* grab HDAR & HDSISR if HV data storage interrupt (HDSI) */
BEGIN_FTR_SECTION
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	6f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
7:	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* Save guest CTRL register, set runlatch to 1 */
	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	ori	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
	li	r5,0
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
	stw	r5,VCPU_SLB_MAX(r9)

	/*
	 * Save the guest PURR/SPURR
	 */
BEGIN_FTR_SECTION
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	ld	r7,VCPU_PURR(r9)
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	subf	r5,r7,r5
	subf	r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	add	r3,r3,r5
	add	r4,r4,r6
	mtspr	SPRN_PURR,r3
	mtspr	SPRN_SPURR,r4
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)

	/* Clear out SLB */
	li	r5,0
	slbmte	r5,r5
	slbia
	ptesync

hdec_soon:
BEGIN_FTR_SECTION
	b	32f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	/*
	 * POWER7 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
	/* Increment the threads-exiting-guest count in the 0xff00
	   bits of vcore->entry_exit_count */
	lwsync
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r6,r5,VCORE_ENTRY_EXIT
41:	lwarx	r3,0,r6
	addi	r0,r3,0x100
	stwcx.	r0,0,r6
	bne	41b

	/*
	 * At this point we have an interrupt that we have to pass
	 * up to the kernel or qemu; we can't handle it in real mode.
	 * Thus we have to do a partition switch, so we have to
	 * collect the other threads, if we are the first thread
	 * to take an interrupt.  To do this, we set the HDEC to 0,
	 * which causes an HDEC interrupt in all threads within 2ns
	 * because the HDEC register is shared between all 4 threads.
	 * However, we don't need to bother if this is an HDEC
	 * interrupt, since the other threads will already be on their
	 * way here in that case.
	 */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	beq	40f
	cmpwi	r3,0x100	/* Are we the first here? */
	bge	40f
	cmpwi	r3,1
	ble	40f
	li	r0,0
	mtspr	SPRN_HDEC,r0
40:

	/* Secondary threads wait for primary to do partition switch */
	ld	r4,VCPU_KVM(r9)		/* pointer to struct kvm */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lwz	r3,VCPU_PTID(r9)
	cmpwi	r3,0
	beq	15f
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	b	16f

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	srwi	r0,r3,8
	clrldi	r3,r3,56
	cmpw	r3,r0
	bne	15b
	isync

	/* Primary thread switches back to host partition */
	ld	r6,KVM_HOST_SDR1(r4)
	lwz	r7,KVM_HOST_LPID(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r8
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync
	li	r0,0
	stb	r0,VCORE_IN_GUEST(r5)
	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

16:	ld	r8,KVM_HOST_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync
	b	33f

	/*
	 * PPC970 guest -> host partition switch code.
	 * We have to lock against concurrent tlbies, and
	 * we have to flush the whole TLB.
	 */
32:	ld	r4,VCPU_KVM(r9)		/* pointer to struct kvm */

	/* Take the guest's tlbie_lock */
	lwz	r8,PACA_LOCK_TOKEN(r13)
	addi	r3,r4,KVM_TLBIE_LOCK
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r7,KVM_HOST_LPCR(r4)	/* use kvm->arch.host_lpcr for HID4 */
	li	r0,0x18f
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	or	r0,r7,r0
	ptesync
	sync
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop guest tlbie_lock */

	/* invalidate the whole TLB */
	li	r0,256
	mtctr	r0
	li	r6,0
25:	tlbiel	r6
	addi	r6,r6,0x1000
	bdnz	25b
	ptesync

	/* take native_tlbie_lock */
	ld	r3,toc_tlbie_lock@toc(2)
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r6,KVM_HOST_SDR1(r4)
	mtspr	SPRN_SDR1,r6		/* switch to host page table */

	/* Set up host HID4 value */
	sync
	mtspr	SPRN_HID4,r7
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop native_tlbie_lock */

	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

	/* Disable HDEC interrupts */
	mfspr	r0,SPRN_HID0
	li	r3,0
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	sync
	mtspr	SPRN_HID0,r0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0

	/* load host SLB entries */
33:	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	ld	r5,SLBSHADOW_SAVEAREA(r8)
	ld	r6,SLBSHADOW_SAVEAREA+8(r8)
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr

	/* Save and reset AMR and UAMOR before turning on the MMU */
BEGIN_FTR_SECTION
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	li	r6,0
	mtspr	SPRN_AMR,r6
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6

	/* Switch DSCR back to host value */
BEGIN_FTR_SECTION
	mfspr	r8, SPRN_DSCR
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r7)
	mtspr	SPRN_DSCR, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(r14)(r9)
	std	r15, VCPU_GPR(r15)(r9)
	std	r16, VCPU_GPR(r16)(r9)
	std	r17, VCPU_GPR(r17)(r9)
	std	r18, VCPU_GPR(r18)(r9)
	std	r19, VCPU_GPR(r19)(r9)
	std	r20, VCPU_GPR(r20)(r9)
	std	r21, VCPU_GPR(r21)(r9)
	std	r22, VCPU_GPR(r22)(r9)
	std	r23, VCPU_GPR(r23)(r9)
	std	r24, VCPU_GPR(r24)(r9)
	std	r25, VCPU_GPR(r25)(r9)
	std	r26, VCPU_GPR(r26)(r9)
	std	r27, VCPU_GPR(r27)(r9)
	std	r28, VCPU_GPR(r28)(r9)
	std	r29, VCPU_GPR(r29)(r9)
	std	r30, VCPU_GPR(r30)(r9)
	std	r31, VCPU_GPR(r31)(r9)

	/* Save SPRGs */
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi	r8, 0
	beq	25f
	lwz	r3, LPPACA_YIELDCOUNT(r8)
	addi	r3, r3, 1
	stw	r3, LPPACA_YIELDCOUNT(r8)
25:
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	mfspr	r6, SPRN_MMCRA
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
BEGIN_FTR_SECTION
	mfspr	r10, SPRN_PMC7
	mfspr	r11, SPRN_PMC8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	stw	r10, VCPU_PMC + 24(r9)
	stw	r11, VCPU_PMC + 28(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
22:
	/* save FP state */
	mr	r3, r9
	bl	.kvmppc_save_fp

	/* Secondary threads go off to take a nap on POWER7 */
BEGIN_FTR_SECTION
	lwz	r0,VCPU_PTID(r3)
	cmpwi	r0,0
	bne	secondary_nap
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	cmpwi	r4, 0
	beq	23f			/* skip if not */
	lwz	r3, HSTATE_PMC(r13)
	lwz	r4, HSTATE_PMC + 4(r13)
	lwz	r5, HSTATE_PMC + 8(r13)
	lwz	r6, HSTATE_PMC + 12(r13)
	lwz	r8, HSTATE_PMC + 16(r13)
	lwz	r9, HSTATE_PMC + 20(r13)
BEGIN_FTR_SECTION
	lwz	r10, HSTATE_PMC + 24(r13)
	lwz	r11, HSTATE_PMC + 28(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
BEGIN_FTR_SECTION
	mtspr	SPRN_PMC7, r10
	mtspr	SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, HSTATE_MMCR(r13)
	ld	r4, HSTATE_MMCR + 8(r13)
	ld	r5, HSTATE_MMCR + 16(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_MMCR0, r3
	isync
23:
	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to the interrupt vector address
	 * which we have in r12.  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_interrupts.S code here.
	 */
	ld	r8, HSTATE_VMHANDLER(r13)
	ld	r7, HSTATE_HOST_MSR(r13)

	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	11f
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK

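	/* cr0 now says whether this was a machine check; if so the
	 * beqctr below branches to the vector address held in CTR,
	 * otherwise we RFI to the highmem handler */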
	/* RFI into the highmem handler, or branch to interrupt handler */
12:	mfmsr	r6
	mtctr	r12
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	beqctr
	RFI

11:
BEGIN_FTR_SECTION
	b	12b
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0x500

6:	mfspr	r6,SPRN_HDAR
	mfspr	r7,SPRN_HDSISR
	b	7b

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 */
	.globl	hcall_try_real_mode
hcall_try_real_mode:
	ld	r3,VCPU_GPR(r3)(r9)
	andi.	r0,r11,MSR_PR
	bne	hcall_real_cont
	clrrdi	r3,r3,2
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	bge	hcall_real_cont
	LOAD_REG_ADDR(r4, hcall_real_table)
	lwzx	r3,r3,r4
	cmpwi	r3,0
	beq	hcall_real_cont
	add	r3,r3,r4
	mtctr	r3
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(r4)(r9)
	bctrl
	cmpdi	r3,H_TOO_HARD
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(r3)(r4)
	ld	r10,VCPU_PC(r4)
	ld	r11,VCPU_MSR(r4)
	b	fast_guest_return

	/* We've attempted a real mode hcall, but it has been punted back
	 * to userspace.  We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r11, VCPU_MSR(r9)

	b	hcall_real_cont

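/*
 * Table of offsets (from the start of the table) of the real-mode
 * hcall handlers.  Hcall numbers are multiples of 4 and are used
 * directly as byte indexes into this table of 32-bit entries; a zero
 * entry means the hcall is passed up to the kernel instead.
 */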
	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	.kvmppc_h_remove - hcall_real_table
	.long	.kvmppc_h_enter - hcall_real_table
	.long	.kvmppc_h_read - hcall_real_table
	.long	0		/* 0x10 - H_CLEAR_MOD */
	.long	0		/* 0x14 - H_CLEAR_REF */
	.long	.kvmppc_h_protect - hcall_real_table
	.long	0		/* 0x1c - H_GET_TCE */
	.long	.kvmppc_h_put_tce - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	.kvmppc_h_set_dabr - hcall_real_table
	.long	0		/* 0x2c */
	.long	0		/* 0x30 */
	.long	0		/* 0x34 */
	.long	0		/* 0x38 */
	.long	0		/* 0x3c */
	.long	0		/* 0x40 */
	.long	0		/* 0x44 */
	.long	0		/* 0x48 */
	.long	0		/* 0x4c */
	.long	0		/* 0x50 */
	.long	0		/* 0x54 */
	.long	0		/* 0x58 */
	.long	0		/* 0x5c */
	.long	0		/* 0x60 */
	.long	0		/* 0x64 */
	.long	0		/* 0x68 */
	.long	0		/* 0x6c */
	.long	0		/* 0x70 */
	.long	0		/* 0x74 */
	.long	0		/* 0x78 */
	.long	0		/* 0x7c */
	.long	0		/* 0x80 */
	.long	0		/* 0x84 */
	.long	0		/* 0x88 */
	.long	0		/* 0x8c */
	.long	0		/* 0x90 */
	.long	0		/* 0x94 */
	.long	0		/* 0x98 */
	.long	0		/* 0x9c */
	.long	0		/* 0xa0 */
	.long	0		/* 0xa4 */
	.long	0		/* 0xa8 */
	.long	0		/* 0xac */
	.long	0		/* 0xb0 */
	.long	0		/* 0xb4 */
	.long	0		/* 0xb8 */
	.long	0		/* 0xbc */
	.long	0		/* 0xc0 */
	.long	0		/* 0xc4 */
	.long	0		/* 0xc8 */
	.long	0		/* 0xcc */
	.long	0		/* 0xd0 */
	.long	0		/* 0xd4 */
	.long	0		/* 0xd8 */
	.long	0		/* 0xdc */
	.long	0		/* 0xe0 */
	.long	0		/* 0xe4 */
	.long	0		/* 0xe8 */
	.long	0		/* 0xec */
	.long	0		/* 0xf0 */
	.long	0		/* 0xf4 */
	.long	0		/* 0xf8 */
	.long	0		/* 0xfc */
	.long	0		/* 0x100 */
	.long	0		/* 0x104 */
	.long	0		/* 0x108 */
	.long	0		/* 0x10c */
	.long	0		/* 0x110 */
	.long	0		/* 0x114 */
	.long	0		/* 0x118 */
	.long	0		/* 0x11c */
	.long	0		/* 0x120 */
	.long	.kvmppc_h_bulk_remove - hcall_real_table
hcall_real_table_end:

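/* A leftover HDEC interrupt: HDEC is no longer negative, so just
 * go straight back into the guest */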
ignore_hdec:
	mr	r4,r9
	b	fast_guest_return

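/* Reflect a mediated external interrupt into the guest: save the
 * guest PC/MSR in SRR0/SRR1 and resume the guest at its 0x500 vector
 * with only MSR_SF and MSR_ME set */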
bounce_ext_interrupt:
	mr	r4,r9
	mtspr	SPRN_SRR0,r10
	mtspr	SPRN_SRR1,r11
	li	r10,BOOK3S_INTERRUPT_EXTERNAL
	LOAD_REG_IMMEDIATE(r11,MSR_SF | MSR_ME);
	b	fast_guest_return

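/* H_SET_DABR hcall: record the new DABR value in the vcpu struct,
 * set the SPR, and return H_SUCCESS (0) in r3 */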
_GLOBAL(kvmppc_h_set_dabr)
	std	r4,VCPU_DABR(r3)
	mtspr	SPRN_DABR,r4
	li	r3,0
	blr

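/*
 * A secondary thread tried to enter the guest after the other threads
 * had started exiting: wait for the primary to switch back to the host
 * partition, restore the bolted host SLB entries, then clear any
 * pending IPI and nap with the other secondaries.
 */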
secondary_too_late:
	ld	r5,HSTATE_KVM_VCORE(r13)
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	ld	r11,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	ld	r5,SLBSHADOW_SAVEAREA(r11)
	ld	r6,SLBSHADOW_SAVEAREA+8(r11)
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r11,r11,16
	.endr
	b	50f

secondary_nap:
	/* Clear any pending IPI */
50:	ld	r5, HSTATE_XICS_PHYS(r13)
	li	r0, 0xff
	li	r6, XICS_QIRR
	stbcix	r0, r5, r6

	/* increment the nap count and then go to nap mode */
	ld	r4, HSTATE_KVM_VCORE(r13)
	addi	r4, r4, VCORE_NAP_COUNT
	lwsync				/* make previous updates visible */
51:	lwarx	r3, 0, r4
	addi	r3, r3, 1
	stwcx.	r3, 0, r4
	bne	51b
	isync

	mfspr	r4, SPRN_LPCR
	li	r0, LPCR_PECE
	andc	r4, r4, r0
	ori	r4, r4, LPCR_PECE0	/* exit nap on interrupt */
	mtspr	SPRN_LPCR, r4
	li	r0, 0
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.

/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 */
_GLOBAL(kvmppc_save_fp)
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r6,reg*16+VCPU_VSRS
	STXVD2X(reg,r6,r3)
	reg = reg + 1
	.endr
FTR_SECTION_ELSE
#endif
	reg = 0
	.rept	32
	stfd	reg,reg*8+VCPU_FPRS(r3)
	reg = reg + 1
	.endr
#ifdef CONFIG_VSX
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#endif
	mffs	fr0
	stfd	fr0,VCPU_FPSCR(r3)

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r6,reg*16+VCPU_VRS
	stvx	reg,r6,r3
	reg = reg + 1
	.endr
	mfvscr	vr0
	li	r6,VCPU_VSCR
	stvx	vr0,r6,r3
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r3)
	mtmsrd	r9
	isync
	blr

/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 */
	.globl	kvmppc_load_fp
kvmppc_load_fp:
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
	lfd	fr0,VCPU_FPSCR(r4)
	MTFSF_L(fr0)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r7,reg*16+VCPU_VSRS
	LXVD2X(reg,r7,r4)
	reg = reg + 1
	.endr
FTR_SECTION_ELSE
#endif
	reg = 0
	.rept	32
	lfd	reg,reg*8+VCPU_FPRS(r4)
	reg = reg + 1
	.endr
#ifdef CONFIG_VSX
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#endif

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	li	r7,VCPU_VSCR
	lvx	vr0,r7,r4
	mtvscr	vr0
	reg = 0
	.rept	32
	li	r7,reg*16+VCPU_VRS
	lvx	reg,r7,r4
	reg = reg + 1
	.endr
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r7
	blr
