/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/mmu-hash64.h>

/*****************************************************************************
 *                                                                           *
 *        Real Mode handlers that need to be in the linear mapping           *
 *                                                                           *
 ****************************************************************************/

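/*
 * These skip handlers advance (H)SRR0 past the instruction that
 * trapped, restore r13 from the scratch SPR and return with
 * rfid/hrfid, so the interrupted code resumes at the following
 * instruction.
 */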
	.globl	kvmppc_skip_interrupt
kvmppc_skip_interrupt:
	mfspr	r13,SPRN_SRR0
	addi	r13,r13,4
	mtspr	SPRN_SRR0,r13
	GET_SCRATCH0(r13)
	rfid
	b	.

	.globl	kvmppc_skip_Hinterrupt
kvmppc_skip_Hinterrupt:
	mfspr	r13,SPRN_HSRR0
	addi	r13,r13,4
	mtspr	SPRN_HSRR0,r13
	GET_SCRATCH0(r13)
	hrfid
	b	.

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL(kvmppc_hv_entry_trampoline)
	mfmsr	r10
	LOAD_REG_ADDR(r5, kvmppc_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
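	/*
	 * Clear RI to mark the state unrecoverable while SRR0/1 hold
	 * our target, then RFI to kvmppc_hv_entry with IR and DR
	 * clear, i.e. in real mode.
	 */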
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

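/*
 * Offsets of the XIRR (external interrupt request) and QIRR (queued
 * IPI request) registers within the XICS per-thread presentation
 * area, and the interrupt source number used for IPIs.
 */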
#define XICS_XIRR		4
#define XICS_QIRR		0xc
#define XICS_IPI		2	/* interrupt source # for IPIs */

/*
 * We come in here when woken from nap mode on a secondary hw thread.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest
kvm_start_guest:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD
	ld	r2,PACATOC(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/* get vcpu pointer, NULL if we have no vcpu to run */
	ld	r4,HSTATE_KVM_VCPU(r13)
	cmpdi	cr1,r4,0

	/* Check the wake reason in SRR1 to see why we got here */
	mfspr	r3,SPRN_SRR1
	rlwinm	r3,r3,44-31,0x7		/* extract wake reason field */
	cmpwi	r3,4			/* was it an external interrupt? */
	bne	27f

	/*
	 * External interrupt - for now assume it is an IPI, since we
	 * should never get any other interrupts sent to offline threads.
	 * Only do this for secondary threads.
	 */
	beq	cr1,25f
	lwz	r3,VCPU_PTID(r4)
	cmpwi	r3,0
	beq	27f
25:	ld	r5,HSTATE_XICS_PHYS(r13)
	li	r0,0xff
	li	r6,XICS_QIRR
	li	r7,XICS_XIRR
	lwzcix	r8,r5,r7		/* get and ack the interrupt */
	sync
	clrldi.	r9,r8,40		/* get interrupt source ID. */
	beq	27f			/* none there? */
	cmpwi	r9,XICS_IPI
	bne	26f
	stbcix	r0,r5,r6		/* clear IPI */
26:	stwcix	r8,r5,r7		/* EOI the interrupt */

27:	/* XXX should handle hypervisor maintenance interrupts etc. here */

	/* reload vcpu pointer after clearing the IPI */
	ld	r4,HSTATE_KVM_VCPU(r13)
	cmpdi	r4,0
	/* if we have no vcpu to run, go back to sleep */
	beq	kvm_no_guest

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,0
	bne	kvm_end_cede

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * all other volatile GPRS = free
	 */
	mflr	r0
	std	r0, HSTATE_VMHANDLER(r13)

	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	li	r5,3
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6
BEGIN_FTR_SECTION
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
BEGIN_FTR_SECTION
	lwz	r10, VCPU_PMC + 24(r4)
	lwz	r11, VCPU_PMC + 28(r4)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
BEGIN_FTR_SECTION
	mtspr	SPRN_PMC7, r10
	mtspr	SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_MMCR0, r3
	isync

	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

BEGIN_FTR_SECTION
	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3
	stw	r3,VCPU_DEC(r4)
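	/* i.e. DEC = dec_expires - current timebase, so the guest's
	   decrementer fires at the expiry time it expects */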

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	lwz	r5, LPPACA_YIELDCOUNT(r3)
	addi	r5, r5, 1
	stw	r5, LPPACA_YIELDCOUNT(r3)
25:
	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

BEGIN_FTR_SECTION
	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Clear out SLB */
	li	r6,0
	slbmte	r6,r6
	slbia
	ptesync

BEGIN_FTR_SECTION
	b	30f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	/*
	 * POWER7 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Increment entry count iff exit count is zero. */
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r9,r5,VCORE_ENTRY_EXIT
21:	lwarx	r3,0,r9
	cmpwi	r3,0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	addi	r3,r3,1
	stwcx.	r3,0,r9
	bne	21b

	/* Primary thread switches to guest partition. */
	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */
	lwz	r6,VCPU_PTID(r4)
	cmpwi	r6,0
	bne	20f
	ld	r6,KVM_SDR1(r9)
	lwz	r7,KVM_LPID(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync

	/* See if we need to flush the TLB */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	add	r6,r6,r9
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
	li	r0,1
	sld	r0,r0,r7
	ld	r7,0(r6)
	and.	r7,r7,r0
	beq	22f
23:	ldarx	r7,0,r6			/* if set, clear the bit */
	andc	r7,r7,r0
	stdcx.	r7,0,r6
	bne	23b
	li	r6,128			/* and flush the TLB */
	mtctr	r6
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
28:	tlbiel	r7
	addi	r7,r7,0x1000
	bdnz	28b
	ptesync
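	/* the tlbiel loop above steps r7 through all 128 TLB
	   congruence classes, flushing this LPID's entries from each */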

22:	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */
	b	10f

	/* Secondary threads wait for primary to have done partition switch */
20:	lbz	r0,VCORE_IN_GUEST(r5)
	cmpwi	r0,0
	beq	20b

	/* Set LPCR and RMOR. */
10:	ld	r8,KVM_LPCR(r9)
	mtspr	SPRN_LPCR,r8
	ld	r8,KVM_RMOR(r9)
	mtspr	SPRN_RMOR,r8
	isync

	/* Check if HDEC expires soon */
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,10
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	mr	r9,r4
	blt	hdec_soon

	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8
	b	31f

	/*
	 * PPC970 host -> guest partition switch code.
	 * We have to lock against concurrent tlbies,
	 * using native_tlbie_lock to lock against host tlbies
	 * and kvm->arch.tlbie_lock to lock against guest tlbies.
	 * We also have to invalidate the TLB since its
	 * entries aren't tagged with the LPID.
	 */
30:	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */

	/* first take native_tlbie_lock */
	.section ".toc","aw"
toc_tlbie_lock:
	.tc	native_tlbie_lock[TC],native_tlbie_lock
	.previous
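	/* the TOC entry above gives us the address of native_tlbie_lock,
	   reachable via the kernel TOC (r2) even in real mode */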
	ld	r3,toc_tlbie_lock@toc(2)
	lwz	r8,PACA_LOCK_TOKEN(r13)
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r7,KVM_LPCR(r9)		/* use kvm->arch.lpcr to store HID4 */
	li	r0,0x18f
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	or	r0,r7,r0
	ptesync
	sync
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop native_tlbie_lock */

	/* invalidate the whole TLB */
	li	r0,256
	mtctr	r0
	li	r6,0
25:	tlbiel	r6
	addi	r6,r6,0x1000
	bdnz	25b
	ptesync

	/* Take the guest's tlbie_lock */
	addi	r3,r9,KVM_TLBIE_LOCK
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync
	ld	r6,KVM_SDR1(r9)
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */

	/* Set up HID4 with the guest's LPID etc. */
	sync
	mtspr	SPRN_HID4,r7
	isync

	/* drop the guest's tlbie_lock */
	li	r0,0
	stw	r0,0(r3)

	/* Check if HDEC expires soon */
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,10
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	mr	r9,r4
	blt	hdec_soon

	/* Enable HDEC interrupts */
	mfspr	r0,SPRN_HID0
	li	r3,1
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	sync
	mtspr	SPRN_HID0,r0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
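	/* the sync before and the six HID0 reads after the mtspr
	   follow the PPC970's required HID0 update sequence */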

	/* Load up guest SLB entries */
31:	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	ld	r6, VCPU_CTR(r4)
	lwz	r7, VCPU_XER(r4)

	mtctr	r6
	mtxer	r7

kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)	/* r11 = vcpu->arch.msr & ~MSR_HV */

	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME
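	/* the rldicl/rotldi pair clears MSR_HV; MSR_ME is forced on so
	   the guest always runs with machine checks enabled */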

	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0,VCPU_PENDING_EXC(r4)
	li	r8,(1 << BOOK3S_IRQPRIO_EXTERNAL)
	oris	r8,r8,(1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
	and	r0,r0,r8
	cmpdi	cr1,r0,0
	andi.	r0,r11,MSR_EE
	beq	cr1,11f
BEGIN_FTR_SECTION
	mfspr	r8,SPRN_LPCR
	ori	r8,r8,LPCR_MER
	mtspr	SPRN_LPCR,r8
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	beq	5f
	li	r0,BOOK3S_INTERRUPT_EXTERNAL
12:	mr	r6,r10
	mr	r10,r0
	mr	r7,r11
	li	r11,(MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11,r11,63
	b	5f
11:	beq	5f
	mfspr	r0,SPRN_DEC
	cmpwi	r0,0
	li	r0,BOOK3S_INTERRUPT_DECREMENTER
	blt	12b

	/* Move SRR0 and SRR1 into the respective regs */
5:	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */

fast_guest_return:
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST
	stb	r9, HSTATE_IN_GUEST(r13)

	/* Enter guest */

BEGIN_FTR_SECTION
	ld	r5, VCPU_CFAR(r4)
	mtspr	SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)

	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r0, VCPU_GPR(R0)(r4)
	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)

	ld	r4, VCPU_GPR(R4)(r4)

	hrfid
	b	.

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt
kvmppc_interrupt:
	/*
	 * Register contents:
	 * R12		= interrupt vector
	 * R13		= PACA
	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	/* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
	std	r9, HSTATE_HOST_R2(r13)
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_HOST_R2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	lwz	r4, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_GPR(R12)(r9)
	stw	r4, VCPU_CR(r9)
BEGIN_FTR_SECTION
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
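	/* bit 1 of r12 flagged that this interrupt uses HSRR0/1;
	   clear the low bits to leave the plain vector number */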
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(R13)(r9)
	std	r4, VCPU_LR(r9)

	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	stw	r12,VCPU_TRAP(r9)

	/* Save HEIR (HV emulation assist reg) in last_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
BEGIN_FTR_SECTION
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
11:	stw	r3,VCPU_LAST_INST(r9)

	/* these are volatile across C function calls */
	mfctr	r3
	mfxer	r4
	std	r3, VCPU_CTR(r9)
	stw	r4, VCPU_XER(r9)

BEGIN_FTR_SECTION
	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	kvmppc_hdsi
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	beq	kvmppc_hisi
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,0
	bge	ignore_hdec
2:
	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Check for mediated interrupts (could be done earlier really ...) */
BEGIN_FTR_SECTION
	cmpwi	r12,BOOK3S_INTERRUPT_EXTERNAL
	bne+	1f
	andi.	r0,r11,MSR_EE
	beq	1f
	mfspr	r5,SPRN_LPCR
	andi.	r0,r5,LPCR_MER
	bne	bounce_ext_interrupt
1:
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
	/* Save DEC */
	mfspr	r5,SPRN_DEC
	mftb	r6
	extsw	r5,r5
	add	r5,r5,r6
	std	r5,VCPU_DEC_EXPIRES(r9)

	/* Save more register state  */
	mfdar	r6
	mfdsisr	r7
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
BEGIN_FTR_SECTION
	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	6f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
mc_cont:

	/* Save guest CTRL register, set runlatch to 1 */
6:	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	ori	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
	li	r5,0
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
	stw	r5,VCPU_SLB_MAX(r9)

	/*
	 * Save the guest PURR/SPURR
	 */
BEGIN_FTR_SECTION
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	ld	r7,VCPU_PURR(r9)
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	subf	r5,r7,r5
	subf	r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	add	r3,r3,r5
	add	r4,r4,r6
	mtspr	SPRN_PURR,r3
	mtspr	SPRN_SPURR,r4
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)

	/* Clear out SLB */
	li	r5,0
	slbmte	r5,r5
	slbia
	ptesync

hdec_soon:			/* r9 = vcpu, r12 = trap, r13 = paca */
BEGIN_FTR_SECTION
	b	32f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	/*
	 * POWER7 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
	/* Increment the threads-exiting-guest count in the 0xff00
	   bits of vcore->entry_exit_count */
	lwsync
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r6,r5,VCORE_ENTRY_EXIT
41:	lwarx	r3,0,r6
	addi	r0,r3,0x100
	stwcx.	r0,0,r6
	bne	41b
	lwsync

	/*
	 * At this point we have an interrupt that we have to pass
	 * up to the kernel or qemu; we can't handle it in real mode.
	 * Thus we have to do a partition switch, so we have to
	 * collect the other threads, if we are the first thread
	 * to take an interrupt.  To do this, we set the HDEC to 0,
	 * which causes an HDEC interrupt in all threads within 2ns
	 * because the HDEC register is shared between all 4 threads.
	 * However, we don't need to bother if this is an HDEC
	 * interrupt, since the other threads will already be on their
	 * way here in that case.
	 */
	cmpwi	r3,0x100	/* Are we the first here? */
	bge	43f
	cmpwi	r3,1		/* Are any other threads in the guest? */
	ble	43f
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	beq	40f
	li	r0,0
	mtspr	SPRN_HDEC,r0
40:
	/*
	 * Send an IPI to any napping threads, since an HDEC interrupt
	 * doesn't wake CPUs up from nap.
	 */
	lwz	r3,VCORE_NAPPING_THREADS(r5)
	lwz	r4,VCPU_PTID(r9)
	li	r0,1
	sld	r0,r0,r4
	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */
	beq	43f
	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */
	subf	r6,r4,r13
42:	andi.	r0,r3,1
	beq	44f
	ld	r8,HSTATE_XICS_PHYS(r6)	/* get thread's XICS reg addr */
	li	r0,IPI_PRIORITY
	li	r7,XICS_QIRR
	stbcix	r0,r7,r8		/* trigger the IPI */
44:	srdi.	r3,r3,1
	addi	r6,r6,PACA_SIZE
	bne	42b

	/* Secondary threads wait for primary to do partition switch */
43:	ld	r4,VCPU_KVM(r9)		/* pointer to struct kvm */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lwz	r3,VCPU_PTID(r9)
	cmpwi	r3,0
	beq	15f
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	b	16f

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	srwi	r0,r3,8
	clrldi	r3,r3,56
	cmpw	r3,r0
	bne	15b
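	/* i.e. spin until the exit count (bits 8-15) equals the entry
	   count (bits 0-7) of entry_exit_count */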
	isync

	/* Primary thread switches back to host partition */
	ld	r6,KVM_HOST_SDR1(r4)
	lwz	r7,KVM_HOST_LPID(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r8
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync
	li	r0,0
	stb	r0,VCORE_IN_GUEST(r5)
	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

16:	ld	r8,KVM_HOST_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync
	b	33f

	/*
	 * PPC970 guest -> host partition switch code.
	 * We have to lock against concurrent tlbies, and
	 * we have to flush the whole TLB.
	 */
32:	ld	r4,VCPU_KVM(r9)		/* pointer to struct kvm */

	/* Take the guest's tlbie_lock */
	lwz	r8,PACA_LOCK_TOKEN(r13)
	addi	r3,r4,KVM_TLBIE_LOCK
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r7,KVM_HOST_LPCR(r4)	/* use kvm->arch.host_lpcr for HID4 */
	li	r0,0x18f
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	or	r0,r7,r0
	ptesync
	sync
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop guest tlbie_lock */

	/* invalidate the whole TLB */
	li	r0,256
	mtctr	r0
	li	r6,0
25:	tlbiel	r6
	addi	r6,r6,0x1000
	bdnz	25b
	ptesync

	/* take native_tlbie_lock */
	ld	r3,toc_tlbie_lock@toc(2)
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r6,KVM_HOST_SDR1(r4)
	mtspr	SPRN_SDR1,r6		/* switch to host page table */

	/* Set up host HID4 value */
	sync
	mtspr	SPRN_HID4,r7
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop native_tlbie_lock */

	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

	/* Disable HDEC interrupts */
	mfspr	r0,SPRN_HID0
	li	r3,0
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	sync
	mtspr	SPRN_HID0,r0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0

	/* load host SLB entries */
33:	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	ld	r5,SLBSHADOW_SAVEAREA(r8)
	ld	r6,SLBSHADOW_SAVEAREA+8(r8)
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr

	/* Save and reset AMR and UAMOR before turning on the MMU */
BEGIN_FTR_SECTION
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	li	r6,0
	mtspr	SPRN_AMR,r6
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Switch DSCR back to host value */
BEGIN_FTR_SECTION
	mfspr	r8, SPRN_DSCR
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)	/* save guest DSCR in the vcpu */
	mtspr	SPRN_DSCR, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	/* Save SPRGs */
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

	/* save FP state */
	mr	r3, r9
	bl	.kvmppc_save_fp

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi	r8, 0
	beq	25f
	lwz	r3, LPPACA_YIELDCOUNT(r8)
	addi	r3, r3, 1
	stw	r3, LPPACA_YIELDCOUNT(r8)
25:
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
BEGIN_FTR_SECTION
	/* On P7, clear MMCRA in order to disable SDAR updates */
	li	r7, 0
	mtspr	SPRN_MMCRA, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	isync
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
BEGIN_FTR_SECTION
	mfspr	r10, SPRN_PMC7
	mfspr	r11, SPRN_PMC8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	stw	r10, VCPU_PMC + 24(r9)
	stw	r11, VCPU_PMC + 28(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
22:

	/* Secondary threads go off to take a nap on POWER7 */
BEGIN_FTR_SECTION
	lwz	r0,VCPU_PTID(r9)
	cmpwi	r0,0
	bne	secondary_nap
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG3(r13)
	mtspr	SPRN_SPRG3,r3

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	cmpwi	r4, 0
	beq	23f			/* skip if not */
	lwz	r3, HSTATE_PMC(r13)
	lwz	r4, HSTATE_PMC + 4(r13)
	lwz	r5, HSTATE_PMC + 8(r13)
	lwz	r6, HSTATE_PMC + 12(r13)
	lwz	r8, HSTATE_PMC + 16(r13)
	lwz	r9, HSTATE_PMC + 20(r13)
BEGIN_FTR_SECTION
	lwz	r10, HSTATE_PMC + 24(r13)
	lwz	r11, HSTATE_PMC + 28(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
BEGIN_FTR_SECTION
	mtspr	SPRN_PMC7, r10
	mtspr	SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, HSTATE_MMCR(r13)
	ld	r4, HSTATE_MMCR + 8(r13)
	ld	r5, HSTATE_MMCR + 16(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_MMCR0, r3
	isync
23:
	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to absolute address 0x500 for
	 * external interrupts, or the machine_check_fwnmi label
	 * for machine checks (since firmware might have patched
	 * the vector area at 0x200).  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_hv_interrupts.S code here.
	 */
	ld	r8, HSTATE_VMHANDLER(r13)
	ld	r7, HSTATE_HOST_MSR(r13)

	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
BEGIN_FTR_SECTION
	beq	11f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* RFI into the highmem handler, or branch to interrupt handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	beqa	0x500			/* external interrupt (PPC970) */
	beq	cr1, 13f		/* machine check */
	RFI

	/* On POWER7, we have external interrupts set to use HSRR0/1 */
11:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0x500

13:	b	machine_check_fwnmi

/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path.  In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
kvmppc_hdsi:
	mfspr	r4, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	beq	3f
	clrrdi	r0, r4, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
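	/* r4 rounded down to a 256MB boundary is the ESID; slbfee.
	   returns the matching SLB entry's VSID data in r5 */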
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	.kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	6f
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont
	cmpdi	r3, -2			/* MMIO emulation; need instr word */
	beq	2f

	/* Synthesize a DSI for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
	mr	r6, r3
1:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_DSISR, r6
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_DATA_STORAGE
	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11, r11, 63
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
	lwz	r8, VCPU_XER(r9)
	mtctr	r7
	mtxer	r8
	mr	r4, r9
	b	fast_guest_return

3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)
	b	4b

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	mfmsr	r3
	ori	r4, r3, MSR_DR		/* Enable paging for data */
	mtmsrd	r4
	lwz	r8, 0(r10)
	mtmsrd	r3

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)
	b	guest_exit_cont

/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
kvmppc_hisi:
	andis.	r0, r11, SRR1_ISI_NOPT@h
	beq	1f
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	beq	3f
	clrrdi	r0, r10, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
4:
	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	mr	r4, r10
	mr	r6, r11
	li	r7, 0			/* instruction fault */
	bl	.kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont

	/* Synthesize an ISI for the guest */
	mr	r11, r3
1:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_INST_STORAGE
	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11, r11, 63
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)
	b	4b

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 */
	.globl	hcall_try_real_mode
hcall_try_real_mode:
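	/*
	 * hcall numbers are multiples of 4, so the hcall number in r3
	 * indexes hcall_real_table directly; each entry is a 32-bit
	 * offset from the start of the table, and 0 means there is no
	 * real-mode handler.
	 */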
	ld	r3,VCPU_GPR(R3)(r9)
	andi.	r0,r11,MSR_PR
	bne	guest_exit_cont
	clrrdi	r3,r3,2
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	bge	guest_exit_cont
	LOAD_REG_ADDR(r4, hcall_real_table)
	lwzx	r3,r3,r4
	cmpwi	r3,0
	beq	guest_exit_cont
	add	r3,r3,r4
	mtctr	r3
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	bctrl
	cmpdi	r3,H_TOO_HARD
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)
	ld	r10,VCPU_PC(r4)
	ld	r11,VCPU_MSR(r4)
	b	fast_guest_return

	/* We've attempted a real mode hcall, but the handler punted it
	 * back to userspace.  We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)

	b	guest_exit_cont

	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	.kvmppc_h_remove - hcall_real_table
	.long	.kvmppc_h_enter - hcall_real_table
	.long	.kvmppc_h_read - hcall_real_table
	.long	0		/* 0x10 - H_CLEAR_MOD */
	.long	0		/* 0x14 - H_CLEAR_REF */
	.long	.kvmppc_h_protect - hcall_real_table
	.long	0		/* 0x1c - H_GET_TCE */
	.long	.kvmppc_h_put_tce - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	.kvmppc_h_set_dabr - hcall_real_table
	.long	0		/* 0x2c */
	.long	0		/* 0x30 */
	.long	0		/* 0x34 */
	.long	0		/* 0x38 */
	.long	0		/* 0x3c */
	.long	0		/* 0x40 */
	.long	0		/* 0x44 */
	.long	0		/* 0x48 */
	.long	0		/* 0x4c */
	.long	0		/* 0x50 */
	.long	0		/* 0x54 */
	.long	0		/* 0x58 */
	.long	0		/* 0x5c */
	.long	0		/* 0x60 */
	.long	0		/* 0x64 */
	.long	0		/* 0x68 */
	.long	0		/* 0x6c */
	.long	0		/* 0x70 */
	.long	0		/* 0x74 */
	.long	0		/* 0x78 */
	.long	0		/* 0x7c */
	.long	0		/* 0x80 */
	.long	0		/* 0x84 */
	.long	0		/* 0x88 */
	.long	0		/* 0x8c */
	.long	0		/* 0x90 */
	.long	0		/* 0x94 */
	.long	0		/* 0x98 */
	.long	0		/* 0x9c */
	.long	0		/* 0xa0 */
	.long	0		/* 0xa4 */
	.long	0		/* 0xa8 */
	.long	0		/* 0xac */
	.long	0		/* 0xb0 */
	.long	0		/* 0xb4 */
	.long	0		/* 0xb8 */
	.long	0		/* 0xbc */
	.long	0		/* 0xc0 */
	.long	0		/* 0xc4 */
	.long	0		/* 0xc8 */
	.long	0		/* 0xcc */
	.long	0		/* 0xd0 */
	.long	0		/* 0xd4 */
	.long	0		/* 0xd8 */
	.long	0		/* 0xdc */
	.long	.kvmppc_h_cede - hcall_real_table
	.long	0		/* 0xe4 */
	.long	0		/* 0xe8 */
	.long	0		/* 0xec */
	.long	0		/* 0xf0 */
	.long	0		/* 0xf4 */
	.long	0		/* 0xf8 */
	.long	0		/* 0xfc */
	.long	0		/* 0x100 */
	.long	0		/* 0x104 */
	.long	0		/* 0x108 */
	.long	0		/* 0x10c */
	.long	0		/* 0x110 */
	.long	0		/* 0x114 */
	.long	0		/* 0x118 */
	.long	0		/* 0x11c */
	.long	0		/* 0x120 */
	.long	.kvmppc_h_bulk_remove - hcall_real_table
hcall_real_table_end:

ignore_hdec:
	mr	r4,r9
	b	fast_guest_return

bounce_ext_interrupt:
	mr	r4,r9
	mtspr	SPRN_SRR0,r10
	mtspr	SPRN_SRR1,r11
	li	r10,BOOK3S_INTERRUPT_EXTERNAL
	li	r11,(MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11,r11,63
	b	fast_guest_return

_GLOBAL(kvmppc_h_set_dabr)
	std	r4,VCPU_DABR(r3)
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4
	mfspr	r5, SPRN_DABR
	cmpd	r4, r5
	bne	1b
	isync
	li	r3,0
	blr

_GLOBAL(kvmppc_h_cede)
	ori	r11,r11,MSR_EE
	std	r11,VCPU_MSR(r3)
	li	r0,1
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	cmpwi	r5,0
	bne	kvm_cede_prodded
	li	r0,0		/* set trap to 0 to say hcall is handled */
	stw	r0,VCPU_TRAP(r3)
	li	r0,H_SUCCESS
	std	r0,VCPU_GPR(R3)(r3)
BEGIN_FTR_SECTION
	b	kvm_cede_exit	/* just send it up to host on 970 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)

	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lwz	r6,VCPU_PTID(r3)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	clrldi	r8,r8,56
	li	r0,1
	sld	r0,r0,r6
	addi	r6,r5,VCORE_NAPPING_THREADS
31:	lwarx	r4,0,r6
	or	r4,r4,r0
	PPC_POPCNTW(R7,R4)
	cmpw	r7,r8
	bge	kvm_cede_exit
	stwcx.	r4,0,r6
	bne	31b
	li	r0,1
	stb	r0,HSTATE_NAPPING(r13)
	/* order napping_threads update vs testing entry_exit_count */
	lwsync
	mr	r4,r3
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	cmpwi	r7,0x100
	bge	33f		/* another thread already exiting */

/*
 * Although not specifically required by the architecture, POWER7
 * preserves the following registers in nap mode, even if an SMT mode
 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
 */
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)

	/* save FP state */
	bl	.kvmppc_save_fp

	/*
	 * Take a nap until a decrementer or external interrupt occurs,
	 * with PECE1 (wake on decr) and PECE0 (wake on external) set in LPCR
	 */
	li	r0,1
	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	mfspr	r5,SPRN_LPCR
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR,r5
	isync
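	/*
	 * The dummy store, ptesync, load and compare below ensure
	 * that all our previous stores are globally visible before
	 * we enter nap.
	 */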
	li	r0, 0
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.

kvm_end_cede:
	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)

	/* load up FP state */
	bl	kvmppc_load_fp

	/* Load NV GPRS */
	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* clear our bit in vcore->napping_threads */
33:	ld	r5,HSTATE_KVM_VCORE(r13)
	lwz	r3,VCPU_PTID(r4)
	li	r0,1
	sld	r0,r0,r3
	addi	r6,r5,VCORE_NAPPING_THREADS
32:	lwarx	r7,0,r6
	andc	r7,r7,r0
	stwcx.	r7,0,r6
	bne	32b
	li	r0,0
	stb	r0,HSTATE_NAPPING(r13)

	/* see if any other thread is already exiting */
	lwz	r0,VCORE_ENTRY_EXIT(r5)
	cmpwi	r0,0x100
	blt	kvmppc_cede_reentry	/* if not go back to guest */

	/* some threads are exiting, so go to the guest exit path */
	b	hcall_real_fallback

	/* the case where we cede but have already been prodded */
kvm_cede_prodded:
	li	r0,0
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)
	li	r3,H_SUCCESS
	blr

	/* we've ceded but we want to give control to the host */
kvm_cede_exit:
	li	r3,H_TOO_HARD
	blr

	/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	.kvmppc_realmode_machine_check
	nop
	cmpdi	r3, 0		/* continue exiting from guest? */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	mc_cont
	/* If not, deliver a machine check.  SRR0/1 are already set */
	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11, r11, 63
	b	fast_interrupt_c_return

secondary_too_late:
	ld	r5,HSTATE_KVM_VCORE(r13)
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	ld	r11,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	ld	r5,SLBSHADOW_SAVEAREA(r11)
	ld	r6,SLBSHADOW_SAVEAREA+8(r11)
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r11,r11,16
	.endr

secondary_nap:
	/* Clear our vcpu pointer so we don't come back in early */
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)
	lwsync
	/* Clear any pending IPI - assume we're a secondary thread */
	ld	r5, HSTATE_XICS_PHYS(r13)
	li	r7, XICS_XIRR
	lwzcix	r3, r5, r7		/* ack any pending interrupt */
	rlwinm.	r0, r3, 0, 0xffffff	/* any pending? */
	beq	37f
	sync
	li	r0, 0xff
	li	r6, XICS_QIRR
	stbcix	r0, r5, r6		/* clear the IPI */
	stwcix	r3, r5, r7		/* EOI it */
37:	sync

	/* increment the nap count and then go to nap mode */
	ld	r4, HSTATE_KVM_VCORE(r13)
	addi	r4, r4, VCORE_NAP_COUNT
	lwsync				/* make previous updates visible */
51:	lwarx	r3, 0, r4
	addi	r3, r3, 1
	stwcx.	r3, 0, r4
	bne	51b

kvm_no_guest:
	li	r0, KVM_HWTHREAD_IN_NAP
	stb	r0, HSTATE_HWTHREAD_STATE(r13)

	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
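	/* wake only on external interrupts: PECE0 is set and PECE1
	   cleared, so the decrementer won't wake us */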
	mtspr	SPRN_LPCR, r4
	isync
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.

/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 */
_GLOBAL(kvmppc_save_fp)
	mfmsr	r5
	ori	r8,r5,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r6,reg*16+VCPU_VSRS
	STXVD2X(reg,R6,R3)
	reg = reg + 1
	.endr
FTR_SECTION_ELSE
#endif
	reg = 0
	.rept	32
	stfd	reg,reg*8+VCPU_FPRS(r3)
	reg = reg + 1
	.endr
#ifdef CONFIG_VSX
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#endif
	mffs	fr0
	stfd	fr0,VCPU_FPSCR(r3)

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r6,reg*16+VCPU_VRS
	stvx	reg,r6,r3
	reg = reg + 1
	.endr
	mfvscr	vr0
	li	r6,VCPU_VSCR
	stvx	vr0,r6,r3
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r3)
	mtmsrd	r5
	isync
	blr

/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 */
	.globl	kvmppc_load_fp
kvmppc_load_fp:
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
	lfd	fr0,VCPU_FPSCR(r4)
	MTFSF_L(fr0)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r7,reg*16+VCPU_VSRS
	LXVD2X(reg,R7,R4)
	reg = reg + 1
	.endr
FTR_SECTION_ELSE
#endif
	reg = 0
	.rept	32
	lfd	reg,reg*8+VCPU_FPRS(r4)
	reg = reg + 1
	.endr
#ifdef CONFIG_VSX
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#endif

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	li	r7,VCPU_VSCR
	lvx	vr0,r7,r4
	mtvscr	vr0
	reg = 0
	.rept	32
	li	r7,reg*16+VCPU_VRS
	lvx	reg,r7,r4
	reg = reg + 1
	.endr
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r7
	blr
