/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/mmu-hash64.h>

/*****************************************************************************
 *                                                                           *
 *        Real Mode handlers that need to be in the linear mapping           *
 *                                                                           *
 ****************************************************************************/

	.globl	kvmppc_skip_interrupt
kvmppc_skip_interrupt:
	mfspr	r13,SPRN_SRR0
	addi	r13,r13,4
	mtspr	SPRN_SRR0,r13
	GET_SCRATCH0(r13)
	rfid
	b	.

	.globl	kvmppc_skip_Hinterrupt
kvmppc_skip_Hinterrupt:
	mfspr	r13,SPRN_HSRR0
	addi	r13,r13,4
	mtspr	SPRN_HSRR0,r13
	GET_SCRATCH0(r13)
	hrfid
	b	.

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL(kvmppc_hv_entry_trampoline)
	mfmsr	r10
	LOAD_REG_ADDR(r5, kvmppc_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

#define XICS_XIRR		4
#define XICS_QIRR		0xc
#define XICS_IPI		2	/* interrupt source # for IPIs */
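
/*
 * A note on the XICS accesses below (PAPR XICS presentation controller
 * layout, as assumed here): a 4-byte load from XIRR both returns and
 * acknowledges the highest-priority pending interrupt, and storing the
 * same value back is the EOI.  QIRR is the byte-sized IPI priority
 * register; storing 0xff (least favored priority) clears a pending IPI,
 * while storing IPI_PRIORITY raises one.  XICS_IPI (2) is the source
 * number that XIRR reports for an IPI.
 */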

/*
 * We come in here when woken from nap mode on a secondary hw thread.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest
kvm_start_guest:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD
	ld	r2,PACATOC(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/* get vcpu pointer, NULL if we have no vcpu to run */
	ld	r4,HSTATE_KVM_VCPU(r13)
	cmpdi	cr1,r4,0

	/* Check the wake reason in SRR1 to see why we got here */
	mfspr	r3,SPRN_SRR1
	rlwinm	r3,r3,44-31,0x7		/* extract wake reason field */
	cmpwi	r3,4			/* was it an external interrupt? */
	bne	27f
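	/*
	 * The rlwinm above isolates the 3-bit wake-reason field
	 * (SRR1_WAKEMASK, 0x00380000): rotating the low word left by 13
	 * is the same as rotating right by 19, which lands bits 19-21
	 * in the bottom of r3.  E.g. SRR1 & 0x00380000 == 0x00200000
	 * gives r3 = 4, the "external interrupt" encoding tested above.
	 */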

	/*
	 * External interrupt - for now assume it is an IPI, since we
	 * should never get any other interrupts sent to offline threads.
	 * Only do this for secondary threads.
	 */
	beq	cr1,25f
	lwz	r3,VCPU_PTID(r4)
	cmpwi	r3,0
	beq	27f
25:	ld	r5,HSTATE_XICS_PHYS(r13)
	li	r0,0xff
	li	r6,XICS_QIRR
	li	r7,XICS_XIRR
	lwzcix	r8,r5,r7		/* get and ack the interrupt */
	sync
	clrldi.	r9,r8,40		/* get interrupt source ID. */
	beq	27f			/* none there? */
	cmpwi	r9,XICS_IPI
	bne	26f
	stbcix	r0,r5,r6		/* clear IPI */
26:	stwcix	r8,r5,r7		/* EOI the interrupt */

27:	/* XXX should handle hypervisor maintenance interrupts etc. here */

	/* reload vcpu pointer after clearing the IPI */
	ld	r4,HSTATE_KVM_VCPU(r13)
	cmpdi	r4,0
	/* if we have no vcpu to run, go back to sleep */
	beq	kvm_no_guest

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,0
	bne	kvm_end_cede

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * all other volatile GPRS = free
	 */
	mflr	r0
	std	r0, HSTATE_VMHANDLER(r13)

	/* Set partition DABR */
	/* Do this before re-enabling the PMU to avoid the P7 DABR corruption bug */
	li	r5,3
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6
BEGIN_FTR_SECTION
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
BEGIN_FTR_SECTION
	lwz	r10, VCPU_PMC + 24(r4)
	lwz	r11, VCPU_PMC + 28(r4)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
BEGIN_FTR_SECTION
	mtspr	SPRN_PMC7, r10
	mtspr	SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_MMCR0, r3
	isync
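	/*
	 * Note the ordering in the PMU switch above: MMCR0_FC goes in
	 * first so nothing counts while the guest PMCs and MMCR1/MMCRA
	 * are being loaded, and the guest's own MMCR0 is written last,
	 * so the counters only start running (if the guest left them
	 * unfrozen) once the rest of its PMU state is in place.
	 */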

	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

BEGIN_FTR_SECTION
	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3
	stw	r3,VCPU_DEC(r4)
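	/*
	 * DEC counts down at the timebase frequency, so the value to
	 * load is just (expiry timebase - current timebase).  E.g. if
	 * dec_expires is 0x1000 ticks in the future, the guest sees a
	 * decrementer of 0x1000; if it is already in the past, r3 goes
	 * in negative and a decrementer interrupt is already pending,
	 * which is exactly what we want.
	 */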

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	lwz	r5, LPPACA_YIELDCOUNT(r3)
	addi	r5, r5, 1
	stw	r5, LPPACA_YIELDCOUNT(r3)
25:
	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

BEGIN_FTR_SECTION
	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Clear out SLB */
	li	r6,0
	slbmte	r6,r6
	slbia
	ptesync

BEGIN_FTR_SECTION
	b	30f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	/*
	 * POWER7 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Increment entry count iff exit count is zero. */
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r9,r5,VCORE_ENTRY_EXIT
21:	lwarx	r3,0,r9
	cmpwi	r3,0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	addi	r3,r3,1
	stwcx.	r3,0,r9
	bne	21b
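	/*
	 * entry_exit_count packs two byte-sized counts: bits 0-7 are
	 * the number of threads that have entered the guest and bits
	 * 8-15 the number that have started to exit, so r3 >= 0x100
	 * above means some thread is already on its way out and we
	 * must not enter.  Otherwise the lwarx/stwcx. loop bumps the
	 * entry count atomically.
	 */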

	/* Primary thread switches to guest partition. */
	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */
	lwz	r6,VCPU_PTID(r4)
	cmpwi	r6,0
	bne	20f
	ld	r6,KVM_SDR1(r9)
	lwz	r7,KVM_LPID(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync

	/* See if we need to flush the TLB */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	add	r6,r6,r9
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
	li	r0,1
	sld	r0,r0,r7
	ld	r7,0(r6)
	and.	r7,r7,r0
	beq	22f
23:	ldarx	r7,0,r6			/* if set, clear the bit */
	andc	r7,r7,r0
	stdcx.	r7,0,r6
	bne	23b
	li	r6,128			/* and flush the TLB */
	mtctr	r6
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
28:	tlbiel	r7
	addi	r7,r7,0x1000
	bdnz	28b
	ptesync

22:	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */
	b	10f

	/* Secondary threads wait for primary to have done partition switch */
20:	lbz	r0,VCORE_IN_GUEST(r5)
	cmpwi	r0,0
	beq	20b

	/* Set LPCR and RMOR. */
10:	ld	r8,KVM_LPCR(r9)
	mtspr	SPRN_LPCR,r8
	ld	r8,KVM_RMOR(r9)
	mtspr	SPRN_RMOR,r8
	isync

	/* Check if HDEC expires soon */
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,10
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	mr	r9,r4
	blt	hdec_soon

	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8
	b	31f

	/*
	 * PPC970 host -> guest partition switch code.
	 * We have to lock against concurrent tlbies,
	 * using native_tlbie_lock to lock against host tlbies
	 * and kvm->arch.tlbie_lock to lock against guest tlbies.
	 * We also have to invalidate the TLB since its
	 * entries aren't tagged with the LPID.
	 */
30:	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */

	/* first take native_tlbie_lock */
	.section ".toc","aw"
toc_tlbie_lock:
	.tc	native_tlbie_lock[TC],native_tlbie_lock
	.previous
	ld	r3,toc_tlbie_lock@toc(2)
	lwz	r8,PACA_LOCK_TOKEN(r13)
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r7,KVM_LPCR(r9)		/* use kvm->arch.lpcr to store HID4 */
	li	r0,0x18f
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	or	r0,r7,r0
	ptesync
	sync
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop native_tlbie_lock */

	/* invalidate the whole TLB */
	li	r0,256
	mtctr	r0
	li	r6,0
25:	tlbiel	r6
	addi	r6,r6,0x1000
	bdnz	25b
	ptesync

	/* Take the guest's tlbie_lock */
	addi	r3,r9,KVM_TLBIE_LOCK
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync
	ld	r6,KVM_SDR1(r9)
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */

	/* Set up HID4 with the guest's LPID etc. */
	sync
	mtspr	SPRN_HID4,r7
	isync

	/* drop the guest's tlbie_lock */
	li	r0,0
	stw	r0,0(r3)

	/* Check if HDEC expires soon */
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,10
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	mr	r9,r4
	blt	hdec_soon

	/* Enable HDEC interrupts */
	mfspr	r0,SPRN_HID0
	li	r3,1
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	sync
	mtspr	SPRN_HID0,r0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
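	/*
	 * The string of HID0 reads above looks odd but appears to be
	 * the 970's required synchronization sequence: after an mtspr
	 * to HID0, the new value is not guaranteed to be in effect
	 * until HID0 has been read back several times.
	 */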

	/* Load up guest SLB entries */
31:	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	ld	r6, VCPU_CTR(r4)
	lwz	r7, VCPU_XER(r4)

	mtctr	r6
	mtxer	r7

kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)	/* r11 = vcpu->arch.msr & ~MSR_HV */

	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME
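	/*
	 * The rldicl/rotldi pair above clears just MSR_HV: rotating
	 * left by 63 - MSR_HV_LG bits makes HV the most-significant
	 * bit, rldicl's mask clears it, and the second rotate by
	 * 1 + MSR_HV_LG undoes the shift.  Net effect:
	 * r11 = (guest msr & ~MSR_HV) | MSR_ME.
	 */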

	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0,VCPU_PENDING_EXC(r4)
	li	r8,(1 << BOOK3S_IRQPRIO_EXTERNAL)
	oris	r8,r8,(1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
	and	r0,r0,r8
	cmpdi	cr1,r0,0
	andi.	r0,r11,MSR_EE
	beq	cr1,11f
BEGIN_FTR_SECTION
	mfspr	r8,SPRN_LPCR
	ori	r8,r8,LPCR_MER
	mtspr	SPRN_LPCR,r8
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	beq	5f
	li	r0,BOOK3S_INTERRUPT_EXTERNAL
12:	mr	r6,r10
	mr	r10,r0
	mr	r7,r11
	li	r11,(MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11,r11,63
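	/*
	 * (MSR_ME << 1) | 1 rotated left by 63 (i.e. right by 1) moves
	 * the low-order 1 into bit 63 (MSR_SF) and shifts MSR_ME back
	 * into place, synthesizing a 64-bit MSR with only SF and ME
	 * set for delivering the interrupt to the guest.
	 */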
	b	5f
11:	beq	5f
	mfspr	r0,SPRN_DEC
	cmpwi	r0,0
	li	r0,BOOK3S_INTERRUPT_DECREMENTER
	blt	12b

	/* Move SRR0 and SRR1 into the respective regs */
5:	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */

fast_guest_return:
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST
	stb	r9, HSTATE_IN_GUEST(r13)

	/* Enter guest */

	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r0, VCPU_GPR(R0)(r4)
	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)

	ld	r4, VCPU_GPR(R4)(r4)

	hrfid
	b	.

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt
kvmppc_interrupt:
	/*
	 * Register contents:
	 * R12		= interrupt vector
	 * R13		= PACA
	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	/* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
	std	r9, HSTATE_HOST_R2(r13)
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_HOST_R2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	lwz	r4, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_GPR(R12)(r9)
	stw	r4, VCPU_CR(r9)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
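	/*
	 * The first-level handlers pass the vector in r12 with bit 1
	 * set when the interrupt came in via HSRR0/1 rather than
	 * SRR0/1; that is what the andi. above tests, and clrrdi then
	 * strips the flag so r12 is the plain vector number again.
	 */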
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(R13)(r9)
	std	r4, VCPU_LR(r9)

	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	stw	r12,VCPU_TRAP(r9)

	/* Save HEIR (HV emulation assist reg) in last_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
BEGIN_FTR_SECTION
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
11:	stw	r3,VCPU_LAST_INST(r9)

	/* these are volatile across C function calls */
	mfctr	r3
	mfxer	r4
	std	r3, VCPU_CTR(r9)
	stw	r4, VCPU_XER(r9)

BEGIN_FTR_SECTION
	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	kvmppc_hdsi
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	beq	kvmppc_hisi
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,0
	bge	ignore_hdec
2:
	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Check for mediated interrupts (could be done earlier really ...) */
BEGIN_FTR_SECTION
	cmpwi	r12,BOOK3S_INTERRUPT_EXTERNAL
	bne+	1f
	andi.	r0,r11,MSR_EE
	beq	1f
	mfspr	r5,SPRN_LPCR
	andi.	r0,r5,LPCR_MER
	bne	bounce_ext_interrupt
1:
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
	/* Save DEC */
	mfspr	r5,SPRN_DEC
	mftb	r6
	extsw	r5,r5
	add	r5,r5,r6
	std	r5,VCPU_DEC_EXPIRES(r9)

	/* Save more register state  */
	mfdar	r6
	mfdsisr	r7
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
BEGIN_FTR_SECTION
	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	6f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
mc_cont:

	/* Save guest CTRL register, set runlatch to 1 */
6:	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	ori	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
	li	r5,0
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
	stw	r5,VCPU_SLB_MAX(r9)
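	/*
	 * slbmfee/slbmfev read back the ESID and VSID halves of SLB
	 * entry r6; only entries with SLB_ESID_V set are kept.  The
	 * "add r8,r8,r6" folds the entry index into the low-order bits
	 * of the ESID word, which is where slbmte expects it when the
	 * entry is reloaded on the next guest entry.
	 */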

	/*
	 * Save the guest PURR/SPURR
	 */
BEGIN_FTR_SECTION
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	ld	r7,VCPU_PURR(r9)
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	subf	r5,r7,r5
	subf	r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	add	r3,r3,r5
	add	r4,r4,r6
	mtspr	SPRN_PURR,r3
	mtspr	SPRN_SPURR,r4
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)

	/* Clear out SLB */
	li	r5,0
	slbmte	r5,r5
	slbia
	ptesync

hdec_soon:			/* r9 = vcpu, r12 = trap, r13 = paca */
BEGIN_FTR_SECTION
	b	32f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	/*
	 * POWER7 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
	/* Increment the threads-exiting-guest count in the 0xff00
	   bits of vcore->entry_exit_count */
	lwsync
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r6,r5,VCORE_ENTRY_EXIT
41:	lwarx	r3,0,r6
	addi	r0,r3,0x100
	stwcx.	r0,0,r6
	bne	41b
	lwsync

	/*
	 * At this point we have an interrupt that we have to pass
	 * up to the kernel or qemu; we can't handle it in real mode.
	 * Thus we have to do a partition switch, so we have to
	 * collect the other threads, if we are the first thread
	 * to take an interrupt.  To do this, we set the HDEC to 0,
	 * which causes an HDEC interrupt in all threads within 2ns
	 * because the HDEC register is shared between all 4 threads.
	 * However, we don't need to bother if this is an HDEC
	 * interrupt, since the other threads will already be on their
	 * way here in that case.
	 */
	cmpwi	r3,0x100	/* Are we the first here? */
	bge	43f
	cmpwi	r3,1		/* Are any other threads in the guest? */
	ble	43f
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	beq	40f
	li	r0,0
	mtspr	SPRN_HDEC,r0
40:
	/*
	 * Send an IPI to any napping threads, since an HDEC interrupt
	 * doesn't wake CPUs up from nap.
	 */
	lwz	r3,VCORE_NAPPING_THREADS(r5)
	lwz	r4,VCPU_PTID(r9)
	li	r0,1
	sld	r0,r0,r4
	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */
	beq	43f
	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */
	subf	r6,r4,r13
42:	andi.	r0,r3,1
	beq	44f
	ld	r8,HSTATE_XICS_PHYS(r6)	/* get thread's XICS reg addr */
	li	r0,IPI_PRIORITY
	li	r7,XICS_QIRR
	stbcix	r0,r7,r8		/* trigger the IPI */
44:	srdi.	r3,r3,1
	addi	r6,r6,PACA_SIZE
	bne	42b

	/* Secondary threads wait for primary to do partition switch */
43:	ld	r4,VCPU_KVM(r9)		/* pointer to struct kvm */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lwz	r3,VCPU_PTID(r9)
	cmpwi	r3,0
	beq	15f
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	b	16f

	/* Primary thread waits for all the secondaries to exit guest */
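	/* The low byte of entry_exit_count is the number of threads
	   that entered the guest and the next byte is the number that
	   have exited, so the loop below spins until the two match. */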
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	srwi	r0,r3,8
	clrldi	r3,r3,56
	cmpw	r3,r0
	bne	15b
	isync

	/* Primary thread switches back to host partition */
	ld	r6,KVM_HOST_SDR1(r4)
	lwz	r7,KVM_HOST_LPID(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r8
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync
	li	r0,0
	stb	r0,VCORE_IN_GUEST(r5)
	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

16:	ld	r8,KVM_HOST_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync
	b	33f

	/*
	 * PPC970 guest -> host partition switch code.
	 * We have to lock against concurrent tlbies, and
	 * we have to flush the whole TLB.
	 */
32:	ld	r4,VCPU_KVM(r9)		/* pointer to struct kvm */

	/* Take the guest's tlbie_lock */
	lwz	r8,PACA_LOCK_TOKEN(r13)
	addi	r3,r4,KVM_TLBIE_LOCK
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r7,KVM_HOST_LPCR(r4)	/* use kvm->arch.host_lpcr for HID4 */
	li	r0,0x18f
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	or	r0,r7,r0
	ptesync
	sync
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop guest tlbie_lock */

	/* invalidate the whole TLB */
	li	r0,256
	mtctr	r0
	li	r6,0
25:	tlbiel	r6
	addi	r6,r6,0x1000
	bdnz	25b
	ptesync

	/* take native_tlbie_lock */
	ld	r3,toc_tlbie_lock@toc(2)
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r6,KVM_HOST_SDR1(r4)
	mtspr	SPRN_SDR1,r6		/* switch to host page table */

	/* Set up host HID4 value */
	sync
	mtspr	SPRN_HID4,r7
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop native_tlbie_lock */

	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

	/* Disable HDEC interrupts */
	mfspr	r0,SPRN_HID0
	li	r3,0
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	sync
	mtspr	SPRN_HID0,r0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0

	/* load host SLB entries */
33:	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	ld	r5,SLBSHADOW_SAVEAREA(r8)
	ld	r6,SLBSHADOW_SAVEAREA+8(r8)
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr

	/* Save and reset AMR and UAMOR before turning on the MMU */
BEGIN_FTR_SECTION
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	li	r6,0
	mtspr	SPRN_AMR,r6
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Switch DSCR back to host value */
BEGIN_FTR_SECTION
	mfspr	r8, SPRN_DSCR
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)	/* save guest DSCR in the vcpu (r9),
					   not via r7, which holds the host
					   DSCR value rather than a pointer */
	mtspr	SPRN_DSCR, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	/* Save SPRGs */
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

	/* save FP state */
	mr	r3, r9
	bl	.kvmppc_save_fp

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi	r8, 0
	beq	25f
	lwz	r3, LPPACA_YIELDCOUNT(r8)
	addi	r3, r3, 1
	stw	r3, LPPACA_YIELDCOUNT(r8)
25:
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
BEGIN_FTR_SECTION
	/* On P7, clear MMCRA in order to disable SDAR updates */
	li	r7, 0
	mtspr	SPRN_MMCRA, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	isync
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
BEGIN_FTR_SECTION
	mfspr	r10, SPRN_PMC7
	mfspr	r11, SPRN_PMC8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	stw	r10, VCPU_PMC + 24(r9)
	stw	r11, VCPU_PMC + 28(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
22:

	/* Secondary threads go off to take a nap on POWER7 */
BEGIN_FTR_SECTION
	lwz	r0,VCPU_PTID(r9)
	cmpwi	r0,0
	bne	secondary_nap
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG3(r13)
	mtspr	SPRN_SPRG3,r3

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4
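	/*
	 * decexp is the host timebase value at which the host DEC was
	 * due to expire, so DEC = decexp - now.  If that is already
	 * negative, loading it leaves a host decrementer interrupt
	 * pending, to be taken once we return with EE on.
	 */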

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	cmpwi	r4, 0
	beq	23f			/* skip if not */
	lwz	r3, HSTATE_PMC(r13)
	lwz	r4, HSTATE_PMC + 4(r13)
	lwz	r5, HSTATE_PMC + 8(r13)
	lwz	r6, HSTATE_PMC + 12(r13)
	lwz	r8, HSTATE_PMC + 16(r13)
	lwz	r9, HSTATE_PMC + 20(r13)
BEGIN_FTR_SECTION
	lwz	r10, HSTATE_PMC + 24(r13)
	lwz	r11, HSTATE_PMC + 28(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
BEGIN_FTR_SECTION
	mtspr	SPRN_PMC7, r10
	mtspr	SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, HSTATE_MMCR(r13)
	ld	r4, HSTATE_MMCR + 8(r13)
	ld	r5, HSTATE_MMCR + 16(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_MMCR0, r3
	isync
23:
	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to absolute address 0x500 for
	 * external interrupts, or the machine_check_fwnmi label
	 * for machine checks (since firmware might have patched
	 * the vector area at 0x200).  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_hv_interrupts.S code here.
	 */
	ld	r8, HSTATE_VMHANDLER(r13)
	ld	r7, HSTATE_HOST_MSR(r13)

	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
BEGIN_FTR_SECTION
	beq	11f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* RFI into the highmem handler, or branch to interrupt handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	beqa	0x500			/* external interrupt (PPC970) */
	beq	cr1, 13f		/* machine check */
	RFI

	/* On POWER7, we have external interrupts set to use HSRR0/1 */
11:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0x500

13:	b	machine_check_fwnmi

/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path.  In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
kvmppc_hdsi:
	mfspr	r4, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	beq	3f
	clrrdi	r0, r4, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	.kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	6f
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont
	cmpdi	r3, -2			/* MMIO emulation; need instr word */
	beq	2f

	/* Synthesize a DSI for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
	mr	r6, r3
1:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_DSISR, r6
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_DATA_STORAGE
	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11, r11, 63
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
	lwz	r8, VCPU_XER(r9)
	mtctr	r7
	mtxer	r8
	mr	r4, r9
	b	fast_guest_return

3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)
	b	4b

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	mfmsr	r3
	ori	r4, r3, MSR_DR		/* Enable paging for data */
	mtmsrd	r4
	lwz	r8, 0(r10)
	mtmsrd	r3

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)
	b	guest_exit_cont

/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
kvmppc_hisi:
	andis.	r0, r11, SRR1_ISI_NOPT@h
	beq	1f
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	beq	3f
	clrrdi	r0, r10, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
4:
	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	mr	r4, r10
	mr	r6, r11
	li	r7, 0			/* instruction fault */
	bl	.kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont

	/* Synthesize an ISI for the guest */
	mr	r11, r3
1:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_INST_STORAGE
	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11, r11, 63
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)
	b	4b

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 */
	.globl	hcall_try_real_mode
hcall_try_real_mode:
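	/*
	 * hcall numbers are multiples of 4, so the hcall number itself
	 * is used as the byte offset into hcall_real_table, a table of
	 * 32-bit offsets from the table base to each handler (0 means
	 * no real-mode handler).  E.g. H_ENTER (0x08) selects the
	 * third .long entry, kvmppc_h_enter.
	 */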
	ld	r3,VCPU_GPR(R3)(r9)
	andi.	r0,r11,MSR_PR
	bne	guest_exit_cont
	clrrdi	r3,r3,2
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	bge	guest_exit_cont
	LOAD_REG_ADDR(r4, hcall_real_table)
	lwzx	r3,r3,r4
	cmpwi	r3,0
	beq	guest_exit_cont
	add	r3,r3,r4
	mtctr	r3
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	bctrl
	cmpdi	r3,H_TOO_HARD
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)
	ld	r10,VCPU_PC(r4)
	ld	r11,VCPU_MSR(r4)
	b	fast_guest_return

	/* We attempted a real-mode hcall, but the handler has punted it
	 * back to userspace.  We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)

	b	guest_exit_cont

	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	.kvmppc_h_remove - hcall_real_table
	.long	.kvmppc_h_enter - hcall_real_table
	.long	.kvmppc_h_read - hcall_real_table
	.long	0		/* 0x10 - H_CLEAR_MOD */
	.long	0		/* 0x14 - H_CLEAR_REF */
	.long	.kvmppc_h_protect - hcall_real_table
	.long	0		/* 0x1c - H_GET_TCE */
	.long	.kvmppc_h_put_tce - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	.kvmppc_h_set_dabr - hcall_real_table
	.long	0		/* 0x2c */
	.long	0		/* 0x30 */
	.long	0		/* 0x34 */
	.long	0		/* 0x38 */
	.long	0		/* 0x3c */
	.long	0		/* 0x40 */
	.long	0		/* 0x44 */
	.long	0		/* 0x48 */
	.long	0		/* 0x4c */
	.long	0		/* 0x50 */
	.long	0		/* 0x54 */
	.long	0		/* 0x58 */
	.long	0		/* 0x5c */
	.long	0		/* 0x60 */
	.long	0		/* 0x64 */
	.long	0		/* 0x68 */
	.long	0		/* 0x6c */
	.long	0		/* 0x70 */
	.long	0		/* 0x74 */
	.long	0		/* 0x78 */
	.long	0		/* 0x7c */
	.long	0		/* 0x80 */
	.long	0		/* 0x84 */
	.long	0		/* 0x88 */
	.long	0		/* 0x8c */
	.long	0		/* 0x90 */
	.long	0		/* 0x94 */
	.long	0		/* 0x98 */
	.long	0		/* 0x9c */
	.long	0		/* 0xa0 */
	.long	0		/* 0xa4 */
	.long	0		/* 0xa8 */
	.long	0		/* 0xac */
	.long	0		/* 0xb0 */
	.long	0		/* 0xb4 */
	.long	0		/* 0xb8 */
	.long	0		/* 0xbc */
	.long	0		/* 0xc0 */
	.long	0		/* 0xc4 */
	.long	0		/* 0xc8 */
	.long	0		/* 0xcc */
	.long	0		/* 0xd0 */
	.long	0		/* 0xd4 */
	.long	0		/* 0xd8 */
	.long	0		/* 0xdc */
	.long	.kvmppc_h_cede - hcall_real_table
	.long	0		/* 0xe4 */
	.long	0		/* 0xe8 */
	.long	0		/* 0xec */
	.long	0		/* 0xf0 */
	.long	0		/* 0xf4 */
	.long	0		/* 0xf8 */
	.long	0		/* 0xfc */
	.long	0		/* 0x100 */
	.long	0		/* 0x104 */
	.long	0		/* 0x108 */
	.long	0		/* 0x10c */
	.long	0		/* 0x110 */
	.long	0		/* 0x114 */
	.long	0		/* 0x118 */
	.long	0		/* 0x11c */
	.long	0		/* 0x120 */
	.long	.kvmppc_h_bulk_remove - hcall_real_table
hcall_real_table_end:

ignore_hdec:
	mr	r4,r9
	b	fast_guest_return

bounce_ext_interrupt:
	mr	r4,r9
	mtspr	SPRN_SRR0,r10
	mtspr	SPRN_SRR1,r11
	li	r10,BOOK3S_INTERRUPT_EXTERNAL
	li	r11,(MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11,r11,63
	b	fast_guest_return

_GLOBAL(kvmppc_h_set_dabr)
	std	r4,VCPU_DABR(r3)
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4
	mfspr	r5, SPRN_DABR
	cmpd	r4, r5
	bne	1b
	isync
	li	r3,0
	blr

_GLOBAL(kvmppc_h_cede)
	ori	r11,r11,MSR_EE
	std	r11,VCPU_MSR(r3)
	li	r0,1
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	cmpwi	r5,0
	bne	kvm_cede_prodded
	li	r0,0		/* set trap to 0 to say hcall is handled */
	stw	r0,VCPU_TRAP(r3)
	li	r0,H_SUCCESS
	std	r0,VCPU_GPR(R3)(r3)
BEGIN_FTR_SECTION
	b	kvm_cede_exit	/* just send it up to host on 970 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)

	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lwz	r6,VCPU_PTID(r3)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	clrldi	r8,r8,56
	li	r0,1
	sld	r0,r0,r6
	addi	r6,r5,VCORE_NAPPING_THREADS
31:	lwarx	r4,0,r6
	or	r4,r4,r0
	PPC_POPCNTW(R7,R4)
	cmpw	r7,r8
	bge	kvm_cede_exit
	stwcx.	r4,0,r6
	bne	31b
	li	r0,1
	stb	r0,HSTATE_NAPPING(r13)
	/* order napping_threads update vs testing entry_exit_count */
	lwsync
	mr	r4,r3
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	cmpwi	r7,0x100
	bge	33f		/* another thread already exiting */

/*
 * Although not specifically required by the architecture, POWER7
 * preserves the following registers in nap mode, even if an SMT mode
 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
 */
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)

	/* save FP state */
	bl	.kvmppc_save_fp

	/*
	 * Take a nap until a decrementer or external interrupt occurs,
	 * with PECE1 (wake on decr) and PECE0 (wake on external) set in LPCR
	 */
	li	r0,1
	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	mfspr	r5,SPRN_LPCR
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR,r5
	isync
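	/*
	 * The store/ptesync/load/cmpd dance below appears to be the
	 * required recipe for entering nap: push out any pending
	 * stores, then force completion with a dependent load and a
	 * compare/branch that can never be taken, so nothing is left
	 * in flight when the nap instruction executes.
	 */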
	li	r0, 0
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.

kvm_end_cede:
	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)

	/* load up FP state */
	bl	kvmppc_load_fp

	/* Load NV GPRS */
	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* clear our bit in vcore->napping_threads */
33:	ld	r5,HSTATE_KVM_VCORE(r13)
	lwz	r3,VCPU_PTID(r4)
	li	r0,1
	sld	r0,r0,r3
	addi	r6,r5,VCORE_NAPPING_THREADS
32:	lwarx	r7,0,r6
	andc	r7,r7,r0
	stwcx.	r7,0,r6
	bne	32b
	li	r0,0
	stb	r0,HSTATE_NAPPING(r13)

	/* see if any other thread is already exiting */
	lwz	r0,VCORE_ENTRY_EXIT(r5)
	cmpwi	r0,0x100
	blt	kvmppc_cede_reentry	/* if not go back to guest */

	/* some threads are exiting, so go to the guest exit path */
	b	hcall_real_fallback

	/* cede when already previously prodded case */
kvm_cede_prodded:
	li	r0,0
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)
	li	r3,H_SUCCESS
	blr

	/* we've ceded but we want to give control to the host */
kvm_cede_exit:
	li	r3,H_TOO_HARD
	blr

	/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	.kvmppc_realmode_machine_check
	nop
	cmpdi	r3, 0		/* continue exiting from guest? */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	mc_cont
	/* If not, deliver a machine check.  SRR0/1 are already set */
	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11, r11, 63
	b	fast_interrupt_c_return

secondary_too_late:
	ld	r5,HSTATE_KVM_VCORE(r13)
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	ld	r11,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	ld	r5,SLBSHADOW_SAVEAREA(r11)
	ld	r6,SLBSHADOW_SAVEAREA+8(r11)
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r11,r11,16
	.endr

secondary_nap:
	/* Clear our vcpu pointer so we don't come back in early */
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)
	lwsync
	/* Clear any pending IPI - assume we're a secondary thread */
	ld	r5, HSTATE_XICS_PHYS(r13)
	li	r7, XICS_XIRR
	lwzcix	r3, r5, r7		/* ack any pending interrupt */
	rlwinm.	r0, r3, 0, 0xffffff	/* any pending? */
	beq	37f
	sync
	li	r0, 0xff
	li	r6, XICS_QIRR
	stbcix	r0, r5, r6		/* clear the IPI */
	stwcix	r3, r5, r7		/* EOI it */
37:	sync

	/* increment the nap count and then go to nap mode */
	ld	r4, HSTATE_KVM_VCORE(r13)
	addi	r4, r4, VCORE_NAP_COUNT
	lwsync				/* make previous updates visible */
51:	lwarx	r3, 0, r4
	addi	r3, r3, 1
	stwcx.	r3, 0, r4
	bne	51b

kvm_no_guest:
	li	r0, KVM_HWTHREAD_IN_NAP
	stb	r0, HSTATE_HWTHREAD_STATE(r13)

	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	isync
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.

/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 */
_GLOBAL(kvmppc_save_fp)
	mfmsr	r5
	ori	r8,r5,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r6,reg*16+VCPU_VSRS
	STXVD2X(reg,R6,R3)
	reg = reg + 1
	.endr
FTR_SECTION_ELSE
#endif
	reg = 0
	.rept	32
	stfd	reg,reg*8+VCPU_FPRS(r3)
	reg = reg + 1
	.endr
#ifdef CONFIG_VSX
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#endif
	mffs	fr0
	stfd	fr0,VCPU_FPSCR(r3)

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r6,reg*16+VCPU_VRS
	stvx	reg,r6,r3
	reg = reg + 1
	.endr
	mfvscr	vr0
	li	r6,VCPU_VSCR
	stvx	vr0,r6,r3
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r3)
	mtmsrd	r5
	isync
	blr

/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 */
	.globl	kvmppc_load_fp
kvmppc_load_fp:
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
	lfd	fr0,VCPU_FPSCR(r4)
	MTFSF_L(fr0)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r7,reg*16+VCPU_VSRS
	LXVD2X(reg,R7,R4)
	reg = reg + 1
	.endr
FTR_SECTION_ELSE
#endif
	reg = 0
	.rept	32
	lfd	reg,reg*8+VCPU_FPRS(r4)
	reg = reg + 1
	.endr
#ifdef CONFIG_VSX
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#endif

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	li	r7,VCPU_VSCR
	lvx	vr0,r7,r4
	mtvscr	vr0
	reg = 0
	.rept	32
	li	r7,reg*16+VCPU_VRS
	lvx	reg,r7,r4
	reg = reg + 1
	.endr
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r7
	blr