/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_interrupts.S, which is:
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/ppc-opcode.h>

/*****************************************************************************
 *                                                                           *
 *     Guest entry / exit code that is in kernel module memory (vmalloc)     *
 *                                                                           *
 ****************************************************************************/

/* Registers:
 *  none -- all the state this code needs is reached through the PACA
 *  in r13 (e.g. the vcpu pointer is loaded from HSTATE_KVM_VCPU(r13)).
 */
_GLOBAL(__kvmppc_vcore_entry)

	/* Write correct stack frame */
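	/*
	 * LR goes into the standard LR save slot of the caller's frame;
	 * the stdu below then creates our own switch frame for the
	 * host state.
	 */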
	mflr	r0
	std	r0,PPC_LR_STKOFF(r1)

	/* Save host state to the stack */
	stdu	r1, -SWITCH_FRAME_SIZE(r1)

	/* Save non-volatile registers (r14 - r31) and CR */
	SAVE_NVGPRS(r1)
	mfcr	r3
	std	r3, _CCR(r1)

	/* Save host DSCR */
BEGIN_FTR_SECTION
	mfspr	r3, SPRN_DSCR
	std	r3, HSTATE_DSCR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

BEGIN_FTR_SECTION
	/* Save host DABR */
	mfspr	r3, SPRN_DABR
	std	r3, HSTATE_DABR(r13)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Hard-disable interrupts */
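	/*
	 * The rldicl/rotldi pair below rotates MSR_EE up to the most
	 * significant bit, clears it there, and rotates the value back
	 * into place, clearing EE without disturbing any other MSR bit.
	 * The L=1 form of mtmsrd then updates only MSR[EE] and MSR[RI].
	 */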
	mfmsr   r10
	std	r10, HSTATE_HOST_MSR(r13)
	rldicl  r10,r10,48,1
	rotldi  r10,r10,16
	mtmsrd  r10,1

	/* Save host PMU registers */
BEGIN_FTR_SECTION
	/* Work around P8 PMAE bug */
	li	r3, -1
	clrrdi	r3, r3, 10
	mfspr	r8, SPRN_MMCR2
	mtspr	SPRN_MMCR2, r3		/* freeze all counters using MMCR2 */
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r7, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable interrupts */
	mfspr	r6, SPRN_MMCRA
BEGIN_FTR_SECTION
	/* On P7, clear MMCRA in order to disable SDAR updates */
	li	r5, 0
	mtspr	SPRN_MMCRA, r5
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	isync
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r5, LPPACA_PMCINUSE(r3)
	cmpwi	r5, 0
	beq	31f			/* skip if not */
	mfspr	r5, SPRN_MMCR1
	mfspr	r9, SPRN_SIAR
	mfspr	r10, SPRN_SDAR
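	/*
	 * hstate MMCR save area layout (doublewords), as laid down by
	 * the stores below:
	 *   0 = MMCR0, 1 = MMCR1, 2 = MMCRA, 3 = SIAR, 4 = SDAR,
	 *   and on POWER8 (ARCH_207S) also 5 = MMCR2, 6 = SIER.
	 */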
	std	r7, HSTATE_MMCR(r13)
	std	r5, HSTATE_MMCR + 8(r13)
	std	r6, HSTATE_MMCR + 16(r13)
	std	r9, HSTATE_MMCR + 24(r13)
	std	r10, HSTATE_MMCR + 32(r13)
BEGIN_FTR_SECTION
	mfspr	r9, SPRN_SIER
	std	r8, HSTATE_MMCR + 40(r13)
	std	r9, HSTATE_MMCR + 48(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mfspr	r3, SPRN_PMC1
	mfspr	r5, SPRN_PMC2
	mfspr	r6, SPRN_PMC3
	mfspr	r7, SPRN_PMC4
	mfspr	r8, SPRN_PMC5
	mfspr	r9, SPRN_PMC6
BEGIN_FTR_SECTION
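	/* PPC970 (CPU_FTR_ARCH_201) has two extra counters, PMC7 and PMC8 */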
	mfspr	r10, SPRN_PMC7
	mfspr	r11, SPRN_PMC8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	stw	r3, HSTATE_PMC(r13)
	stw	r5, HSTATE_PMC + 4(r13)
	stw	r6, HSTATE_PMC + 8(r13)
	stw	r7, HSTATE_PMC + 12(r13)
	stw	r8, HSTATE_PMC + 16(r13)
	stw	r9, HSTATE_PMC + 20(r13)
BEGIN_FTR_SECTION
	stw	r10, HSTATE_PMC + 24(r13)
	stw	r11, HSTATE_PMC + 28(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
31:

	/*
	 * Put whatever is in the decrementer into the
	 * hypervisor decrementer.
	 */
	mfspr	r8,SPRN_DEC
	mftb	r7
	mtspr	SPRN_HDEC,r8
	extsw	r8,r8
	add	r8,r8,r7
	std	r8,HSTATE_DECEXP(r13)
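	/*
	 * HSTATE_DECEXP now holds the host decrementer expiry time in
	 * timebase ticks (current timebase plus the sign-extended DEC
	 * value), which the exit path uses to give the host back a
	 * correctly adjusted DEC.
	 */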

#ifdef CONFIG_SMP
	/*
	 * On PPC970, if the guest vcpu has an external interrupt pending,
	 * send ourselves an IPI so as to interrupt the guest once it
	 * enables interrupts.  (It must have interrupts disabled,
	 * otherwise we would already have delivered the interrupt.)
	 *
	 * XXX If this is a UP build, smp_send_reschedule is not available,
	 * so the interrupt will be delayed until the next time the vcpu
	 * enters the guest with interrupts enabled.
	 */
BEGIN_FTR_SECTION
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r0, VCPU_PENDING_EXC(r4)
	li	r7, (1 << BOOK3S_IRQPRIO_EXTERNAL)
	oris	r7, r7, (1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
	and.	r0, r0, r7
	beq	32f
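	/* r3 = our logical CPU number, the argument to smp_send_reschedule() */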
	lhz	r3, PACAPACAINDEX(r13)
	bl	smp_send_reschedule
	nop
32:
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
#endif /* CONFIG_SMP */

	/* Jump to partition switch code */
	bl	.kvmppc_hv_entry_trampoline
	nop

/*
 * We return here in virtual mode after the guest exits
 * with something that we can't handle in real mode.
 * Interrupts are enabled again at this point.
 */

	/*
	 * Register usage at this point:
	 *
	 * R1       = host R1
	 * R2       = host R2
	 * R12      = exit handler id
	 * R13      = PACA
	 */

	/* Restore non-volatile host registers (r14 - r31) and CR */
	REST_NVGPRS(r1)
	ld	r4, _CCR(r1)
	mtcr	r4

	addi	r1, r1, SWITCH_FRAME_SIZE
	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr