/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_H__
#define __ASM_KVM_BOOK3S_H__

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_book3s_asm.h>

struct kvmppc_bat {
	u64 raw;
	u32 bepi;
	u32 bepi_mask;
	u32 brpn;
	u8 wimg;
	u8 pp;
	bool vs		: 1;
	bool vp		: 1;
};
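
/*
 * A BAT is architecturally a pair of 32-bit SPRs; 'raw' holds both words
 * and the remaining fields cache the decoded contents (upper word: bepi,
 * vs, vp; lower word: brpn, wimg, pp).  Sketch of an address match under
 * the usual Book3S BAT semantics (illustrative only; the real walk lives
 * in the 32-bit MMU code):
 *
 *	if (bat->vs && (eaddr & bat->bepi_mask) == bat->bepi)
 *		raddr = bat->brpn | (eaddr & ~bat->bepi_mask);
 */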

struct kvmppc_sid_map {
	u64 guest_vsid;
	u64 guest_esid;
	u64 host_vsid;
	bool valid	: 1;
};

#define SID_MAP_BITS    9
#define SID_MAP_NUM     (1 << SID_MAP_BITS)
#define SID_MAP_MASK    (SID_MAP_NUM - 1)
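
/*
 * The sid_map array in struct kvmppc_vcpu_book3s below is a small hash
 * from guest VSID to host VSID.  A lookup might fold the guest VSID into
 * SID_MAP_BITS and check the valid bit, roughly as sketched here (the
 * fold shown is an assumption; the real hash lives in the MMU host code):
 *
 *	u16 idx = (u16)(gvsid & SID_MAP_MASK);
 *	struct kvmppc_sid_map *map = &to_book3s(vcpu)->sid_map[idx];
 *
 *	if (map->valid && map->guest_vsid == gvsid)
 *		return map->host_vsid;
 */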

#ifdef CONFIG_PPC_BOOK3S_64
#define SID_CONTEXTS	1
#else
#define SID_CONTEXTS	128
#define VSID_POOL_SIZE	(SID_CONTEXTS * 16)
#endif

struct hpte_cache {
	struct hlist_node list_pte;
	struct hlist_node list_pte_long;
	struct hlist_node list_vpte;
	struct hlist_node list_vpte_long;
	struct rcu_head rcu_head;
	u64 host_va;
	u64 pfn;
	ulong slot;
	struct kvmppc_pte pte;
};
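
/*
 * Each hpte_cache entry sits on four hash lists at once so a shadow
 * translation can be found and invalidated by effective address
 * (list_pte*) or by virtual page (list_vpte*) at different granularities.
 * Typical life cycle, sketched from the helpers declared below (the field
 * assignments are illustrative):
 *
 *	struct hpte_cache *hpte = kvmppc_mmu_hpte_cache_next(vcpu);
 *
 *	hpte->pte = *pte;			// guest translation shadowed
 *	hpte->host_va = host_va;		// backend-specific details
 *	kvmppc_mmu_hpte_cache_map(vcpu, hpte);	// link into the hash lists
 */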

struct kvmppc_vcpu_book3s {
	struct kvm_vcpu vcpu;
	struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
	struct kvmppc_sid_map sid_map[SID_MAP_NUM];
	struct {
		u64 esid;
		u64 vsid;
	} slb_shadow[64];
	u8 slb_shadow_max;
	struct kvmppc_bat ibat[8];
	struct kvmppc_bat dbat[8];
	u64 hid[6];
	u64 gqr[8];
	u64 sdr1;
	u64 hior;
	u64 msr_mask;
	u64 vsid_next;
#ifdef CONFIG_PPC_BOOK3S_32
	u32 vsid_pool[VSID_POOL_SIZE];
#else
	u64 vsid_first;
	u64 vsid_max;
#endif
	int context_id[SID_CONTEXTS];

	struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
	struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
	struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
	struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
	int hpte_cache_count;
	spinlock_t mmu_lock;
};

#define CONTEXT_HOST		0
#define CONTEXT_GUEST		1
#define CONTEXT_GUEST_END	2

#define VSID_REAL	0x1fffffffffc00000ULL
#define VSID_BAT	0x1fffffffffb00000ULL
#define VSID_REAL_DR	0x2000000000000000ULL
#define VSID_REAL_IR	0x4000000000000000ULL
#define VSID_PR		0x8000000000000000ULL
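
/*
 * The VSID_* values tag shadow translations so mappings made in different
 * translation modes (real mode, BAT, problem state) never alias.  A sketch
 * of how such a tag could be folded into a virtual page number, assuming
 * standard MSR_IR/MSR_DR/MSR_PR semantics (illustrative only; shifts and
 * placement are simplified, see the xlate paths for the real logic):
 *
 *	u64 vpage = eaddr >> 12;
 *
 *	switch (msr & (MSR_IR | MSR_DR)) {
 *	case 0:
 *		vpage |= VSID_REAL;	// fully real mode
 *		break;
 *	case MSR_DR:
 *		vpage |= VSID_REAL_DR;	// only data relocation on
 *		break;
 *	case MSR_IR:
 *		vpage |= VSID_REAL_IR;	// only instruction relocation on
 *		break;
 *	}
 *	if (msr & MSR_PR)
 *		vpage |= VSID_PR;	// problem state gets its own space
 */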

extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
extern void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr);
extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);

extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern int kvmppc_mmu_hpte_sysinit(void);
extern void kvmppc_mmu_hpte_sysexit(void);
extern int kvmppc_mmu_hv_init(void);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
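
/*
 * kvmppc_ld()/kvmppc_st() copy between guest effective addresses and host
 * memory, honouring the current translation mode; 'data' selects a
 * data-side (true) or instruction-side (false) access.  For example,
 * kvmppc_get_last_inst() below refetches a failed instruction with:
 *
 *	u32 inst;
 *	ulong pc = kvmppc_get_pc(vcpu);
 *
 *	kvmppc_ld(vcpu, &pc, sizeof(u32), &inst, false);
 */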
extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
			   bool upper, u32 val);
extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);

extern void kvmppc_handler_lowmem_trampoline(void);
extern void kvmppc_handler_trampoline_enter(void);
extern void kvmppc_rmcall(ulong srr0, ulong srr1);
extern void kvmppc_hv_entry_trampoline(void);
extern void kvmppc_load_up_fpu(void);
extern void kvmppc_load_up_altivec(void);
extern void kvmppc_load_up_vsx(void);
extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);

static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct kvmppc_vcpu_book3s, vcpu);
}
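
/*
 * Note: this container_of() only works because struct kvmppc_vcpu_book3s
 * embeds the generic struct kvm_vcpu by value (the 'vcpu' member above),
 * so the Book3S private state and the generic vCPU share one allocation.
 */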

extern void kvm_return_point(void);

/* Also add subarch specific defines */

#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
#include <asm/kvm_book3s_32.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/kvm_book3s_64.h>
#endif

#ifdef CONFIG_KVM_BOOK3S_PR

static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
	return to_book3s(vcpu)->hior;
}

static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
			unsigned long pending_now, unsigned long old_pending)
{
	if (pending_now)
		vcpu->arch.shared->int_pending = 1;
	else if (old_pending)
		vcpu->arch.shared->int_pending = 0;
}

static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
	if (num < 14) {
		to_svcpu(vcpu)->gpr[num] = val;
		to_book3s(vcpu)->shadow_vcpu->gpr[num] = val;
	} else {
		vcpu->arch.gpr[num] = val;
	}
}

static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
	if (num < 14)
		return to_svcpu(vcpu)->gpr[num];
	else
		return vcpu->arch.gpr[num];
}

static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
	to_svcpu(vcpu)->cr = val;
	to_book3s(vcpu)->shadow_vcpu->cr = val;
}

static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
	return to_svcpu(vcpu)->cr;
}

static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
{
	to_svcpu(vcpu)->xer = val;
	to_book3s(vcpu)->shadow_vcpu->xer = val;
}

static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
	return to_svcpu(vcpu)->xer;
}

static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
	to_svcpu(vcpu)->ctr = val;
}

static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
	return to_svcpu(vcpu)->ctr;
}

static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
	to_svcpu(vcpu)->lr = val;
}

static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
	return to_svcpu(vcpu)->lr;
}

static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
	to_svcpu(vcpu)->pc = val;
}

static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
	return to_svcpu(vcpu)->pc;
}

static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
{
	ulong pc = kvmppc_get_pc(vcpu);
	struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);

	/* Fetch the instruction manually if the exit path failed to
	 * load it for us */
	if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
		kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);

	return svcpu->last_inst;
}

static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
	return to_svcpu(vcpu)->fault_dar;
}

static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

	return crit;
}
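
/*
 * Guest-side view of the check above, sketched under the assumption that
 * the paravirt protocol is "critical == r1 means defer injection"
 * (illustrative only; see the PPC magic page documentation for the
 * authoritative sequence):
 *
 *	shared->critical = r1;		// enter critical section
 *	... touch magic-page state without interrupt injection ...
 *	shared->critical = r1 - 1;	// leave: any value != r1 ends it
 */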
#else /* CONFIG_KVM_BOOK3S_PR */

static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
			unsigned long pending_now, unsigned long old_pending)
{
}

static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
	vcpu->arch.gpr[num] = val;
}

static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
	return vcpu->arch.gpr[num];
}

static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
	vcpu->arch.cr = val;
}

static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.cr;
}

static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
{
	vcpu->arch.xer = val;
}

static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.xer;
}

static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.ctr = val;
}

static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.ctr;
}

static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.lr = val;
}

static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.lr;
}

static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.pc = val;
}

static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.pc;
}

static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
{
	ulong pc = kvmppc_get_pc(vcpu);

	/* Fetch the instruction manually if the exit path failed to
	 * load it for us */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);

	return vcpu->arch.last_inst;
}

static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault_dar;
}

static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
	return false;
}
#endif

/* Magic register values loaded into r3 and r4 before the 'sc' assembly
 * instruction for the OSI hypercalls */
#define OSI_SC_MAGIC_R3			0x113724FA
#define OSI_SC_MAGIC_R4			0x77810F9B
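
/*
 * Guest-side invocation sketch (illustrative; the inline asm constraints
 * and the extra argument register are assumptions, not code taken from a
 * real guest):
 *
 *	register ulong r3 asm("r3") = OSI_SC_MAGIC_R3;
 *	register ulong r4 asm("r4") = OSI_SC_MAGIC_R4;
 *
 *	asm volatile("sc" : "+r"(r3), "+r"(r4) : : "memory");
 *
 * The host recognizes the magic r3/r4 pair on the system-call exit and
 * routes the call to the OSI handler instead of reflecting it into the
 * guest kernel.
 */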

#define INS_DCBZ			0x7c0007ec
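
/*
 * INS_DCBZ is the encoding of 'dcbz r0,r0'.  Matching an arbitrary dcbz
 * means masking out the RA/RB register fields first; the mask below keeps
 * only the primary opcode and extended opcode and is shown for
 * illustration (the real scan lives in the dcbz patching code):
 *
 *	if ((inst & 0xfc0007ff) == INS_DCBZ)
 *		// instruction is a dcbz, whatever its operands
 */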

#endif /* __ASM_KVM_BOOK3S_H__ */