/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_H__
#define __ASM_KVM_BOOK3S_H__

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_book3s_asm.h>

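/*
 * Shadow copy of one guest Block Address Translation (BAT) register
 * pair.  The fields mirror the architected layout: bepi is the block
 * effective page index, brpn the block real page number, wimg the
 * storage attribute bits, pp the protection bits, and vs/vp the
 * supervisor-state and problem-state valid bits.
 */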
struct kvmppc_bat {
	u64 raw;
	u32 bepi;
	u32 bepi_mask;
	u32 brpn;
	u8 wimg;
	u8 pp;
	bool vs		: 1;
	bool vp		: 1;
};

struct kvmppc_sid_map {
	u64 guest_vsid;
	u64 guest_esid;
	u64 host_vsid;
	bool valid	: 1;
};

#define SID_MAP_BITS    9
#define SID_MAP_NUM     (1 << SID_MAP_BITS)
#define SID_MAP_MASK    (SID_MAP_NUM - 1)
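
/*
 * sid_map is a small, power-of-two hash table translating guest
 * segment IDs to the shadow VSIDs used on the host.  A minimal lookup
 * sketch (sid_hash() is illustrative here, not declared in this
 * header):
 *
 *	struct kvmppc_sid_map *map;
 *
 *	map = &to_book3s(vcpu)->sid_map[sid_hash(gvsid) & SID_MAP_MASK];
 *	if (map->valid && map->guest_vsid == gvsid)
 *		return map;	// hit: reuse the shadow VSID
 *	return NULL;		// miss: allocate a new mapping
 */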

#ifdef CONFIG_PPC_BOOK3S_64
#define SID_CONTEXTS	1
#else
#define SID_CONTEXTS	128
#define VSID_POOL_SIZE	(SID_CONTEXTS * 16)
#endif

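/*
 * One cached guest HPTE, i.e. a shadow translation installed on the
 * host.  Each entry is threaded onto the four hash lists in struct
 * kvmppc_vcpu_book3s below (keyed by effective address, long
 * effective-address range, virtual page and long virtual-page range),
 * so it can be found and invalidated from any of those keys; entries
 * are freed under RCU via rcu_head.
 */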
struct hpte_cache {
	struct hlist_node list_pte;
	struct hlist_node list_pte_long;
	struct hlist_node list_vpte;
	struct hlist_node list_vpte_long;
	struct rcu_head rcu_head;
	u64 host_va;
	u64 pfn;
	ulong slot;
	struct kvmppc_pte pte;
};

struct kvmppc_vcpu_book3s {
	struct kvm_vcpu vcpu;
	struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
	struct kvmppc_sid_map sid_map[SID_MAP_NUM];
	struct {
		u64 esid;
		u64 vsid;
	} slb_shadow[64];
	u8 slb_shadow_max;
	struct kvmppc_bat ibat[8];
	struct kvmppc_bat dbat[8];
	u64 hid[6];
	u64 gqr[8];
	u64 sdr1;
	u64 hior;
	u64 msr_mask;
	u64 vsid_next;
#ifdef CONFIG_PPC_BOOK3S_32
	u32 vsid_pool[VSID_POOL_SIZE];
#else
	u64 vsid_first;
	u64 vsid_max;
#endif
	int context_id[SID_CONTEXTS];

	struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
	struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
	struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
	struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
	int hpte_cache_count;
	spinlock_t mmu_lock;
};

#define CONTEXT_HOST		0
#define CONTEXT_GUEST		1
#define CONTEXT_GUEST_END	2

#define VSID_REAL	0x1fffffffffc00000ULL
#define VSID_BAT	0x1fffffffffb00000ULL
#define VSID_REAL_DR	0x2000000000000000ULL
#define VSID_REAL_IR	0x4000000000000000ULL
#define VSID_PR		0x8000000000000000ULL
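
/*
 * Flag bits OR'd into shadow VSIDs so that translations created in
 * different guest modes never alias each other: VSID_REAL and
 * VSID_BAT mark fake segments used while guest relocation is off or
 * for BAT-backed mappings, VSID_REAL_DR/VSID_REAL_IR distinguish
 * split real mode (only data or only instruction relocation off), and
 * VSID_PR separates problem-state (user) translations.
 */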

extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
extern void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr);
extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);

extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern int kvmppc_mmu_hpte_sysinit(void);
extern void kvmppc_mmu_hpte_sysexit(void);
extern int kvmppc_mmu_hv_init(void);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
			   bool upper, u32 val);
extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);

extern void kvmppc_entry_trampoline(void);
extern void kvmppc_hv_entry_trampoline(void);
extern void kvmppc_load_up_fpu(void);
extern void kvmppc_load_up_altivec(void);
extern void kvmppc_load_up_vsx(void);
extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);

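/*
 * The vcpu struct is embedded in struct kvmppc_vcpu_book3s, so
 * to_book3s() is a plain container_of() walk with no extra
 * indirection, e.g.:
 *
 *	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 *
 *	vcpu3s->hior = 0;
 */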
static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct kvmppc_vcpu_book3s, vcpu);
}

extern void kvm_return_point(void);

/* Also add subarch-specific defines */

#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
#include <asm/kvm_book3s_32.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/kvm_book3s_64.h>
#endif

#ifdef CONFIG_KVM_BOOK3S_PR

static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
	return to_book3s(vcpu)->hior;
}

static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
			unsigned long pending_now, unsigned long old_pending)
{
	if (pending_now)
		vcpu->arch.shared->int_pending = 1;
	else if (old_pending)
		vcpu->arch.shared->int_pending = 0;
}

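/*
 * In PR mode the low GPRs (r0-r13) also live in the shadow vcpu,
 * which is the copy the real-mode entry/exit code operates on, so the
 * accessors below must read and write that copy; r14-r31 are only
 * kept in vcpu->arch.gpr[].
 */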
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
	if (num < 14) {
		to_svcpu(vcpu)->gpr[num] = val;
		to_book3s(vcpu)->shadow_vcpu->gpr[num] = val;
	} else {
		vcpu->arch.gpr[num] = val;
	}
}

static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
	if (num < 14)
		return to_svcpu(vcpu)->gpr[num];
	else
		return vcpu->arch.gpr[num];
}

static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
	to_svcpu(vcpu)->cr = val;
	to_book3s(vcpu)->shadow_vcpu->cr = val;
}

static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
	return to_svcpu(vcpu)->cr;
}

static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
{
	to_svcpu(vcpu)->xer = val;
	to_book3s(vcpu)->shadow_vcpu->xer = val;
}

static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
	return to_svcpu(vcpu)->xer;
}

static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
	to_svcpu(vcpu)->ctr = val;
}

static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
	return to_svcpu(vcpu)->ctr;
}

static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
	to_svcpu(vcpu)->lr = val;
}

static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
	return to_svcpu(vcpu)->lr;
}

static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
	to_svcpu(vcpu)->pc = val;
}

static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
	return to_svcpu(vcpu)->pc;
}

static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
{
	ulong pc = kvmppc_get_pc(vcpu);
	struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);

	/*
	 * Load the instruction manually if it failed to do so in the
	 * exit path.
	 */
	if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
		kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);

	return svcpu->last_inst;
}

static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
	return to_svcpu(vcpu)->fault_dar;
}

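/*
 * With the paravirtual magic page, a guest kernel can open a critical
 * section by storing its r1 into the shared page's critical field;
 * while that value matches the guest's current r1 (and the guest is
 * not in problem state), interrupt injection is deferred.
 */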
static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;

	/* Truncate crit indicators in 32-bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

	return crit;
}
#else /* CONFIG_KVM_BOOK3S_PR */

static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
			unsigned long pending_now, unsigned long old_pending)
{
}

static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
	vcpu->arch.gpr[num] = val;
}

static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
	return vcpu->arch.gpr[num];
}

static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
	vcpu->arch.cr = val;
}

static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.cr;
}

static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
{
	vcpu->arch.xer = val;
}

static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.xer;
}

static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.ctr = val;
}

static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.ctr;
}

static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.lr = val;
}

static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.lr;
}

static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.pc = val;
}

static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.pc;
}

static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
{
	ulong pc = kvmppc_get_pc(vcpu);

	/*
	 * Load the instruction manually if it failed to do so in the
	 * exit path.
	 */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);

	return vcpu->arch.last_inst;
}

static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault_dar;
}

static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
	return false;
}
#endif

/*
 * Magic register values loaded into r3 and r4 before the 'sc' assembly
 * instruction for the OSI hypercalls.
 */
#define OSI_SC_MAGIC_R3			0x113724FA
#define OSI_SC_MAGIC_R4			0x77810F9B

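/*
 * An 'sc' trap is treated as an OSI hypercall rather than a guest
 * system call when both magic values are present, roughly:
 *
 *	if (kvmppc_get_gpr(vcpu, 3) == OSI_SC_MAGIC_R3 &&
 *	    kvmppc_get_gpr(vcpu, 4) == OSI_SC_MAGIC_R4)
 *		// hand the call out to the OSI backend in userspace
 */

/*
 * Opcode image of 'dcbz' (register fields clear), used by the
 * emulation code to recognize the instruction when guests that assume
 * a 32-byte cache line need their dcbz zeroing emulated.
 */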
#define INS_DCBZ			0x7c0007ec

#endif /* __ASM_KVM_BOOK3S_H__ */