/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_H__
#define __ASM_KVM_BOOK3S_H__

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_book3s_asm.h>

struct kvmppc_bat {
	u64 raw;
	u32 bepi;
	u32 bepi_mask;
	u32 brpn;
	u8 wimg;
	u8 pp;
	bool vs		: 1;
	bool vp		: 1;
};
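
/*
 * Illustrative sketch (editor's addition, not kernel API): how the cached
 * BAT fields above are typically consumed when deciding whether a BAT
 * covers an effective address.  The helper name is hypothetical; the real
 * logic lives in the 32-bit MMU emulation.
 */
static inline bool kvmppc_bat_covers(struct kvmppc_bat *bat, u32 eaddr,
				     u32 *raddr)
{
	/* BEPI/mask select the effective block, BRPN supplies the real page */
	if ((eaddr & bat->bepi_mask) != bat->bepi)
		return false;
	*raddr = bat->brpn | (eaddr & ~bat->bepi_mask);
	return true;
}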

struct kvmppc_sid_map {
	u64 guest_vsid;
	u64 guest_esid;
	u64 host_vsid;
	bool valid	: 1;
};

#define SID_MAP_BITS    9
#define SID_MAP_NUM     (1 << SID_MAP_BITS)
#define SID_MAP_MASK    (SID_MAP_NUM - 1)
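
/*
 * Illustrative sketch (editor's addition): the three defines above size a
 * small direct-mapped cache of guest->host segment mappings.  A lookup
 * folds the guest VSID down to SID_MAP_BITS bits and masks it; this hash
 * is a hypothetical stand-in for the one used by the MMU host code, e.g.
 * map = &to_book3s(vcpu)->sid_map[kvmppc_sid_map_slot(gvsid)];
 */
static inline u16 kvmppc_sid_map_slot(u64 gvsid)
{
	/* fold the whole VSID into the low SID_MAP_BITS bits, then mask */
	return (u16)((gvsid ^ (gvsid >> SID_MAP_BITS) ^
		      (gvsid >> (2 * SID_MAP_BITS))) & SID_MAP_MASK);
}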

#ifdef CONFIG_PPC_BOOK3S_64
#define SID_CONTEXTS	1
#else
#define SID_CONTEXTS	128
#define VSID_POOL_SIZE	(SID_CONTEXTS * 16)
#endif

struct hpte_cache {
	struct hlist_node list_pte;
	struct hlist_node list_pte_long;
	struct hlist_node list_vpte;
	struct hlist_node list_vpte_long;
#ifdef CONFIG_PPC_BOOK3S_64
	struct hlist_node list_vpte_64k;
#endif
	struct rcu_head rcu_head;
	u64 host_vpn;
	u64 pfn;
	ulong slot;
	struct kvmppc_pte pte;
	int pagesize;
};

/*
 * Struct for a virtual core.
 * Note: entry_exit_map combines a bitmap of threads that have entered
 * in the bottom 8 bits and a bitmap of threads that have exited in the
 * next 8 bits.  This is so that we can atomically set the entry bit
 * iff the exit map is 0 without taking a lock.
 */
struct kvmppc_vcore {
	int n_runnable;
	int num_threads;
	int entry_exit_map;
	int napping_threads;
	int first_vcpuid;
	u16 pcpu;
	u16 last_cpu;
	u8 vcore_state;
	u8 in_guest;
	struct kvm_vcpu *runnable_threads[MAX_SMT_THREADS];
	struct list_head preempt_list;
	spinlock_t lock;
	struct rcuwait wait;
	spinlock_t stoltb_lock;	/* protects stolen_tb and preempt_tb */
	u64 stolen_tb;
	u64 preempt_tb;
	struct kvm_vcpu *runner;
	struct kvm *kvm;
	u64 tb_offset;		/* guest timebase - host timebase */
	u64 tb_offset_applied;	/* timebase offset currently in force */
	ulong lpcr;
	u32 arch_compat;
	ulong pcr;
	ulong dpdes;		/* doorbell state (POWER8) */
	ulong vtb;		/* virtual timebase */
	ulong conferring_threads;
	unsigned int halt_poll_ns;
	atomic_t online_count;
};
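
/*
 * Illustrative sketch (editor's addition): the entry_exit_map layout
 * documented above lets a thread claim entry without taking vc->lock.
 * The entry bit may only be set while the exit half (bits 8-15) is still
 * zero; cmpxchg() makes the test-and-set atomic.  Hypothetical helper,
 * not part of the kernel's API.
 */
static inline bool kvmppc_try_enter_vcore(struct kvmppc_vcore *vc, int thread)
{
	int old, new;

	do {
		old = vc->entry_exit_map;
		if (old >> 8)			/* some thread already exited */
			return false;
		new = old | (1 << thread);	/* set our entry bit */
	} while (cmpxchg(&vc->entry_exit_map, old, new) != old);
	return true;
}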

struct kvmppc_vcpu_book3s {
	struct kvmppc_sid_map sid_map[SID_MAP_NUM];
	struct {
		u64 esid;
		u64 vsid;
	} slb_shadow[64];
	u8 slb_shadow_max;
	struct kvmppc_bat ibat[8];
	struct kvmppc_bat dbat[8];
	u64 hid[6];
	u64 gqr[8];
	u64 sdr1;
	u64 hior;
	u64 msr_mask;
	u64 vtb;
#ifdef CONFIG_PPC_BOOK3S_32
	u32 vsid_pool[VSID_POOL_SIZE];
	u32 vsid_next;
#else
	u64 proto_vsid_first;
	u64 proto_vsid_max;
	u64 proto_vsid_next;
#endif
	int context_id[SID_CONTEXTS];

	bool hior_explicit;		/* HIOR is set by ioctl, not PVR */

	struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
	struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
	struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
	struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
#ifdef CONFIG_PPC_BOOK3S_64
	struct hlist_head hpte_hash_vpte_64k[HPTEG_HASH_NUM_VPTE_64K];
#endif
	int hpte_cache_count;
	spinlock_t mmu_lock;
};

#define VSID_REAL	0x07ffffffffc00000ULL
#define VSID_BAT	0x07ffffffffb00000ULL
#define VSID_64K	0x0800000000000000ULL
#define VSID_1T		0x1000000000000000ULL
#define VSID_REAL_DR	0x2000000000000000ULL
#define VSID_REAL_IR	0x4000000000000000ULL
#define VSID_PR		0x8000000000000000ULL

extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
			       bool iswrite);
extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
extern int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
			unsigned long addr, unsigned long status);
extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
			unsigned long slb_v, unsigned long valid);
extern int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
			unsigned long gpa, gva_t ea, int is_store);

extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte);
extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern int kvmppc_mmu_hpte_sysinit(void);
extern void kvmppc_mmu_hpte_sysexit(void);
extern int kvmppc_mmu_hv_init(void);
extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc);

extern int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
			unsigned long ea, unsigned long dsisr);
extern unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
					gva_t eaddr, void *to, void *from,
					unsigned long n);
extern long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
					void *to, unsigned long n);
extern long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
				      void *from, unsigned long n);
extern int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
				      struct kvmppc_pte *gpte, u64 root,
				      u64 *pte_ret_p);
extern int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, u64 table,
			int table_index, u64 *pte_ret_p);
extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, bool data, bool iswrite);
extern void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
				    unsigned int pshift, unsigned int lpid);
extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
			unsigned int shift,
			const struct kvm_memory_slot *memslot,
			unsigned int lpid);
extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested,
				    bool writing, unsigned long gpa,
				    unsigned int lpid);
extern int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
				unsigned long gpa,
				struct kvm_memory_slot *memslot,
				bool writing, bool kvm_ro,
				pte_t *inserted_pte, unsigned int *levelp);
extern int kvmppc_init_vm_radix(struct kvm *kvm);
extern void kvmppc_free_radix(struct kvm *kvm);
extern void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd,
				      unsigned int lpid);
extern int kvmppc_radix_init(void);
extern void kvmppc_radix_exit(void);
extern void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			    unsigned long gfn);
extern bool kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			  unsigned long gfn);
extern bool kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			       unsigned long gfn);
extern long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map);
extern void kvmppc_radix_flush_memslot(struct kvm *kvm,
			const struct kvm_memory_slot *memslot);
extern int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);

/* XXX remove this export when load_last_inst() is generic */
extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
					  unsigned int vec);
extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
extern void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac);
extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
			   bool upper, u32 val);
extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
extern int kvmppc_emulate_paired_single(struct kvm_vcpu *vcpu);
extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa,
			bool writing, bool *writable);
extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
			unsigned long *rmap, long pte_index, int realmode);
extern void kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot,
			unsigned long gfn, unsigned long psize);
extern void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index);
void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index);
extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
			unsigned long *nb_ret);
extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr,
			unsigned long gpa, bool dirty);
extern long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
			long pte_index, unsigned long pteh, unsigned long ptel,
			pgd_t *pgdir, bool realmode, unsigned long *idx_ret);
extern long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
			unsigned long pte_index, unsigned long avpn,
			unsigned long *hpret);
extern long kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map);
extern void kvmppc_harvest_vpa_dirty(struct kvmppc_vpa *vpa,
			struct kvm_memory_slot *memslot,
			unsigned long *map);
extern unsigned long kvmppc_filter_lpcr_hv(struct kvm *kvm,
			unsigned long lpcr);
extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
			unsigned long mask);
extern void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr);

extern int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu);
extern int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu);
extern void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu);

extern void kvmppc_entry_trampoline(void);
extern void kvmppc_hv_entry_trampoline(void);
extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
extern void kvmppc_pr_init_default_hcalls(struct kvm *kvm);
extern int kvmppc_hcall_impl_pr(unsigned long cmd);
extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);

long kvmppc_read_intr(void);
void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr);
void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu);
void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu);
void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu);
void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu);
#else
static inline void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {}
#endif

long kvmhv_nested_init(void);
void kvmhv_nested_exit(void);
void kvmhv_vm_nested_init(struct kvm *kvm);
long kvmhv_set_partition_table(struct kvm_vcpu *vcpu);
long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu);
void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1);
void kvmhv_release_all_nested(struct kvm *kvm);
long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
			     unsigned long type, unsigned long pg_sizes,
			     unsigned long start, unsigned long end);
int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu,
			  u64 time_limit, unsigned long lpcr);
void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
				   struct hv_guest_state *hr);
long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu);

void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);

extern int kvm_irq_bypass;

static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.book3s;
}

/* Also add subarch specific defines */

#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
#include <asm/kvm_book3s_32.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/kvm_book3s_64.h>
#endif

static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
	vcpu->arch.regs.gpr[num] = val;
}

static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
	return vcpu->arch.regs.gpr[num];
}

static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
	vcpu->arch.regs.ccr = val;
}

static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs.ccr;
}

static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.xer = val;
}

static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs.xer;
}

static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.ctr = val;
}

static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs.ctr;
}

static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.link = val;
}

static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs.link;
}

static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.nip = val;
}

static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs.nip;
}

static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu);
static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
{
	return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE);
}
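
/*
 * Example use (editor's addition): values fetched from a cross-endian
 * guest must be swapped before the host interprets them, e.g. for a
 * just-loaded instruction word:
 *
 *	if (kvmppc_need_byteswap(vcpu))
 *		inst = swab32(inst);
 */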

static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault_dar;
}

/* Expiry time of vcpu DEC relative to host TB */
static inline u64 kvmppc_dec_expires_host_tb(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.dec_expires - vcpu->arch.vcore->tb_offset;
}

static inline bool is_kvmppc_resume_guest(int r)
{
	return (r == RESUME_GUEST || r == RESUME_GUEST_NV);
}

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm);
static inline bool kvmppc_supports_magic_page(struct kvm_vcpu *vcpu)
{
	/* Only PR KVM supports the magic page */
	return !is_kvmppc_hv_enabled(vcpu->kvm);
}

extern int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu);
extern int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu);

/* Magic register values loaded into r3 and r4 before the 'sc' assembly
 * instruction for the OSI hypercalls */
#define OSI_SC_MAGIC_R3			0x113724FA
#define OSI_SC_MAGIC_R4			0x77810F9B
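
/*
 * Illustrative sketch (editor's addition): a guest 'sc' exit is treated
 * as an OSI hypercall only when both magic values match; anything else is
 * an ordinary guest syscall.  Hypothetical helper mirroring the PR KVM
 * exit path.
 */
static inline bool kvmppc_is_osi_hcall(struct kvm_vcpu *vcpu)
{
	return kvmppc_get_gpr(vcpu, 3) == OSI_SC_MAGIC_R3 &&
	       kvmppc_get_gpr(vcpu, 4) == OSI_SC_MAGIC_R4;
}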

#define INS_DCBZ			0x7c0007ec
/* TO = 31 for unconditional trap */
#define INS_TW				0x7fe00008

#define SPLIT_HACK_MASK			0xff000000
#define SPLIT_HACK_OFFS			0xfb000000

/*
 * This packs a VCPU ID from the [0..KVM_MAX_VCPU_IDS) space down to the
 * [0..KVM_MAX_VCPUS) space, using knowledge of the guest's core stride
 * (but not its actual threading mode, which is not available) to avoid
 * collisions.
 *
 * The implementation leaves VCPU IDs from the range [0..KVM_MAX_VCPUS) (block
 * 0) unchanged: if the guest is filling each VCORE completely then it will be
 * using consecutive IDs and it will fill the space without any packing.
 *
 * For higher VCPU IDs, the packed ID is based on the VCPU ID modulo
 * KVM_MAX_VCPUS (effectively masking off the top bits) and then an offset is
 * added to avoid collisions.
 *
 * VCPU IDs in the range [KVM_MAX_VCPUS..(KVM_MAX_VCPUS*2)) (block 1) are only
 * possible if the guest is leaving at least 1/2 of each VCORE empty, so IDs
 * can be safely packed into the second half of each VCORE by adding an offset
 * of (stride / 2).
 *
 * Similarly, if VCPU IDs in the range [(KVM_MAX_VCPUS*2)..(KVM_MAX_VCPUS*4))
 * (blocks 2 and 3) are seen, the guest must be leaving at least 3/4 of each
 * VCORE empty so packed IDs can be offset by (stride / 4) and (stride * 3 / 4).
 *
 * Finally, VCPU IDs from blocks 4..7 will only be seen if the guest is using a
 * stride of 8 and 1 thread per core so the remaining offsets of 1, 5, 3 and 7
 * must be free to use.
 *
 * (The offsets for each block are stored in block_offsets[], indexed by the
 * block number if the stride is 8. For cases where the guest's stride is less
 * than 8, we can re-use the block_offsets array by multiplying the block
 * number by (MAX_SMT_THREADS / stride) to reach the correct entry.)
 */
static inline u32 kvmppc_pack_vcpu_id(struct kvm *kvm, u32 id)
{
	const int block_offsets[MAX_SMT_THREADS] = {0, 4, 2, 6, 1, 5, 3, 7};
	int stride = kvm->arch.emul_smt_mode;
	int block = (id / KVM_MAX_VCPUS) * (MAX_SMT_THREADS / stride);
	u32 packed_id;

	if (WARN_ONCE(block >= MAX_SMT_THREADS, "VCPU ID too large to pack"))
		return 0;
	packed_id = (id % KVM_MAX_VCPUS) + block_offsets[block];
	if (WARN_ONCE(packed_id >= KVM_MAX_VCPUS, "VCPU ID packing failed"))
		return 0;
	return packed_id;
}
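
/*
 * Worked example (editor's addition, values hypothetical): if
 * KVM_MAX_VCPUS were 2048 and the guest used a core stride of 8
 * (emul_smt_mode == 8), VCPU ID 2050 falls in block 1, so it packs to
 * (2050 % 2048) + block_offsets[1] = 2 + 4 = 6.
 */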

#endif /* __ASM_KVM_BOOK3S_H__ */