xref: /openbmc/linux/arch/powerpc/kernel/kvm.c (revision 64d85cc9)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 */

#include <linux/kvm_host.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/kvm_para.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/pagemap.h>

#include <asm/reg.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/epapr_hcalls.h>

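/*
 * Paravirt "magic page" interface: the host maps a shared
 * struct kvm_vcpu_arch_shared at the very top of the guest effective
 * address space (-4096). magic_var(x) yields the effective address of
 * field x within that page, so privileged register accesses can be
 * rewritten into plain loads and stores.
 */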
#define KVM_MAGIC_PAGE		(-4096L)
#define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)

#define KVM_INST_LWZ		0x80000000
#define KVM_INST_STW		0x90000000
#define KVM_INST_LD		0xe8000000
#define KVM_INST_STD		0xf8000000
#define KVM_INST_NOP		0x60000000
#define KVM_INST_B		0x48000000
#define KVM_INST_B_MASK		0x03ffffff
#define KVM_INST_B_MAX		0x01ffffff
#define KVM_INST_LI		0x38000000

#define KVM_MASK_RT		0x03e00000
#define KVM_RT_30		0x03c00000
#define KVM_MASK_RB		0x0000f800
#define KVM_INST_MFMSR		0x7c0000a6

#define SPR_FROM		0
#define SPR_TO			0x100

#define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
				    (((sprn) & 0x1f) << 16) | \
				    (((sprn) & 0x3e0) << 6) | \
				    (moveto))

#define KVM_INST_MFSPR(sprn)	KVM_INST_SPR(sprn, SPR_FROM)
#define KVM_INST_MTSPR(sprn)	KVM_INST_SPR(sprn, SPR_TO)

#define KVM_INST_TLBSYNC	0x7c00046c
#define KVM_INST_MTMSRD_L0	0x7c000164
#define KVM_INST_MTMSRD_L1	0x7c010164
#define KVM_INST_MTMSR		0x7c000124

#define KVM_INST_WRTEE		0x7c000106
#define KVM_INST_WRTEEI_0	0x7c000146
#define KVM_INST_WRTEEI_1	0x7c008146

#define KVM_INST_MTSRIN		0x7c0001e4

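/*
 * Scratch buffer that holds the generated emulation trampolines.
 * kvm_alloc() carves chunks out of it; whatever is left over is
 * handed back to the page allocator by kvm_free_tmp().
 */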
static bool kvm_patching_worked = true;
char kvm_tmp[1024 * 1024];
static int kvm_tmp_index;

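/* Overwrite one instruction and flush the icache so the change is visible to instruction fetch. */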
static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
{
	*inst = new_inst;
	flush_icache_range((ulong)inst, (ulong)inst + 4);
}

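/*
 * Helpers that rewrite the instruction at *inst into a load from or a
 * store to a magic-page field. _ld/_std access the value of a 64-bit
 * shared field, so on 32-bit kernels they use the low word at
 * offset + 4; _ll emits a register-width load at the field's start,
 * which matches how the emulation templates save their scratch
 * registers.
 */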
static void kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc));
#endif
}

static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff));
}

static void kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc));
}

static void kvm_patch_ins_nop(u32 *inst)
{
	kvm_patch_ins(inst, KVM_INST_NOP);
}

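/* Replace the instruction at *inst with an unconditional relative branch. */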
static void kvm_patch_ins_b(u32 *inst, int addr)
{
#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
	/*
	 * On relocatable kernels the interrupt handlers and our code
	 * can be in different regions, so we don't patch them.
	 */
	if ((ulong)inst < (ulong)&__end_interrupts)
		return;
#endif

	kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK));
}

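/*
 * Carve @len bytes out of kvm_tmp. Returns NULL (and flags the whole
 * patching attempt as failed) once the scratch buffer is exhausted.
 */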
static u32 *kvm_alloc(int len)
{
	u32 *p;

	if ((kvm_tmp_index + len) > ARRAY_SIZE(kvm_tmp)) {
		printk(KERN_ERR "KVM: No more space (%d + %d)\n",
				kvm_tmp_index, len);
		kvm_patching_worked = false;
		return NULL;
	}

	p = (void *)&kvm_tmp[kvm_tmp_index];
	kvm_tmp_index += len;

	return p;
}

extern u32 kvm_emulate_mtmsrd_branch_offs;
extern u32 kvm_emulate_mtmsrd_reg_offs;
extern u32 kvm_emulate_mtmsrd_orig_ins_offs;
extern u32 kvm_emulate_mtmsrd_len;
extern u32 kvm_emulate_mtmsrd[];

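/*
 * mtmsrd rX, 1 would otherwise trap and be emulated by the host, so it
 * is rewritten into a branch to a per-site copy of the
 * kvm_emulate_mtmsrd template. The copy gets the source register, the
 * original instruction and the branch back to the next instruction
 * patched in; if the guest uses r30/r31 (the template's scratch
 * registers), their values are loaded from the magic-page scratch
 * slots instead.
 */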
static void kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsrd_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4);
	p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK;
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsrd_reg_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsrd_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

extern u32 kvm_emulate_mtmsr_branch_offs;
extern u32 kvm_emulate_mtmsr_reg1_offs;
extern u32 kvm_emulate_mtmsr_reg2_offs;
extern u32 kvm_emulate_mtmsr_orig_ins_offs;
extern u32 kvm_emulate_mtmsr_len;
extern u32 kvm_emulate_mtmsr[];

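/*
 * Same trampoline scheme for mtmsr and mtmsrd rX, 0; this template
 * references the source register in two places, hence reg1/reg2.
 */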
static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsr_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4);
	p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK;

	/* Make clobbered registers work too */
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch2), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch1), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsr_reg1_offs] |= rt;
		p[kvm_emulate_mtmsr_reg2_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsr_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#ifdef CONFIG_BOOKE

extern u32 kvm_emulate_wrtee_branch_offs;
extern u32 kvm_emulate_wrtee_reg_offs;
extern u32 kvm_emulate_wrtee_orig_ins_offs;
extern u32 kvm_emulate_wrtee_len;
extern u32 kvm_emulate_wrtee[];

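/*
 * BookE: rewrite wrtee rX (or wrteei 1, when imm_one is set) into a
 * branch to a per-site copy of the kvm_emulate_wrtee template. For
 * wrteei 1 the register operand is replaced by an immediate load of
 * MSR_EE into r30.
 */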
static void kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrtee_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrtee_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_wrtee, kvm_emulate_wrtee_len * 4);
	p[kvm_emulate_wrtee_branch_offs] |= distance_end & KVM_INST_B_MASK;

	if (imm_one) {
		p[kvm_emulate_wrtee_reg_offs] =
			KVM_INST_LI | __PPC_RT(R30) | MSR_EE;
	} else {
		/* Make clobbered registers work too */
		switch (get_rt(rt)) {
		case 30:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch2), KVM_RT_30);
			break;
		case 31:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch1), KVM_RT_30);
			break;
		default:
			p[kvm_emulate_wrtee_reg_offs] |= rt;
			break;
		}
	}

	p[kvm_emulate_wrtee_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrtee_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

extern u32 kvm_emulate_wrteei_0_branch_offs;
extern u32 kvm_emulate_wrteei_0_len;
extern u32 kvm_emulate_wrteei_0[];

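/*
 * wrteei 0 only clears MSR_EE, so it gets a smaller dedicated template
 * with no register operand to patch.
 */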
static void kvm_patch_ins_wrteei_0(u32 *inst)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrteei_0_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_0_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	memcpy(p, kvm_emulate_wrteei_0, kvm_emulate_wrteei_0_len * 4);
	p[kvm_emulate_wrteei_0_branch_offs] |= distance_end & KVM_INST_B_MASK;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_0_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#endif

#ifdef CONFIG_PPC_BOOK3S_32

extern u32 kvm_emulate_mtsrin_branch_offs;
extern u32 kvm_emulate_mtsrin_reg1_offs;
extern u32 kvm_emulate_mtsrin_reg2_offs;
extern u32 kvm_emulate_mtsrin_orig_ins_offs;
extern u32 kvm_emulate_mtsrin_len;
extern u32 kvm_emulate_mtsrin[];

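/*
 * 32-bit Book3S: rewrite mtsrin rS, rB into a branch to a
 * kvm_emulate_mtsrin trampoline with both register operands patched
 * into the template.
 */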
static void kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtsrin_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtsrin_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtsrin, kvm_emulate_mtsrin_len * 4);
	p[kvm_emulate_mtsrin_branch_offs] |= distance_end & KVM_INST_B_MASK;
	p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10);
	p[kvm_emulate_mtsrin_reg2_offs] |= rt;
	p[kvm_emulate_mtsrin_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtsrin_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#endif

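/*
 * Runs on each CPU: asks the host via the ePAPR hypercall interface to
 * map the magic page at the top of the effective address space and
 * reports back the feature bits the host supports for it.
 */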
static void kvm_map_magic_page(void *data)
{
	u32 *features = data;

	ulong in[8] = {0};
	ulong out[8];

	in[0] = KVM_MAGIC_PAGE;
	in[1] = KVM_MAGIC_PAGE | MAGIC_PAGE_FLAG_NOT_MAPPED_NX;

	epapr_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));

	*features = out[0];
}

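/*
 * Look at a single kernel instruction and patch it in place if the
 * magic page can satisfy it: mfmsr/mfspr of shared registers become
 * loads from the magic page, mtspr becomes a store, tlbsync becomes a
 * nop, and mtmsr/mtmsrd/wrtee/wrteei/mtsrin are redirected to the
 * emulation trampolines generated above.
 */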
static void kvm_check_ins(u32 *inst, u32 features)
{
	u32 _inst = *inst;
	u32 inst_no_rt = _inst & ~KVM_MASK_RT;
	u32 inst_rt = _inst & KVM_MASK_RT;

	switch (inst_no_rt) {
	/* Loads */
	case KVM_INST_MFMSR:
		kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG0):
		kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG1):
		kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG2):
		kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG3):
		kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR0):
		kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR1):
		kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_DEAR):
#else
	case KVM_INST_MFSPR(SPRN_DAR):
#endif
		kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_DSISR):
		kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
		break;

#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MFSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */

	case KVM_INST_MFSPR(SPRN_SPRG4):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG4R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG5):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG5R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG6):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG6R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG7):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG7R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt);
		break;
#endif

	case KVM_INST_MFSPR(SPRN_PIR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(pir), inst_rt);
		break;

	/* Stores */
	case KVM_INST_MTSPR(SPRN_SPRG0):
		kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG1):
		kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG2):
		kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG3):
		kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR0):
		kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR1):
		kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_DEAR):
#else
	case KVM_INST_MTSPR(SPRN_DAR):
#endif
		kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_DSISR):
		kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
		break;
#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MTSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */

	case KVM_INST_MTSPR(SPRN_SPRG4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG5):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(esr), inst_rt);
		break;
#endif

	/* Nops */
	case KVM_INST_TLBSYNC:
		kvm_patch_ins_nop(inst);
		break;

	/* Rewrites */
	case KVM_INST_MTMSRD_L1:
		kvm_patch_ins_mtmsrd(inst, inst_rt);
		break;
	case KVM_INST_MTMSR:
	case KVM_INST_MTMSRD_L0:
		kvm_patch_ins_mtmsr(inst, inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEE:
		kvm_patch_ins_wrtee(inst, inst_rt, 0);
		break;
#endif
	}

	switch (inst_no_rt & ~KVM_MASK_RB) {
#ifdef CONFIG_PPC_BOOK3S_32
	case KVM_INST_MTSRIN:
		if (features & KVM_MAGIC_FEAT_SR) {
			u32 inst_rb = _inst & KVM_MASK_RB;
			kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
		}
		break;
#endif
	}

	switch (_inst) {
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEEI_0:
		kvm_patch_ins_wrteei_0(inst);
		break;

	case KVM_INST_WRTEEI_1:
		kvm_patch_ins_wrtee(inst, 0, 1);
		break;
#endif
	}
}

extern u32 kvm_template_start[];
extern u32 kvm_template_end[];

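/*
 * Main entry point for magic-page patching: map the page on every CPU,
 * verify it is readable, then scan the kernel text and patch every
 * instruction that kvm_check_ins() knows how to handle.
 */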
static void kvm_use_magic_page(void)
{
	u32 *p;
	u32 *start, *end;
	u32 features;

	/* Tell the host to map the magic page to -4096 on all CPUs */
	on_each_cpu(kvm_map_magic_page, &features, 1);

	/*
	 * Quick self-test to see if the mapping works. Note that
	 * fault_in_pages_readable() returns 0 on success, so only bail
	 * out when the magic page could not be faulted in.
	 */
	if (fault_in_pages_readable((const char *)KVM_MAGIC_PAGE, sizeof(u32))) {
		kvm_patching_worked = false;
		return;
	}

	/* Now loop through all code and find instructions */
	start = (void *)_stext;
	end = (void *)_etext;

	/*
	 * Being interrupted in the middle of patching would
	 * be bad for SPRG4-7, which KVM can't keep in sync
	 * with emulated accesses because reads don't trap.
	 */
	local_irq_disable();

	for (p = start; p < end; p++) {
		/* Avoid patching the template code */
		if (p >= kvm_template_start && p < kvm_template_end) {
			p = kvm_template_end - 1;
			continue;
		}
		kvm_check_ins(p, features);
	}

	local_irq_enable();

	printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
			 kvm_patching_worked ? "worked" : "failed");
}

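/* Give the unused tail of kvm_tmp back to the page allocator. */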
static __init void kvm_free_tmp(void)
{
	/*
	 * Inform kmemleak about the hole in the .bss section since the
	 * corresponding pages will be unmapped with DEBUG_PAGEALLOC=y.
	 */
	kmemleak_free_part(&kvm_tmp[kvm_tmp_index],
			   ARRAY_SIZE(kvm_tmp) - kvm_tmp_index);
	free_reserved_area(&kvm_tmp[kvm_tmp_index],
			   &kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL);
}

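/*
 * Guest-side initialisation: only act when running as a KVM guest with
 * ePAPR paravirt support, and patch the kernel if the host offers the
 * magic page. Any unused trampoline space is freed in all cases.
 */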
static int __init kvm_guest_init(void)
{
	if (!kvm_para_available())
		goto free_tmp;

	if (!epapr_paravirt_enabled)
		goto free_tmp;

	if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
		kvm_use_magic_page();

#ifdef CONFIG_PPC_BOOK3S_64
	/* Enable napping */
	powersave_nap = 1;
#endif

free_tmp:
	kvm_free_tmp();

	return 0;
}

postcore_initcall(kvm_guest_init);