/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>
#include <linux/module.h>
#include <linux/log2.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

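/*
 * Note: most of the code in this file runs in hypervisor real mode
 * (MMU off), on the HPT hcall and fault paths; a few entry points can
 * also be reached in virtual mode via the "realmode" flag.  That
 * constrains what we can do here: no sleeping, and no dereferencing of
 * vmalloc'd addresses, which is why real_vmalloc_addr() below exists
 * to convert them to linear-map addresses first.
 */
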
/* Translate address of a vmalloc'd thing to a linear map address */
static void *real_vmalloc_addr(void *x)
{
	unsigned long addr = (unsigned long) x;
	pte_t *p;
	/*
	 * Assume we don't have huge pages in vmalloc space, so we don't
	 * need to worry about THP collapse/split.  This is called only
	 * in real mode, hence we don't need irq_save/restore around the
	 * page-table walk.
	 */
	p = __find_linux_pte_or_hugepte(swapper_pg_dir, addr, NULL, NULL);
	if (!p || !pte_present(*p))
		return NULL;
	addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
	return __va(addr);
}

/* Return 1 if we need to do a global tlbie, 0 if we can use tlbiel */
static int global_invalidates(struct kvm *kvm, unsigned long flags)
{
	int global;

	/*
	 * If there is only one vcore, and it's currently running,
	 * as indicated by local_paca->kvm_hstate.kvm_vcpu being set,
	 * we can use tlbiel as long as we mark all other physical
	 * cores as potentially having stale TLB entries for this lpid.
	 * Otherwise, don't use tlbiel.
	 */
	if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcpu)
		global = 0;
	else
		global = 1;

	if (!global) {
		/* any other core might now have stale TLB entries... */
		smp_wmb();
		cpumask_setall(&kvm->arch.need_tlb_flush);
		cpumask_clear_cpu(local_paca->kvm_hstate.kvm_vcore->pcpu,
				  &kvm->arch.need_tlb_flush);
	}

	return global;
}
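
/*
 * Note: the need_tlb_flush mask set above is consumed later on the
 * guest entry path.  A physical core that finds its bit set is
 * expected to flush this LPID's TLB entries before running the guest
 * again; the precise entry-path hook lives outside this file, so this
 * describes the contract rather than code here.
 */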

/*
 * Add this HPTE into the chain for the real page.
 * Must be called with the chain locked; it unlocks the chain.
 */
void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
			     unsigned long *rmap, long pte_index, int realmode)
{
	struct revmap_entry *head, *tail;
	unsigned long i;

	if (*rmap & KVMPPC_RMAP_PRESENT) {
		i = *rmap & KVMPPC_RMAP_INDEX;
		head = &kvm->arch.revmap[i];
		if (realmode)
			head = real_vmalloc_addr(head);
		tail = &kvm->arch.revmap[head->back];
		if (realmode)
			tail = real_vmalloc_addr(tail);
		rev->forw = i;
		rev->back = head->back;
		tail->forw = pte_index;
		head->back = pte_index;
	} else {
		rev->forw = rev->back = pte_index;
		*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) |
			pte_index | KVMPPC_RMAP_PRESENT;
	}
	unlock_rmap(rmap);
}
EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);
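
/*
 * Sketch of the reverse-map structure built above (derived from the
 * code, not from separate documentation):
 *
 *   *rmap: [ lock | PRESENT | rc bits | head HPTE index ]
 *                                            |
 *                                            v
 *     revmap[head] <-> revmap[a] <-> ... <-> revmap[tail]
 *
 * i.e. a circular doubly-linked list of HPTE indices threaded through
 * the forw/back fields of the revmap entries, with the list head kept
 * in the per-page rmap word.  New entries are inserted at the tail.
 */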

/* Update the changed page order field of an rmap entry */
void kvmppc_update_rmap_change(unsigned long *rmap, unsigned long psize)
{
	unsigned long order;

	if (!psize)
		return;
	order = ilog2(psize);
	order <<= KVMPPC_RMAP_CHG_SHIFT;
	if (order > (*rmap & KVMPPC_RMAP_CHG_ORDER))
		*rmap = (*rmap & ~KVMPPC_RMAP_CHG_ORDER) | order;
}
EXPORT_SYMBOL_GPL(kvmppc_update_rmap_change);

/* Returns a pointer to the rmap chain head for the page mapped by a HPTE */
static unsigned long *revmap_for_hpte(struct kvm *kvm, unsigned long hpte_v,
				      unsigned long hpte_gr)
{
	struct kvm_memory_slot *memslot;
	unsigned long *rmap;
	unsigned long gfn;

	gfn = hpte_rpn(hpte_gr, hpte_page_size(hpte_v, hpte_gr));
	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
	if (!memslot)
		return NULL;

	rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);
	return rmap;
}

/* Remove this HPTE from the chain for a real page */
static void remove_revmap_chain(struct kvm *kvm, long pte_index,
				struct revmap_entry *rev,
				unsigned long hpte_v, unsigned long hpte_r)
{
	struct revmap_entry *next, *prev;
	unsigned long ptel, head;
	unsigned long *rmap;
	unsigned long rcbits;

	rcbits = hpte_r & (HPTE_R_R | HPTE_R_C);
	ptel = rev->guest_rpte |= rcbits;
	rmap = revmap_for_hpte(kvm, hpte_v, ptel);
	if (!rmap)
		return;
	lock_rmap(rmap);

	head = *rmap & KVMPPC_RMAP_INDEX;
	next = real_vmalloc_addr(&kvm->arch.revmap[rev->forw]);
	prev = real_vmalloc_addr(&kvm->arch.revmap[rev->back]);
	next->back = rev->back;
	prev->forw = rev->forw;
	if (head == pte_index) {
		head = rev->forw;
		if (head == pte_index)
			*rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
		else
			*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
	}
	*rmap |= rcbits << KVMPPC_RMAP_RC_SHIFT;
	if (rcbits & HPTE_R_C)
		kvmppc_update_rmap_change(rmap, hpte_page_size(hpte_v, hpte_r));
	unlock_rmap(rmap);
}

long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
		       long pte_index, unsigned long pteh, unsigned long ptel,
		       pgd_t *pgdir, bool realmode, unsigned long *pte_idx_ret)
{
	unsigned long i, pa, gpa, gfn, psize;
	unsigned long slot_fn, hva;
	__be64 *hpte;
	struct revmap_entry *rev;
	unsigned long g_ptel;
	struct kvm_memory_slot *memslot;
	unsigned hpage_shift;
	bool is_ci;
	unsigned long *rmap;
	pte_t *ptep;
	unsigned int writing;
	unsigned long mmu_seq;
	unsigned long rcbits, irq_flags = 0;

	psize = hpte_page_size(pteh, ptel);
	if (!psize)
		return H_PARAMETER;
	writing = hpte_is_writable(ptel);
	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
	ptel &= ~HPTE_GR_RESERVED;
	g_ptel = ptel;

	/* used later to detect if we might have been invalidated */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	/* Find the memslot (if any) for this address */
	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
	gfn = gpa >> PAGE_SHIFT;
	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
	pa = 0;
	is_ci = false;
	rmap = NULL;
	if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
		/* Emulated MMIO - mark this with key=31 */
		pteh |= HPTE_V_ABSENT;
		ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO;
		goto do_insert;
	}

	/* Check if the requested page fits entirely in the memslot. */
	if (!slot_is_aligned(memslot, psize))
		return H_PARAMETER;
	slot_fn = gfn - memslot->base_gfn;
	rmap = &memslot->arch.rmap[slot_fn];

	/* Translate to host virtual address */
	hva = __gfn_to_hva_memslot(memslot, gfn);
	/*
	 * If there is a page table change after this lookup, we will
	 * retry via mmu_notifier_retry.
	 */
	if (realmode)
		ptep = __find_linux_pte_or_hugepte(pgdir, hva, NULL,
						   &hpage_shift);
	else {
		local_irq_save(irq_flags);
		ptep = find_linux_pte_or_hugepte(pgdir, hva, NULL,
						 &hpage_shift);
	}
	if (ptep) {
		pte_t pte;
		unsigned int host_pte_size;

		if (hpage_shift)
			host_pte_size = 1ul << hpage_shift;
		else
			host_pte_size = PAGE_SIZE;
		/*
		 * The guest page size should always be <= the host
		 * page size, even when the host is using hugepages.
		 */
		if (host_pte_size < psize) {
			if (!realmode)
				local_irq_restore(irq_flags);
			return H_PARAMETER;
		}
		pte = kvmppc_read_update_linux_pte(ptep, writing);
		if (pte_present(pte) && !pte_protnone(pte)) {
			if (writing && !pte_write(pte))
				/* make the actual HPTE be read-only */
				ptel = hpte_make_readonly(ptel);
			is_ci = pte_ci(pte);
			pa = pte_pfn(pte) << PAGE_SHIFT;
			pa |= hva & (host_pte_size - 1);
			pa |= gpa & ~PAGE_MASK;
		}
	}
	if (!realmode)
		local_irq_restore(irq_flags);

	ptel &= ~(HPTE_R_PP0 - psize);
	ptel |= pa;

	if (pa)
		pteh |= HPTE_V_VALID;
	else {
		pteh |= HPTE_V_ABSENT;
		ptel &= ~(HPTE_R_KEY_HI | HPTE_R_KEY_LO);
	}
	/* If we had a host pte mapping, check the WIMG bits */
	if (ptep && !hpte_cache_flags_ok(ptel, is_ci)) {
		if (is_ci)
			return H_PARAMETER;
		/*
		 * Allow guest to map emulated device memory as
		 * uncacheable, but actually make it cacheable.
		 */
		ptel &= ~(HPTE_R_W|HPTE_R_I|HPTE_R_G);
		ptel |= HPTE_R_M;
	}

	/* Find and lock the HPTEG slot to use */
 do_insert:
	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;
	if (likely((flags & H_EXACT) == 0)) {
		pte_index &= ~7UL;
		hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
		for (i = 0; i < 8; ++i) {
			if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0 &&
			    try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
					  HPTE_V_ABSENT))
				break;
			hpte += 2;
		}
		if (i == 8) {
			/*
			 * Since try_lock_hpte doesn't retry (not even stdcx.
			 * failures), it could be that there is a free slot
			 * but we transiently failed to lock it.  Try again,
			 * actually locking each slot and checking it.
			 */
			hpte -= 16;
			for (i = 0; i < 8; ++i) {
				u64 pte;
				while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
					cpu_relax();
				pte = be64_to_cpu(hpte[0]);
				if (!(pte & (HPTE_V_VALID | HPTE_V_ABSENT)))
					break;
				__unlock_hpte(hpte, pte);
				hpte += 2;
			}
			if (i == 8)
				return H_PTEG_FULL;
		}
		pte_index += i;
	} else {
		hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
		if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
				   HPTE_V_ABSENT)) {
			/* Lock the slot and check again */
			u64 pte;

			while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
				cpu_relax();
			pte = be64_to_cpu(hpte[0]);
			if (pte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
				__unlock_hpte(hpte, pte);
				return H_PTEG_FULL;
			}
		}
	}

	/* Save away the guest's idea of the second HPTE dword */
	rev = &kvm->arch.revmap[pte_index];
	if (realmode)
		rev = real_vmalloc_addr(rev);
	if (rev) {
		rev->guest_rpte = g_ptel;
		note_hpte_modification(kvm, rev);
	}

	/* Link HPTE into reverse-map chain */
	if (pteh & HPTE_V_VALID) {
		if (realmode)
			rmap = real_vmalloc_addr(rmap);
		lock_rmap(rmap);
		/* Check for pending invalidations under the rmap chain lock */
		if (mmu_notifier_retry(kvm, mmu_seq)) {
			/* inval in progress, write a non-present HPTE */
			pteh |= HPTE_V_ABSENT;
			pteh &= ~HPTE_V_VALID;
			ptel &= ~(HPTE_R_KEY_HI | HPTE_R_KEY_LO);
			unlock_rmap(rmap);
		} else {
			kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index,
						realmode);
			/* Only set R/C in real HPTE if already set in *rmap */
			rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
			ptel &= rcbits | ~(HPTE_R_R | HPTE_R_C);
		}
	}

	/* Convert to new format on P9 */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		ptel = hpte_old_to_new_r(pteh, ptel);
		pteh = hpte_old_to_new_v(pteh);
	}
	hpte[1] = cpu_to_be64(ptel);

	/* Write the first HPTE dword, unlocking the HPTE and making it valid */
	eieio();
	__unlock_hpte(hpte, pteh);
	asm volatile("ptesync" : : : "memory");

	*pte_idx_ret = pte_index;
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_enter);

long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel)
{
	return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel,
				 vcpu->arch.pgdir, true, &vcpu->arch.gpr[4]);
}

#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif

static inline int is_mmio_hpte(unsigned long v, unsigned long r)
{
	return ((v & HPTE_V_ABSENT) &&
		(r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
		(HPTE_R_KEY_HI | HPTE_R_KEY_LO));
}

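/*
 * try_lock_tlbie() acquires the global tlbie lock with a standard
 * lwarx/stwcx. sequence: load-reserve the lock word, give up if it is
 * already nonzero, otherwise try to store our per-cpu token (a nonzero
 * value derived from the PACA).  The trailing isync keeps the critical
 * section from starting before the lock is visibly held.
 */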
static inline int try_lock_tlbie(unsigned int *lock)
{
	unsigned int tmp, old;
	unsigned int token = LOCK_TOKEN;

	asm volatile("1:lwarx	%1,0,%2\n"
		     "	cmpwi	cr0,%1,0\n"
		     "	bne	2f\n"
		     "  stwcx.	%3,0,%2\n"
		     "	bne-	1b\n"
		     "  isync\n"
		     "2:"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (lock), "r" (token)
		     : "cc", "memory");
	return old == 0;
}

static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
		      long npages, int global, bool need_sync)
{
	long i;

	/*
	 * We use the POWER9 5-operand versions of tlbie and tlbiel here.
	 * Since we are using RIC=0 PRS=0 R=0, and P7/P8 tlbiel ignores
	 * the RS field, this is backwards-compatible with P7 and P8.
	 */
	if (global) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		if (need_sync)
			asm volatile("ptesync" : : : "memory");
		for (i = 0; i < npages; ++i)
			asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : :
				     "r" (rbvalues[i]), "r" (kvm->arch.lpid));
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		if (need_sync)
			asm volatile("ptesync" : : : "memory");
		for (i = 0; i < npages; ++i)
			asm volatile(PPC_TLBIEL(%0,%1,0,0,0) : :
				     "r" (rbvalues[i]), "r" (0));
		asm volatile("ptesync" : : : "memory");
	}
}
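
/*
 * Note on ordering (an observation, not new behaviour): in the global
 * case the eieio/tlbsync/ptesync sequence makes the invalidations
 * visible system-wide before the plain store that releases tlbie_lock,
 * so no extra release barrier is needed there.
 */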

long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
			unsigned long pte_index, unsigned long avpn,
			unsigned long *hpret)
{
	__be64 *hpte;
	unsigned long v, r, rb;
	struct revmap_entry *rev;
	u64 pte, orig_pte, pte_r;

	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;
	hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	pte = orig_pte = be64_to_cpu(hpte[0]);
	pte_r = be64_to_cpu(hpte[1]);
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		pte = hpte_new_to_old_v(pte, pte_r);
		pte_r = hpte_new_to_old_r(pte_r);
	}
	if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn) ||
	    ((flags & H_ANDCOND) && (pte & avpn) != 0)) {
		__unlock_hpte(hpte, orig_pte);
		return H_NOT_FOUND;
	}

	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	v = pte & ~HPTE_V_HVLOCK;
	if (v & HPTE_V_VALID) {
		hpte[0] &= ~cpu_to_be64(HPTE_V_VALID);
		rb = compute_tlbie_rb(v, pte_r, pte_index);
		do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
		/*
		 * The reference (R) and change (C) bits in a HPT
		 * entry can be set by hardware at any time up until
		 * the HPTE is invalidated and the TLB invalidation
		 * sequence has completed.  This means that when
		 * removing a HPTE, we need to re-read the HPTE after
		 * the invalidation sequence has completed in order to
		 * obtain reliable values of R and C.
		 */
		remove_revmap_chain(kvm, pte_index, rev, v,
				    be64_to_cpu(hpte[1]));
	}
	r = rev->guest_rpte & ~HPTE_GR_RESERVED;
	note_hpte_modification(kvm, rev);
	unlock_hpte(hpte, 0);

	if (is_mmio_hpte(v, pte_r))
		atomic64_inc(&kvm->arch.mmio_update);

	if (v & HPTE_V_ABSENT)
		v = (v & ~HPTE_V_ABSENT) | HPTE_V_VALID;
	hpret[0] = v;
	hpret[1] = r;
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_remove);

long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn)
{
	return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn,
				  &vcpu->arch.gpr[4]);
}

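/*
 * H_BULK_REMOVE: up to 4 remove requests are packed into the guest's
 * GPRs as (control, match-value) pairs.  As decoded below, the top
 * byte of each control word holds a 2-bit request type (3 = end of
 * list, 1 = remove) and a 2-bit match flag (0 = absolute, 1 = andcond,
 * 2 = AVPN); the low 56 bits hold the PTE index.  On return the top
 * byte is rewritten to 0x80|flags (removed), 0x90|flags (not found)
 * or 0xa0|flags (parameter error), with the final R/C bits folded in
 * for successful removes.  (Summary derived from the code below; see
 * PAPR for the authoritative encoding.)
 */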
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *args = &vcpu->arch.gpr[4];
	__be64 *hp, *hptes[4];
	unsigned long tlbrb[4];
	long int i, j, k, n, found, indexes[4];
	unsigned long flags, req, pte_index, rcbits;
	int global;
	long int ret = H_SUCCESS;
	struct revmap_entry *rev, *revs[4];
	u64 hp0, hp1;

	global = global_invalidates(kvm, 0);
	for (i = 0; i < 4 && ret == H_SUCCESS; ) {
		n = 0;
		for (; i < 4; ++i) {
			j = i * 2;
			pte_index = args[j];
			flags = pte_index >> 56;
			pte_index &= ((1ul << 56) - 1);
			req = flags >> 6;
			flags &= 3;
			if (req == 3) {		/* no more requests */
				i = 4;
				break;
			}
			if (req != 1 || flags == 3 ||
			    pte_index >= kvm->arch.hpt_npte) {
				/* parameter error */
				args[j] = ((0xa0 | flags) << 56) + pte_index;
				ret = H_PARAMETER;
				break;
			}
			hp = (__be64 *) (kvm->arch.hpt_virt + (pte_index << 4));
			/* to avoid deadlock, don't spin except for first */
			if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) {
				if (n)
					break;
				while (!try_lock_hpte(hp, HPTE_V_HVLOCK))
					cpu_relax();
			}
			found = 0;
			hp0 = be64_to_cpu(hp[0]);
			hp1 = be64_to_cpu(hp[1]);
			if (cpu_has_feature(CPU_FTR_ARCH_300)) {
				hp0 = hpte_new_to_old_v(hp0, hp1);
				hp1 = hpte_new_to_old_r(hp1);
			}
			if (hp0 & (HPTE_V_ABSENT | HPTE_V_VALID)) {
				switch (flags & 3) {
				case 0:		/* absolute */
					found = 1;
					break;
				case 1:		/* andcond */
					if (!(hp0 & args[j + 1]))
						found = 1;
					break;
				case 2:		/* AVPN */
					if ((hp0 & ~0x7fUL) == args[j + 1])
						found = 1;
					break;
				}
			}
			if (!found) {
				hp[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
				args[j] = ((0x90 | flags) << 56) + pte_index;
				continue;
			}

			args[j] = ((0x80 | flags) << 56) + pte_index;
			rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
			note_hpte_modification(kvm, rev);

			if (!(hp0 & HPTE_V_VALID)) {
				/* insert R and C bits from PTE */
				rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
				args[j] |= rcbits << (56 - 5);
				hp[0] = 0;
				if (is_mmio_hpte(hp0, hp1))
					atomic64_inc(&kvm->arch.mmio_update);
				continue;
			}

			/* leave it locked */
			hp[0] &= ~cpu_to_be64(HPTE_V_VALID);
			tlbrb[n] = compute_tlbie_rb(hp0, hp1, pte_index);
			indexes[n] = j;
			hptes[n] = hp;
			revs[n] = rev;
			++n;
		}

		if (!n)
			break;

		/* Now that we've collected a batch, do the tlbies */
		do_tlbies(kvm, tlbrb, n, global, true);

		/* Read PTE low words after tlbie to get final R/C values */
		for (k = 0; k < n; ++k) {
			j = indexes[k];
			pte_index = args[j] & ((1ul << 56) - 1);
			hp = hptes[k];
			rev = revs[k];
			remove_revmap_chain(kvm, pte_index, rev,
				be64_to_cpu(hp[0]), be64_to_cpu(hp[1]));
			rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
			args[j] |= rcbits << (56 - 5);
			__unlock_hpte(hp, 0);
		}
	}

	return ret;
}

long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	__be64 *hpte;
	struct revmap_entry *rev;
	unsigned long v, r, rb, mask, bits;
	u64 pte_v, pte_r;

	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;

	hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	v = pte_v = be64_to_cpu(hpte[0]);
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		v = hpte_new_to_old_v(v, be64_to_cpu(hpte[1]));
	if ((v & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (v & ~0x7fUL) != avpn)) {
		__unlock_hpte(hpte, pte_v);
		return H_NOT_FOUND;
	}

	pte_r = be64_to_cpu(hpte[1]);
	bits = (flags << 55) & HPTE_R_PP0;
	bits |= (flags << 48) & HPTE_R_KEY_HI;
	bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

	/* Update guest view of 2nd HPTE dword */
	mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
		HPTE_R_KEY_HI | HPTE_R_KEY_LO;
	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	if (rev) {
		r = (rev->guest_rpte & ~mask) | bits;
		rev->guest_rpte = r;
		note_hpte_modification(kvm, rev);
	}

	/* Update HPTE */
	if (v & HPTE_V_VALID) {
		/*
		 * If the page is valid, don't let it transition from
		 * readonly to writable.  If it should be writable, we'll
		 * take a trap and let the page fault code sort it out.
		 */
		r = (pte_r & ~mask) | bits;
		if (hpte_is_writable(r) && !hpte_is_writable(pte_r))
			r = hpte_make_readonly(r);
		/* If the PTE is changing, invalidate it first */
		if (r != pte_r) {
			rb = compute_tlbie_rb(v, r, pte_index);
			hpte[0] = cpu_to_be64((pte_v & ~HPTE_V_VALID) |
					      HPTE_V_ABSENT);
			do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags),
				  true);
			/* Don't lose R/C bit updates done by hardware */
			r |= be64_to_cpu(hpte[1]) & (HPTE_R_R | HPTE_R_C);
			hpte[1] = cpu_to_be64(r);
		}
	}
	unlock_hpte(hpte, pte_v & ~HPTE_V_HVLOCK);
	asm volatile("ptesync" : : : "memory");
	if (is_mmio_hpte(v, pte_r))
		atomic64_inc(&kvm->arch.mmio_update);

	return H_SUCCESS;
}

long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	__be64 *hpte;
	unsigned long v, r;
	int i, n = 1;
	struct revmap_entry *rev = NULL;

	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;
	if (flags & H_READ_4) {
		pte_index &= ~3;
		n = 4;
	}
	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	for (i = 0; i < n; ++i, ++pte_index) {
		hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
		v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
		r = be64_to_cpu(hpte[1]);
		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
			v = hpte_new_to_old_v(v, r);
			r = hpte_new_to_old_r(r);
		}
		if (v & HPTE_V_ABSENT) {
			v &= ~HPTE_V_ABSENT;
			v |= HPTE_V_VALID;
		}
		if (v & HPTE_V_VALID) {
			r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C));
			r &= ~HPTE_GR_RESERVED;
		}
		vcpu->arch.gpr[4 + i * 2] = v;
		vcpu->arch.gpr[5 + i * 2] = r;
	}
	return H_SUCCESS;
}

long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	__be64 *hpte;
	unsigned long v, r, gr;
	struct revmap_entry *rev;
	unsigned long *rmap;
	long ret = H_NOT_FOUND;

	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;

	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	v = be64_to_cpu(hpte[0]);
	r = be64_to_cpu(hpte[1]);
	if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT)))
		goto out;

	gr = rev->guest_rpte;
	if (rev->guest_rpte & HPTE_R_R) {
		rev->guest_rpte &= ~HPTE_R_R;
		note_hpte_modification(kvm, rev);
	}
	if (v & HPTE_V_VALID) {
		gr |= r & (HPTE_R_R | HPTE_R_C);
		if (r & HPTE_R_R) {
			kvmppc_clear_ref_hpte(kvm, hpte, pte_index);
			rmap = revmap_for_hpte(kvm, v, gr);
			if (rmap) {
				lock_rmap(rmap);
				*rmap |= KVMPPC_RMAP_REFERENCED;
				unlock_rmap(rmap);
			}
		}
	}
	vcpu->arch.gpr[4] = gr;
	ret = H_SUCCESS;
 out:
	unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
	return ret;
}

long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	__be64 *hpte;
	unsigned long v, r, gr;
	struct revmap_entry *rev;
	unsigned long *rmap;
	long ret = H_NOT_FOUND;

	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;

	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	v = be64_to_cpu(hpte[0]);
	r = be64_to_cpu(hpte[1]);
	if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT)))
		goto out;

	gr = rev->guest_rpte;
	if (gr & HPTE_R_C) {
		rev->guest_rpte &= ~HPTE_R_C;
		note_hpte_modification(kvm, rev);
	}
	if (v & HPTE_V_VALID) {
		/* need to make it temporarily absent so C is stable */
		hpte[0] |= cpu_to_be64(HPTE_V_ABSENT);
		kvmppc_invalidate_hpte(kvm, hpte, pte_index);
		r = be64_to_cpu(hpte[1]);
		gr |= r & (HPTE_R_R | HPTE_R_C);
		if (r & HPTE_R_C) {
			unsigned long psize = hpte_page_size(v, r);
			hpte[1] = cpu_to_be64(r & ~HPTE_R_C);
			eieio();
			rmap = revmap_for_hpte(kvm, v, gr);
			if (rmap) {
				lock_rmap(rmap);
				*rmap |= KVMPPC_RMAP_CHANGED;
				kvmppc_update_rmap_change(rmap, psize);
				unlock_rmap(rmap);
			}
		}
	}
	vcpu->arch.gpr[4] = gr;
	ret = H_SUCCESS;
 out:
	unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
	return ret;
}

void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index)
{
	unsigned long rb;
	u64 hp0, hp1;

	hptep[0] &= ~cpu_to_be64(HPTE_V_VALID);
	hp0 = be64_to_cpu(hptep[0]);
	hp1 = be64_to_cpu(hptep[1]);
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hp0 = hpte_new_to_old_v(hp0, hp1);
		hp1 = hpte_new_to_old_r(hp1);
	}
	rb = compute_tlbie_rb(hp0, hp1, pte_index);
	do_tlbies(kvm, &rb, 1, 1, true);
}
EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);

void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
			   unsigned long pte_index)
{
	unsigned long rb;
	unsigned char rbyte;
	u64 hp0, hp1;

	hp0 = be64_to_cpu(hptep[0]);
	hp1 = be64_to_cpu(hptep[1]);
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hp0 = hpte_new_to_old_v(hp0, hp1);
		hp1 = hpte_new_to_old_r(hp1);
	}
	rb = compute_tlbie_rb(hp0, hp1, pte_index);
	rbyte = (be64_to_cpu(hptep[1]) & ~HPTE_R_R) >> 8;
	/* modify only the second-last byte, which contains the ref bit */
	*((char *)hptep + 14) = rbyte;
	do_tlbies(kvm, &rb, 1, 1, false);
}
EXPORT_SYMBOL_GPL(kvmppc_clear_ref_hpte);

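/*
 * Base page shift for each value of the 2-bit SLB LP field, indexed
 * as (slb_v & SLB_VSID_LP) >> 4.  Only the 16M, 64k and 16G entries
 * are expected in practice; 4k segments never set SLB_VSID_L, and the
 * 1M entry is a placeholder for an unsupported encoding.
 */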
static int slb_base_page_shift[4] = {
	24,	/* 16M */
	16,	/* 64k */
	34,	/* 16G */
	20,	/* 1M, unsupported */
};

static struct mmio_hpte_cache_entry *mmio_cache_search(struct kvm_vcpu *vcpu,
		unsigned long eaddr, unsigned long slb_v, long mmio_update)
{
	struct mmio_hpte_cache_entry *entry = NULL;
	unsigned int pshift;
	unsigned int i;

	for (i = 0; i < MMIO_HPTE_CACHE_SIZE; i++) {
		entry = &vcpu->arch.mmio_cache.entry[i];
		if (entry->mmio_update == mmio_update) {
			pshift = entry->slb_base_pshift;
			if ((entry->eaddr >> pshift) == (eaddr >> pshift) &&
			    entry->slb_v == slb_v)
				return entry;
		}
	}
	return NULL;
}

static struct mmio_hpte_cache_entry *
			next_mmio_cache_entry(struct kvm_vcpu *vcpu)
{
	unsigned int index = vcpu->arch.mmio_cache.index;

	vcpu->arch.mmio_cache.index++;
	if (vcpu->arch.mmio_cache.index == MMIO_HPTE_CACHE_SIZE)
		vcpu->arch.mmio_cache.index = 0;

	return &vcpu->arch.mmio_cache.entry[index];
}

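/*
 * Worked example of the hashing below (illustrative): for a 4k base
 * page in a 256MB segment, hash = (vsid ^ ((eaddr & somask) >> 12)) &
 * hpt_mask.  Each HPTE group is 8 entries of 16 bytes, hence the
 * (hash << 7) when forming the group address, and the secondary hash
 * is simply hash ^ hpt_mask.
 */
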
/*
 * When called from virtual mode, this function must be protected by
 * preempt_disable(); otherwise, holding HPTE_V_HVLOCK while we are
 * preempted can lead to deadlock.
 */
long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
			      unsigned long valid)
{
	unsigned int i;
	unsigned int pshift;
	unsigned long somask;
	unsigned long vsid, hash;
	unsigned long avpn;
	__be64 *hpte;
	unsigned long mask, val;
	unsigned long v, r, orig_v;

	/* Get page shift, work out hash and AVPN etc. */
	mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_SECONDARY;
	val = 0;
	pshift = 12;
	if (slb_v & SLB_VSID_L) {
		mask |= HPTE_V_LARGE;
		val |= HPTE_V_LARGE;
		pshift = slb_base_page_shift[(slb_v & SLB_VSID_LP) >> 4];
	}
	if (slb_v & SLB_VSID_B_1T) {
		somask = (1UL << 40) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T;
		vsid ^= vsid << 25;
	} else {
		somask = (1UL << 28) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
	}
	hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvm->arch.hpt_mask;
	avpn = slb_v & ~(somask >> 16);	/* also includes B */
	avpn |= (eaddr & somask) >> 16;

	if (pshift >= 24)
		avpn &= ~((1UL << (pshift - 16)) - 1);
	else
		avpn &= ~0x7fUL;
	val |= avpn;

	for (;;) {
		hpte = (__be64 *)(kvm->arch.hpt_virt + (hash << 7));

		for (i = 0; i < 16; i += 2) {
			/* Read the PTE racily */
			v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				v = hpte_new_to_old_v(v, be64_to_cpu(hpte[i+1]));

			/* Check valid/absent, hash, segment size and AVPN */
			if (!(v & valid) || (v & mask) != val)
				continue;

			/* Lock the PTE and read it under the lock */
			while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
				cpu_relax();
			v = orig_v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
			r = be64_to_cpu(hpte[i+1]);
			if (cpu_has_feature(CPU_FTR_ARCH_300)) {
				v = hpte_new_to_old_v(v, r);
				r = hpte_new_to_old_r(r);
			}

			/*
			 * Check the HPTE again, including base page size
			 */
			if ((v & valid) && (v & mask) == val &&
			    hpte_base_page_size(v, r) == (1ul << pshift))
				/* Return with the HPTE still locked */
				return (hash << 3) + (i >> 1);

			__unlock_hpte(&hpte[i], orig_v);
		}

		if (val & HPTE_V_SECONDARY)
			break;
		val |= HPTE_V_SECONDARY;
		hash = hash ^ kvm->arch.hpt_mask;
	}
	return -1;
}
EXPORT_SYMBOL(kvmppc_hv_find_lock_hpte);

/*
 * Called in real mode to check whether an HPTE not found fault
 * is due to accessing a paged-out page or an emulated MMIO page,
 * or if a protection fault is due to accessing a page that the
 * guest wanted read/write access to but which we made read-only.
 * Returns a possibly modified status (DSISR) value if the fault is
 * none of those cases (i.e. pass the interrupt to the guest),
 * -1 to pass the fault up to host kernel mode code, -2 to do that
 * and also load the instruction word (for MMIO emulation),
 * or 0 if we should make the guest retry the access.
 */
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data)
{
	struct kvm *kvm = vcpu->kvm;
	long int index;
	unsigned long v, r, gr, orig_v;
	__be64 *hpte;
	unsigned long valid;
	struct revmap_entry *rev;
	unsigned long pp, key;
	struct mmio_hpte_cache_entry *cache_entry = NULL;
	long mmio_update = 0;

	/* For protection fault, expect to find a valid HPTE */
	valid = HPTE_V_VALID;
	if (status & DSISR_NOHPTE) {
		valid |= HPTE_V_ABSENT;
		mmio_update = atomic64_read(&kvm->arch.mmio_update);
		cache_entry = mmio_cache_search(vcpu, addr, slb_v, mmio_update);
	}
	if (cache_entry) {
		index = cache_entry->pte_index;
		v = cache_entry->hpte_v;
		r = cache_entry->hpte_r;
		gr = cache_entry->rpte;
	} else {
		index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
		if (index < 0) {
			if (status & DSISR_NOHPTE)
				return status;	/* there really was no HPTE */
			return 0;	/* for prot fault, HPTE disappeared */
		}
		hpte = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
		v = orig_v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
		r = be64_to_cpu(hpte[1]);
		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
			v = hpte_new_to_old_v(v, r);
			r = hpte_new_to_old_r(r);
		}
		rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
		gr = rev->guest_rpte;

		unlock_hpte(hpte, orig_v);
	}

	/* For not found, if the HPTE is valid by now, retry the instruction */
	if ((status & DSISR_NOHPTE) && (v & HPTE_V_VALID))
		return 0;

	/* Check access permissions to the page */
	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
	status &= ~DSISR_NOHPTE;	/* DSISR_NOHPTE == SRR1_ISI_NOPT */
	if (!data) {
		if (gr & (HPTE_R_N | HPTE_R_G))
			return status | SRR1_ISI_N_OR_G;
		if (!hpte_read_permission(pp, slb_v & key))
			return status | SRR1_ISI_PROT;
	} else if (status & DSISR_ISSTORE) {
		/* check write permission */
		if (!hpte_write_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	} else {
		if (!hpte_read_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	}

	/* Check storage key, if applicable */
	if (data && (vcpu->arch.shregs.msr & MSR_DR)) {
		unsigned int perm = hpte_get_skey_perm(gr, vcpu->arch.amr);
		if (status & DSISR_ISSTORE)
			perm >>= 1;
		if (perm & 1)
			return status | DSISR_KEYFAULT;
	}

	/* Save HPTE info for virtual-mode handler */
	vcpu->arch.pgfault_addr = addr;
	vcpu->arch.pgfault_index = index;
	vcpu->arch.pgfault_hpte[0] = v;
	vcpu->arch.pgfault_hpte[1] = r;
	vcpu->arch.pgfault_cache = cache_entry;

	/* Check the storage key to see if it is possibly emulated MMIO */
	if ((r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
	    (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) {
		if (!cache_entry) {
			unsigned int pshift = 12;
			unsigned int pshift_index;

			if (slb_v & SLB_VSID_L) {
				pshift_index = ((slb_v & SLB_VSID_LP) >> 4);
				pshift = slb_base_page_shift[pshift_index];
			}
			cache_entry = next_mmio_cache_entry(vcpu);
			cache_entry->eaddr = addr;
			cache_entry->slb_base_pshift = pshift;
			cache_entry->pte_index = index;
			cache_entry->hpte_v = v;
			cache_entry->hpte_r = r;
			cache_entry->rpte = gr;
			cache_entry->slb_v = slb_v;
			cache_entry->mmio_update = mmio_update;
		}
		if (data && (vcpu->arch.shregs.msr & MSR_IR))
			return -2;	/* MMIO emulation - load instr word */
	}

	return -1;		/* send fault up to host kernel mode */
}