/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_64_H__
#define __ASM_KVM_BOOK3S_64_H__

#include <linux/string.h>
#include <asm/bitops.h>
#include <asm/book3s/64/mmu-hash.h>

/* The Power architecture requires the HPT to be at least 256kiB and at most 64TiB */
#define PPC_MIN_HPT_ORDER	18
#define PPC_MAX_HPT_ORDER	46

#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	return &get_paca()->shadow_vcpu;
}

static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
{
	preempt_enable();
}
#endif

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE

static inline bool kvm_is_radix(struct kvm *kvm)
{
	return kvm->arch.radix;
}

#define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
#endif

/*
 * We use a lock bit in HPTE dword 0 to synchronize updates and
 * accesses to each HPTE, and another bit to indicate non-present
 * HPTEs.
 */
#define HPTE_V_HVLOCK	0x40UL
#define HPTE_V_ABSENT	0x20UL

/*
 * We use this bit in the guest_rpte field of the revmap entry
 * to indicate a modified HPTE.
 */
#define HPTE_GR_MODIFIED	(1ul << 62)

/* These bits are reserved in the guest view of the HPTE */
#define HPTE_GR_RESERVED	HPTE_GR_MODIFIED

static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
{
	unsigned long tmp, old;
	__be64 be_lockbit, be_bits;

	/*
	 * We load/store in native endian, but the HTAB is in big endian.
	 * If we byte-swap all data we apply to the HPTE, we are
	 * implicitly correct again.
	 */
	be_lockbit = cpu_to_be64(HPTE_V_HVLOCK);
	be_bits = cpu_to_be64(bits);

	asm volatile("	ldarx	%0,0,%2\n"
		     "	and.	%1,%0,%3\n"
		     "	bne	2f\n"
		     "	or	%0,%0,%4\n"
		     "	stdcx.	%0,0,%2\n"
		     "	beq+	2f\n"
		     "	mr	%1,%3\n"
		     "2:	isync"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (hpte), "r" (be_bits), "r" (be_lockbit)
		     : "cc", "memory");
	return old == 0;
}

static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	hpte_v &= ~HPTE_V_HVLOCK;
	asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
	hpte[0] = cpu_to_be64(hpte_v);
}

/* Like unlock_hpte(), but without the release barrier; the caller must ensure ordering */
static inline void __unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	hpte_v &= ~HPTE_V_HVLOCK;
	hpte[0] = cpu_to_be64(hpte_v);
}
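
/*
 * Usage sketch (illustrative only, not a pattern mandated by this
 * header): callers typically spin on try_lock_hpte() and pair it with
 * unlock_hpte() around an HPTE update, e.g.
 *
 *	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
 *		cpu_relax();
 *	v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
 *	... examine or update the HPTE ...
 *	unlock_hpte(hptep, v);
 */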

/*
 * These functions encode knowledge of the POWER7/8/9 hardware
 * interpretations of the HPTE LP (large page size) field.
 */
static inline int kvmppc_hpte_page_shifts(unsigned long h, unsigned long l)
{
	unsigned int lphi;

	if (!(h & HPTE_V_LARGE))
		return 12;	/* 4kB */
	lphi = (l >> 16) & 0xf;
	switch ((l >> 12) & 0xf) {
	case 0:
		return !lphi ? 24 : -1;		/* 16MB */
	case 1:
		return 16;			/* 64kB */
	case 3:
		return !lphi ? 34 : -1;		/* 16GB */
	case 7:
		return (16 << 8) + 12;		/* 64kB in 4kB */
	case 8:
		if (!lphi)
			return (24 << 8) + 16;	/* 16MB in 64kB */
		if (lphi == 3)
			return (24 << 8) + 12;	/* 16MB in 4kB */
		break;
	}
	return -1;
}

static inline int kvmppc_hpte_base_page_shift(unsigned long h, unsigned long l)
{
	return kvmppc_hpte_page_shifts(h, l) & 0xff;
}

static inline int kvmppc_hpte_actual_page_shift(unsigned long h, unsigned long l)
{
	int tmp = kvmppc_hpte_page_shifts(h, l);

	if (tmp >= 0x100)
		tmp >>= 8;
	return tmp;
}

static inline unsigned long kvmppc_actual_pgsz(unsigned long v, unsigned long r)
{
	return 1ul << kvmppc_hpte_actual_page_shift(v, r);
}
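
/*
 * Worked example: a 16MB page mapped in a 64kB base-page-size segment
 * decodes as kvmppc_hpte_page_shifts() == (24 << 8) + 16 = 0x1810, so
 * kvmppc_hpte_base_page_shift() is 16, kvmppc_hpte_actual_page_shift()
 * is 24, and kvmppc_actual_pgsz() is 1UL << 24 = 16MB.
 */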

static inline int kvmppc_pgsize_lp_encoding(int base_shift, int actual_shift)
{
	switch (base_shift) {
	case 12:
		switch (actual_shift) {
		case 12:
			return 0;
		case 16:
			return 7;
		case 24:
			return 0x38;
		}
		break;
	case 16:
		switch (actual_shift) {
		case 16:
			return 1;
		case 24:
			return 8;
		}
		break;
	case 24:
		return 0;
	}
	return -1;
}
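
/*
 * This is the inverse of kvmppc_hpte_page_shifts(): for example,
 * kvmppc_pgsize_lp_encoding(12, 24) is 0x38, and an LP field of 0x38
 * decodes back to (24 << 8) + 12, a 16MB page in a 4kB segment.
 */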

static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
					     unsigned long pte_index)
{
	int a_pgshift, b_pgshift;
	unsigned long rb = 0, va_low, sllp;

	b_pgshift = a_pgshift = kvmppc_hpte_page_shifts(v, r);
	if (a_pgshift >= 0x100) {
		b_pgshift &= 0xff;
		a_pgshift >>= 8;
	}

	/*
	 * Ignore the top 14 bits of the VA.
	 * v has its top two bits covering segment size, hence shift by
	 * 16 bits; also clear the lower HPTE_V_AVPN_SHIFT (7) bits.
	 * The AVA field in v also has its lower 23 bits ignored.
	 * For a 4K base page size we need VA bits 14..65 (so we must
	 * collect an extra 11 bits); for others we need bits 14..14+i.
	 */
	/* This covers VA bits 14..54 */
	rb = (v & ~0x7fUL) << 16;		/* AVA field */

	/*
	 * The AVA in v has its lower 23 bits cleared; we need to derive
	 * those from the PTEG index.
	 */
	va_low = pte_index >> 3;
	if (v & HPTE_V_SECONDARY)
		va_low = ~va_low;
	/*
	 * Get the VPN bits from va_low by reversing the hash.
	 * In v we have the VA with its lower 23 bits dropped and then
	 * left-shifted by HPTE_V_AVPN_SHIFT (7) bits; to line the VSID
	 * up with va_low we right-shift v by (SID_SHIFT - (23 - 7)).
	 */
	if (!(v & HPTE_V_1TB_SEG))
		va_low ^= v >> (SID_SHIFT - 16);
	else
		va_low ^= v >> (SID_SHIFT_1T - 16);
	va_low &= 0x7ff;

	if (b_pgshift == 12) {
		if (a_pgshift > 12) {
			sllp = (a_pgshift == 16) ? 5 : 4;
			rb |= sllp << 5;	/* AP field */
		}
		rb |= (va_low & 0x7ff) << 12;	/* remaining 11 bits of AVA */
	} else {
		int aval_shift;
		/*
		 * The remaining bits of the AVA/LP fields; these also
		 * contain the rr bits of LP.
		 */
		rb |= (va_low << b_pgshift) & 0x7ff000;
		/*
		 * Now clear the LP bits that are not needed for the
		 * actual page size.
		 */
		rb &= ~((1ul << a_pgshift) - 1);
		/*
		 * The AVAL field holds bits 58..(77 - base_page_shift)
		 * of the VA. We only have space for bits 58..64, and
		 * the missing bits should be zero-filled. The +1 takes
		 * care of the L bit shift.
		 */
		aval_shift = 64 - (77 - b_pgshift) + 1;
		rb |= ((va_low << aval_shift) & 0xfe);

		rb |= 1;		/* L field */
		rb |= r & 0xff000 & ((1ul << a_pgshift) - 1); /* LP field */
	}
	rb |= (v >> HPTE_V_SSIZE_SHIFT) << 8;	/* B field */
	return rb;
}
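
/*
 * Sketch of how the result is consumed (illustrative; lpid here stands
 * for the guest's LPID): the returned value is the RB operand of a
 * tlbie invalidating this translation, roughly
 *
 *	rb = compute_tlbie_rb(v, r, pte_index);
 *	asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 0)
 *		     : : "r" (rb), "r" (lpid) : "memory");
 */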

static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
{
	return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
}

static inline int hpte_is_writable(unsigned long ptel)
{
	unsigned long pp = ptel & (HPTE_R_PP0 | HPTE_R_PP);

	return pp != PP_RXRX && pp != PP_RXXX;
}

static inline unsigned long hpte_make_readonly(unsigned long ptel)
{
	if ((ptel & HPTE_R_PP0) || (ptel & HPTE_R_PP) == PP_RWXX)
		ptel = (ptel & ~HPTE_R_PP) | PP_RXXX;
	else
		ptel |= PP_RXRX;
	return ptel;
}

static inline bool hpte_cache_flags_ok(unsigned long hptel, bool is_ci)
{
	unsigned int wimg = hptel & HPTE_R_WIMG;

	/* Handle SAO */
	if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
	    cpu_has_feature(CPU_FTR_ARCH_206))
		wimg = HPTE_R_M;

	if (!is_ci)
		return wimg == HPTE_R_M;
	/*
	 * If the host mapping is cache-inhibited, make sure hptel is
	 * cache-inhibited too.
	 */
	if (wimg & HPTE_R_W) /* FIXME!! is this OK for all guests? */
		return false;
	return !!(wimg & HPTE_R_I);
}

/*
 * If the PTE is present, atomically set the referenced bit (and the
 * dirty bit, if we are writing and the PTE is writable) and return the
 * updated PTE; otherwise return an empty PTE.
 */
static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing)
{
	pte_t old_pte, new_pte = __pte(0);

	while (1) {
		/*
		 * Make sure we don't reload from ptep
		 */
		old_pte = READ_ONCE(*ptep);
		/*
		 * If H_PAGE_BUSY is set, busy-wait for it to clear,
		 * then retry the load.
		 */
		if (unlikely(pte_val(old_pte) & H_PAGE_BUSY)) {
			cpu_relax();
			continue;
		}
		/* If the PTE is not present, return an empty PTE */
		if (unlikely(!(pte_val(old_pte) & _PAGE_PRESENT)))
			return __pte(0);

		new_pte = pte_mkyoung(old_pte);
		if (writing && pte_write(old_pte))
			new_pte = pte_mkdirty(new_pte);

		if (pte_xchg(ptep, old_pte, new_pte))
			break;
	}
	return new_pte;
}

static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return PP_RWRX <= pp && pp <= PP_RXRX;
	return true;
}

static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return pp == PP_RWRW;
	return pp <= PP_RWRW;
}
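
/*
 * For reference, the PP encodings tested above (defined in
 * asm/book3s/64/mmu-hash.h) are PP_RWXX = 0, PP_RWRX = 1, PP_RWRW = 2
 * and PP_RXRX = 3: with a protection key set, PP_RWRX..PP_RXRX grant
 * read access and only PP_RWRW grants write access.
 */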

static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
{
	unsigned long skey;

	skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
		((hpte_r & HPTE_R_KEY_LO) >> 9);
	return (amr >> (62 - 2 * skey)) & 3;
}
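
/*
 * The AMR holds two permission bits per storage key, with key 0 in the
 * most significant bits: skey 0 selects the top two bits (amr >> 62)
 * and skey 31 selects the bottom two (amr & 3).
 */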

static inline void lock_rmap(unsigned long *rmap)
{
	do {
		while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
			cpu_relax();
	} while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
}

static inline void unlock_rmap(unsigned long *rmap)
{
	__clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
}

static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
				   unsigned long pagesize)
{
	unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;

	if (pagesize <= PAGE_SIZE)
		return true;
	return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
}
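
/*
 * For example, with 64kB backing pages on a 4kB PAGE_SIZE host the
 * mask is 0xf, so a memslot qualifies only if both its base gfn and
 * its length are multiples of 16 pages.
 */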

/*
 * This works for 4k, 64k and 16M pages on POWER7,
 * and 4k and 16M pages on PPC970.
 */
static inline unsigned long slb_pgsize_encoding(unsigned long psize)
{
	unsigned long senc = 0;

	if (psize > 0x1000) {
		senc = SLB_VSID_L;
		if (psize == 0x10000)
			senc |= SLB_VSID_LP_01;
	}
	return senc;
}
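
/*
 * For example: slb_pgsize_encoding(0x1000) is 0 (4k pages),
 * slb_pgsize_encoding(0x10000) is SLB_VSID_L | SLB_VSID_LP_01 (64k),
 * and slb_pgsize_encoding(0x1000000) is SLB_VSID_L (16M).
 */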

static inline int is_vrma_hpte(unsigned long hpte_v)
{
	return (hpte_v & ~0xffffffUL) ==
		(HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * Note modification of an HPTE; set the HPTE modified bit
 * if anyone is interested.
 */
static inline void note_hpte_modification(struct kvm *kvm,
					  struct revmap_entry *rev)
{
	if (atomic_read(&kvm->arch.hpte_mod_interest))
		rev->guest_rpte |= HPTE_GR_MODIFIED;
}

/*
 * Like kvm_memslots(), but for use in real mode when we can't do
 * any RCU stuff (since the secondary threads are offline from the
 * kernel's point of view), and we can't print anything.
 * Thus we use rcu_dereference_raw() rather than rcu_dereference_check().
 */
static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
{
	return rcu_dereference_raw_notrace(kvm->memslots[0]);
}

extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);

extern void kvmhv_rm_send_ipi(int cpu);

static inline unsigned long kvmppc_hpt_npte(struct kvm_hpt_info *hpt)
{
	/* HPTEs are 2**4 bytes long */
	return 1UL << (hpt->order - 4);
}

static inline unsigned long kvmppc_hpt_mask(struct kvm_hpt_info *hpt)
{
	/* 128 (2**7) bytes in each HPTEG */
	return (1UL << (hpt->order - 7)) - 1;
}
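
/*
 * Worked example: at the default HPT order of 24 (a 16MB HPT),
 * kvmppc_hpt_npte() is 1UL << 20 (1M HPTEs of 16 bytes each) and
 * kvmppc_hpt_mask() is (1UL << 17) - 1, i.e. the HPT holds 2^17
 * groups of 8 HPTEs.
 */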

/* Set bits in a dirty bitmap, which is in LE format */
static inline void set_dirty_bits(unsigned long *map, unsigned long i,
				  unsigned long npages)
{
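	/*
	 * The memset fast path assumes i and npages are both multiples
	 * of 8; this is expected to hold for callers, which mark
	 * naturally aligned power-of-2 blocks of pages dirty.
	 */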
	if (npages >= 8)
		memset((char *)map + i / 8, 0xff, npages / 8);
	else
		for (; npages; ++i, --npages)
			__set_bit_le(i, map);
}

static inline void set_dirty_bits_atomic(unsigned long *map, unsigned long i,
					 unsigned long npages)
{
	if (npages >= 8)
		memset((char *)map + i / 8, 0xff, npages / 8);
	else
		for (; npages; ++i, --npages)
			set_bit_le(i, map);
}

#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

#endif /* __ASM_KVM_BOOK3S_64_H__ */