/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_64_H__
#define __ASM_KVM_BOOK3S_64_H__

#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
/*
 * Get the shadow vcpu from the current CPU's PACA; preemption is
 * disabled here and stays disabled until the matching svcpu_put().
 */
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	return &get_paca()->shadow_vcpu;
}

static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
{
	preempt_enable();
}
#endif
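
/*
 * Illustrative usage sketch (not an actual call site; the field update
 * below is hypothetical): svcpu_get() and svcpu_put() must be paired,
 * and the caller must not sleep in between, since preemption stays
 * disabled for the whole critical section.
 *
 *	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 *	svcpu->gpr[3] = val;		// hypothetical field update
 *	svcpu_put(svcpu);
 */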

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
#define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
#endif

#define VRMA_VSID	0x1ffffffUL	/* 1TB VSID reserved for VRMA */

/*
 * We use a lock bit in HPTE dword 0 to synchronize updates and
 * accesses to each HPTE, and another bit to indicate non-present
 * HPTEs.
 */
#define HPTE_V_HVLOCK	0x40UL
#define HPTE_V_ABSENT	0x20UL

/*
 * We use this bit in the guest_rpte field of the revmap entry
 * to indicate a modified HPTE.
 */
#define HPTE_GR_MODIFIED	(1ul << 62)

/* These bits are reserved in the guest view of the HPTE */
#define HPTE_GR_RESERVED	HPTE_GR_MODIFIED

static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
{
	unsigned long tmp, old;
	__be64 be_lockbit, be_bits;

	/*
	 * We load/store in native endian, but the HTAB is in big endian.
	 * If we byte-swap all the data we apply to the PTE, we are
	 * implicitly correct again.
	 */
	be_lockbit = cpu_to_be64(HPTE_V_HVLOCK);
	be_bits = cpu_to_be64(bits);

	asm volatile("	ldarx	%0,0,%2\n"
		     "	and.	%1,%0,%3\n"
		     "	bne	2f\n"
		     "	or	%0,%0,%4\n"
		     "	stdcx.	%0,0,%2\n"
		     "	beq+	2f\n"
		     "	mr	%1,%3\n"
		     "2:	isync"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (hpte), "r" (be_bits), "r" (be_lockbit)
		     : "cc", "memory");
	return old == 0;
}

static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	hpte_v &= ~HPTE_V_HVLOCK;
	asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
	hpte[0] = cpu_to_be64(hpte_v);
}

/* Like unlock_hpte(), but without the release barrier */
static inline void __unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	hpte_v &= ~HPTE_V_HVLOCK;
	hpte[0] = cpu_to_be64(hpte_v);
}
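
/*
 * Illustrative locking sketch (hedged; hptep and v are hypothetical
 * locals, and real callers such as the HPT fault handlers also check
 * HPTE_V_ABSENT and the AVPN before acting on the entry):
 *
 *	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
 *		cpu_relax();
 *	v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
 *	// ... inspect or modify the HPTE ...
 *	unlock_hpte(hptep, v);	// rewrites dword 0 with HVLOCK clear
 */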

static inline int __hpte_actual_psize(unsigned int lp, int psize)
{
	int i, shift;
	unsigned int mask;

	/* start from 1, ignoring MMU_PAGE_4K */
	for (i = 1; i < MMU_PAGE_COUNT; i++) {

		/* invalid penc */
		if (mmu_psize_defs[psize].penc[i] == -1)
			continue;
		/*
		 * encoding bits per actual page size
		 *        PTE LP     actual page size
		 *    rrrr rrrz		>=8KB
		 *    rrrr rrzz		>=16KB
		 *    rrrr rzzz		>=32KB
		 *    rrrr zzzz		>=64KB
		 * .......
		 */
		shift = mmu_psize_defs[i].shift - LP_SHIFT;
		if (shift > LP_BITS)
			shift = LP_BITS;
		mask = (1 << shift) - 1;
		if ((lp & mask) == mmu_psize_defs[psize].penc[i])
			return i;
	}
	return -1;
}
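
/*
 * Worked example (illustrative; the concrete penc values come from the
 * mmu_psize_defs table the platform fills in): for an actual page size
 * of 64K, mmu_psize_defs[MMU_PAGE_64K].shift is 16, so shift becomes
 * 16 - LP_SHIFT (12) = 4 and mask = 0xf; the low four bits of lp are
 * then compared against penc[MMU_PAGE_64K] for the given base psize.
 */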

static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
					     unsigned long pte_index)
{
	int b_psize = MMU_PAGE_4K, a_psize = MMU_PAGE_4K;
	unsigned int penc;
	unsigned long rb = 0, va_low, sllp;
	unsigned int lp = (r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	if (v & HPTE_V_LARGE) {
		for (b_psize = 0; b_psize < MMU_PAGE_COUNT; b_psize++) {

			/* valid entries have a shift value */
			if (!mmu_psize_defs[b_psize].shift)
				continue;

			a_psize = __hpte_actual_psize(lp, b_psize);
			if (a_psize != -1)
				break;
		}
	}
	/*
	 * Ignore the top 14 bits of the va.
	 * v has the top two bits covering segment size, hence we shift
	 * left by 16 bits; also clear the lower HPTE_V_AVPN_SHIFT (7)
	 * bits.  The AVA field in v also has its lower 23 bits ignored.
	 * For a 4K base page size we need va bits 14..65 (so we must
	 * collect an extra 11 bits); for the others we need bits
	 * 14..14+i.
	 */
	/* This covers va bits 14..54 */
	rb = (v & ~0x7fUL) << 16;		/* AVA field */

	rb |= (v >> HPTE_V_SSIZE_SHIFT) << 8;	/*  B field */
	/*
	 * The AVA in v has its lower 23 bits cleared; we need to derive
	 * them from the PTEG index.
	 */
	va_low = pte_index >> 3;
	if (v & HPTE_V_SECONDARY)
		va_low = ~va_low;
	/*
	 * Get the vpn bits from va_low by reversing the hashing.
	 * In v we have the va with its lower 23 bits dropped and then
	 * left-shifted by HPTE_V_AVPN_SHIFT (7) bits.  To recover the
	 * vsid we therefore right-shift by (SID_SHIFT - (23 - 7)),
	 * i.e. SID_SHIFT - 16.
	 */
	if (!(v & HPTE_V_1TB_SEG))
		va_low ^= v >> (SID_SHIFT - 16);
	else
		va_low ^= v >> (SID_SHIFT_1T - 16);
	va_low &= 0x7ff;

	switch (b_psize) {
	case MMU_PAGE_4K:
		sllp = get_sllp_encoding(a_psize);
		rb |= sllp << 5;	/*  AP field */
		rb |= (va_low & 0x7ff) << 12;	/* remaining 11 bits of AVA */
		break;
	default:
	{
		int aval_shift;
		/*
		 * Remaining bits of the AVA/LP fields; these also
		 * contain the rr bits of LP.
		 */
		rb |= (va_low << mmu_psize_defs[b_psize].shift) & 0x7ff000;
		/*
		 * Now clear the LP bits that are not needed, based on
		 * the actual psize.
		 */
		rb &= ~((1ul << mmu_psize_defs[a_psize].shift) - 1);
		/*
		 * The AVAL field holds bits 58..(77 - base_page_shift)
		 * of the va; we only have space for bits 58..64, and
		 * the missing bits must be zero-filled.  The +1 takes
		 * care of the shift caused by the L bit.
		 */
		aval_shift = 64 - (77 - mmu_psize_defs[b_psize].shift) + 1;
		rb |= ((va_low << aval_shift) & 0xfe);

		rb |= 1;		/* L field */
		penc = mmu_psize_defs[b_psize].penc[a_psize];
		rb |= penc << 12;	/* LP field */
		break;
	}
	}
	rb |= (v >> 54) & 0x300;		/* B field */
	return rb;
}
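
/*
 * Sketch of how the result feeds a tlbie (hedged and simplified; the
 * real invalidation path in book3s_hv_rm_mmu.c batches invalidations
 * and serializes tlbie on CPUs that require it):
 *
 *	rb = compute_tlbie_rb(v, r, pte_index);
 *	asm volatile("ptesync" : : : "memory");
 *	asm volatile(PPC_TLBIE(%1, %0) : : "r" (rb), "r" (kvm->arch.lpid));
 *	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
 */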

static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
					     bool is_base_size)
{
	int size, a_psize;
	/* Look at the 8 bit LP value */
	unsigned int lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	/* only handle 4k, 64k and 16M pages for now */
	if (!(h & HPTE_V_LARGE))
		return 1ul << 12;

	for (size = 0; size < MMU_PAGE_COUNT; size++) {
		/* valid entries have a shift value */
		if (!mmu_psize_defs[size].shift)
			continue;

		a_psize = __hpte_actual_psize(lp, size);
		if (a_psize != -1) {
			if (is_base_size)
				return 1ul << mmu_psize_defs[size].shift;
			return 1ul << mmu_psize_defs[a_psize].shift;
		}
	}
	return 0;
}

static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
{
	return __hpte_page_size(h, l, false);
}

static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
{
	return __hpte_page_size(h, l, true);
}
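
/*
 * Example (illustrative, assuming an MPSS mapping with a 64K base page
 * size backing an actual 16M page): hpte_page_size() returns 0x1000000
 * while hpte_base_page_size() returns 0x10000; for a non-large HPTE
 * both return 4K.
 */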

static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
{
	return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
}
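
/*
 * Worked example (assuming a 4K PAGE_SHIFT): for a 16M page,
 * psize - 1 = 0xffffff, so the low 24 bits of the masked RPN field
 * are cleared before the shift, yielding the frame number of the
 * first 4K page of the 16M region.
 */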

static inline int hpte_is_writable(unsigned long ptel)
{
	unsigned long pp = ptel & (HPTE_R_PP0 | HPTE_R_PP);

	return pp != PP_RXRX && pp != PP_RXXX;
}

static inline unsigned long hpte_make_readonly(unsigned long ptel)
{
	if ((ptel & HPTE_R_PP0) || (ptel & HPTE_R_PP) == PP_RWXX)
		ptel = (ptel & ~HPTE_R_PP) | PP_RXXX;
	else
		ptel |= PP_RXRX;
	return ptel;
}

static inline bool hpte_cache_flags_ok(unsigned long hptel, bool is_ci)
{
	unsigned int wimg = hptel & HPTE_R_WIMG;

	/* Handle SAO */
	if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
	    cpu_has_feature(CPU_FTR_ARCH_206))
		wimg = HPTE_R_M;

	if (!is_ci)
		return wimg == HPTE_R_M;
	/*
	 * If the host mapping is cache-inhibited, make sure the hptel
	 * also has cache-inhibited set.
	 */
	if (wimg & HPTE_R_W) /* FIXME!! is this ok for all guests? */
		return false;
	return !!(wimg & HPTE_R_I);
}

/*
 * If the PTE is present, atomically set the referenced bit (and the
 * dirty bit too, for a write access to a writable page) and return
 * the updated PTE; otherwise return a zero PTE.
 */
static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing)
{
	pte_t old_pte, new_pte = __pte(0);

	while (1) {
		/*
		 * Make sure we don't reload from ptep
		 */
		old_pte = READ_ONCE(*ptep);
		/*
		 * Wait until H_PAGE_BUSY is clear, then update the
		 * PTE atomically with pte_xchg() below.
		 */
		if (unlikely(pte_val(old_pte) & H_PAGE_BUSY)) {
			cpu_relax();
			continue;
		}
		/* If the pte is not present, return a zero PTE */
		if (unlikely(!(pte_val(old_pte) & _PAGE_PRESENT)))
			return __pte(0);

		new_pte = pte_mkyoung(old_pte);
		if (writing && pte_write(old_pte))
			new_pte = pte_mkdirty(new_pte);

		if (pte_xchg(ptep, old_pte, new_pte))
			break;
	}
	return new_pte;
}
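
/*
 * Usage sketch (hedged; real callers obtain ptep from a page-table
 * walk such as find_linux_pte() and hold the appropriate locks so the
 * PTE cannot be freed underneath them):
 *
 *	pte = kvmppc_read_update_linux_pte(ptep, writing);
 *	if (pte_present(pte))
 *		pfn = pte_pfn(pte);	// R (and possibly C) now set
 */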

static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return PP_RWRX <= pp && pp <= PP_RXRX;
	return true;
}

static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return pp == PP_RWRW;
	return pp <= PP_RWRW;
}

static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
{
	unsigned long skey;

	skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
		((hpte_r & HPTE_R_KEY_LO) >> 9);
	return (amr >> (62 - 2 * skey)) & 3;
}

static inline void lock_rmap(unsigned long *rmap)
{
	do {
		while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
			cpu_relax();
	} while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
}

static inline void unlock_rmap(unsigned long *rmap)
{
	__clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
}
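
/*
 * Illustrative usage (hedged; rmap here stands for a pointer into a
 * memslot's rmap array, as used by the real callers in
 * book3s_64_mmu_hv.c and book3s_hv_rm_mmu.c):
 *
 *	lock_rmap(rmap);
 *	// ... walk or modify the rmap chain ...
 *	unlock_rmap(rmap);
 */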

static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
				   unsigned long pagesize)
{
	unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;

	if (pagesize <= PAGE_SIZE)
		return true;
	return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
}
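
/*
 * Worked example (assuming a 4K PAGE_SIZE): for 16M pages,
 * pagesize >> PAGE_SHIFT is 4096, so both base_gfn and npages must
 * be multiples of 4096 for the slot to be mappable with 16M pages.
 */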

/*
 * This works for 4k, 64k and 16M pages on POWER7,
 * and 4k and 16M pages on PPC970.
 */
static inline unsigned long slb_pgsize_encoding(unsigned long psize)
{
	unsigned long senc = 0;

	if (psize > 0x1000) {
		senc = SLB_VSID_L;
		if (psize == 0x10000)
			senc |= SLB_VSID_LP_01;
	}
	return senc;
}
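
/*
 * Example (follows directly from the code above): psize 0x1000 (4K)
 * yields 0; 0x10000 (64K) yields SLB_VSID_L | SLB_VSID_LP_01;
 * 0x1000000 (16M) yields SLB_VSID_L alone.
 */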

static inline int is_vrma_hpte(unsigned long hpte_v)
{
	return (hpte_v & ~0xffffffUL) ==
		(HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * Note modification of an HPTE; set the HPTE modified bit
 * if anyone is interested.
 */
static inline void note_hpte_modification(struct kvm *kvm,
					  struct revmap_entry *rev)
{
	if (atomic_read(&kvm->arch.hpte_mod_interest))
		rev->guest_rpte |= HPTE_GR_MODIFIED;
}

/*
 * Like kvm_memslots(), but for use in real mode when we can't do
 * any RCU stuff (since the secondary threads are offline from the
 * kernel's point of view), and we can't print anything.
 * Thus we use rcu_dereference_raw() rather than rcu_dereference_check().
 */
static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
{
	return rcu_dereference_raw_notrace(kvm->memslots[0]);
}

extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);

extern void kvmhv_rm_send_ipi(int cpu);

#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

#endif /* __ASM_KVM_BOOK3S_64_H__ */