/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_64_H__
#define __ASM_KVM_BOOK3S_64_H__

#include <asm/book3s/64/mmu-hash.h>

/* The Power architecture requires the HPT to be at least 256 KiB and at most 64 TiB */
#define PPC_MIN_HPT_ORDER	18
#define PPC_MAX_HPT_ORDER	46

#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	return &get_paca()->shadow_vcpu;
}

static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
{
	preempt_enable();
}
#endif

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE

static inline bool kvm_is_radix(struct kvm *kvm)
{
	return kvm->arch.radix;
}

#define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
#endif

#define VRMA_VSID	0x1ffffffUL	/* 1TB VSID reserved for VRMA */

/*
 * We use a lock bit in HPTE dword 0 to synchronize updates and
 * accesses to each HPTE, and another bit to indicate non-present
 * HPTEs.
 */
#define HPTE_V_HVLOCK	0x40UL
#define HPTE_V_ABSENT	0x20UL

/*
 * We use this bit in the guest_rpte field of the revmap entry
 * to indicate a modified HPTE.
 */
#define HPTE_GR_MODIFIED	(1ul << 62)

/* These bits are reserved in the guest view of the HPTE */
#define HPTE_GR_RESERVED	HPTE_GR_MODIFIED

static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
{
	unsigned long tmp, old;
	__be64 be_lockbit, be_bits;

	/*
	 * We load/store in native endian, but the HTAB is in big endian.
	 * If we byte-swap all the data we apply to the PTE, we are
	 * implicitly correct again.
	 */
	be_lockbit = cpu_to_be64(HPTE_V_HVLOCK);
	be_bits = cpu_to_be64(bits);

	/*
	 * ldarx/stdcx. sequence: fail (return 0) if any of "bits" are
	 * already set in the HPTE or if the conditional store loses its
	 * reservation; otherwise set HPTE_V_HVLOCK and return 1.
	 */
	asm volatile("	ldarx	%0,0,%2\n"
		     "	and.	%1,%0,%3\n"
		     "	bne	2f\n"
		     "	or	%0,%0,%4\n"
		     "	stdcx.	%0,0,%2\n"
		     "	beq+	2f\n"
		     "	mr	%1,%3\n"
		     "2:	isync"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (hpte), "r" (be_bits), "r" (be_lockbit)
		     : "cc", "memory");
	return old == 0;
}
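
/*
 * A minimal usage sketch (illustrative only; this pairing mirrors how
 * the HV MMU code typically uses these helpers, it is not part of this
 * file's API contract):
 *
 *	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
 *		cpu_relax();
 *	... inspect or modify the HPTE ...
 *	__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
 *
 * Passing the current word 0 back is fine: the unlock helpers clear
 * HPTE_V_HVLOCK themselves.
 */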

static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	hpte_v &= ~HPTE_V_HVLOCK;
	asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
	hpte[0] = cpu_to_be64(hpte_v);
}

/* Like unlock_hpte(), but without the release barrier */
static inline void __unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	hpte_v &= ~HPTE_V_HVLOCK;
	hpte[0] = cpu_to_be64(hpte_v);
}

static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
					     unsigned long pte_index)
{
	int i, b_psize = MMU_PAGE_4K, a_psize = MMU_PAGE_4K;
	unsigned int penc;
	unsigned long rb = 0, va_low, sllp;
	unsigned int lp = (r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	if (v & HPTE_V_LARGE) {
		i = hpte_page_sizes[lp];
		b_psize = i & 0xf;
		a_psize = i >> 4;
	}

	/*
	 * Ignore the top 14 bits of the VA.
	 * The top two bits of v cover the segment size, hence shift by
	 * 16 bits; also clear the lower HPTE_V_AVPN_SHIFT (7) bits.
	 * The AVA field in v also has its lower 23 bits ignored.
	 * For a 4K base page size we need bits 14..65 of the VA (so we
	 * must collect an extra 11 bits); for other sizes we need bits
	 * 14..14+i.
	 */
	/* This covers bits 14..54 of the VA */
	rb = (v & ~0x7fUL) << 16;		/* AVA field */

	/*
	 * The AVA in v has its lower 23 bits cleared; we need to derive
	 * them from the PTEG index.
	 */
	va_low = pte_index >> 3;
	if (v & HPTE_V_SECONDARY)
		va_low = ~va_low;
	/*
	 * Get the VPN bits from va_low by reversing the hash.
	 * In v we have the VA with its lower 23 bits dropped and then
	 * shifted left by HPTE_V_AVPN_SHIFT (7) bits, so to recover the
	 * VSID we shift right by (SID_SHIFT - (23 - 7)).
	 */
	if (!(v & HPTE_V_1TB_SEG))
		va_low ^= v >> (SID_SHIFT - 16);
	else
		va_low ^= v >> (SID_SHIFT_1T - 16);
	va_low &= 0x7ff;

	switch (b_psize) {
	case MMU_PAGE_4K:
		sllp = get_sllp_encoding(a_psize);
		rb |= sllp << 5;	/*  AP field */
		rb |= (va_low & 0x7ff) << 12;	/* remaining 11 bits of AVA */
		break;
	default:
	{
		int aval_shift;
		/*
		 * Remaining bits of the AVA/LP fields; these also
		 * contain the rr bits of LP.
		 */
		rb |= (va_low << mmu_psize_defs[b_psize].shift) & 0x7ff000;
		/*
		 * Now clear the LP bits that are not needed for the
		 * actual page size.
		 */
		rb &= ~((1ul << mmu_psize_defs[a_psize].shift) - 1);
		/*
		 * The AVAL field holds bits 58..(77 - base_page_shift)
		 * of the VA. We have space for bits 58..64; missing bits
		 * should be zero-filled. The +1 takes care of the L bit
		 * shift.
		 */
		aval_shift = 64 - (77 - mmu_psize_defs[b_psize].shift) + 1;
		rb |= ((va_low << aval_shift) & 0xfe);

		rb |= 1;		/* L field */
		penc = mmu_psize_defs[b_psize].penc[a_psize];
		rb |= penc << 12;	/* LP field */
		break;
	}
	}
	rb |= (v >> HPTE_V_SSIZE_SHIFT) << 8;	/* B field */
	return rb;
}
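
/*
 * Sketch of how the result is consumed (an assumption based on the HV
 * TLB-invalidation path, not code from this file): the returned value
 * becomes the RB operand of a tlbie targeting the guest's LPID, roughly:
 *
 *	rb = compute_tlbie_rb(v, r, pte_index);
 *	asm volatile(PPC_TLBIE_5(%0,%1,0,0,0)
 *		     : : "r" (rb), "r" (kvm->arch.lpid));
 */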

static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
{
	return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
}
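
/*
 * Worked example (hypothetical numbers): with a 64 KiB actual page size
 * (psize = 0x10000) and a 4 KiB PAGE_SHIFT, hpte_rpn() masks off the low
 * 16 bits of the real address held in HPTE_R_RPN and shifts right by 12,
 * yielding the page frame number of the start of the 64 KiB page.
 */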

static inline int hpte_is_writable(unsigned long ptel)
{
	unsigned long pp = ptel & (HPTE_R_PP0 | HPTE_R_PP);

	return pp != PP_RXRX && pp != PP_RXXX;
}

static inline unsigned long hpte_make_readonly(unsigned long ptel)
{
	if ((ptel & HPTE_R_PP0) || (ptel & HPTE_R_PP) == PP_RWXX)
		ptel = (ptel & ~HPTE_R_PP) | PP_RXXX;
	else
		ptel |= PP_RXRX;
	return ptel;
}

static inline bool hpte_cache_flags_ok(unsigned long hptel, bool is_ci)
{
	unsigned int wimg = hptel & HPTE_R_WIMG;

	/* Handle SAO */
	if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
	    cpu_has_feature(CPU_FTR_ARCH_206))
		wimg = HPTE_R_M;

	if (!is_ci)
		return wimg == HPTE_R_M;
	/*
	 * If the host mapping is cache-inhibited, make sure hptel is
	 * cache-inhibited as well.
	 */
	if (wimg & HPTE_R_W) /* FIXME!! is this OK for all guests? */
		return false;
	return !!(wimg & HPTE_R_I);
}

/*
 * If it's present and writable, atomically set the dirty and referenced
 * bits and return the PTE, otherwise return a zero PTE.
 */
static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing)
{
	pte_t old_pte, new_pte = __pte(0);

	while (1) {
		/*
		 * Make sure we don't reload from ptep
		 */
		old_pte = READ_ONCE(*ptep);
		/*
		 * Wait until H_PAGE_BUSY is clear, then update the PTE
		 * atomically.
		 */
		if (unlikely(pte_val(old_pte) & H_PAGE_BUSY)) {
			cpu_relax();
			continue;
		}
		/* If the PTE is not present, return a zero PTE */
		if (unlikely(!(pte_val(old_pte) & _PAGE_PRESENT)))
			return __pte(0);

		new_pte = pte_mkyoung(old_pte);
		if (writing && pte_write(old_pte))
			new_pte = pte_mkdirty(new_pte);

		if (pte_xchg(ptep, old_pte, new_pte))
			break;
	}
	return new_pte;
}

static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return PP_RWRX <= pp && pp <= PP_RXRX;
	return true;
}

static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return pp == PP_RWRW;
	return pp <= PP_RWRW;
}
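
/*
 * For reference (PP encodings come from <asm/book3s/64/mmu-hash.h>,
 * restated here only to illustrate the checks above):
 *	PP_RWXX = 0, PP_RWRX = 1, PP_RWRW = 2, PP_RXRX = 3
 * With a protection key set, reads are allowed for PP_RWRX..PP_RXRX and
 * writes only for PP_RWRW; without a key, reads are always allowed and
 * writes for anything up to PP_RWRW.
 */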

static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
{
	unsigned long skey;

	skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
		((hpte_r & HPTE_R_KEY_LO) >> 9);
	return (amr >> (62 - 2 * skey)) & 3;
}
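
/*
 * Worked example: the AMR holds a 2-bit field per storage key, with key
 * 0 in the top two bits. For skey = 5 the shift is 62 - 2 * 5 = 52, so
 * the function returns AMR bits 52..53 (counting from the least
 * significant bit), i.e. the permission pair for key 5.
 */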

static inline void lock_rmap(unsigned long *rmap)
{
	do {
		while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
			cpu_relax();
	} while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
}

static inline void unlock_rmap(unsigned long *rmap)
{
	__clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
}
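
/*
 * Minimal usage sketch (illustrative only): callers bracket updates to a
 * reverse-mapping chain with this pair, e.g.
 *
 *	lock_rmap(rmap);
 *	... walk or modify the rmap chain ...
 *	unlock_rmap(rmap);
 *
 * The plain test_bit() spin before test_and_set_bit_lock() keeps the
 * cache line shared while waiting, so contending CPUs don't ping-pong
 * it with failed atomic operations.
 */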

static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
				   unsigned long pagesize)
{
	unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;

	if (pagesize <= PAGE_SIZE)
		return true;
	return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
}

/*
 * This works for 4k, 64k and 16M pages on POWER7,
 * and 4k and 16M pages on PPC970.
 */
static inline unsigned long slb_pgsize_encoding(unsigned long psize)
{
	unsigned long senc = 0;

	if (psize > 0x1000) {
		senc = SLB_VSID_L;
		if (psize == 0x10000)
			senc |= SLB_VSID_LP_01;
	}
	return senc;
}
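
/*
 * In other words (a summary of the cases above):
 *	4K  (0x1000)    -> 0
 *	64K (0x10000)   -> SLB_VSID_L | SLB_VSID_LP_01
 *	16M (0x1000000) -> SLB_VSID_L
 */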

static inline int is_vrma_hpte(unsigned long hpte_v)
{
	return (hpte_v & ~0xffffffUL) ==
		(HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * Note modification of an HPTE; set the HPTE modified bit
 * if anyone is interested.
 */
static inline void note_hpte_modification(struct kvm *kvm,
					  struct revmap_entry *rev)
{
	if (atomic_read(&kvm->arch.hpte_mod_interest))
		rev->guest_rpte |= HPTE_GR_MODIFIED;
}

/*
 * Like kvm_memslots(), but for use in real mode when we can't do
 * any RCU stuff (since the secondary threads are offline from the
 * kernel's point of view), and we can't print anything.
 * Thus we use rcu_dereference_raw_notrace() rather than
 * rcu_dereference_check().
 */
static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
{
	return rcu_dereference_raw_notrace(kvm->memslots[0]);
}

extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);

extern void kvmhv_rm_send_ipi(int cpu);

static inline unsigned long kvmppc_hpt_npte(struct kvm_hpt_info *hpt)
{
	/* HPTEs are 16 (2**4) bytes long */
	return 1UL << (hpt->order - 4);
}

static inline unsigned long kvmppc_hpt_mask(struct kvm_hpt_info *hpt)
{
	/* 128 (2**7) bytes in each HPTEG */
	return (1UL << (hpt->order - 7)) - 1;
}
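
/*
 * Worked example: with KVM_DEFAULT_HPT_ORDER (24, i.e. a 16 MiB HPT),
 * kvmppc_hpt_npte() gives 1 << 20 = 1M HPTEs and kvmppc_hpt_mask()
 * gives (1 << 17) - 1, the index mask over the 128K HPTE groups.
 */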

#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

#endif /* __ASM_KVM_BOOK3S_64_H__ */