/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS MMU handling in the KVM module.
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/highmem.h>
#include <linux/kvm_host.h>
#include <asm/mmu_context.h>

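/**
 * kvm_mips_get_kernel_asid() - ASID for guest kernel mode on this CPU.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	The hardware ASID allocated for this vCPU's guest kernel mode
 *		on the current CPU, masked to the ASID width the processor
 *		implements. Must be called with preemption disabled, since it
 *		uses smp_processor_id().
 */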
static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	return vcpu->arch.guest_kernel_asid[cpu] &
			cpu_asid_mask(&cpu_data[cpu]);
}

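/**
 * kvm_mips_get_user_asid() - ASID for guest user mode on this CPU.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	The hardware ASID allocated for this vCPU's guest user mode on
 *		the current CPU, masked to the ASID width the processor
 *		implements. Like kvm_mips_get_kernel_asid(), this must be
 *		called with preemption disabled.
 */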
static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	return vcpu->arch.guest_user_asid[cpu] &
			cpu_asid_mask(&cpu_data[cpu]);
}

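/**
 * kvm_mips_map_page() - Back a guest physical frame with a host page.
 * @kvm:	KVM pointer.
 * @gfn:	Guest frame number to map.
 *
 * Look up the host page backing @gfn with gfn_to_pfn() and cache the
 * resulting pfn in the guest physical map (guest_pmap). A @gfn that is
 * already mapped is left untouched. Callers are expected to have validated
 * @gfn against guest_pmap_npages.
 *
 * Returns:	0 on success.
 *		-EFAULT if no host page could be found for @gfn.
 */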
static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
	int srcu_idx, err = 0;
	kvm_pfn_t pfn;

	/* Nothing to do if the frame already has a host page */
	if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
		return 0;

	/* SRCU protects the memslots read by gfn_to_pfn() */
	srcu_idx = srcu_read_lock(&kvm->srcu);
	pfn = gfn_to_pfn(kvm, gfn);

	if (is_error_noslot_pfn(pfn)) {
		kvm_err("Couldn't get pfn for gfn %#llx!\n", gfn);
		err = -EFAULT;
		goto out;
	}

	kvm->arch.guest_pmap[gfn] = pfn;
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return err;
}

/**
 * kvm_mips_translate_guest_kseg0_to_hpa() - Translate a guest KSEG0 address
 * to a host physical address.
 * @vcpu:	Virtual CPU.
 * @gva:	Guest virtual address in guest KSEG0.
 *
 * Returns:	Host physical address on success.
 *		KVM_INVALID_PAGE if @gva is not a valid KSEG0 address.
 *		KVM_INVALID_ADDR if no host page could be mapped for it.
 */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
						    unsigned long gva)
{
	gfn_t gfn;
	unsigned long offset = gva & ~PAGE_MASK;
	struct kvm *kvm = vcpu->kvm;

	if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
		kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
			__builtin_return_address(0), gva);
		return KVM_INVALID_PAGE;
	}

	gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);

	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
			gva);
		return KVM_INVALID_PAGE;
	}

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return KVM_INVALID_ADDR;

	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}
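/*
 * Worked example (illustrative only, assuming 4 KiB pages): a GVA of
 * 0x40001234 lies in guest KSEG0, so the guest PA is 0x1234, giving gfn 1
 * and offset 0x234; the value returned is
 * (guest_pmap[1] << PAGE_SHIFT) + 0x234.
 */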
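/**
 * kvm_mips_handle_kseg0_tlb_fault() - Handle a TLB fault on a guest KSEG0
 * address.
 * @badvaddr:	Faulting guest virtual address in guest KSEG0.
 * @vcpu:	Virtual CPU.
 *
 * Fault in the pair of guest physical pages containing @badvaddr and write a
 * host TLB entry covering both. Host TLB entries map an even/odd page pair,
 * so for instance (illustrative, assuming 4 KiB pages) @badvaddr 0x40003210
 * is gfn 3, and the entry written covers gfn 2 (EntryLo0) and gfn 3
 * (EntryLo1).
 *
 * Returns:	0 on success.
 *		-1 on failure.
 */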
/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
				    struct kvm_vcpu *vcpu)
{
	gfn_t gfn;
	kvm_pfn_t pfn0, pfn1;
	unsigned long vaddr = 0;
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	const int flush_dcache_mask = 0;
	int ret;

	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}

	/* Both pages of the even/odd pair must lie within the guest pmap */
	gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
	if ((gfn | 1) >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
			gfn, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}
	/* Align to the first (even) page of the pair */
	vaddr = badvaddr & (PAGE_MASK << 1);

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return -1;

	/* Fault in the other page of the pair as well */
	if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
		return -1;

	pfn0 = kvm->arch.guest_pmap[gfn & ~0x1];
	pfn1 = kvm->arch.guest_pmap[gfn | 0x1];

	/* KSEG0 is always mapped writable with the default cache policy */
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
		ENTRYLO_D | ENTRYLO_V;
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
		ENTRYLO_D | ENTRYLO_V;

	preempt_disable();
	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				      flush_dcache_mask);
	preempt_enable();

	return ret;
}

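/**
 * kvm_mips_handle_mapped_seg_tlb_fault() - Handle a fault on a guest TLB
 * mapped address.
 * @vcpu:	Virtual CPU.
 * @tlb:	Guest TLB entry that maps the faulting address.
 *
 * Derive a host TLB entry from the guest TLB entry @tlb: fault in the guest
 * physical pages it references and substitute host frame numbers, taking the
 * valid and dirty bits from the guest entry but forcing the default cache
 * policy. Care is taken not to map anything else over the guest commpage
 * address.
 *
 * Returns:	0 on success.
 *		-1 on failure.
 */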
int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
					 struct kvm_mips_tlb *tlb)
{
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	kvm_pfn_t pfn0, pfn1;
	gfn_t gfn0, gfn1;
	long tlb_lo[2];
	int ret;

	/* Work on a local copy so the guest's TLB entry is left untouched */
	tlb_lo[0] = tlb->tlb_lo[0];
	tlb_lo[1] = tlb->tlb_lo[1];

	/*
	 * The commpage address must not be mapped to anything else if the
	 * guest TLB contains entries nearby, or commpage accesses will break.
	 */
	if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) &
			VPN2_MASK & (PAGE_MASK << 1)))
		tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0;

	gfn0 = mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT;
	gfn1 = mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT;
	if (gfn0 >= kvm->arch.guest_pmap_npages ||
	    gfn1 >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n",
			__func__, gfn0, gfn1, tlb->tlb_hi);
		kvm_mips_dump_guest_tlbs(vcpu);
		return -1;
	}

	if (kvm_mips_map_page(kvm, gfn0) < 0)
		return -1;

	if (kvm_mips_map_page(kvm, gfn1) < 0)
		return -1;

	pfn0 = kvm->arch.guest_pmap[gfn0];
	pfn1 = kvm->arch.guest_pmap[gfn1];

	/* Get attributes from the Guest TLB */
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
		(tlb_lo[0] & ENTRYLO_D) |
		(tlb_lo[0] & ENTRYLO_V);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
		(tlb_lo[1] & ENTRYLO_D) |
		(tlb_lo[1] & ENTRYLO_V);

	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
		  tlb->tlb_lo[0], tlb->tlb_lo[1]);

	preempt_disable();
	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
					       kvm_mips_get_kernel_asid(vcpu) :
					       kvm_mips_get_user_asid(vcpu));
	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				      tlb->tlb_mask);
	preempt_enable();

	return ret;
}

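/**
 * kvm_get_new_mmu_context() - Allocate a fresh ASID for an MMU context.
 * @mm:		MMU context to allocate an ASID for.
 * @cpu:	CPU the ASID is allocated on.
 * @vcpu:	Virtual CPU (currently unused).
 *
 * KVM-local counterpart of get_new_mmu_context(): advance the CPU's ASID
 * cache and assign the new ASID to @mm. When the ASID space wraps around, a
 * new ASID cycle is started by flushing the icache (if it is virtually
 * tagged) and the TLB.
 */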
void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
			     struct kvm_vcpu *vcpu)
{
	unsigned long asid = asid_cache(cpu);

	asid += cpu_asid_inc();
	if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) {
		if (cpu_has_vtag_icache)
			flush_icache_all();

		kvm_local_flush_tlb_all();      /* start new asid cycle */

		if (!asid)      /* fix version if needed */
			asid = asid_first_version(cpu);
	}

	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}

/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:	Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
 * if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
	if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
		hrtimer_restart(&vcpu->arch.comparecount_timer);
}

/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
	unsigned long flags;
	int newasid = 0;

	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

	/* Allocate new kernel and user ASIDs if needed */

	local_irq_save(flags);

	if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
						asid_version_mask(cpu)) {
		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
		vcpu->arch.guest_kernel_asid[cpu] =
		    vcpu->arch.guest_kernel_mm.context.asid[cpu];
		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
		vcpu->arch.guest_user_asid[cpu] =
		    vcpu->arch.guest_user_mm.context.asid[cpu];
		newasid++;

		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
			  cpu_context(cpu, current->mm));
		kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
			  cpu, vcpu->arch.guest_kernel_asid[cpu]);
		kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
			  vcpu->arch.guest_user_asid[cpu]);
	}

	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
		/*
		 * Migrate the timer interrupt to the current CPU so that it
		 * always interrupts the guest and synchronously triggers a
		 * guest timer interrupt.
		 */
		kvm_mips_migrate_count(vcpu);
	}

	if (!newasid) {
		/*
		 * If we preempted while the guest was executing, then reload
		 * the pre-empted ASID.
		 */
		if (current->flags & PF_VCPU) {
			write_c0_entryhi(vcpu->arch.preempt_entryhi &
					 asid_mask);
			ehb();
		}
	} else {
		/* New ASIDs were allocated for the VM */

		/*
		 * Were we in guest context? If so, then the pre-empted ASID is
		 * no longer valid; we need to set it to what it should be
		 * based on the mode of the Guest (Kernel/User).
		 */
		if (current->flags & PF_VCPU) {
			if (KVM_GUEST_KERNEL_MODE(vcpu))
				write_c0_entryhi(vcpu->arch.
						 guest_kernel_asid[cpu] &
						 asid_mask);
			else
				write_c0_entryhi(vcpu->arch.
						 guest_user_asid[cpu] &
						 asid_mask);
			ehb();
		}
	}

	/* restore guest state to registers */
	kvm_mips_callbacks->vcpu_set_regs(vcpu);

	local_irq_restore(flags);
}

/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	int cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();

	vcpu->arch.preempt_entryhi = read_c0_entryhi();
	vcpu->arch.last_sched_cpu = cpu;

	/* save guest state in registers */
	kvm_mips_callbacks->vcpu_get_regs(vcpu);

	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
	     asid_version_mask(cpu))) {
		kvm_debug("%s: Dropping MMU Context:  %#lx\n", __func__,
			  cpu_context(cpu, current->mm));
		drop_mmu_context(current->mm, cpu);
	}
	write_c0_entryhi(cpu_asid(cpu, current->mm));
	ehb();

	local_irq_restore(flags);
}

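/**
 * kvm_get_inst() - Fetch a guest instruction.
 * @opc:	Guest virtual address of the instruction.
 * @vcpu:	Virtual CPU.
 *
 * Read the instruction word at guest address @opc. For TLB mapped addresses
 * the host TLB is consulted and, if necessary, the guest mapping is faulted
 * into the host TLB first; for guest KSEG0 the address is translated to a
 * host physical address and read through a temporary kernel mapping.
 *
 * Returns:	The instruction word on success.
 *		KVM_INVALID_INST if the address could not be read.
 */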
u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long paddr, flags, vpn2, asid;
	unsigned long va = (unsigned long)opc;
	void *vaddr;
	u32 inst;
	int index;

	if (KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0 ||
	    KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
		/* TLB mapped address: read via the host TLB */
		local_irq_save(flags);
		index = kvm_mips_host_tlb_lookup(vcpu, va);
		if (index >= 0) {
			inst = *(opc);
		} else {
			/*
			 * Not in the host TLB yet: look the mapping up in the
			 * guest TLB and fault it into the host TLB first.
			 */
			vpn2 = va & VPN2_MASK;
			asid = kvm_read_c0_guest_entryhi(cop0) &
						KVM_ENTRYHI_ASID;
			index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
			if (index < 0) {
				kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
					__func__, opc, vcpu, read_c0_entryhi());
				kvm_mips_dump_host_tlbs();
				kvm_mips_dump_guest_tlbs(vcpu);
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
						&vcpu->arch.guest_tlb[index])) {
				kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n",
					__func__, opc, index, vcpu,
					read_c0_entryhi());
				kvm_mips_dump_guest_tlbs(vcpu);
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			inst = *(opc);
		}
		local_irq_restore(flags);
	} else if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
		/* Unmapped guest KSEG0: translate and read via kmap */
		paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu, va);
		vaddr = kmap_atomic(pfn_to_page(PHYS_PFN(paddr)));
		vaddr += paddr & ~PAGE_MASK;
		inst = *(u32 *)vaddr;
		kunmap_atomic(vaddr);
	} else {
		kvm_err("%s: illegal address: %p\n", __func__, opc);
		return KVM_INVALID_INST;
	}

	return inst;
}