xref: /openbmc/linux/drivers/virt/acrn/hsm.c (revision bbaf1ff0)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * ACRN Hypervisor Service Module (HSM)
4  *
5  * Copyright (C) 2020 Intel Corporation. All rights reserved.
6  *
7  * Authors:
8  *	Fengwei Yin <fengwei.yin@intel.com>
9  *	Yakui Zhao <yakui.zhao@intel.com>
10  */
11 
12 #include <linux/cpu.h>
13 #include <linux/io.h>
14 #include <linux/mm.h>
15 #include <linux/module.h>
16 #include <linux/slab.h>
17 
18 #include <asm/acrn.h>
19 #include <asm/hypervisor.h>
20 
21 #include "acrn_drv.h"
22 
23 /*
24  * When /dev/acrn_hsm is opened, a 'struct acrn_vm' object is created to
25  * represent a VM instance and continues to be associated with the opened file
26  * descriptor. All ioctl operations on this file descriptor will be targeted to
27  * the VM instance. Release of this file descriptor will destroy the object.
28  */
29 static int acrn_dev_open(struct inode *inode, struct file *filp)
30 {
31 	struct acrn_vm *vm;
32 
33 	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
34 	if (!vm)
35 		return -ENOMEM;
36 
37 	vm->vmid = ACRN_INVALID_VMID;
38 	filp->private_data = vm;
39 	return 0;
40 }
41 
42 static int pmcmd_ioctl(u64 cmd, void __user *uptr)
43 {
44 	struct acrn_pstate_data *px_data;
45 	struct acrn_cstate_data *cx_data;
46 	u64 *pm_info;
47 	int ret = 0;
48 
49 	switch (cmd & PMCMD_TYPE_MASK) {
50 	case ACRN_PMCMD_GET_PX_CNT:
51 	case ACRN_PMCMD_GET_CX_CNT:
52 		pm_info = kmalloc(sizeof(u64), GFP_KERNEL);
53 		if (!pm_info)
54 			return -ENOMEM;
55 
56 		ret = hcall_get_cpu_state(cmd, virt_to_phys(pm_info));
57 		if (ret < 0) {
58 			kfree(pm_info);
59 			break;
60 		}
61 
62 		if (copy_to_user(uptr, pm_info, sizeof(u64)))
63 			ret = -EFAULT;
64 		kfree(pm_info);
65 		break;
66 	case ACRN_PMCMD_GET_PX_DATA:
67 		px_data = kmalloc(sizeof(*px_data), GFP_KERNEL);
68 		if (!px_data)
69 			return -ENOMEM;
70 
71 		ret = hcall_get_cpu_state(cmd, virt_to_phys(px_data));
72 		if (ret < 0) {
73 			kfree(px_data);
74 			break;
75 		}
76 
77 		if (copy_to_user(uptr, px_data, sizeof(*px_data)))
78 			ret = -EFAULT;
79 		kfree(px_data);
80 		break;
81 	case ACRN_PMCMD_GET_CX_DATA:
82 		cx_data = kmalloc(sizeof(*cx_data), GFP_KERNEL);
83 		if (!cx_data)
84 			return -ENOMEM;
85 
86 		ret = hcall_get_cpu_state(cmd, virt_to_phys(cx_data));
87 		if (ret < 0) {
88 			kfree(cx_data);
89 			break;
90 		}
91 
92 		if (copy_to_user(uptr, cx_data, sizeof(*cx_data)))
93 			ret = -EFAULT;
94 		kfree(cx_data);
95 		break;
96 	default:
97 		break;
98 	}
99 
100 	return ret;
101 }
102 
103 /*
104  * HSM relies on hypercall layer of the ACRN hypervisor to do the
105  * sanity check against the input parameters.
106  */
/*
 * Main ioctl dispatcher for /dev/acrn_hsm.
 *
 * @filp:        the opened HSM file; its private_data is the 'struct acrn_vm'
 *               created in acrn_dev_open().
 * @cmd:         one of the ACRN_IOCTL_* commands.
 * @ioctl_param: command-specific userspace pointer or scalar value.
 *
 * Return: 0 on success, a negative errno on failure.
 */
static long acrn_dev_ioctl(struct file *filp, unsigned int cmd,
			   unsigned long ioctl_param)
{
	struct acrn_vm *vm = filp->private_data;
	struct acrn_vm_creation *vm_param;
	struct acrn_vcpu_regs *cpu_regs;
	struct acrn_ioreq_notify notify;
	struct acrn_ptdev_irq *irq_info;
	struct acrn_ioeventfd ioeventfd;
	struct acrn_vm_memmap memmap;
	struct acrn_mmiodev *mmiodev;
	struct acrn_msi_entry *msi;
	struct acrn_pcidev *pcidev;
	struct acrn_irqfd irqfd;
	struct acrn_vdev *vdev;
	struct page *page;
	u64 cstate_cmd;
	int i, ret = 0;

	/* Every command except VM creation requires an already-created VM. */
	if (vm->vmid == ACRN_INVALID_VMID && cmd != ACRN_IOCTL_CREATE_VM) {
		dev_dbg(acrn_dev.this_device,
			"ioctl 0x%x: Invalid VM state!\n", cmd);
		return -EINVAL;
	}

	switch (cmd) {
	case ACRN_IOCTL_CREATE_VM:
		vm_param = memdup_user((void __user *)ioctl_param,
				       sizeof(struct acrn_vm_creation));
		if (IS_ERR(vm_param))
			return PTR_ERR(vm_param);

		/* Reserved fields must be zero for forward compatibility. */
		if ((vm_param->reserved0 | vm_param->reserved1) != 0) {
			kfree(vm_param);
			return -EINVAL;
		}

		vm = acrn_vm_create(vm, vm_param);
		if (!vm) {
			ret = -EINVAL;
			kfree(vm_param);
			break;
		}

		/*
		 * Copy vm_param back so userspace sees the fields filled in
		 * during creation (presumably the new vmid — set by
		 * acrn_vm_create(); confirm against acrn_drv.h).
		 */
		if (copy_to_user((void __user *)ioctl_param, vm_param,
				 sizeof(struct acrn_vm_creation))) {
			acrn_vm_destroy(vm);
			ret = -EFAULT;
		}

		kfree(vm_param);
		break;
	case ACRN_IOCTL_START_VM:
		ret = hcall_start_vm(vm->vmid);
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to start VM %u!\n", vm->vmid);
		break;
	case ACRN_IOCTL_PAUSE_VM:
		ret = hcall_pause_vm(vm->vmid);
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to pause VM %u!\n", vm->vmid);
		break;
	case ACRN_IOCTL_RESET_VM:
		ret = hcall_reset_vm(vm->vmid);
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to restart VM %u!\n", vm->vmid);
		break;
	case ACRN_IOCTL_DESTROY_VM:
		ret = acrn_vm_destroy(vm);
		break;
	case ACRN_IOCTL_SET_VCPU_REGS:
		cpu_regs = memdup_user((void __user *)ioctl_param,
				       sizeof(struct acrn_vcpu_regs));
		if (IS_ERR(cpu_regs))
			return PTR_ERR(cpu_regs);

		/* All reserved fields, at every nesting level, must be zero. */
		for (i = 0; i < ARRAY_SIZE(cpu_regs->reserved); i++)
			if (cpu_regs->reserved[i]) {
				kfree(cpu_regs);
				return -EINVAL;
			}

		for (i = 0; i < ARRAY_SIZE(cpu_regs->vcpu_regs.reserved_32); i++)
			if (cpu_regs->vcpu_regs.reserved_32[i]) {
				kfree(cpu_regs);
				return -EINVAL;
			}

		for (i = 0; i < ARRAY_SIZE(cpu_regs->vcpu_regs.reserved_64); i++)
			if (cpu_regs->vcpu_regs.reserved_64[i]) {
				kfree(cpu_regs);
				return -EINVAL;
			}

		/* gdt and idt reserved arrays are checked with one loop. */
		for (i = 0; i < ARRAY_SIZE(cpu_regs->vcpu_regs.gdt.reserved); i++)
			if (cpu_regs->vcpu_regs.gdt.reserved[i] |
			    cpu_regs->vcpu_regs.idt.reserved[i]) {
				kfree(cpu_regs);
				return -EINVAL;
			}

		ret = hcall_set_vcpu_regs(vm->vmid, virt_to_phys(cpu_regs));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to set regs state of VM%u!\n",
				vm->vmid);
		kfree(cpu_regs);
		break;
	case ACRN_IOCTL_SET_MEMSEG:
		if (copy_from_user(&memmap, (void __user *)ioctl_param,
				   sizeof(memmap)))
			return -EFAULT;

		ret = acrn_vm_memseg_map(vm, &memmap);
		break;
	case ACRN_IOCTL_UNSET_MEMSEG:
		if (copy_from_user(&memmap, (void __user *)ioctl_param,
				   sizeof(memmap)))
			return -EFAULT;

		ret = acrn_vm_memseg_unmap(vm, &memmap);
		break;
	case ACRN_IOCTL_ASSIGN_MMIODEV:
		mmiodev = memdup_user((void __user *)ioctl_param,
				      sizeof(struct acrn_mmiodev));
		if (IS_ERR(mmiodev))
			return PTR_ERR(mmiodev);

		/* Hypercalls take the physical address of the kernel copy. */
		ret = hcall_assign_mmiodev(vm->vmid, virt_to_phys(mmiodev));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to assign MMIO device!\n");
		kfree(mmiodev);
		break;
	case ACRN_IOCTL_DEASSIGN_MMIODEV:
		mmiodev = memdup_user((void __user *)ioctl_param,
				      sizeof(struct acrn_mmiodev));
		if (IS_ERR(mmiodev))
			return PTR_ERR(mmiodev);

		ret = hcall_deassign_mmiodev(vm->vmid, virt_to_phys(mmiodev));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to deassign MMIO device!\n");
		kfree(mmiodev);
		break;
	case ACRN_IOCTL_ASSIGN_PCIDEV:
		pcidev = memdup_user((void __user *)ioctl_param,
				     sizeof(struct acrn_pcidev));
		if (IS_ERR(pcidev))
			return PTR_ERR(pcidev);

		ret = hcall_assign_pcidev(vm->vmid, virt_to_phys(pcidev));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to assign pci device!\n");
		kfree(pcidev);
		break;
	case ACRN_IOCTL_DEASSIGN_PCIDEV:
		pcidev = memdup_user((void __user *)ioctl_param,
				     sizeof(struct acrn_pcidev));
		if (IS_ERR(pcidev))
			return PTR_ERR(pcidev);

		ret = hcall_deassign_pcidev(vm->vmid, virt_to_phys(pcidev));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to deassign pci device!\n");
		kfree(pcidev);
		break;
	case ACRN_IOCTL_CREATE_VDEV:
		vdev = memdup_user((void __user *)ioctl_param,
				   sizeof(struct acrn_vdev));
		if (IS_ERR(vdev))
			return PTR_ERR(vdev);

		ret = hcall_create_vdev(vm->vmid, virt_to_phys(vdev));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to create virtual device!\n");
		kfree(vdev);
		break;
	case ACRN_IOCTL_DESTROY_VDEV:
		vdev = memdup_user((void __user *)ioctl_param,
				   sizeof(struct acrn_vdev));
		if (IS_ERR(vdev))
			return PTR_ERR(vdev);
		ret = hcall_destroy_vdev(vm->vmid, virt_to_phys(vdev));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to destroy virtual device!\n");
		kfree(vdev);
		break;
	case ACRN_IOCTL_SET_PTDEV_INTR:
		irq_info = memdup_user((void __user *)ioctl_param,
				       sizeof(struct acrn_ptdev_irq));
		if (IS_ERR(irq_info))
			return PTR_ERR(irq_info);

		ret = hcall_set_ptdev_intr(vm->vmid, virt_to_phys(irq_info));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to configure intr for ptdev!\n");
		kfree(irq_info);
		break;
	case ACRN_IOCTL_RESET_PTDEV_INTR:
		irq_info = memdup_user((void __user *)ioctl_param,
				       sizeof(struct acrn_ptdev_irq));
		if (IS_ERR(irq_info))
			return PTR_ERR(irq_info);

		ret = hcall_reset_ptdev_intr(vm->vmid, virt_to_phys(irq_info));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to reset intr for ptdev!\n");
		kfree(irq_info);
		break;
	case ACRN_IOCTL_SET_IRQLINE:
		/* ioctl_param is the irqline value itself, not a pointer. */
		ret = hcall_set_irqline(vm->vmid, ioctl_param);
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to set interrupt line!\n");
		break;
	case ACRN_IOCTL_INJECT_MSI:
		msi = memdup_user((void __user *)ioctl_param,
				  sizeof(struct acrn_msi_entry));
		if (IS_ERR(msi))
			return PTR_ERR(msi);

		ret = hcall_inject_msi(vm->vmid, virt_to_phys(msi));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to inject MSI!\n");
		kfree(msi);
		break;
	case ACRN_IOCTL_VM_INTR_MONITOR:
		/*
		 * Pin one user page long-term; the hypervisor writes interrupt
		 * statistics into it, so it must stay resident and writable.
		 */
		ret = pin_user_pages_fast(ioctl_param, 1,
					  FOLL_WRITE | FOLL_LONGTERM, &page);
		if (unlikely(ret != 1)) {
			dev_dbg(acrn_dev.this_device,
				"Failed to pin intr hdr buffer!\n");
			return -EFAULT;
		}

		ret = hcall_vm_intr_monitor(vm->vmid, page_to_phys(page));
		if (ret < 0) {
			unpin_user_page(page);
			dev_dbg(acrn_dev.this_device,
				"Failed to monitor intr data!\n");
			return ret;
		}
		/* Replace (and unpin) any previously registered monitor page. */
		if (vm->monitor_page)
			unpin_user_page(vm->monitor_page);
		vm->monitor_page = page;
		break;
	case ACRN_IOCTL_CREATE_IOREQ_CLIENT:
		/* Only one default ("fallback") I/O request client per VM. */
		if (vm->default_client)
			return -EEXIST;
		if (!acrn_ioreq_client_create(vm, NULL, NULL, true, "acrndm"))
			ret = -EINVAL;
		break;
	case ACRN_IOCTL_DESTROY_IOREQ_CLIENT:
		if (vm->default_client)
			acrn_ioreq_client_destroy(vm->default_client);
		break;
	case ACRN_IOCTL_ATTACH_IOREQ_CLIENT:
		if (vm->default_client)
			ret = acrn_ioreq_client_wait(vm->default_client);
		else
			ret = -ENODEV;
		break;
	case ACRN_IOCTL_NOTIFY_REQUEST_FINISH:
		if (copy_from_user(&notify, (void __user *)ioctl_param,
				   sizeof(struct acrn_ioreq_notify)))
			return -EFAULT;

		if (notify.reserved != 0)
			return -EINVAL;

		ret = acrn_ioreq_request_default_complete(vm, notify.vcpu);
		break;
	case ACRN_IOCTL_CLEAR_VM_IOREQ:
		acrn_ioreq_request_clear(vm);
		break;
	case ACRN_IOCTL_PM_GET_CPU_STATE:
		/* ioctl_param is both input (command) and output (result). */
		if (copy_from_user(&cstate_cmd, (void __user *)ioctl_param,
				   sizeof(cstate_cmd)))
			return -EFAULT;

		ret = pmcmd_ioctl(cstate_cmd, (void __user *)ioctl_param);
		break;
	case ACRN_IOCTL_IOEVENTFD:
		if (copy_from_user(&ioeventfd, (void __user *)ioctl_param,
				   sizeof(ioeventfd)))
			return -EFAULT;

		if (ioeventfd.reserved != 0)
			return -EINVAL;

		ret = acrn_ioeventfd_config(vm, &ioeventfd);
		break;
	case ACRN_IOCTL_IRQFD:
		if (copy_from_user(&irqfd, (void __user *)ioctl_param,
				   sizeof(irqfd)))
			return -EFAULT;
		ret = acrn_irqfd_config(vm, &irqfd);
		break;
	default:
		dev_dbg(acrn_dev.this_device, "Unknown IOCTL 0x%x!\n", cmd);
		ret = -ENOTTY;
	}

	return ret;
}
424 
/*
 * Release the file descriptor: destroy the associated VM instance and free
 * the 'struct acrn_vm' allocated in acrn_dev_open(). Destruction must happen
 * before the kfree() since acrn_vm_destroy() operates on *vm.
 */
static int acrn_dev_release(struct inode *inode, struct file *filp)
{
	struct acrn_vm *vm = filp->private_data;

	acrn_vm_destroy(vm);
	kfree(vm);
	return 0;
}
433 
434 static ssize_t remove_cpu_store(struct device *dev,
435 				struct device_attribute *attr,
436 				const char *buf, size_t count)
437 {
438 	u64 cpu, lapicid;
439 	int ret;
440 
441 	if (kstrtoull(buf, 0, &cpu) < 0)
442 		return -EINVAL;
443 
444 	if (cpu >= num_possible_cpus() || cpu == 0 || !cpu_is_hotpluggable(cpu))
445 		return -EINVAL;
446 
447 	if (cpu_online(cpu))
448 		remove_cpu(cpu);
449 
450 	lapicid = cpu_data(cpu).apicid;
451 	dev_dbg(dev, "Try to remove cpu %lld with lapicid %lld\n", cpu, lapicid);
452 	ret = hcall_sos_remove_cpu(lapicid);
453 	if (ret < 0) {
454 		dev_err(dev, "Failed to remove cpu %lld!\n", cpu);
455 		goto fail_remove;
456 	}
457 
458 	return count;
459 
460 fail_remove:
461 	add_cpu(cpu);
462 	return ret;
463 }
464 static DEVICE_ATTR_WO(remove_cpu);
465 
466 static umode_t acrn_attr_visible(struct kobject *kobj, struct attribute *a, int n)
467 {
468        if (a == &dev_attr_remove_cpu.attr)
469                return IS_ENABLED(CONFIG_HOTPLUG_CPU) ? a->mode : 0;
470 
471        return a->mode;
472 }
473 
/* sysfs attributes exported by the HSM misc device. */
static struct attribute *acrn_attrs[] = {
	&dev_attr_remove_cpu.attr,
	NULL
};
478 
/* Attribute group; .is_visible filters entries at registration time. */
static struct attribute_group acrn_attr_group = {
	.attrs = acrn_attrs,
	.is_visible = acrn_attr_visible,
};
483 
/* NULL-terminated group list handed to the misc device core. */
static const struct attribute_group *acrn_attr_groups[] = {
	&acrn_attr_group,
	NULL
};
488 
/* File operations for /dev/acrn_hsm; one VM instance per open descriptor. */
static const struct file_operations acrn_fops = {
	.owner		= THIS_MODULE,
	.open		= acrn_dev_open,
	.release	= acrn_dev_release,
	.unlocked_ioctl = acrn_dev_ioctl,
};
495 
/*
 * The HSM misc device. Deliberately non-static: other parts of the driver
 * reference it (e.g. the dev_dbg() calls above use acrn_dev.this_device).
 */
struct miscdevice acrn_dev = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "acrn_hsm",
	.fops	= &acrn_fops,
	.groups	= acrn_attr_groups,
};
502 
503 static int __init hsm_init(void)
504 {
505 	int ret;
506 
507 	if (x86_hyper_type != X86_HYPER_ACRN)
508 		return -ENODEV;
509 
510 	if (!(cpuid_eax(ACRN_CPUID_FEATURES) & ACRN_FEATURE_PRIVILEGED_VM))
511 		return -EPERM;
512 
513 	ret = misc_register(&acrn_dev);
514 	if (ret) {
515 		pr_err("Create misc dev failed!\n");
516 		return ret;
517 	}
518 
519 	ret = acrn_ioreq_intr_setup();
520 	if (ret) {
521 		pr_err("Setup I/O request handler failed!\n");
522 		misc_deregister(&acrn_dev);
523 		return ret;
524 	}
525 	return 0;
526 }
527 
/*
 * Module exit: tear down in reverse order of hsm_init() — stop the I/O
 * request interrupt handling before unregistering the device.
 */
static void __exit hsm_exit(void)
{
	acrn_ioreq_intr_remove();
	misc_deregister(&acrn_dev);
}
module_init(hsm_init);
module_exit(hsm_exit);

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ACRN Hypervisor Service Module (HSM)");
539