/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/io.h>

#include "vmci_handle_array.h"
#include "vmci_queue_pair.h"
#include "vmci_datagram.h"
#include "vmci_doorbell.h"
#include "vmci_resource.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"

#define VMCI_UTIL_NUM_RESOURCES 1

enum {
	VMCI_NOTIFY_RESOURCE_QUEUE_PAIR = 0,
	VMCI_NOTIFY_RESOURCE_DOOR_BELL = 1,
};

enum {
	VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY = 0,
	VMCI_NOTIFY_RESOURCE_ACTION_CREATE = 1,
	VMCI_NOTIFY_RESOURCE_ACTION_DESTROY = 2,
};

/*
 * VMCI driver initialization. This block can also be used to
 * pass initial group membership etc.
 */
struct vmci_init_blk {
	u32 cid;
	u32 flags;
};
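
/*
 * Illustrative sketch (an assumption, not part of this driver): a
 * user-level VMX would typically initialize its context right after
 * opening /dev/vmci.  Only the ioctl number and the block layout come
 * from this file; the fd handling and my_cid (a VMX-chosen context id)
 * are invented for the example.
 *
 *	struct vmci_init_blk blk = { .cid = my_cid, .flags = 0 };
 *	int fd = open("/dev/vmci", O_RDWR);
 *	if (fd >= 0 && ioctl(fd, IOCTL_VMCI_INIT_CONTEXT, &blk) == 0)
 *		printf("context id %u\n", blk.cid);	// cid copied back
 */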

/* VMCIqueue_pairAllocInfo_VMToVM */
struct vmci_qp_alloc_info_vmvm {
	struct vmci_handle handle;
	u32 peer;
	u32 flags;
	u64 produce_size;
	u64 consume_size;
	u64 produce_page_file;	  /* User VA. */
	u64 consume_page_file;	  /* User VA. */
	u64 produce_page_file_size;  /* Size of the file name array. */
	u64 consume_page_file_size;  /* Size of the file name array. */
	s32 result;
	u32 _pad;
};

/* VMCISetNotifyInfo: Used to pass notify flag's address to the host driver. */
struct vmci_set_notify_info {
	u64 notify_uva;
	s32 result;
	u32 _pad;
};
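
/*
 * Illustrative sketch (an assumption, not part of this driver): after
 * IOCTL_VMCI_INIT_CONTEXT, the VMX hands the host the user VA of a
 * one-byte flag, which the host side then sets when the context is
 * signalled.  The struct layout and ioctl number come from this file;
 * the rest is invented for the example.
 *
 *	static volatile unsigned char notify_flag;
 *
 *	struct vmci_set_notify_info ni = {
 *		.notify_uva = (u64)(uintptr_t)&notify_flag,
 *	};
 *	ioctl(fd, IOCTL_VMCI_SET_NOTIFY, &ni);
 *	// ni.result carries the VMCI status; the VMX polls and clears
 *	// notify_flag itself.
 */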

/*
 * Per-instance host state
 */
struct vmci_host_dev {
	struct vmci_ctx *context;
	int user_version;
	enum vmci_obj_type ct_type;
	struct mutex lock;  /* Mutex lock for vmci context access */
};

static struct vmci_ctx *host_context;
static bool vmci_host_device_initialized;
static atomic_t vmci_host_active_users = ATOMIC_INIT(0);

/*
 * Determines whether the VMCI host personality is
 * available. Since the core functionality of the host driver is
 * always present, all guests could possibly use the host
 * personality. However, to minimize the deviation from the
 * pre-unified driver state of affairs, we only consider the host
 * device active if there is no active guest device or if there
 * are VMX'en with active VMCI contexts using the host device.
 */
bool vmci_host_code_active(void)
{
	return vmci_host_device_initialized &&
	    (!vmci_guest_code_active() ||
	     atomic_read(&vmci_host_active_users) > 0);
}

/*
 * Called on open of /dev/vmci.
 */
static int vmci_host_open(struct inode *inode, struct file *filp)
{
	struct vmci_host_dev *vmci_host_dev;

	vmci_host_dev = kzalloc(sizeof(struct vmci_host_dev), GFP_KERNEL);
	if (vmci_host_dev == NULL)
		return -ENOMEM;

	vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;
	mutex_init(&vmci_host_dev->lock);
	filp->private_data = vmci_host_dev;

	return 0;
}

/*
 * Called on close of /dev/vmci, most often when the process
 * exits.
 */
static int vmci_host_close(struct inode *inode, struct file *filp)
{
	struct vmci_host_dev *vmci_host_dev = filp->private_data;

	if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
		vmci_ctx_destroy(vmci_host_dev->context);
		vmci_host_dev->context = NULL;

		/*
		 * The number of active contexts is used to track whether any
		 * VMX'en are using the host personality. It is incremented when
		 * a context is created through the IOCTL_VMCI_INIT_CONTEXT
		 * ioctl.
		 */
		atomic_dec(&vmci_host_active_users);
	}
	vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;

	kfree(vmci_host_dev);
	filp->private_data = NULL;
	return 0;
}

/*
 * This is used to wake up the VMX when a VMCI call arrives, or
 * to wake up select() or poll() at the next clock tick.
 */
static unsigned int vmci_host_poll(struct file *filp, poll_table *wait)
{
	struct vmci_host_dev *vmci_host_dev = filp->private_data;
	struct vmci_ctx *context = vmci_host_dev->context;
	unsigned int mask = 0;

	if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
		/* Check for VMCI calls to this VM context. */
		if (wait)
			poll_wait(filp, &context->host_context.wait_queue,
				  wait);

		spin_lock(&context->lock);
		if (context->pending_datagrams > 0 ||
		    vmci_handle_arr_get_size(
				context->pending_doorbell_array) > 0) {
			mask = POLLIN;
		}
		spin_unlock(&context->lock);
	}
	return mask;
}
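
/*
 * Illustrative sketch (an assumption): with the poll handler above, the
 * VMX side can block until a datagram or doorbell notification is
 * pending on its context.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		ioctl(fd, IOCTL_VMCI_DATAGRAM_RECEIVE, &recv_info);
 */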

/*
 * Copies the handles of a handle array into a user buffer, and
 * returns the new length in *user_buf_size. If the copy to the
 * user buffer fails, the function still returns VMCI_SUCCESS,
 * but retval != 0.
 */
static int drv_cp_harray_to_user(void __user *user_buf_uva,
				 u64 *user_buf_size,
				 struct vmci_handle_arr *handle_array,
				 int *retval)
{
	u32 array_size = 0;
	struct vmci_handle *handles;

	if (handle_array)
		array_size = vmci_handle_arr_get_size(handle_array);

	if (array_size * sizeof(*handles) > *user_buf_size)
		return VMCI_ERROR_MORE_DATA;

	*user_buf_size = array_size * sizeof(*handles);
	if (*user_buf_size)
		*retval = copy_to_user(user_buf_uva,
				       vmci_handle_arr_get_handles
				       (handle_array), *user_buf_size);

	return VMCI_SUCCESS;
}

/*
 * Sets up a given context for notify to work. Maps the notify
 * boolean in user VA into kernel space.
 */
static int vmci_host_setup_notify(struct vmci_ctx *context,
				  unsigned long uva)
{
	int retval;

	if (context->notify_page) {
		pr_devel("%s: Notify mechanism is already set up\n", __func__);
		return VMCI_ERROR_DUPLICATE_ENTRY;
	}

	/*
	 * We are using 'bool' internally, but let's make sure we're
	 * explicit about the size.
	 */
	BUILD_BUG_ON(sizeof(bool) != sizeof(u8));
	if (!access_ok(VERIFY_WRITE, (void __user *)uva, sizeof(u8)))
		return VMCI_ERROR_GENERIC;

	/*
	 * Lock the physical page backing the given user VA. The third
	 * argument (write = 1) asks for a writable mapping, since the
	 * driver writes the notify flag through this page.
	 */
	retval = get_user_pages_fast(uva, 1, 1, &context->notify_page);
	if (retval != 1) {
		context->notify_page = NULL;
		return VMCI_ERROR_GENERIC;
	}

	/*
	 * Map the locked page and set up notify pointer.
	 */
	context->notify = kmap(context->notify_page) + (uva & (PAGE_SIZE - 1));
	vmci_ctx_check_signal_notify(context);

	return VMCI_SUCCESS;
}

static int vmci_host_get_version(struct vmci_host_dev *vmci_host_dev,
				 unsigned int cmd, void __user *uptr)
{
	if (cmd == IOCTL_VMCI_VERSION2) {
		int __user *vptr = uptr;
		if (get_user(vmci_host_dev->user_version, vptr))
			return -EFAULT;
	}

	/*
	 * The basic logic here is:
	 *
	 * If the user sends in a version of 0, tell it our version.
	 * If the user didn't send in a version, tell it our version.
	 * If the user sent in an old version, tell it -its- version.
	 * If the user sent in a newer version, tell it our version.
	 *
	 * The rationale behind telling the caller its version is that
	 * Workstation 6.5 required the VMX and the VMCI kernel module to
	 * be version sync'd.  All new VMX users will be programmed to
	 * handle the VMCI kernel module version.
	 */

	if (vmci_host_dev->user_version > 0 &&
	    vmci_host_dev->user_version < VMCI_VERSION_HOSTQP) {
		return vmci_host_dev->user_version;
	}

	return VMCI_VERSION;
}
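
/*
 * Worked example of the negotiation above: a VMX that sends
 * VMCI_VERSION_PREHOSTQP (older than VMCI_VERSION_HOSTQP) via
 * IOCTL_VMCI_VERSION2 gets its own version echoed back, while one that
 * sends 0, uses the versionless IOCTL_VMCI_VERSION, or reports
 * VMCI_VERSION_HOSTQP or newer gets VMCI_VERSION.
 */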

#define vmci_ioctl_err(fmt, ...)	\
	pr_devel("%s: " fmt, ioctl_name, ##__VA_ARGS__)
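
/*
 * For example, vmci_ioctl_err("only valid for contexts\n") expands to
 * pr_devel("%s: " "only valid for contexts\n", ioctl_name), where
 * ioctl_name is the stringified ioctl name (e.g.
 * "IOCTL_VMCI_INIT_CONTEXT") passed down by VMCI_DO_IOCTL at the
 * bottom of this file.
 */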

static int vmci_host_do_init_context(struct vmci_host_dev *vmci_host_dev,
				     const char *ioctl_name,
				     void __user *uptr)
{
	struct vmci_init_blk init_block;
	const struct cred *cred;
	int retval;

	if (copy_from_user(&init_block, uptr, sizeof(init_block))) {
		vmci_ioctl_err("error reading init block\n");
		return -EFAULT;
	}

	mutex_lock(&vmci_host_dev->lock);

	if (vmci_host_dev->ct_type != VMCIOBJ_NOT_SET) {
		vmci_ioctl_err("received VMCI init on initialized handle\n");
		retval = -EINVAL;
		goto out;
	}

	if (init_block.flags & ~VMCI_PRIVILEGE_FLAG_RESTRICTED) {
		vmci_ioctl_err("unsupported VMCI restriction flag\n");
		retval = -EINVAL;
		goto out;
	}

	cred = get_current_cred();
	vmci_host_dev->context = vmci_ctx_create(init_block.cid,
						 init_block.flags, 0,
						 vmci_host_dev->user_version,
						 cred);
	put_cred(cred);
	if (IS_ERR(vmci_host_dev->context)) {
		retval = PTR_ERR(vmci_host_dev->context);
		vmci_ioctl_err("error initializing context\n");
		goto out;
	}

	/*
	 * Copy the cid back to user level; we do this to allow the VMX
	 * to enforce its policy on cid generation.
	 */
	init_block.cid = vmci_ctx_get_id(vmci_host_dev->context);
	if (copy_to_user(uptr, &init_block, sizeof(init_block))) {
		vmci_ctx_destroy(vmci_host_dev->context);
		vmci_host_dev->context = NULL;
		vmci_ioctl_err("error writing init block\n");
		retval = -EFAULT;
		goto out;
	}

	vmci_host_dev->ct_type = VMCIOBJ_CONTEXT;
	atomic_inc(&vmci_host_active_users);

	retval = 0;

out:
	mutex_unlock(&vmci_host_dev->lock);
	return retval;
}

static int vmci_host_do_send_datagram(struct vmci_host_dev *vmci_host_dev,
				      const char *ioctl_name,
				      void __user *uptr)
{
	struct vmci_datagram_snd_rcv_info send_info;
	struct vmci_datagram *dg = NULL;
	u32 cid;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&send_info, uptr, sizeof(send_info)))
		return -EFAULT;

	if (send_info.len > VMCI_MAX_DG_SIZE) {
		vmci_ioctl_err("datagram is too big (size=%d)\n",
			       send_info.len);
		return -EINVAL;
	}

	if (send_info.len < sizeof(*dg)) {
		vmci_ioctl_err("datagram is too small (size=%d)\n",
			       send_info.len);
		return -EINVAL;
	}

	dg = memdup_user((void __user *)(uintptr_t)send_info.addr,
			 send_info.len);
	if (IS_ERR(dg)) {
		vmci_ioctl_err(
			"cannot allocate memory to dispatch datagram\n");
		return PTR_ERR(dg);
	}

	if (VMCI_DG_SIZE(dg) != send_info.len) {
		vmci_ioctl_err("datagram size mismatch\n");
		kfree(dg);
		return -EINVAL;
	}

	pr_devel("Datagram dst (handle=0x%x:0x%x) src (handle=0x%x:0x%x), payload (size=%llu bytes)\n",
		 dg->dst.context, dg->dst.resource,
		 dg->src.context, dg->src.resource,
		 (unsigned long long)dg->payload_size);

	/* Get source context id. */
	cid = vmci_ctx_get_id(vmci_host_dev->context);
	send_info.result = vmci_datagram_dispatch(cid, dg, true);
	kfree(dg);

	return copy_to_user(uptr, &send_info, sizeof(send_info)) ? -EFAULT : 0;
}
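
/*
 * Illustrative sketch (an assumption, not part of this driver):
 * userspace builds a datagram (struct vmci_datagram header followed by
 * the payload) and passes its address and total size to the ioctl.
 * The field names match the struct used above; fd, dg and payload_size
 * are invented for the example.
 *
 *	struct vmci_datagram_snd_rcv_info si = {
 *		.addr = (u64)(uintptr_t)dg,		// header + payload
 *		.len  = sizeof(*dg) + payload_size,
 *	};
 *	if (ioctl(fd, IOCTL_VMCI_DATAGRAM_SEND, &si) == 0)
 *		status = si.result;	// VMCI status of the dispatch
 */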

static int vmci_host_do_receive_datagram(struct vmci_host_dev *vmci_host_dev,
					 const char *ioctl_name,
					 void __user *uptr)
{
	struct vmci_datagram_snd_rcv_info recv_info;
	struct vmci_datagram *dg = NULL;
	int retval;
	size_t size;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&recv_info, uptr, sizeof(recv_info)))
		return -EFAULT;

	size = recv_info.len;
	recv_info.result = vmci_ctx_dequeue_datagram(vmci_host_dev->context,
						     &size, &dg);

	if (recv_info.result >= VMCI_SUCCESS) {
		void __user *ubuf = (void __user *)(uintptr_t)recv_info.addr;
		retval = copy_to_user(ubuf, dg, VMCI_DG_SIZE(dg));
		kfree(dg);
		if (retval != 0)
			return -EFAULT;
	}

	return copy_to_user(uptr, &recv_info, sizeof(recv_info)) ? -EFAULT : 0;
}

static int vmci_host_do_alloc_queuepair(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_handle handle;
	int vmci_status;
	int __user *retptr;
	u32 cid;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	cid = vmci_ctx_get_id(vmci_host_dev->context);

	if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
		struct vmci_qp_alloc_info_vmvm alloc_info;
		struct vmci_qp_alloc_info_vmvm __user *info = uptr;

		if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
			return -EFAULT;

		handle = alloc_info.handle;
		retptr = &info->result;

		vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
						alloc_info.peer,
						alloc_info.flags,
						VMCI_NO_PRIVILEGE_FLAGS,
						alloc_info.produce_size,
						alloc_info.consume_size,
						NULL,
						vmci_host_dev->context);

		if (vmci_status == VMCI_SUCCESS)
			vmci_status = VMCI_SUCCESS_QUEUEPAIR_CREATE;
	} else {
		struct vmci_qp_alloc_info alloc_info;
		struct vmci_qp_alloc_info __user *info = uptr;
		struct vmci_qp_page_store page_store;

		if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
			return -EFAULT;

		handle = alloc_info.handle;
		retptr = &info->result;

		page_store.pages = alloc_info.ppn_va;
		page_store.len = alloc_info.num_ppns;

		vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
						alloc_info.peer,
						alloc_info.flags,
						VMCI_NO_PRIVILEGE_FLAGS,
						alloc_info.produce_size,
						alloc_info.consume_size,
						&page_store,
						vmci_host_dev->context);
	}

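	/*
	 * If the status cannot be written back to userspace, unwind a
	 * successful allocation by detaching; otherwise the VMX would be
	 * left without a handle on a queue pair the broker still
	 * considers attached.
	 */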
	if (put_user(vmci_status, retptr)) {
		if (vmci_status >= VMCI_SUCCESS) {
			vmci_status = vmci_qp_broker_detach(handle,
							vmci_host_dev->context);
		}
		return -EFAULT;
	}

	return 0;
}

static int vmci_host_do_queuepair_setva(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_qp_set_va_info set_va_info;
	struct vmci_qp_set_va_info __user *info = uptr;
	s32 result;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
		vmci_ioctl_err("is not allowed\n");
		return -EINVAL;
	}

	if (copy_from_user(&set_va_info, uptr, sizeof(set_va_info)))
		return -EFAULT;

	if (set_va_info.va) {
		/*
		 * VMX is passing down a new VA for the queue
		 * pair mapping.
		 */
		result = vmci_qp_broker_map(set_va_info.handle,
					    vmci_host_dev->context,
					    set_va_info.va);
	} else {
		/*
		 * The queue pair is about to be unmapped by
		 * the VMX.
		 */
		result = vmci_qp_broker_unmap(set_va_info.handle,
					 vmci_host_dev->context, 0);
	}

	return put_user(result, &info->result) ? -EFAULT : 0;
}

static int vmci_host_do_queuepair_setpf(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_qp_page_file_info page_file_info;
	struct vmci_qp_page_file_info __user *info = uptr;
	s32 result;

	if (vmci_host_dev->user_version < VMCI_VERSION_HOSTQP ||
	    vmci_host_dev->user_version >= VMCI_VERSION_NOVMVM) {
		vmci_ioctl_err("not supported on this VMX (version=%d)\n",
			       vmci_host_dev->user_version);
		return -EINVAL;
	}

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&page_file_info, uptr, sizeof(*info)))
		return -EFAULT;

	/*
	 * Communicate success pre-emptively to the caller.  Note that the
	 * basic premise is that it is incumbent upon the caller not to look at
	 * the info.result field until after the ioctl() returns.  And then,
	 * only if the ioctl() result indicates no error.  We send up the
	 * SUCCESS status before calling SetPageStore() because failing
	 * to copy up the result code means unwinding the SetPageStore().
	 *
	 * It turns out the logic to unwind a SetPageStore() opens a can of
	 * worms.  For example, if a host had created the queue_pair and a
	 * guest attaches and SetPageStore() is successful but writing success
	 * fails, then ... the host has to be stopped from writing (any more)
	 * data into the queue_pair.  That means an additional test in the
	 * VMCI_Enqueue() code path.  Ugh.
	 */

	if (put_user(VMCI_SUCCESS, &info->result)) {
		/*
		 * In this case, we can't write the result field of the
		 * caller's info block.  So, we don't even attempt the
		 * SetPageStore() call.
		 */
		return -EFAULT;
	}

	result = vmci_qp_broker_set_page_store(page_file_info.handle,
						page_file_info.produce_va,
						page_file_info.consume_va,
						vmci_host_dev->context);
	if (result < VMCI_SUCCESS) {
		if (put_user(result, &info->result)) {
			/*
			 * Note that in this case the SetPageStore()
			 * call failed, but we were unable to
			 * communicate that to the caller (because the
			 * copy_to_user() call failed).  So, if we
			 * simply return an error (in this case
			 * -EFAULT) then the caller will know that the
			 * SetPageStore() failed even though we couldn't
			 * put the result code in the result field and
			 * indicate exactly why it failed.
			 *
			 * That says nothing about the issue where we
			 * were once able to write to the caller's info
			 * memory and now can't.  Something more
			 * serious is probably going on than the fact
			 * that SetPageStore() didn't work.
			 */
			return -EFAULT;
		}
	}

	return 0;
}

static int vmci_host_do_qp_detach(struct vmci_host_dev *vmci_host_dev,
				  const char *ioctl_name,
				  void __user *uptr)
{
	struct vmci_qp_dtch_info detach_info;
	struct vmci_qp_dtch_info __user *info = uptr;
	s32 result;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&detach_info, uptr, sizeof(detach_info)))
		return -EFAULT;

	result = vmci_qp_broker_detach(detach_info.handle,
				       vmci_host_dev->context);
	if (result == VMCI_SUCCESS &&
	    vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
		result = VMCI_SUCCESS_LAST_DETACH;
	}

	return put_user(result, &info->result) ? -EFAULT : 0;
}

static int vmci_host_do_ctx_add_notify(struct vmci_host_dev *vmci_host_dev,
				       const char *ioctl_name,
				       void __user *uptr)
{
	struct vmci_ctx_info ar_info;
	struct vmci_ctx_info __user *info = uptr;
	s32 result;
	u32 cid;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	result = vmci_ctx_add_notification(cid, ar_info.remote_cid);

	return put_user(result, &info->result) ? -EFAULT : 0;
}

static int vmci_host_do_ctx_remove_notify(struct vmci_host_dev *vmci_host_dev,
					  const char *ioctl_name,
					  void __user *uptr)
{
	struct vmci_ctx_info ar_info;
	struct vmci_ctx_info __user *info = uptr;
	u32 cid;
	int result;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	result = vmci_ctx_remove_notification(cid,
					      ar_info.remote_cid);

	return put_user(result, &info->result) ? -EFAULT : 0;
}

static int vmci_host_do_ctx_get_cpt_state(struct vmci_host_dev *vmci_host_dev,
					  const char *ioctl_name,
					  void __user *uptr)
{
	struct vmci_ctx_chkpt_buf_info get_info;
	u32 cid;
	void *cpt_buf;
	int retval;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&get_info, uptr, sizeof(get_info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	get_info.result = vmci_ctx_get_chkpt_state(cid, get_info.cpt_type,
						&get_info.buf_size, &cpt_buf);
	if (get_info.result == VMCI_SUCCESS && get_info.buf_size) {
		void __user *ubuf = (void __user *)(uintptr_t)get_info.cpt_buf;
		retval = copy_to_user(ubuf, cpt_buf, get_info.buf_size);
		kfree(cpt_buf);

		if (retval)
			return -EFAULT;
	}

	return copy_to_user(uptr, &get_info, sizeof(get_info)) ? -EFAULT : 0;
}

static int vmci_host_do_ctx_set_cpt_state(struct vmci_host_dev *vmci_host_dev,
					  const char *ioctl_name,
					  void __user *uptr)
{
	struct vmci_ctx_chkpt_buf_info set_info;
	u32 cid;
	void *cpt_buf;
	int retval;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&set_info, uptr, sizeof(set_info)))
		return -EFAULT;

	cpt_buf = kmalloc(set_info.buf_size, GFP_KERNEL);
	if (!cpt_buf) {
		vmci_ioctl_err(
			"cannot allocate memory to set cpt state (type=%d)\n",
			set_info.cpt_type);
		return -ENOMEM;
	}

	if (copy_from_user(cpt_buf, (void __user *)(uintptr_t)set_info.cpt_buf,
			   set_info.buf_size)) {
		retval = -EFAULT;
		goto out;
	}

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	set_info.result = vmci_ctx_set_chkpt_state(cid, set_info.cpt_type,
						   set_info.buf_size, cpt_buf);

	retval = copy_to_user(uptr, &set_info, sizeof(set_info)) ? -EFAULT : 0;

out:
	kfree(cpt_buf);
	return retval;
}

static int vmci_host_do_get_context_id(struct vmci_host_dev *vmci_host_dev,
				       const char *ioctl_name,
				       void __user *uptr)
{
	u32 __user *u32ptr = uptr;

	return put_user(VMCI_HOST_CONTEXT_ID, u32ptr) ? -EFAULT : 0;
}

static int vmci_host_do_set_notify(struct vmci_host_dev *vmci_host_dev,
				   const char *ioctl_name,
				   void __user *uptr)
{
	struct vmci_set_notify_info notify_info;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&notify_info, uptr, sizeof(notify_info)))
		return -EFAULT;

	if (notify_info.notify_uva) {
		notify_info.result =
			vmci_host_setup_notify(vmci_host_dev->context,
					       notify_info.notify_uva);
	} else {
		vmci_ctx_unset_notify(vmci_host_dev->context);
		notify_info.result = VMCI_SUCCESS;
	}

	return copy_to_user(uptr, &notify_info, sizeof(notify_info)) ?
		-EFAULT : 0;
}

static int vmci_host_do_notify_resource(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_dbell_notify_resource_info info;
	u32 cid;

	if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) {
		vmci_ioctl_err("invalid for current VMX versions\n");
		return -EINVAL;
	}

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&info, uptr, sizeof(info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);

	switch (info.action) {
	case VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY:
		if (info.resource == VMCI_NOTIFY_RESOURCE_DOOR_BELL) {
			u32 flags = VMCI_NO_PRIVILEGE_FLAGS;
			info.result = vmci_ctx_notify_dbell(cid, info.handle,
							    flags);
		} else {
			info.result = VMCI_ERROR_UNAVAILABLE;
		}
		break;

	case VMCI_NOTIFY_RESOURCE_ACTION_CREATE:
		info.result = vmci_ctx_dbell_create(cid, info.handle);
		break;

	case VMCI_NOTIFY_RESOURCE_ACTION_DESTROY:
		info.result = vmci_ctx_dbell_destroy(cid, info.handle);
		break;

	default:
		vmci_ioctl_err("got unknown action (action=%d)\n",
			       info.action);
		info.result = VMCI_ERROR_INVALID_ARGS;
	}

	return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0;
}

static int vmci_host_do_recv_notifications(struct vmci_host_dev *vmci_host_dev,
					   const char *ioctl_name,
					   void __user *uptr)
{
	struct vmci_ctx_notify_recv_info info;
	struct vmci_handle_arr *db_handle_array;
	struct vmci_handle_arr *qp_handle_array;
	void __user *ubuf;
	u32 cid;
	int retval = 0;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) {
		vmci_ioctl_err("not supported for the current vmx version\n");
		return -EINVAL;
	}

	if (copy_from_user(&info, uptr, sizeof(info)))
		return -EFAULT;

	if ((info.db_handle_buf_size && !info.db_handle_buf_uva) ||
	    (info.qp_handle_buf_size && !info.qp_handle_buf_uva)) {
		return -EINVAL;
	}

	cid = vmci_ctx_get_id(vmci_host_dev->context);

	info.result = vmci_ctx_rcv_notifications_get(cid,
				&db_handle_array, &qp_handle_array);
	if (info.result != VMCI_SUCCESS)
		return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0;

	ubuf = (void __user *)(uintptr_t)info.db_handle_buf_uva;
	info.result = drv_cp_harray_to_user(ubuf, &info.db_handle_buf_size,
					    db_handle_array, &retval);
	if (info.result == VMCI_SUCCESS && !retval) {
		ubuf = (void __user *)(uintptr_t)info.qp_handle_buf_uva;
		info.result = drv_cp_harray_to_user(ubuf,
						    &info.qp_handle_buf_size,
						    qp_handle_array, &retval);
	}

	if (!retval && copy_to_user(uptr, &info, sizeof(info)))
		retval = -EFAULT;

	vmci_ctx_rcv_notifications_release(cid,
				db_handle_array, qp_handle_array,
				info.result == VMCI_SUCCESS && !retval);

	return retval;
}

static long vmci_host_unlocked_ioctl(struct file *filp,
				     unsigned int iocmd, unsigned long ioarg)
{
#define VMCI_DO_IOCTL(ioctl_name, ioctl_fn) do {			\
		const char *name = __stringify(IOCTL_VMCI_ ## ioctl_name); \
		return vmci_host_do_ ## ioctl_fn(			\
			vmci_host_dev, name, uptr);			\
	} while (0)
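
	/*
	 * For example, VMCI_DO_IOCTL(INIT_CONTEXT, init_context) expands
	 * (roughly) to:
	 *
	 *	do {
	 *		const char *name = "IOCTL_VMCI_INIT_CONTEXT";
	 *		return vmci_host_do_init_context(vmci_host_dev,
	 *						 name, uptr);
	 *	} while (0)
	 */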

	struct vmci_host_dev *vmci_host_dev = filp->private_data;
	void __user *uptr = (void __user *)ioarg;

	switch (iocmd) {
	case IOCTL_VMCI_INIT_CONTEXT:
		VMCI_DO_IOCTL(INIT_CONTEXT, init_context);
	case IOCTL_VMCI_DATAGRAM_SEND:
		VMCI_DO_IOCTL(DATAGRAM_SEND, send_datagram);
	case IOCTL_VMCI_DATAGRAM_RECEIVE:
		VMCI_DO_IOCTL(DATAGRAM_RECEIVE, receive_datagram);
	case IOCTL_VMCI_QUEUEPAIR_ALLOC:
		VMCI_DO_IOCTL(QUEUEPAIR_ALLOC, alloc_queuepair);
	case IOCTL_VMCI_QUEUEPAIR_SETVA:
		VMCI_DO_IOCTL(QUEUEPAIR_SETVA, queuepair_setva);
	case IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE:
		VMCI_DO_IOCTL(QUEUEPAIR_SETPAGEFILE, queuepair_setpf);
	case IOCTL_VMCI_QUEUEPAIR_DETACH:
		VMCI_DO_IOCTL(QUEUEPAIR_DETACH, qp_detach);
	case IOCTL_VMCI_CTX_ADD_NOTIFICATION:
		VMCI_DO_IOCTL(CTX_ADD_NOTIFICATION, ctx_add_notify);
	case IOCTL_VMCI_CTX_REMOVE_NOTIFICATION:
		VMCI_DO_IOCTL(CTX_REMOVE_NOTIFICATION, ctx_remove_notify);
	case IOCTL_VMCI_CTX_GET_CPT_STATE:
		VMCI_DO_IOCTL(CTX_GET_CPT_STATE, ctx_get_cpt_state);
	case IOCTL_VMCI_CTX_SET_CPT_STATE:
		VMCI_DO_IOCTL(CTX_SET_CPT_STATE, ctx_set_cpt_state);
	case IOCTL_VMCI_GET_CONTEXT_ID:
		VMCI_DO_IOCTL(GET_CONTEXT_ID, get_context_id);
	case IOCTL_VMCI_SET_NOTIFY:
		VMCI_DO_IOCTL(SET_NOTIFY, set_notify);
	case IOCTL_VMCI_NOTIFY_RESOURCE:
		VMCI_DO_IOCTL(NOTIFY_RESOURCE, notify_resource);
	case IOCTL_VMCI_NOTIFICATIONS_RECEIVE:
		VMCI_DO_IOCTL(NOTIFICATIONS_RECEIVE, recv_notifications);

	case IOCTL_VMCI_VERSION:
	case IOCTL_VMCI_VERSION2:
		return vmci_host_get_version(vmci_host_dev, iocmd, uptr);

	default:
		pr_devel("%s: Unknown ioctl (iocmd=%d)\n", __func__, iocmd);
		return -EINVAL;
	}

#undef VMCI_DO_IOCTL
}

static const struct file_operations vmuser_fops = {
	.owner		= THIS_MODULE,
	.open		= vmci_host_open,
	.release	= vmci_host_close,
	.poll		= vmci_host_poll,
	.unlocked_ioctl	= vmci_host_unlocked_ioctl,
	.compat_ioctl	= vmci_host_unlocked_ioctl,
};

static struct miscdevice vmci_host_miscdev = {
	 .name = "vmci",
	 .minor = MISC_DYNAMIC_MINOR,
	 .fops = &vmuser_fops,
};

int __init vmci_host_init(void)
{
	int error;

	host_context = vmci_ctx_create(VMCI_HOST_CONTEXT_ID,
					VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS,
					-1, VMCI_VERSION, NULL);
	if (IS_ERR(host_context)) {
		error = PTR_ERR(host_context);
		pr_warn("Failed to initialize VMCIContext (error=%d)\n",
			error);
		return error;
	}

	error = misc_register(&vmci_host_miscdev);
	if (error) {
		pr_warn("Module registration error (name=%s, major=%d, minor=%d, err=%d)\n",
			vmci_host_miscdev.name,
			MISC_MAJOR, vmci_host_miscdev.minor,
			error);
		pr_warn("Unable to initialize host personality\n");
		vmci_ctx_destroy(host_context);
		return error;
	}

	pr_info("VMCI host device registered (name=%s, major=%d, minor=%d)\n",
		vmci_host_miscdev.name, MISC_MAJOR, vmci_host_miscdev.minor);

	vmci_host_device_initialized = true;
	return 0;
}

void __exit vmci_host_exit(void)
{
	vmci_host_device_initialized = false;

	misc_deregister(&vmci_host_miscdev);
	vmci_ctx_destroy(host_context);
	vmci_qp_broker_exit();

	pr_debug("VMCI host driver module unloaded\n");
}