/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/io.h>

#include "vmci_handle_array.h"
#include "vmci_queue_pair.h"
#include "vmci_datagram.h"
#include "vmci_doorbell.h"
#include "vmci_resource.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"

#define VMCI_UTIL_NUM_RESOURCES 1

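/*
 * Resource types and actions the VMX passes in the 'resource' and 'action'
 * fields of struct vmci_dbell_notify_resource_info for the
 * IOCTL_VMCI_NOTIFY_RESOURCE ioctl; see vmci_host_do_notify_resource()
 * below.
 */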
enum {
	VMCI_NOTIFY_RESOURCE_QUEUE_PAIR = 0,
	VMCI_NOTIFY_RESOURCE_DOOR_BELL = 1,
};

enum {
	VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY = 0,
	VMCI_NOTIFY_RESOURCE_ACTION_CREATE = 1,
	VMCI_NOTIFY_RESOURCE_ACTION_DESTROY = 2,
};

/*
 * VMCI driver initialization. This block can also be used to
 * pass initial group membership etc.
 */
struct vmci_init_blk {
	u32 cid;
	u32 flags;
};
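
/*
 * Illustrative sketch of how a VMX-like userspace client hands this block
 * to the driver.  It is not part of this file and assumes the IOCTL_VMCI_*
 * request numbers from VMware's userlevel headers; 'desired_cid' is a
 * placeholder for whatever cid policy the VMX implements:
 *
 *	int fd = open("/dev/vmci", O_RDWR);
 *	int version = VMCI_VERSION;
 *	ioctl(fd, IOCTL_VMCI_VERSION2, &version);
 *	struct vmci_init_blk blk = { .cid = desired_cid, .flags = 0 };
 *	ioctl(fd, IOCTL_VMCI_INIT_CONTEXT, &blk);
 *	// on success blk.cid holds the id actually assigned to the context
 */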

/* VMCIqueue_pairAllocInfo_VMToVM */
struct vmci_qp_alloc_info_vmvm {
	struct vmci_handle handle;
	u32 peer;
	u32 flags;
	u64 produce_size;
	u64 consume_size;
	u64 produce_page_file;	  /* User VA. */
	u64 consume_page_file;	  /* User VA. */
	u64 produce_page_file_size;  /* Size of the file name array. */
	u64 consume_page_file_size;  /* Size of the file name array. */
	s32 result;
	u32 _pad;
};

/* VMCISetNotifyInfo: Used to pass notify flag's address to the host driver. */
struct vmci_set_notify_info {
	u64 notify_uva;
	s32 result;
	u32 _pad;
};
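
/*
 * notify_uva points at a single byte of VMX memory; once mapped via
 * vmci_host_setup_notify(), the driver sets that byte whenever the context
 * has pending work (see vmci_ctx_check_signal_notify()).
 */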

/*
 * Per-instance host state
 */
struct vmci_host_dev {
	struct vmci_ctx *context;
	int user_version;
	enum vmci_obj_type ct_type;
	struct mutex lock;  /* Mutex lock for vmci context access */
};
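
/*
 * A vmci_host_dev is allocated in vmci_host_open() for each open of
 * /dev/vmci and only becomes a full VMCI context (ct_type ==
 * VMCIOBJ_CONTEXT) once the caller issues IOCTL_VMCI_INIT_CONTEXT.
 */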

static struct vmci_ctx *host_context;
static bool vmci_host_device_initialized;
static atomic_t vmci_host_active_users = ATOMIC_INIT(0);
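
/*
 * host_context represents the host endpoint itself (VMCI_HOST_CONTEXT_ID);
 * vmci_host_device_initialized and vmci_host_active_users feed
 * vmci_host_code_active() below.
 */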

/*
 * Determines whether the VMCI host personality is
 * available. Since the core functionality of the host driver is
 * always present, all guests could possibly use the host
 * personality. However, to minimize the deviation from the
 * pre-unified driver state of affairs, we only consider the host
 * device active if there is no active guest device or if there
 * are VMX'en with active VMCI contexts using the host device.
 */
bool vmci_host_code_active(void)
{
	return vmci_host_device_initialized &&
	    (!vmci_guest_code_active() ||
	     atomic_read(&vmci_host_active_users) > 0);
}

/*
 * Called on open of /dev/vmci.
 */
static int vmci_host_open(struct inode *inode, struct file *filp)
{
	struct vmci_host_dev *vmci_host_dev;

	vmci_host_dev = kzalloc(sizeof(struct vmci_host_dev), GFP_KERNEL);
	if (vmci_host_dev == NULL)
		return -ENOMEM;

	vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;
	mutex_init(&vmci_host_dev->lock);
	filp->private_data = vmci_host_dev;

	return 0;
}

/*
 * Called on close of /dev/vmci, most often when the process
 * exits.
 */
static int vmci_host_close(struct inode *inode, struct file *filp)
{
	struct vmci_host_dev *vmci_host_dev = filp->private_data;

	if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
		vmci_ctx_destroy(vmci_host_dev->context);
		vmci_host_dev->context = NULL;

		/*
		 * The number of active contexts is used to track whether any
		 * VMX'en are using the host personality. It is incremented when
		 * a context is created through the IOCTL_VMCI_INIT_CONTEXT
		 * ioctl.
		 */
		atomic_dec(&vmci_host_active_users);
	}
	vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;

	kfree(vmci_host_dev);
	filp->private_data = NULL;
	return 0;
}

/*
 * This is used to wake up the VMX when a VMCI call arrives, or
 * to wake up select() or poll() at the next clock tick.
 */
static unsigned int vmci_host_poll(struct file *filp, poll_table *wait)
{
	struct vmci_host_dev *vmci_host_dev = filp->private_data;
	struct vmci_ctx *context = vmci_host_dev->context;
	unsigned int mask = 0;

	if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
		/* Check for VMCI calls to this VM context. */
		if (wait)
			poll_wait(filp, &context->host_context.wait_queue,
				  wait);

		spin_lock(&context->lock);
		if (context->pending_datagrams > 0 ||
		    vmci_handle_arr_get_size(
				context->pending_doorbell_array) > 0) {
			mask = POLLIN;
		}
		spin_unlock(&context->lock);
	}
	return mask;
}

/*
 * Copies the handles of a handle array into a user buffer, and
 * returns the new length in userBufferSize. If the copy to the
 * user buffer fails, the function still returns VMCI_SUCCESS,
 * but retval != 0.
 */
static int drv_cp_harray_to_user(void __user *user_buf_uva,
				 u64 *user_buf_size,
				 struct vmci_handle_arr *handle_array,
				 int *retval)
{
	u32 array_size = 0;
	struct vmci_handle *handles;

	if (handle_array)
		array_size = vmci_handle_arr_get_size(handle_array);

	if (array_size * sizeof(*handles) > *user_buf_size)
		return VMCI_ERROR_MORE_DATA;

	*user_buf_size = array_size * sizeof(*handles);
	if (*user_buf_size)
		*retval = copy_to_user(user_buf_uva,
				       vmci_handle_arr_get_handles
				       (handle_array), *user_buf_size);

	return VMCI_SUCCESS;
}

/*
 * Sets up a given context for notify to work. Maps the notify
 * boolean in user VA into kernel space.
 */
static int vmci_host_setup_notify(struct vmci_ctx *context,
				  unsigned long uva)
{
	int retval;

	if (context->notify_page) {
		pr_devel("%s: Notify mechanism is already set up\n", __func__);
		return VMCI_ERROR_DUPLICATE_ENTRY;
	}

	/*
	 * We are using 'bool' internally, but let's make sure we are
	 * explicit about the size.
	 */
	BUILD_BUG_ON(sizeof(bool) != sizeof(u8));
	if (!access_ok(VERIFY_WRITE, (void __user *)uva, sizeof(u8)))
		return VMCI_ERROR_GENERIC;

	/*
	 * Lock physical page backing a given user VA.
	 */
	retval = get_user_pages_fast(uva, 1, 1, &context->notify_page);
	if (retval != 1) {
		context->notify_page = NULL;
		return VMCI_ERROR_GENERIC;
	}

	/*
	 * Map the locked page and set up notify pointer.
	 */
	context->notify = kmap(context->notify_page) + (uva & (PAGE_SIZE - 1));
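	/*
	 * The page reference and mapping taken here are expected to be
	 * released when the notify mechanism is torn down, i.e. on
	 * vmci_ctx_unset_notify() or context destruction.
	 */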
	vmci_ctx_check_signal_notify(context);

	return VMCI_SUCCESS;
}

static int vmci_host_get_version(struct vmci_host_dev *vmci_host_dev,
				 unsigned int cmd, void __user *uptr)
{
	if (cmd == IOCTL_VMCI_VERSION2) {
		int __user *vptr = uptr;
		if (get_user(vmci_host_dev->user_version, vptr))
			return -EFAULT;
	}

	/*
	 * The basic logic here is:
	 *
	 * If the user sends in a version of 0, tell it our version.
	 * If the user didn't send in a version, tell it our version.
	 * If the user sent in an old version, tell it -its- version.
	 * If the user sent in a newer version, tell it our version.
	 *
	 * The rationale behind telling the caller its version is that
	 * Workstation 6.5 required that the VMX and the VMCI kernel
	 * module be version sync'd.  All new VMX users will be
	 * programmed to handle the VMCI kernel module version.
	 */

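	/*
	 * For example, a pre-host-queue-pair VMX reporting
	 * VMCI_VERSION_PREHOSTQP gets its own version echoed back below,
	 * while a VMX reporting something newer than VMCI_VERSION simply
	 * gets VMCI_VERSION.
	 */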
	if (vmci_host_dev->user_version > 0 &&
	    vmci_host_dev->user_version < VMCI_VERSION_HOSTQP) {
		return vmci_host_dev->user_version;
	}

	return VMCI_VERSION;
}

#define vmci_ioctl_err(fmt, ...)	\
	pr_devel("%s: " fmt, ioctl_name, ##__VA_ARGS__)

static int vmci_host_do_init_context(struct vmci_host_dev *vmci_host_dev,
				     const char *ioctl_name,
				     void __user *uptr)
{
	struct vmci_init_blk init_block;
	const struct cred *cred;
	int retval;

	if (copy_from_user(&init_block, uptr, sizeof(init_block))) {
		vmci_ioctl_err("error reading init block\n");
		return -EFAULT;
	}

	mutex_lock(&vmci_host_dev->lock);

	if (vmci_host_dev->ct_type != VMCIOBJ_NOT_SET) {
		vmci_ioctl_err("received VMCI init on initialized handle\n");
		retval = -EINVAL;
		goto out;
	}

	if (init_block.flags & ~VMCI_PRIVILEGE_FLAG_RESTRICTED) {
		vmci_ioctl_err("unsupported VMCI restriction flag\n");
		retval = -EINVAL;
		goto out;
	}

	cred = get_current_cred();
	vmci_host_dev->context = vmci_ctx_create(init_block.cid,
						 init_block.flags, 0,
						 vmci_host_dev->user_version,
						 cred);
	put_cred(cred);
	if (IS_ERR(vmci_host_dev->context)) {
		retval = PTR_ERR(vmci_host_dev->context);
		vmci_ioctl_err("error initializing context\n");
		goto out;
	}

	/*
	 * Copy cid to userlevel; we do this to allow the VMX
	 * to enforce its policy on cid generation.
	 */
	init_block.cid = vmci_ctx_get_id(vmci_host_dev->context);
	if (copy_to_user(uptr, &init_block, sizeof(init_block))) {
		vmci_ctx_destroy(vmci_host_dev->context);
		vmci_host_dev->context = NULL;
		vmci_ioctl_err("error writing init block\n");
		retval = -EFAULT;
		goto out;
	}

	vmci_host_dev->ct_type = VMCIOBJ_CONTEXT;
	atomic_inc(&vmci_host_active_users);

	retval = 0;

out:
	mutex_unlock(&vmci_host_dev->lock);
	return retval;
}

static int vmci_host_do_send_datagram(struct vmci_host_dev *vmci_host_dev,
				      const char *ioctl_name,
				      void __user *uptr)
{
	struct vmci_datagram_snd_rcv_info send_info;
	struct vmci_datagram *dg = NULL;
	u32 cid;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&send_info, uptr, sizeof(send_info)))
		return -EFAULT;

	if (send_info.len > VMCI_MAX_DG_SIZE) {
		vmci_ioctl_err("datagram is too big (size=%d)\n",
			       send_info.len);
		return -EINVAL;
	}

	if (send_info.len < sizeof(*dg)) {
		vmci_ioctl_err("datagram is too small (size=%d)\n",
			       send_info.len);
		return -EINVAL;
	}

	dg = kmalloc(send_info.len, GFP_KERNEL);
	if (!dg) {
		vmci_ioctl_err(
			"cannot allocate memory to dispatch datagram\n");
		return -ENOMEM;
	}

	if (copy_from_user(dg, (void __user *)(uintptr_t)send_info.addr,
			   send_info.len)) {
		vmci_ioctl_err("error getting datagram\n");
		kfree(dg);
		return -EFAULT;
	}

	pr_devel("Datagram dst (handle=0x%x:0x%x) src (handle=0x%x:0x%x), payload (size=%llu bytes)\n",
		 dg->dst.context, dg->dst.resource,
		 dg->src.context, dg->src.resource,
		 (unsigned long long)dg->payload_size);

	/* Get source context id. */
	cid = vmci_ctx_get_id(vmci_host_dev->context);
	send_info.result = vmci_datagram_dispatch(cid, dg, true);
	kfree(dg);

	return copy_to_user(uptr, &send_info, sizeof(send_info)) ? -EFAULT : 0;
}

static int vmci_host_do_receive_datagram(struct vmci_host_dev *vmci_host_dev,
					 const char *ioctl_name,
					 void __user *uptr)
{
	struct vmci_datagram_snd_rcv_info recv_info;
	struct vmci_datagram *dg = NULL;
	int retval;
	size_t size;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&recv_info, uptr, sizeof(recv_info)))
		return -EFAULT;

	size = recv_info.len;
	recv_info.result = vmci_ctx_dequeue_datagram(vmci_host_dev->context,
						     &size, &dg);

	if (recv_info.result >= VMCI_SUCCESS) {
		void __user *ubuf = (void __user *)(uintptr_t)recv_info.addr;
		retval = copy_to_user(ubuf, dg, VMCI_DG_SIZE(dg));
		kfree(dg);
		if (retval != 0)
			return -EFAULT;
	}

	return copy_to_user(uptr, &recv_info, sizeof(recv_info)) ? -EFAULT : 0;
}

static int vmci_host_do_alloc_queuepair(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_handle handle;
	int vmci_status;
	int __user *retptr;
	u32 cid;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	cid = vmci_ctx_get_id(vmci_host_dev->context);

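	/*
	 * VMX versions older than VMCI_VERSION_NOVMVM still pass the
	 * vm-to-vm flavor of the alloc info (page file names rather than a
	 * set of PPNs), so no page store is handed to the broker in that
	 * case; newer VMX versions supply a PPN array that becomes the
	 * queue pair's page store.
	 */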
	if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
		struct vmci_qp_alloc_info_vmvm alloc_info;
		struct vmci_qp_alloc_info_vmvm __user *info = uptr;

		if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
			return -EFAULT;

		handle = alloc_info.handle;
		retptr = &info->result;

		vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
						alloc_info.peer,
						alloc_info.flags,
						VMCI_NO_PRIVILEGE_FLAGS,
						alloc_info.produce_size,
						alloc_info.consume_size,
						NULL,
						vmci_host_dev->context);

		if (vmci_status == VMCI_SUCCESS)
			vmci_status = VMCI_SUCCESS_QUEUEPAIR_CREATE;
	} else {
		struct vmci_qp_alloc_info alloc_info;
		struct vmci_qp_alloc_info __user *info = uptr;
		struct vmci_qp_page_store page_store;

		if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
			return -EFAULT;

		handle = alloc_info.handle;
		retptr = &info->result;

		page_store.pages = alloc_info.ppn_va;
		page_store.len = alloc_info.num_ppns;

		vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
						alloc_info.peer,
						alloc_info.flags,
						VMCI_NO_PRIVILEGE_FLAGS,
						alloc_info.produce_size,
						alloc_info.consume_size,
						&page_store,
						vmci_host_dev->context);
	}

	if (put_user(vmci_status, retptr)) {
		if (vmci_status >= VMCI_SUCCESS) {
			vmci_status = vmci_qp_broker_detach(handle,
							vmci_host_dev->context);
		}
		return -EFAULT;
	}

	return 0;
}

static int vmci_host_do_queuepair_setva(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_qp_set_va_info set_va_info;
	struct vmci_qp_set_va_info __user *info = uptr;
	s32 result;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
		vmci_ioctl_err("is not allowed\n");
		return -EINVAL;
	}

	if (copy_from_user(&set_va_info, uptr, sizeof(set_va_info)))
		return -EFAULT;

	if (set_va_info.va) {
		/*
		 * VMX is passing down a new VA for the queue
		 * pair mapping.
		 */
		result = vmci_qp_broker_map(set_va_info.handle,
					    vmci_host_dev->context,
					    set_va_info.va);
	} else {
		/*
		 * The queue pair is about to be unmapped by
		 * the VMX.
		 */
		result = vmci_qp_broker_unmap(set_va_info.handle,
					 vmci_host_dev->context, 0);
	}

	return put_user(result, &info->result) ? -EFAULT : 0;
}

static int vmci_host_do_queuepair_setpf(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_qp_page_file_info page_file_info;
	struct vmci_qp_page_file_info __user *info = uptr;
	s32 result;

	if (vmci_host_dev->user_version < VMCI_VERSION_HOSTQP ||
	    vmci_host_dev->user_version >= VMCI_VERSION_NOVMVM) {
		vmci_ioctl_err("not supported on this VMX (version=%d)\n",
			       vmci_host_dev->user_version);
		return -EINVAL;
	}

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&page_file_info, uptr, sizeof(*info)))
		return -EFAULT;

	/*
	 * Communicate success pre-emptively to the caller.  Note that the
	 * basic premise is that it is incumbent upon the caller not to look at
	 * the info.result field until after the ioctl() returns.  And then,
	 * only if the ioctl() result indicates no error.  We send up the
	 * SUCCESS status before calling SetPageStore() because failing
	 * to copy up the result code means unwinding the SetPageStore().
	 *
	 * It turns out the logic to unwind a SetPageStore() opens a can of
	 * worms.  For example, if a host had created the queue_pair and a
	 * guest attaches and SetPageStore() is successful but writing success
	 * fails, then ... the host has to be stopped from writing (anymore)
	 * data into the queue_pair.  That means an additional test in the
	 * VMCI_Enqueue() code path.  Ugh.
	 */

	if (put_user(VMCI_SUCCESS, &info->result)) {
		/*
		 * In this case, we can't write a result field of the
		 * caller's info block.  So, we don't even try to
		 * SetPageStore().
		 */
		return -EFAULT;
	}

	result = vmci_qp_broker_set_page_store(page_file_info.handle,
						page_file_info.produce_va,
						page_file_info.consume_va,
						vmci_host_dev->context);
	if (result < VMCI_SUCCESS) {
		if (put_user(result, &info->result)) {
			/*
			 * Note that in this case the SetPageStore()
			 * call failed but we were unable to
			 * communicate that to the caller (because the
			 * copy_to_user() call failed).  So, if we
			 * simply return an error (in this case
			 * -EFAULT) then the caller will know that the
			 * SetPageStore failed even though we couldn't
			 * put the result code in the result field and
			 * indicate exactly why it failed.
			 *
			 * That says nothing about the issue where we
			 * were once able to write to the caller's info
			 * memory and now can't.  Something more
			 * serious is probably going on than the fact
			 * that SetPageStore() didn't work.
			 */
			return -EFAULT;
		}
	}

	return 0;
}

static int vmci_host_do_qp_detach(struct vmci_host_dev *vmci_host_dev,
				  const char *ioctl_name,
				  void __user *uptr)
{
	struct vmci_qp_dtch_info detach_info;
	struct vmci_qp_dtch_info __user *info = uptr;
	s32 result;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&detach_info, uptr, sizeof(detach_info)))
		return -EFAULT;

	result = vmci_qp_broker_detach(detach_info.handle,
				       vmci_host_dev->context);
	if (result == VMCI_SUCCESS &&
	    vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
		result = VMCI_SUCCESS_LAST_DETACH;
	}

	return put_user(result, &info->result) ? -EFAULT : 0;
}

static int vmci_host_do_ctx_add_notify(struct vmci_host_dev *vmci_host_dev,
				       const char *ioctl_name,
				       void __user *uptr)
{
	struct vmci_ctx_info ar_info;
	struct vmci_ctx_info __user *info = uptr;
	s32 result;
	u32 cid;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	result = vmci_ctx_add_notification(cid, ar_info.remote_cid);

	return put_user(result, &info->result) ? -EFAULT : 0;
}

static int vmci_host_do_ctx_remove_notify(struct vmci_host_dev *vmci_host_dev,
					  const char *ioctl_name,
					  void __user *uptr)
{
	struct vmci_ctx_info ar_info;
	struct vmci_ctx_info __user *info = uptr;
	u32 cid;
	int result;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	result = vmci_ctx_remove_notification(cid,
					      ar_info.remote_cid);

	return put_user(result, &info->result) ? -EFAULT : 0;
}

static int vmci_host_do_ctx_get_cpt_state(struct vmci_host_dev *vmci_host_dev,
					  const char *ioctl_name,
					  void __user *uptr)
{
	struct vmci_ctx_chkpt_buf_info get_info;
	u32 cid;
	void *cpt_buf;
	int retval;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&get_info, uptr, sizeof(get_info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	get_info.result = vmci_ctx_get_chkpt_state(cid, get_info.cpt_type,
						&get_info.buf_size, &cpt_buf);
	if (get_info.result == VMCI_SUCCESS && get_info.buf_size) {
		void __user *ubuf = (void __user *)(uintptr_t)get_info.cpt_buf;
		retval = copy_to_user(ubuf, cpt_buf, get_info.buf_size);
		kfree(cpt_buf);

		if (retval)
			return -EFAULT;
	}

	return copy_to_user(uptr, &get_info, sizeof(get_info)) ? -EFAULT : 0;
}

static int vmci_host_do_ctx_set_cpt_state(struct vmci_host_dev *vmci_host_dev,
					  const char *ioctl_name,
					  void __user *uptr)
{
	struct vmci_ctx_chkpt_buf_info set_info;
	u32 cid;
	void *cpt_buf;
	int retval;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&set_info, uptr, sizeof(set_info)))
		return -EFAULT;

	cpt_buf = kmalloc(set_info.buf_size, GFP_KERNEL);
	if (!cpt_buf) {
		vmci_ioctl_err(
			"cannot allocate memory to set cpt state (type=%d)\n",
			set_info.cpt_type);
		return -ENOMEM;
	}

	if (copy_from_user(cpt_buf, (void __user *)(uintptr_t)set_info.cpt_buf,
			   set_info.buf_size)) {
		retval = -EFAULT;
		goto out;
	}

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	set_info.result = vmci_ctx_set_chkpt_state(cid, set_info.cpt_type,
						   set_info.buf_size, cpt_buf);

	retval = copy_to_user(uptr, &set_info, sizeof(set_info)) ? -EFAULT : 0;

out:
	kfree(cpt_buf);
	return retval;
}

static int vmci_host_do_get_context_id(struct vmci_host_dev *vmci_host_dev,
				       const char *ioctl_name,
				       void __user *uptr)
{
	u32 __user *u32ptr = uptr;

	return put_user(VMCI_HOST_CONTEXT_ID, u32ptr) ? -EFAULT : 0;
}

static int vmci_host_do_set_notify(struct vmci_host_dev *vmci_host_dev,
				   const char *ioctl_name,
				   void __user *uptr)
{
	struct vmci_set_notify_info notify_info;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&notify_info, uptr, sizeof(notify_info)))
		return -EFAULT;

	if (notify_info.notify_uva) {
		notify_info.result =
			vmci_host_setup_notify(vmci_host_dev->context,
					       notify_info.notify_uva);
	} else {
		vmci_ctx_unset_notify(vmci_host_dev->context);
		notify_info.result = VMCI_SUCCESS;
	}

	return copy_to_user(uptr, &notify_info, sizeof(notify_info)) ?
		-EFAULT : 0;
}

static int vmci_host_do_notify_resource(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_dbell_notify_resource_info info;
	u32 cid;

	if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) {
		vmci_ioctl_err("invalid for current VMX versions\n");
		return -EINVAL;
	}

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&info, uptr, sizeof(info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);

	switch (info.action) {
	case VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY:
		if (info.resource == VMCI_NOTIFY_RESOURCE_DOOR_BELL) {
			u32 flags = VMCI_NO_PRIVILEGE_FLAGS;
			info.result = vmci_ctx_notify_dbell(cid, info.handle,
							    flags);
		} else {
			info.result = VMCI_ERROR_UNAVAILABLE;
		}
		break;

	case VMCI_NOTIFY_RESOURCE_ACTION_CREATE:
		info.result = vmci_ctx_dbell_create(cid, info.handle);
		break;

	case VMCI_NOTIFY_RESOURCE_ACTION_DESTROY:
		info.result = vmci_ctx_dbell_destroy(cid, info.handle);
		break;

	default:
		vmci_ioctl_err("got unknown action (action=%d)\n",
			       info.action);
		info.result = VMCI_ERROR_INVALID_ARGS;
	}

	return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0;
}

static int vmci_host_do_recv_notifications(struct vmci_host_dev *vmci_host_dev,
					   const char *ioctl_name,
					   void __user *uptr)
{
	struct vmci_ctx_notify_recv_info info;
	struct vmci_handle_arr *db_handle_array;
	struct vmci_handle_arr *qp_handle_array;
	void __user *ubuf;
	u32 cid;
	int retval = 0;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) {
		vmci_ioctl_err("not supported for the current vmx version\n");
		return -EINVAL;
	}

	if (copy_from_user(&info, uptr, sizeof(info)))
		return -EFAULT;

	if ((info.db_handle_buf_size && !info.db_handle_buf_uva) ||
	    (info.qp_handle_buf_size && !info.qp_handle_buf_uva)) {
		return -EINVAL;
	}

	cid = vmci_ctx_get_id(vmci_host_dev->context);

	info.result = vmci_ctx_rcv_notifications_get(cid,
				&db_handle_array, &qp_handle_array);
	if (info.result != VMCI_SUCCESS)
		return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0;

	ubuf = (void __user *)(uintptr_t)info.db_handle_buf_uva;
	info.result = drv_cp_harray_to_user(ubuf, &info.db_handle_buf_size,
					    db_handle_array, &retval);
	if (info.result == VMCI_SUCCESS && !retval) {
		ubuf = (void __user *)(uintptr_t)info.qp_handle_buf_uva;
		info.result = drv_cp_harray_to_user(ubuf,
						    &info.qp_handle_buf_size,
						    qp_handle_array, &retval);
	}

	if (!retval && copy_to_user(uptr, &info, sizeof(info)))
		retval = -EFAULT;

	vmci_ctx_rcv_notifications_release(cid,
				db_handle_array, qp_handle_array,
				info.result == VMCI_SUCCESS && !retval);

	return retval;
}

static long vmci_host_unlocked_ioctl(struct file *filp,
				     unsigned int iocmd, unsigned long ioarg)
{
#define VMCI_DO_IOCTL(ioctl_name, ioctl_fn) do {			\
		char *name = __stringify(IOCTL_VMCI_ ## ioctl_name);	\
		return vmci_host_do_ ## ioctl_fn(			\
			vmci_host_dev, name, uptr);			\
	} while (0)
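
	/*
	 * VMCI_DO_IOCTL() stringifies the ioctl name so that vmci_ioctl_err()
	 * inside each handler can prefix its messages with the ioctl being
	 * processed.
	 */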

	struct vmci_host_dev *vmci_host_dev = filp->private_data;
	void __user *uptr = (void __user *)ioarg;

	switch (iocmd) {
	case IOCTL_VMCI_INIT_CONTEXT:
		VMCI_DO_IOCTL(INIT_CONTEXT, init_context);
	case IOCTL_VMCI_DATAGRAM_SEND:
		VMCI_DO_IOCTL(DATAGRAM_SEND, send_datagram);
	case IOCTL_VMCI_DATAGRAM_RECEIVE:
		VMCI_DO_IOCTL(DATAGRAM_RECEIVE, receive_datagram);
	case IOCTL_VMCI_QUEUEPAIR_ALLOC:
		VMCI_DO_IOCTL(QUEUEPAIR_ALLOC, alloc_queuepair);
	case IOCTL_VMCI_QUEUEPAIR_SETVA:
		VMCI_DO_IOCTL(QUEUEPAIR_SETVA, queuepair_setva);
	case IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE:
		VMCI_DO_IOCTL(QUEUEPAIR_SETPAGEFILE, queuepair_setpf);
	case IOCTL_VMCI_QUEUEPAIR_DETACH:
		VMCI_DO_IOCTL(QUEUEPAIR_DETACH, qp_detach);
	case IOCTL_VMCI_CTX_ADD_NOTIFICATION:
		VMCI_DO_IOCTL(CTX_ADD_NOTIFICATION, ctx_add_notify);
	case IOCTL_VMCI_CTX_REMOVE_NOTIFICATION:
		VMCI_DO_IOCTL(CTX_REMOVE_NOTIFICATION, ctx_remove_notify);
	case IOCTL_VMCI_CTX_GET_CPT_STATE:
		VMCI_DO_IOCTL(CTX_GET_CPT_STATE, ctx_get_cpt_state);
	case IOCTL_VMCI_CTX_SET_CPT_STATE:
		VMCI_DO_IOCTL(CTX_SET_CPT_STATE, ctx_set_cpt_state);
	case IOCTL_VMCI_GET_CONTEXT_ID:
		VMCI_DO_IOCTL(GET_CONTEXT_ID, get_context_id);
	case IOCTL_VMCI_SET_NOTIFY:
		VMCI_DO_IOCTL(SET_NOTIFY, set_notify);
	case IOCTL_VMCI_NOTIFY_RESOURCE:
		VMCI_DO_IOCTL(NOTIFY_RESOURCE, notify_resource);
	case IOCTL_VMCI_NOTIFICATIONS_RECEIVE:
		VMCI_DO_IOCTL(NOTIFICATIONS_RECEIVE, recv_notifications);

	case IOCTL_VMCI_VERSION:
	case IOCTL_VMCI_VERSION2:
		return vmci_host_get_version(vmci_host_dev, iocmd, uptr);

	default:
		pr_devel("%s: Unknown ioctl (iocmd=%d)\n", __func__, iocmd);
		return -EINVAL;
	}

#undef VMCI_DO_IOCTL
}

static const struct file_operations vmuser_fops = {
	.owner		= THIS_MODULE,
	.open		= vmci_host_open,
	.release	= vmci_host_close,
	.poll		= vmci_host_poll,
	.unlocked_ioctl	= vmci_host_unlocked_ioctl,
	.compat_ioctl	= vmci_host_unlocked_ioctl,
};
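
/*
 * The same handler serves compat_ioctl above; the ioctl payloads are built
 * from fixed-size fields, so their layout is presumed identical for 32-bit
 * and 64-bit callers.
 */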

static struct miscdevice vmci_host_miscdev = {
	 .name = "vmci",
	 .minor = MISC_DYNAMIC_MINOR,
	 .fops = &vmuser_fops,
};

int __init vmci_host_init(void)
{
	int error;

	host_context = vmci_ctx_create(VMCI_HOST_CONTEXT_ID,
					VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS,
					-1, VMCI_VERSION, NULL);
	if (IS_ERR(host_context)) {
		error = PTR_ERR(host_context);
		pr_warn("Failed to initialize VMCIContext (error=%d)\n",
			error);
		return error;
	}

	error = misc_register(&vmci_host_miscdev);
	if (error) {
		pr_warn("Module registration error (name=%s, major=%d, minor=%d, err=%d)\n",
			vmci_host_miscdev.name,
			MISC_MAJOR, vmci_host_miscdev.minor,
			error);
		pr_warn("Unable to initialize host personality\n");
		vmci_ctx_destroy(host_context);
		return error;
	}

	pr_info("VMCI host device registered (name=%s, major=%d, minor=%d)\n",
		vmci_host_miscdev.name, MISC_MAJOR, vmci_host_miscdev.minor);

	vmci_host_device_initialized = true;
	return 0;
}

void __exit vmci_host_exit(void)
{
	int error;

	vmci_host_device_initialized = false;

	error = misc_deregister(&vmci_host_miscdev);
	if (error)
		pr_warn("Error unregistering character device: %d\n", error);

	vmci_ctx_destroy(host_context);
	vmci_qp_broker_exit();

	pr_debug("VMCI host driver module unloaded\n");
}