/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/moduleparam.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/vmalloc.h>

#include "vmci_datagram.h"
#include "vmci_doorbell.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"

#define PCI_DEVICE_ID_VMWARE_VMCI	0x0740

#define VMCI_UTIL_NUM_RESOURCES 1

static bool vmci_disable_msi;
module_param_named(disable_msi, vmci_disable_msi, bool, 0);
MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");

static bool vmci_disable_msix;
module_param_named(disable_msix, vmci_disable_msix, bool, 0);
MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");

/* Subscription id for context id update events, and the cached context id. */
static u32 ctx_update_sub_id = VMCI_INVALID_ID;
static u32 vm_context_id = VMCI_INVALID_ID;

struct vmci_guest_device {
	struct device *dev;	/* PCI device we are attached to */
	void __iomem *iobase;

	bool exclusive_vectors;	/* MSI-X with one vector per interrupt source. */

	struct tasklet_struct datagram_tasklet;
	struct tasklet_struct bm_tasklet;

	void *data_buffer;		/* Datagram receive buffer. */
	void *notification_bitmap;	/* Doorbell notification bitmap page. */
	dma_addr_t notification_base;	/* Bus address of the bitmap page. */
};

/* vmci_dev singleton device and supporting data. */
struct pci_dev *vmci_pdev;
static struct vmci_guest_device *vmci_dev_g;
static DEFINE_SPINLOCK(vmci_dev_spinlock);

static atomic_t vmci_num_guest_devices = ATOMIC_INIT(0);

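/* Returns true while at least one VMCI guest device is present. */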
bool vmci_guest_code_active(void)
{
	return atomic_read(&vmci_num_guest_devices) != 0;
}

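/*
 * Returns the context id of this VM. The first call queries the
 * hypervisor with a VMCI_GET_CONTEXT_ID datagram; the result is cached
 * and refreshed by vmci_guest_cid_update() on update and resume events.
 */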
u32 vmci_get_vm_context_id(void)
{
	if (vm_context_id == VMCI_INVALID_ID) {
		struct vmci_datagram get_cid_msg;
		get_cid_msg.dst =
		    vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
				     VMCI_GET_CONTEXT_ID);
		get_cid_msg.src = VMCI_ANON_SRC_HANDLE;
		get_cid_msg.payload_size = 0;
		vm_context_id = vmci_send_datagram(&get_cid_msg);
	}
	return vm_context_id;
}

/*
 * VM to hypervisor call mechanism. We use the standard VMware naming
 * convention since shared code is calling this function as well.
 */
int vmci_send_datagram(struct vmci_datagram *dg)
{
	unsigned long flags;
	int result;

	/* Check args. */
	if (dg == NULL)
		return VMCI_ERROR_INVALID_ARGS;

	/*
	 * We need to acquire the device spinlock because the datagram
	 * data may be spread over multiple pages and the monitor may
	 * interleave device user rpc calls from multiple VCPUs;
	 * holding the spinlock precludes that possibility. We also
	 * disable interrupts so that incoming datagrams cannot arrive
	 * during a "rep out" and re-enter this function.
	 */
	spin_lock_irqsave(&vmci_dev_spinlock, flags);

	if (vmci_dev_g) {
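		/*
		 * Stream the datagram bytes out through the data-out
		 * port, then read the host's result code back from
		 * the result register.
		 */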
		iowrite8_rep(vmci_dev_g->iobase + VMCI_DATA_OUT_ADDR,
			     dg, VMCI_DG_SIZE(dg));
		result = ioread32(vmci_dev_g->iobase + VMCI_RESULT_LOW_ADDR);
	} else {
		result = VMCI_ERROR_UNAVAILABLE;
	}

	spin_unlock_irqrestore(&vmci_dev_spinlock, flags);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_send_datagram);

/*
 * Gets called with the new context id if it is updated or resumed.
 */
static void vmci_guest_cid_update(u32 sub_id,
				  const struct vmci_event_data *event_data,
				  void *client_data)
{
	const struct vmci_event_payld_ctx *ev_payload;

	if (sub_id != ctx_update_sub_id) {
		pr_devel("Invalid subscriber (ID=0x%x)\n", sub_id);
		return;
	}

	if (!event_data) {
		pr_devel("Invalid event data\n");
		return;
	}

	ev_payload = vmci_event_data_const_payload(event_data);
	if (ev_payload->context_id == VMCI_INVALID_ID) {
		pr_devel("Invalid event data\n");
		return;
	}

	pr_devel("Updating context from (ID=0x%x) to (ID=0x%x) on event (type=%d)\n",
		 vm_context_id, ev_payload->context_id, event_data->event);

	vm_context_id = ev_payload->context_id;
}

/*
 * Verify that the host supports the hypercalls we need. If it does not,
 * try to find fallback hypercalls and use those instead.  Returns 0 if
 * the required hypercalls (or fallback hypercalls) are supported by the
 * host, an error code otherwise.
 */
static int vmci_check_host_caps(struct pci_dev *pdev)
{
	bool result;
	struct vmci_resource_query_msg *msg;
	u32 msg_size = sizeof(struct vmci_resource_query_hdr) +
				VMCI_UTIL_NUM_RESOURCES * sizeof(u32);
	struct vmci_datagram *check_msg;

	check_msg = kmalloc(msg_size, GFP_KERNEL);
	if (!check_msg) {
		dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__);
		return -ENOMEM;
	}

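	/* Build a VMCI_RESOURCES_QUERY datagram for the resources we need. */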
	check_msg->dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					  VMCI_RESOURCES_QUERY);
	check_msg->src = VMCI_ANON_SRC_HANDLE;
	check_msg->payload_size = msg_size - VMCI_DG_HEADERSIZE;
	msg = (struct vmci_resource_query_msg *)VMCI_DG_PAYLOAD(check_msg);

	msg->num_resources = VMCI_UTIL_NUM_RESOURCES;
	msg->resources[0] = VMCI_GET_CONTEXT_ID;

	/*
	 * Check that the required hypercalls are supported; a result of
	 * 0x01 indicates that the single resource we queried for is
	 * available.
	 */
	result = vmci_send_datagram(check_msg) == 0x01;
	kfree(check_msg);

	dev_dbg(&pdev->dev, "%s: Host capability check: %s\n",
		__func__, result ? "PASSED" : "FAILED");

	/* We need this resource; there are no fallbacks. */
	return result ? 0 : -ENXIO;
}

/*
 * Reads datagrams from the data in port and dispatches them. We
 * always start reading datagrams into only the first page of the
 * datagram buffer. If the datagrams don't fit into one page, we
 * use the maximum datagram buffer size for the remainder of the
 * invocation. This is a simple heuristic for not penalizing
 * small datagrams.
 *
 * This function assumes that it has exclusive access to the data
 * in port for the duration of the call.
 */
static void vmci_dispatch_dgs(unsigned long data)
{
	struct vmci_guest_device *vmci_dev = (struct vmci_guest_device *)data;
	u8 *dg_in_buffer = vmci_dev->data_buffer;
	struct vmci_datagram *dg;
	size_t dg_in_buffer_size = VMCI_MAX_DG_SIZE;
	size_t current_dg_in_buffer_size = PAGE_SIZE;
	size_t remaining_bytes;

	BUILD_BUG_ON(VMCI_MAX_DG_SIZE < PAGE_SIZE);

	ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
		    vmci_dev->data_buffer, current_dg_in_buffer_size);
	dg = (struct vmci_datagram *)dg_in_buffer;
	remaining_bytes = current_dg_in_buffer_size;

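	/*
	 * Each iteration consumes one datagram. A destination resource of
	 * VMCI_INVALID_ID marks padding up to the next page boundary or,
	 * when no more than a page remains, the end of the batch.
	 */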
	while (dg->dst.resource != VMCI_INVALID_ID ||
	       remaining_bytes > PAGE_SIZE) {
		unsigned dg_in_size;

		/*
		 * When the input buffer spans multiple pages, a datagram can
		 * start on any page boundary in the buffer.
		 */
		if (dg->dst.resource == VMCI_INVALID_ID) {
			dg = (struct vmci_datagram *)roundup(
				(uintptr_t)dg + 1, PAGE_SIZE);
			remaining_bytes =
				(size_t)(dg_in_buffer +
					 current_dg_in_buffer_size -
					 (u8 *)dg);
			continue;
		}

		dg_in_size = VMCI_DG_SIZE_ALIGNED(dg);

		if (dg_in_size <= dg_in_buffer_size) {
			int result;

			/*
			 * If the remaining bytes in the datagram
			 * buffer don't contain the complete
			 * datagram, we first make sure we have enough
			 * room for it and then we read the remainder
			 * of the datagram and possibly any following
			 * datagrams.
			 */
			if (dg_in_size > remaining_bytes) {
				if (remaining_bytes !=
				    current_dg_in_buffer_size) {

					/*
					 * We move the partial
					 * datagram to the front and
					 * read the remainder of the
					 * datagram and possibly
					 * following calls into the
					 * following bytes.
					 */
					memmove(dg_in_buffer, dg_in_buffer +
						current_dg_in_buffer_size -
						remaining_bytes,
						remaining_bytes);
					dg = (struct vmci_datagram *)
					    dg_in_buffer;
				}

				if (current_dg_in_buffer_size !=
				    dg_in_buffer_size)
					current_dg_in_buffer_size =
					    dg_in_buffer_size;

				ioread8_rep(vmci_dev->iobase +
						VMCI_DATA_IN_ADDR,
					vmci_dev->data_buffer +
						remaining_bytes,
					current_dg_in_buffer_size -
						remaining_bytes);
			}

			/*
			 * We special case event datagrams from the
			 * hypervisor.
			 */
			if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID &&
			    dg->dst.resource == VMCI_EVENT_HANDLER) {
				result = vmci_event_dispatch(dg);
			} else {
				result = vmci_datagram_invoke_guest_handler(dg);
			}
			if (result < VMCI_SUCCESS)
				dev_dbg(vmci_dev->dev,
					"Datagram with resource (ID=0x%x) failed (err=%d)\n",
					 dg->dst.resource, result);

			/* On to the next datagram. */
			dg = (struct vmci_datagram *)((u8 *)dg +
						      dg_in_size);
		} else {
			size_t bytes_to_skip;

			/*
			 * Datagram doesn't fit in datagram buffer of maximal
			 * size. We drop it.
			 */
			dev_dbg(vmci_dev->dev,
				"Failed to receive datagram (size=%u bytes)\n",
				 dg_in_size);

			bytes_to_skip = dg_in_size - remaining_bytes;
			if (current_dg_in_buffer_size != dg_in_buffer_size)
				current_dg_in_buffer_size = dg_in_buffer_size;

			for (;;) {
				ioread8_rep(vmci_dev->iobase +
						VMCI_DATA_IN_ADDR,
					vmci_dev->data_buffer,
					current_dg_in_buffer_size);
				if (bytes_to_skip <= current_dg_in_buffer_size)
					break;

				bytes_to_skip -= current_dg_in_buffer_size;
			}
			dg = (struct vmci_datagram *)(dg_in_buffer +
						      bytes_to_skip);
		}

		remaining_bytes =
		    (size_t) (dg_in_buffer + current_dg_in_buffer_size -
			      (u8 *)dg);

		if (remaining_bytes < VMCI_DG_HEADERSIZE) {
			/* Get the next batch of datagrams. */

			ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
				    vmci_dev->data_buffer,
				    current_dg_in_buffer_size);
			dg = (struct vmci_datagram *)dg_in_buffer;
			remaining_bytes = current_dg_in_buffer_size;
		}
	}
}

/*
 * Scans the notification bitmap for raised flags, clears them
 * and handles the notifications.
 */
static void vmci_process_bitmap(unsigned long data)
{
	struct vmci_guest_device *dev = (struct vmci_guest_device *)data;

	if (!dev->notification_bitmap) {
		dev_dbg(dev->dev, "No bitmap present in %s\n", __func__);
		return;
	}

	vmci_dbell_scan_notification_entries(dev->notification_bitmap);
}

/*
 * Interrupt handler for legacy or MSI interrupt, or for first MSI-X
 * interrupt (vector VMCI_INTR_DATAGRAM).
 */
static irqreturn_t vmci_interrupt(int irq, void *_dev)
{
	struct vmci_guest_device *dev = _dev;

	/*
	 * If we are using MSI-X with exclusive vectors then we simply schedule
	 * the datagram tasklet, since we know the interrupt was meant for us.
	 * Otherwise we must read the ICR to determine what to do.
	 */

	if (dev->exclusive_vectors) {
		tasklet_schedule(&dev->datagram_tasklet);
	} else {
		unsigned int icr;

		/* Acknowledge interrupt and determine what needs doing. */
		icr = ioread32(dev->iobase + VMCI_ICR_ADDR);
		if (icr == 0 || icr == ~0)
			return IRQ_NONE;

		if (icr & VMCI_ICR_DATAGRAM) {
			tasklet_schedule(&dev->datagram_tasklet);
			icr &= ~VMCI_ICR_DATAGRAM;
		}

		if (icr & VMCI_ICR_NOTIFICATION) {
			tasklet_schedule(&dev->bm_tasklet);
			icr &= ~VMCI_ICR_NOTIFICATION;
		}

		if (icr != 0)
			dev_warn(dev->dev,
				 "Ignoring unknown interrupt cause (%d)\n",
				 icr);
	}

	return IRQ_HANDLED;
}

/*
 * Interrupt handler for MSI-X interrupt vector VMCI_INTR_NOTIFICATION,
 * which is for the notification bitmap.  Will only get called if we are
 * using MSI-X with exclusive vectors.
 */
static irqreturn_t vmci_interrupt_bm(int irq, void *_dev)
{
	struct vmci_guest_device *dev = _dev;

	/* For MSI-X we can just assume it was meant for us. */
	tasklet_schedule(&dev->bm_tasklet);

	return IRQ_HANDLED;
}

/*
 * Most of the device initialization is done here, when the PCI core
 * probes the device.
 */
static int vmci_guest_probe_device(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	struct vmci_guest_device *vmci_dev;
	void __iomem *iobase;
	unsigned int capabilities;
	unsigned long cmd;
	int vmci_err;
	int error;

	dev_dbg(&pdev->dev, "Probing for vmci/PCI guest device\n");

	error = pcim_enable_device(pdev);
	if (error) {
		dev_err(&pdev->dev,
			"Failed to enable VMCI device: %d\n", error);
		return error;
	}

	error = pcim_iomap_regions(pdev, 1 << 0, KBUILD_MODNAME);
	if (error) {
		dev_err(&pdev->dev, "Failed to reserve/map IO regions\n");
		return error;
	}

	iobase = pcim_iomap_table(pdev)[0];

	dev_info(&pdev->dev, "Found VMCI PCI device at %#lx, irq %u\n",
		 (unsigned long)iobase, pdev->irq);

	vmci_dev = devm_kzalloc(&pdev->dev, sizeof(*vmci_dev), GFP_KERNEL);
	if (!vmci_dev) {
		dev_err(&pdev->dev,
			"Can't allocate memory for VMCI device\n");
		return -ENOMEM;
	}

	vmci_dev->dev = &pdev->dev;
	vmci_dev->exclusive_vectors = false;
	vmci_dev->iobase = iobase;

	tasklet_init(&vmci_dev->datagram_tasklet,
		     vmci_dispatch_dgs, (unsigned long)vmci_dev);
	tasklet_init(&vmci_dev->bm_tasklet,
		     vmci_process_bitmap, (unsigned long)vmci_dev);

	vmci_dev->data_buffer = vmalloc(VMCI_MAX_DG_SIZE);
	if (!vmci_dev->data_buffer) {
		dev_err(&pdev->dev,
			"Can't allocate memory for datagram buffer\n");
		return -ENOMEM;
	}

	pci_set_master(pdev);	/* To enable queue_pair functionality. */

	/*
	 * Verify that the VMCI Device supports the capabilities that
	 * we need. If the device is missing capabilities that we would
	 * like to use, check for fallback capabilities and use those
	 * instead (so we can run a new VM on old hosts). Fail the load if
	 * a required capability is missing and there is no fallback.
	 *
	 * Right now, we need datagrams. There are no fallbacks.
	 */
	capabilities = ioread32(vmci_dev->iobase + VMCI_CAPS_ADDR);
	if (!(capabilities & VMCI_CAPS_DATAGRAM)) {
		dev_err(&pdev->dev, "Device does not support datagrams\n");
		error = -ENXIO;
		goto err_free_data_buffer;
	}

	/*
	 * If the hardware supports notifications, we will use that as
	 * well.
	 */
	if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
		vmci_dev->notification_bitmap = dma_alloc_coherent(
			&pdev->dev, PAGE_SIZE, &vmci_dev->notification_base,
			GFP_KERNEL);
		if (!vmci_dev->notification_bitmap) {
			dev_warn(&pdev->dev,
				 "Unable to allocate notification bitmap\n");
			/* Do not advertise a capability we cannot use. */
			capabilities &= ~VMCI_CAPS_NOTIFICATIONS;
		} else {
			memset(vmci_dev->notification_bitmap, 0, PAGE_SIZE);
		}
	}

	dev_info(&pdev->dev, "Using capabilities 0x%x\n", capabilities);

	/* Let the host know which capabilities we intend to use. */
	iowrite32(capabilities, vmci_dev->iobase + VMCI_CAPS_ADDR);

	/* Set up global device so that we can start sending datagrams */
	spin_lock_irq(&vmci_dev_spinlock);
	vmci_dev_g = vmci_dev;
	vmci_pdev = pdev;
	spin_unlock_irq(&vmci_dev_spinlock);

	/*
	 * Register notification bitmap with device if that capability is
	 * used.
	 */
	if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
		unsigned long bitmap_ppn =
			vmci_dev->notification_base >> PAGE_SHIFT;
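		/* Hand the bitmap's page frame number to the device. */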
		if (!vmci_dbell_register_notification_bitmap(bitmap_ppn)) {
			dev_warn(&pdev->dev,
				 "VMCI device unable to register notification bitmap with PPN 0x%x\n",
				 (u32) bitmap_ppn);
			error = -ENXIO;
			/*
			 * The err_remove_vmci_dev_g path does not free
			 * the bitmap, so release it here.
			 */
			dma_free_coherent(&pdev->dev, PAGE_SIZE,
					  vmci_dev->notification_bitmap,
					  vmci_dev->notification_base);
			vmci_dev->notification_bitmap = NULL;
			goto err_remove_vmci_dev_g;
		}
	}

	/* Check host capabilities. */
	error = vmci_check_host_caps(pdev);
	if (error)
		goto err_remove_bitmap;

	/* Enable device. */

	/*
	 * We subscribe to the VMCI_EVENT_CTX_ID_UPDATE here so we can
	 * update the internal context id when needed.
	 */
	vmci_err = vmci_event_subscribe(VMCI_EVENT_CTX_ID_UPDATE,
					vmci_guest_cid_update, NULL,
					&ctx_update_sub_id);
	if (vmci_err < VMCI_SUCCESS)
		dev_warn(&pdev->dev,
			 "Failed to subscribe to event (type=%d): %d\n",
			 VMCI_EVENT_CTX_ID_UPDATE, vmci_err);

	/*
	 * Enable interrupts.  Try MSI-X first, then MSI, and then fall
	 * back to legacy interrupts.  pci_alloc_irq_vectors() returns
	 * the number of vectors allocated on success, so only a
	 * negative return indicates failure.
	 */
	error = pci_alloc_irq_vectors(pdev, VMCI_MAX_INTRS, VMCI_MAX_INTRS,
			PCI_IRQ_MSIX);
	if (error < 0) {
		error = pci_alloc_irq_vectors(pdev, 1, 1,
				PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
		if (error < 0)
			goto err_remove_bitmap;
	} else {
		vmci_dev->exclusive_vectors = true;
	}

	/*
	 * Request IRQ for legacy or MSI interrupts, or for first
	 * MSI-X vector.
	 */
	error = request_irq(pci_irq_vector(pdev, 0), vmci_interrupt,
			    IRQF_SHARED, KBUILD_MODNAME, vmci_dev);
	if (error) {
		dev_err(&pdev->dev, "Irq %u in use: %d\n",
			pci_irq_vector(pdev, 0), error);
		goto err_disable_msi;
	}

	/*
	 * For MSI-X with exclusive vectors we need to request an
	 * interrupt for each vector so that we get a separate
	 * interrupt handler routine.  This allows us to distinguish
	 * between the vectors.
	 */
	if (vmci_dev->exclusive_vectors) {
		error = request_irq(pci_irq_vector(pdev, 1),
				    vmci_interrupt_bm, 0, KBUILD_MODNAME,
				    vmci_dev);
		if (error) {
			dev_err(&pdev->dev,
				"Failed to allocate irq %u: %d\n",
				pci_irq_vector(pdev, 1), error);
			goto err_free_irq;
		}
	}

	dev_dbg(&pdev->dev, "Registered device\n");

	atomic_inc(&vmci_num_guest_devices);

	/* Enable specific interrupt bits. */
	cmd = VMCI_IMR_DATAGRAM;
	if (capabilities & VMCI_CAPS_NOTIFICATIONS)
		cmd |= VMCI_IMR_NOTIFICATION;
	iowrite32(cmd, vmci_dev->iobase + VMCI_IMR_ADDR);

	/* Enable interrupts. */
	iowrite32(VMCI_CONTROL_INT_ENABLE,
		  vmci_dev->iobase + VMCI_CONTROL_ADDR);

	pci_set_drvdata(pdev, vmci_dev);
	return 0;

err_free_irq:
	free_irq(pci_irq_vector(pdev, 0), vmci_dev);
	tasklet_kill(&vmci_dev->datagram_tasklet);
	tasklet_kill(&vmci_dev->bm_tasklet);

err_disable_msi:
	pci_free_irq_vectors(pdev);

	vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
	if (vmci_err < VMCI_SUCCESS)
		dev_warn(&pdev->dev,
			 "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n",
			 VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err);

err_remove_bitmap:
	if (vmci_dev->notification_bitmap) {
		iowrite32(VMCI_CONTROL_RESET,
			  vmci_dev->iobase + VMCI_CONTROL_ADDR);
		dma_free_coherent(&pdev->dev, PAGE_SIZE,
				  vmci_dev->notification_bitmap,
				  vmci_dev->notification_base);
	}

err_remove_vmci_dev_g:
	spin_lock_irq(&vmci_dev_spinlock);
	vmci_pdev = NULL;
	vmci_dev_g = NULL;
	spin_unlock_irq(&vmci_dev_spinlock);

err_free_data_buffer:
	vfree(vmci_dev->data_buffer);

	/* The rest are managed resources and will be freed by PCI core */
	return error;
}

static void vmci_guest_remove_device(struct pci_dev *pdev)
{
	struct vmci_guest_device *vmci_dev = pci_get_drvdata(pdev);
	int vmci_err;

	dev_dbg(&pdev->dev, "Removing device\n");

	atomic_dec(&vmci_num_guest_devices);

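	/* Clean up this guest's queue-pair endpoints before the reset below. */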
	vmci_qp_guest_endpoints_exit();

	vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
	if (vmci_err < VMCI_SUCCESS)
		dev_warn(&pdev->dev,
			 "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n",
			 VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err);

	spin_lock_irq(&vmci_dev_spinlock);
	vmci_dev_g = NULL;
	vmci_pdev = NULL;
	spin_unlock_irq(&vmci_dev_spinlock);

	dev_dbg(&pdev->dev, "Resetting vmci device\n");
	iowrite32(VMCI_CONTROL_RESET, vmci_dev->iobase + VMCI_CONTROL_ADDR);

	/*
	 * Free IRQ and then disable MSI/MSI-X as appropriate.  For
	 * MSI-X, we might have multiple vectors, each with their own
	 * IRQ, which we must free too.
	 */
	if (vmci_dev->exclusive_vectors)
		free_irq(pci_irq_vector(pdev, 1), vmci_dev);
	free_irq(pci_irq_vector(pdev, 0), vmci_dev);
	pci_free_irq_vectors(pdev);

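	/* With the IRQs freed, nothing can schedule the tasklets anymore. */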
	tasklet_kill(&vmci_dev->datagram_tasklet);
	tasklet_kill(&vmci_dev->bm_tasklet);

	if (vmci_dev->notification_bitmap) {
		/*
		 * The device reset above cleared the bitmap state of the
		 * device, so we can safely free it here.
		 */

		dma_free_coherent(&pdev->dev, PAGE_SIZE,
				  vmci_dev->notification_bitmap,
				  vmci_dev->notification_base);
	}

	vfree(vmci_dev->data_buffer);

	/* The rest are managed resources and will be freed by PCI core */
}

static const struct pci_device_id vmci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_VMCI), },
	{ 0 },
};
MODULE_DEVICE_TABLE(pci, vmci_ids);

static struct pci_driver vmci_guest_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= vmci_ids,
	.probe		= vmci_guest_probe_device,
	.remove		= vmci_guest_remove_device,
};

int __init vmci_guest_init(void)
{
	return pci_register_driver(&vmci_guest_driver);
}

void __exit vmci_guest_exit(void)
{
	pci_unregister_driver(&vmci_guest_driver);
}