/*
 * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/errno.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <net/addrconf.h>

#include "pvrdma.h"

#define DRV_NAME	"vmw_pvrdma"
#define DRV_VERSION	"1.0.1.0-k"

static DEFINE_MUTEX(pvrdma_device_list_lock);
static LIST_HEAD(pvrdma_device_list);
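/* Ordered workqueue for deferred handling of netdevice notifier events. */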
static struct workqueue_struct *event_wq;

static int pvrdma_add_gid(struct ib_device *ibdev,
			  u8 port_num,
			  unsigned int index,
			  const union ib_gid *gid,
			  const struct ib_gid_attr *attr,
			  void **context);
static int pvrdma_del_gid(struct ib_device *ibdev,
			  u8 port_num,
			  unsigned int index,
			  void **context);


static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "VMW_PVRDMA-%s\n", DRV_VERSION);
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%d\n", PVRDMA_REV_ID);
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", PVRDMA_BOARD_ID);
}

static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,	   NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca,	   NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);

static struct device_attribute *pvrdma_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_hca_type,
	&dev_attr_board_id
};

static void pvrdma_get_fw_ver_str(struct ib_device *device, char *str)
{
	struct pvrdma_dev *dev =
		container_of(device, struct pvrdma_dev, ib_dev);
	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d\n",
		 (int) (dev->dsr->caps.fw_ver >> 32),
		 (int) (dev->dsr->caps.fw_ver >> 16) & 0xffff,
		 (int) dev->dsr->caps.fw_ver & 0xffff);
}

static int pvrdma_init_device(struct pvrdma_dev *dev)
{
	/* Initialize device locks, the command semaphore and resource counters. */
	spin_lock_init(&dev->cmd_lock);
	sema_init(&dev->cmd_sema, 1);
	atomic_set(&dev->num_qps, 0);
	atomic_set(&dev->num_srqs, 0);
	atomic_set(&dev->num_cqs, 0);
	atomic_set(&dev->num_pds, 0);
	atomic_set(&dev->num_ahs, 0);

	return 0;
}

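/*
 * Report immutable port attributes. The core capability flags advertise
 * RoCE v1 or RoCE v2, depending on which GID types the device exposes.
 */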
static int pvrdma_port_immutable(struct ib_device *ibdev, u8 port_num,
				 struct ib_port_immutable *immutable)
{
	struct pvrdma_dev *dev = to_vdev(ibdev);
	struct ib_port_attr attr;
	int err;

	if (dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V1)
		immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE;
	else if (dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V2)
		immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	return 0;
}

static struct net_device *pvrdma_get_netdev(struct ib_device *ibdev,
					    u8 port_num)
{
	struct net_device *netdev;
	struct pvrdma_dev *dev = to_vdev(ibdev);

	if (port_num != 1)
		return NULL;

	rcu_read_lock();
	netdev = dev->netdev;
	if (netdev)
		dev_hold(netdev);
	rcu_read_unlock();

	return netdev;
}

static int pvrdma_register_device(struct pvrdma_dev *dev)
{
	int ret = -ENOMEM;
	int i = 0;

	strlcpy(dev->ib_dev.name, "vmw_pvrdma%d", IB_DEVICE_NAME_MAX);
	dev->ib_dev.node_guid = dev->dsr->caps.node_guid;
	dev->sys_image_guid = dev->dsr->caps.sys_image_guid;
	dev->flags = 0;
	dev->ib_dev.owner = THIS_MODULE;
	dev->ib_dev.num_comp_vectors = 1;
	dev->ib_dev.dev.parent = &dev->pdev->dev;
	dev->ib_dev.uverbs_abi_ver = PVRDMA_UVERBS_ABI_VERSION;
	dev->ib_dev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_POLL_CQ)		|
		(1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)	|
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
		(1ull << IB_USER_VERBS_CMD_POST_SEND)		|
		(1ull << IB_USER_VERBS_CMD_POST_RECV)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_AH)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_AH);

	dev->ib_dev.node_type = RDMA_NODE_IB_CA;
	dev->ib_dev.phys_port_cnt = dev->dsr->caps.phys_port_cnt;

	dev->ib_dev.query_device = pvrdma_query_device;
	dev->ib_dev.query_port = pvrdma_query_port;
	dev->ib_dev.query_gid = pvrdma_query_gid;
	dev->ib_dev.query_pkey = pvrdma_query_pkey;
	dev->ib_dev.modify_port	= pvrdma_modify_port;
	dev->ib_dev.alloc_ucontext = pvrdma_alloc_ucontext;
	dev->ib_dev.dealloc_ucontext = pvrdma_dealloc_ucontext;
	dev->ib_dev.mmap = pvrdma_mmap;
	dev->ib_dev.alloc_pd = pvrdma_alloc_pd;
	dev->ib_dev.dealloc_pd = pvrdma_dealloc_pd;
	dev->ib_dev.create_ah = pvrdma_create_ah;
	dev->ib_dev.destroy_ah = pvrdma_destroy_ah;
	dev->ib_dev.create_qp = pvrdma_create_qp;
	dev->ib_dev.modify_qp = pvrdma_modify_qp;
	dev->ib_dev.query_qp = pvrdma_query_qp;
	dev->ib_dev.destroy_qp = pvrdma_destroy_qp;
	dev->ib_dev.post_send = pvrdma_post_send;
	dev->ib_dev.post_recv = pvrdma_post_recv;
	dev->ib_dev.create_cq = pvrdma_create_cq;
	dev->ib_dev.modify_cq = pvrdma_modify_cq;
	dev->ib_dev.resize_cq = pvrdma_resize_cq;
	dev->ib_dev.destroy_cq = pvrdma_destroy_cq;
	dev->ib_dev.poll_cq = pvrdma_poll_cq;
	dev->ib_dev.req_notify_cq = pvrdma_req_notify_cq;
	dev->ib_dev.get_dma_mr = pvrdma_get_dma_mr;
	dev->ib_dev.reg_user_mr	= pvrdma_reg_user_mr;
	dev->ib_dev.dereg_mr = pvrdma_dereg_mr;
	dev->ib_dev.alloc_mr = pvrdma_alloc_mr;
	dev->ib_dev.map_mr_sg = pvrdma_map_mr_sg;
	dev->ib_dev.add_gid = pvrdma_add_gid;
	dev->ib_dev.del_gid = pvrdma_del_gid;
	dev->ib_dev.get_netdev = pvrdma_get_netdev;
	dev->ib_dev.get_port_immutable = pvrdma_port_immutable;
	dev->ib_dev.get_link_layer = pvrdma_port_link_layer;
	dev->ib_dev.get_dev_fw_str = pvrdma_get_fw_ver_str;

	mutex_init(&dev->port_mutex);
	spin_lock_init(&dev->desc_lock);

	dev->cq_tbl = kcalloc(dev->dsr->caps.max_cq, sizeof(void *),
			      GFP_KERNEL);
	if (!dev->cq_tbl)
		return ret;
	spin_lock_init(&dev->cq_tbl_lock);

	dev->qp_tbl = kcalloc(dev->dsr->caps.max_qp, sizeof(void *),
			      GFP_KERNEL);
	if (!dev->qp_tbl)
		goto err_cq_free;
	spin_lock_init(&dev->qp_tbl_lock);

	/* Check if SRQ is supported by backend */
	if (dev->dsr->caps.max_srq) {
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)	|
			(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)	|
			(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)	|
			(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)	|
			(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);

		dev->ib_dev.create_srq = pvrdma_create_srq;
		dev->ib_dev.modify_srq = pvrdma_modify_srq;
		dev->ib_dev.query_srq = pvrdma_query_srq;
		dev->ib_dev.destroy_srq = pvrdma_destroy_srq;
		dev->ib_dev.post_srq_recv = pvrdma_post_srq_recv;

		dev->srq_tbl = kcalloc(dev->dsr->caps.max_srq,
				       sizeof(struct pvrdma_srq *),
				       GFP_KERNEL);
		if (!dev->srq_tbl)
			goto err_qp_free;
	}
	spin_lock_init(&dev->srq_tbl_lock);

	ret = ib_register_device(&dev->ib_dev, NULL);
	if (ret)
		goto err_srq_free;

	for (i = 0; i < ARRAY_SIZE(pvrdma_class_attributes); ++i) {
		ret = device_create_file(&dev->ib_dev.dev,
					 pvrdma_class_attributes[i]);
		if (ret)
			goto err_class;
	}

	dev->ib_active = true;

	return 0;

err_class:
	ib_unregister_device(&dev->ib_dev);
err_srq_free:
	kfree(dev->srq_tbl);
err_qp_free:
	kfree(dev->qp_tbl);
err_cq_free:
	kfree(dev->cq_tbl);

	return ret;
}

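/*
 * Interrupt 0 signals completion of a command posted to the device. With
 * MSI-X the cause is implied by the vector; on the legacy/MSI path the ICR
 * register is read to find the cause and to detect spurious interrupts on
 * a shared line.
 */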
static irqreturn_t pvrdma_intr0_handler(int irq, void *dev_id)
{
	u32 icr = PVRDMA_INTR_CAUSE_RESPONSE;
	struct pvrdma_dev *dev = dev_id;

	dev_dbg(&dev->pdev->dev, "interrupt 0 (response) handler\n");

	if (!dev->pdev->msix_enabled) {
		/* Legacy intr */
		icr = pvrdma_read_reg(dev, PVRDMA_REG_ICR);
		if (icr == 0)
			return IRQ_NONE;
	}

	if (icr == PVRDMA_INTR_CAUSE_RESPONSE)
		complete(&dev->cmd_done);

	return IRQ_HANDLED;
}

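/*
 * Dispatch an async event to a QP's event handler. The reference taken
 * under qp_tbl_lock keeps the QP alive while the handler runs; the last
 * reference holder is woken up via qp->wait so destroy can proceed.
 */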
static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
{
	struct pvrdma_qp *qp;
	unsigned long flags;

	spin_lock_irqsave(&dev->qp_tbl_lock, flags);
	qp = dev->qp_tbl[qpn % dev->dsr->caps.max_qp];
	if (qp)
		atomic_inc(&qp->refcnt);
	spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);

	if (qp && qp->ibqp.event_handler) {
		struct ib_qp *ibqp = &qp->ibqp;
		struct ib_event e;

		e.device = ibqp->device;
		e.element.qp = ibqp;
		e.event = type; /* 1:1 mapping for now. */
		ibqp->event_handler(&e, ibqp->qp_context);
	}
	if (qp) {
		atomic_dec(&qp->refcnt);
		if (atomic_read(&qp->refcnt) == 0)
			wake_up(&qp->wait);
	}
}

static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
{
	struct pvrdma_cq *cq;
	unsigned long flags;

	spin_lock_irqsave(&dev->cq_tbl_lock, flags);
	cq = dev->cq_tbl[cqn % dev->dsr->caps.max_cq];
	if (cq)
		atomic_inc(&cq->refcnt);
	spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);

	if (cq && cq->ibcq.event_handler) {
		struct ib_cq *ibcq = &cq->ibcq;
		struct ib_event e;

		e.device = ibcq->device;
		e.element.cq = ibcq;
		e.event = type; /* 1:1 mapping for now. */
		ibcq->event_handler(&e, ibcq->cq_context);
	}
	if (cq) {
		atomic_dec(&cq->refcnt);
		if (atomic_read(&cq->refcnt) == 0)
			wake_up(&cq->wait);
	}
}

static void pvrdma_srq_event(struct pvrdma_dev *dev, u32 srqn, int type)
{
	struct pvrdma_srq *srq;
	unsigned long flags;

	spin_lock_irqsave(&dev->srq_tbl_lock, flags);
	if (dev->srq_tbl)
		srq = dev->srq_tbl[srqn % dev->dsr->caps.max_srq];
	else
		srq = NULL;
	if (srq)
		refcount_inc(&srq->refcnt);
	spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);

	if (srq && srq->ibsrq.event_handler) {
		struct ib_srq *ibsrq = &srq->ibsrq;
		struct ib_event e;

		e.device = ibsrq->device;
		e.element.srq = ibsrq;
		e.event = type; /* 1:1 mapping for now. */
		ibsrq->event_handler(&e, ibsrq->srq_context);
	}
	if (srq) {
		if (refcount_dec_and_test(&srq->refcnt))
			wake_up(&srq->wait);
	}
}

static void pvrdma_dispatch_event(struct pvrdma_dev *dev, int port,
				  enum ib_event_type event)
{
	struct ib_event ib_event;

	memset(&ib_event, 0, sizeof(ib_event));
	ib_event.device = &dev->ib_dev;
	ib_event.element.port_num = port;
	ib_event.event = event;
	ib_dispatch_event(&ib_event);
}

static void pvrdma_dev_event(struct pvrdma_dev *dev, u8 port, int type)
{
	if (port < 1 || port > dev->dsr->caps.phys_port_cnt) {
		dev_warn(&dev->pdev->dev, "event on invalid port %d\n", port);
		return;
	}

	pvrdma_dispatch_event(dev, port, type);
}

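/*
 * The first page of the async-event page directory holds the ring state,
 * so event entries start at offset PAGE_SIZE.
 */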
static inline struct pvrdma_eqe *get_eqe(struct pvrdma_dev *dev, unsigned int i)
{
	return (struct pvrdma_eqe *)pvrdma_page_dir_get_ptr(
					&dev->async_pdir,
					PAGE_SIZE +
					sizeof(struct pvrdma_eqe) * i);
}

static irqreturn_t pvrdma_intr1_handler(int irq, void *dev_id)
{
	struct pvrdma_dev *dev = dev_id;
	struct pvrdma_ring *ring = &dev->async_ring_state->rx;
	int ring_slots = (dev->dsr->async_ring_pages.num_pages - 1) *
			 PAGE_SIZE / sizeof(struct pvrdma_eqe);
	unsigned int head;

	dev_dbg(&dev->pdev->dev, "interrupt 1 (async event) handler\n");

	/*
	 * Don't process events until the IB device is registered. Otherwise
	 * we'll try to ib_dispatch_event() on an invalid device.
	 */
	if (!dev->ib_active)
		return IRQ_HANDLED;

	while (pvrdma_idx_ring_has_data(ring, ring_slots, &head) > 0) {
		struct pvrdma_eqe *eqe;

		eqe = get_eqe(dev, head);

		switch (eqe->type) {
		case PVRDMA_EVENT_QP_FATAL:
		case PVRDMA_EVENT_QP_REQ_ERR:
		case PVRDMA_EVENT_QP_ACCESS_ERR:
		case PVRDMA_EVENT_COMM_EST:
		case PVRDMA_EVENT_SQ_DRAINED:
		case PVRDMA_EVENT_PATH_MIG:
		case PVRDMA_EVENT_PATH_MIG_ERR:
		case PVRDMA_EVENT_QP_LAST_WQE_REACHED:
			pvrdma_qp_event(dev, eqe->info, eqe->type);
			break;

		case PVRDMA_EVENT_CQ_ERR:
			pvrdma_cq_event(dev, eqe->info, eqe->type);
			break;

		case PVRDMA_EVENT_SRQ_ERR:
		case PVRDMA_EVENT_SRQ_LIMIT_REACHED:
			pvrdma_srq_event(dev, eqe->info, eqe->type);
			break;

		case PVRDMA_EVENT_PORT_ACTIVE:
		case PVRDMA_EVENT_PORT_ERR:
		case PVRDMA_EVENT_LID_CHANGE:
		case PVRDMA_EVENT_PKEY_CHANGE:
		case PVRDMA_EVENT_SM_CHANGE:
		case PVRDMA_EVENT_CLIENT_REREGISTER:
		case PVRDMA_EVENT_GID_CHANGE:
			pvrdma_dev_event(dev, eqe->info, eqe->type);
			break;

		case PVRDMA_EVENT_DEVICE_FATAL:
			pvrdma_dev_event(dev, 1, eqe->type);
			break;

		default:
			break;
		}

		pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
	}

	return IRQ_HANDLED;
}

static inline struct pvrdma_cqne *get_cqne(struct pvrdma_dev *dev,
					   unsigned int i)
{
	return (struct pvrdma_cqne *)pvrdma_page_dir_get_ptr(
					&dev->cq_pdir,
					PAGE_SIZE +
					sizeof(struct pvrdma_cqne) * i);
}

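/*
 * Interrupt vectors 2 and up all drain the same CQ notification ring;
 * each entry carries the target CQ number in cqne->info.
 */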
static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id)
{
	struct pvrdma_dev *dev = dev_id;
	struct pvrdma_ring *ring = &dev->cq_ring_state->rx;
	int ring_slots = (dev->dsr->cq_ring_pages.num_pages - 1) * PAGE_SIZE /
			 sizeof(struct pvrdma_cqne);
	unsigned int head;
	unsigned long flags;

	dev_dbg(&dev->pdev->dev, "interrupt x (completion) handler\n");

	while (pvrdma_idx_ring_has_data(ring, ring_slots, &head) > 0) {
		struct pvrdma_cqne *cqne;
		struct pvrdma_cq *cq;

		cqne = get_cqne(dev, head);
		spin_lock_irqsave(&dev->cq_tbl_lock, flags);
		cq = dev->cq_tbl[cqne->info % dev->dsr->caps.max_cq];
		if (cq)
			atomic_inc(&cq->refcnt);
		spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);

		if (cq && cq->ibcq.comp_handler)
			cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
		if (cq) {
			atomic_dec(&cq->refcnt);
			if (atomic_read(&cq->refcnt) == 0)
				wake_up(&cq->wait);
		}
		pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
	}

	return IRQ_HANDLED;
}

static void pvrdma_free_irq(struct pvrdma_dev *dev)
{
	int i;

	dev_dbg(&dev->pdev->dev, "freeing interrupts\n");
	for (i = 0; i < dev->nr_vectors; i++)
		free_irq(pci_irq_vector(dev->pdev, i), dev);
}

static void pvrdma_enable_intrs(struct pvrdma_dev *dev)
{
	dev_dbg(&dev->pdev->dev, "enable interrupts\n");
	pvrdma_write_reg(dev, PVRDMA_REG_IMR, 0);
}

static void pvrdma_disable_intrs(struct pvrdma_dev *dev)
{
	dev_dbg(&dev->pdev->dev, "disable interrupts\n");
	pvrdma_write_reg(dev, PVRDMA_REG_IMR, ~0);
}

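/*
 * Allocate interrupt vectors: prefer MSI-X (up to PVRDMA_MAX_INTERRUPTS),
 * falling back to a single MSI or legacy INTx vector. Vector 0 handles
 * command responses, vector 1 async events, and any remaining vectors
 * CQ completion notifications.
 */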
static int pvrdma_alloc_intrs(struct pvrdma_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;
	int ret = 0, i;

	ret = pci_alloc_irq_vectors(pdev, 1, PVRDMA_MAX_INTERRUPTS,
			PCI_IRQ_MSIX);
	if (ret < 0) {
		ret = pci_alloc_irq_vectors(pdev, 1, 1,
				PCI_IRQ_MSI | PCI_IRQ_LEGACY);
		if (ret < 0)
			return ret;
	}
	dev->nr_vectors = ret;

	ret = request_irq(pci_irq_vector(dev->pdev, 0), pvrdma_intr0_handler,
			pdev->msix_enabled ? 0 : IRQF_SHARED, DRV_NAME, dev);
	if (ret) {
		dev_err(&dev->pdev->dev,
			"failed to request interrupt 0\n");
		goto out_free_vectors;
	}

	for (i = 1; i < dev->nr_vectors; i++) {
		ret = request_irq(pci_irq_vector(dev->pdev, i),
				i == 1 ? pvrdma_intr1_handler :
					 pvrdma_intrx_handler,
				0, DRV_NAME, dev);
		if (ret) {
			dev_err(&dev->pdev->dev,
				"failed to request interrupt %d\n", i);
			goto free_irqs;
		}
	}

	return 0;

free_irqs:
	while (--i >= 0)
		free_irq(pci_irq_vector(dev->pdev, i), dev);
out_free_vectors:
	pci_free_irq_vectors(pdev);
	return ret;
}

static void pvrdma_free_slots(struct pvrdma_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;

	if (dev->resp_slot)
		dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->resp_slot,
				  dev->dsr->resp_slot_dma);
	if (dev->cmd_slot)
		dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->cmd_slot,
				  dev->dsr->cmd_slot_dma);
}

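/*
 * Program a GID table entry into the device with a CREATE_BIND command
 * and mirror it in the driver's sgid_tbl so it can be destroyed later.
 */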
static int pvrdma_add_gid_at_index(struct pvrdma_dev *dev,
				   const union ib_gid *gid,
				   u8 gid_type,
				   int index)
{
	int ret;
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_create_bind *cmd_bind = &req.create_bind;

	if (!dev->sgid_tbl) {
		dev_warn(&dev->pdev->dev, "sgid table not initialized\n");
		return -EINVAL;
	}

	memset(cmd_bind, 0, sizeof(*cmd_bind));
	cmd_bind->hdr.cmd = PVRDMA_CMD_CREATE_BIND;
	memcpy(cmd_bind->new_gid, gid->raw, 16);
	cmd_bind->mtu = ib_mtu_enum_to_int(IB_MTU_1024);
	cmd_bind->vlan = 0xfff;
	cmd_bind->index = index;
	cmd_bind->gid_type = gid_type;

	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not create binding, error: %d\n", ret);
		return -EFAULT;
	}
	memcpy(&dev->sgid_tbl[index], gid, sizeof(*gid));
	return 0;
}

static int pvrdma_add_gid(struct ib_device *ibdev,
			  u8 port_num,
			  unsigned int index,
			  const union ib_gid *gid,
			  const struct ib_gid_attr *attr,
			  void **context)
{
	struct pvrdma_dev *dev = to_vdev(ibdev);

	return pvrdma_add_gid_at_index(dev, gid,
				       ib_gid_type_to_pvrdma(attr->gid_type),
				       index);
}

static int pvrdma_del_gid_at_index(struct pvrdma_dev *dev, int index)
{
	int ret;
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_destroy_bind *cmd_dest = &req.destroy_bind;

	/* Update sgid table. */
	if (!dev->sgid_tbl) {
		dev_warn(&dev->pdev->dev, "sgid table not initialized\n");
		return -EINVAL;
	}

	memset(cmd_dest, 0, sizeof(*cmd_dest));
	cmd_dest->hdr.cmd = PVRDMA_CMD_DESTROY_BIND;
	memcpy(cmd_dest->dest_gid, &dev->sgid_tbl[index], 16);
	cmd_dest->index = index;

	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not destroy binding, error: %d\n", ret);
		return ret;
	}
	memset(&dev->sgid_tbl[index], 0, 16);
	return 0;
}

static int pvrdma_del_gid(struct ib_device *ibdev,
			  u8 port_num,
			  unsigned int index,
			  void **context)
{
	struct pvrdma_dev *dev = to_vdev(ibdev);

	dev_dbg(&dev->pdev->dev, "removing gid at index %u from %s",
		index, dev->netdev->name);

	return pvrdma_del_gid_at_index(dev, index);
}

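/*
 * Translate netdevice events on the paired vmxnet3 device into IB port
 * events. On link up the device is unquiesced first, and the error
 * register is checked to confirm that it actually resumed.
 */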
static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev,
					  unsigned long event)
{
	switch (event) {
	case NETDEV_REBOOT:
	case NETDEV_DOWN:
		pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
		break;
	case NETDEV_UP:
		pvrdma_write_reg(dev, PVRDMA_REG_CTL,
				 PVRDMA_DEVICE_CTL_UNQUIESCE);

		mb();

		if (pvrdma_read_reg(dev, PVRDMA_REG_ERR))
			dev_err(&dev->pdev->dev,
				"failed to activate device during link up\n");
		else
			pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
		break;
	default:
		dev_dbg(&dev->pdev->dev, "ignore netdevice event %ld on %s\n",
			event, dev->ib_dev.name);
		break;
	}
}

static void pvrdma_netdevice_event_work(struct work_struct *work)
{
	struct pvrdma_netdevice_work *netdev_work;
	struct pvrdma_dev *dev;

	netdev_work = container_of(work, struct pvrdma_netdevice_work, work);

	mutex_lock(&pvrdma_device_list_lock);
	list_for_each_entry(dev, &pvrdma_device_list, device_link) {
		if (dev->netdev == netdev_work->event_netdev) {
			pvrdma_netdevice_event_handle(dev, netdev_work->event);
			break;
		}
	}
	mutex_unlock(&pvrdma_device_list_lock);

	kfree(netdev_work);
}

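/*
 * Netdevice notifier callback. The real work is deferred to event_wq so
 * that device-list locking and register access happen outside the
 * notifier call chain.
 */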
static int pvrdma_netdevice_event(struct notifier_block *this,
				  unsigned long event, void *ptr)
{
	struct net_device *event_netdev = netdev_notifier_info_to_dev(ptr);
	struct pvrdma_netdevice_work *netdev_work;

	netdev_work = kmalloc(sizeof(*netdev_work), GFP_ATOMIC);
	if (!netdev_work)
		return NOTIFY_BAD;

	INIT_WORK(&netdev_work->work, pvrdma_netdevice_event_work);
	netdev_work->event_netdev = event_netdev;
	netdev_work->event = event;
	queue_work(event_wq, &netdev_work->work);

	return NOTIFY_DONE;
}

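/*
 * Probe sequence: map the register and UAR BARs, allocate the device
 * shared region (DSR) along with the command/response slots and event
 * rings, hand the DSR to the device, pair up with the companion vmxnet3
 * netdev, set up interrupts, then activate the device and register it
 * with the IB core.
 */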
static int pvrdma_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *id)
{
	struct pci_dev *pdev_net;
	struct pvrdma_dev *dev;
	int ret;
	unsigned long start;
	unsigned long len;
	dma_addr_t slot_dma = 0;

	dev_dbg(&pdev->dev, "initializing driver %s\n", pci_name(pdev));

	/* Allocate a zeroed-out device structure */
	dev = (struct pvrdma_dev *)ib_alloc_device(sizeof(*dev));
	if (!dev) {
		dev_err(&pdev->dev, "failed to allocate IB device\n");
		return -ENOMEM;
	}

	mutex_lock(&pvrdma_device_list_lock);
	list_add(&dev->device_link, &pvrdma_device_list);
	mutex_unlock(&pvrdma_device_list_lock);

	ret = pvrdma_init_device(dev);
	if (ret)
		goto err_free_device;

	dev->pdev = pdev;
	pci_set_drvdata(pdev, dev);

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto err_free_device;
	}

	dev_dbg(&pdev->dev, "PCI resource flags BAR0 %#lx\n",
		pci_resource_flags(pdev, 0));
	dev_dbg(&pdev->dev, "PCI resource len %#llx\n",
		(unsigned long long)pci_resource_len(pdev, 0));
	dev_dbg(&pdev->dev, "PCI resource start %#llx\n",
		(unsigned long long)pci_resource_start(pdev, 0));
	dev_dbg(&pdev->dev, "PCI resource flags BAR1 %#lx\n",
		pci_resource_flags(pdev, 1));
	dev_dbg(&pdev->dev, "PCI resource len %#llx\n",
		(unsigned long long)pci_resource_len(pdev, 1));
	dev_dbg(&pdev->dev, "PCI resource start %#llx\n",
		(unsigned long long)pci_resource_start(pdev, 1));

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
	    !(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "PCI BAR region not MMIO\n");
		ret = -ENOMEM;
		goto err_disable_pdev;
	}

	ret = pci_request_regions(pdev, DRV_NAME);
	if (ret) {
		dev_err(&pdev->dev, "cannot request PCI resources\n");
		goto err_disable_pdev;
	}

	/* Enable 64-Bit DMA */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (ret != 0) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed\n");
			goto err_free_resource;
		}
	} else {
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (ret != 0) {
			dev_err(&pdev->dev,
				"pci_set_dma_mask failed\n");
			goto err_free_resource;
		}
	}

	pci_set_master(pdev);

	/* Map register space */
	start = pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_REG);
	len = pci_resource_len(dev->pdev, PVRDMA_PCI_RESOURCE_REG);
	dev->regs = ioremap(start, len);
	if (!dev->regs) {
		dev_err(&pdev->dev, "register mapping failed\n");
		ret = -ENOMEM;
		goto err_free_resource;
	}

	/* Setup per-device UAR. */
	dev->driver_uar.index = 0;
	dev->driver_uar.pfn =
		pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_UAR) >>
		PAGE_SHIFT;
	dev->driver_uar.map =
		ioremap(dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!dev->driver_uar.map) {
		dev_err(&pdev->dev, "failed to remap UAR pages\n");
		ret = -ENOMEM;
		goto err_unmap_regs;
	}

	dev->dsr_version = pvrdma_read_reg(dev, PVRDMA_REG_VERSION);
	dev_info(&pdev->dev, "device version %d, driver version %d\n",
		 dev->dsr_version, PVRDMA_VERSION);

	dev->dsr = dma_alloc_coherent(&pdev->dev, sizeof(*dev->dsr),
				      &dev->dsrbase, GFP_KERNEL);
	if (!dev->dsr) {
		dev_err(&pdev->dev, "failed to allocate shared region\n");
		ret = -ENOMEM;
		goto err_uar_unmap;
	}

	/* Setup the shared region */
	memset(dev->dsr, 0, sizeof(*dev->dsr));
	dev->dsr->driver_version = PVRDMA_VERSION;
	dev->dsr->gos_info.gos_bits = sizeof(void *) == 4 ?
		PVRDMA_GOS_BITS_32 :
		PVRDMA_GOS_BITS_64;
	dev->dsr->gos_info.gos_type = PVRDMA_GOS_TYPE_LINUX;
	dev->dsr->gos_info.gos_ver = 1;
	dev->dsr->uar_pfn = dev->driver_uar.pfn;

	/* Command slot. */
	dev->cmd_slot = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
					   &slot_dma, GFP_KERNEL);
	if (!dev->cmd_slot) {
		ret = -ENOMEM;
		goto err_free_dsr;
	}

	dev->dsr->cmd_slot_dma = (u64)slot_dma;

	/* Response slot. */
	dev->resp_slot = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
					    &slot_dma, GFP_KERNEL);
	if (!dev->resp_slot) {
		ret = -ENOMEM;
		goto err_free_slots;
	}

	dev->dsr->resp_slot_dma = (u64)slot_dma;

	/* Async event ring */
	dev->dsr->async_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
	ret = pvrdma_page_dir_init(dev, &dev->async_pdir,
				   dev->dsr->async_ring_pages.num_pages, true);
	if (ret)
		goto err_free_slots;
	dev->async_ring_state = dev->async_pdir.pages[0];
	dev->dsr->async_ring_pages.pdir_dma = dev->async_pdir.dir_dma;

	/* CQ notification ring */
	dev->dsr->cq_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
	ret = pvrdma_page_dir_init(dev, &dev->cq_pdir,
				   dev->dsr->cq_ring_pages.num_pages, true);
	if (ret)
		goto err_free_async_ring;
	dev->cq_ring_state = dev->cq_pdir.pages[0];
	dev->dsr->cq_ring_pages.pdir_dma = dev->cq_pdir.dir_dma;

	/*
	 * Write the PA of the shared region to the device. The writes must be
	 * ordered such that the high bits are written last. When the writes
	 * complete, the device will have filled out the capabilities.
	 */

	pvrdma_write_reg(dev, PVRDMA_REG_DSRLOW, (u32)dev->dsrbase);
	pvrdma_write_reg(dev, PVRDMA_REG_DSRHIGH,
			 (u32)((u64)(dev->dsrbase) >> 32));

	/* Make sure the write is complete before reading status. */
	mb();

	/* The driver supports RoCE V1 and V2. */
	if (!PVRDMA_SUPPORTED(dev)) {
		dev_err(&pdev->dev, "driver needs RoCE v1 or v2 support\n");
		ret = -EFAULT;
		goto err_free_cq_ring;
	}

	/* The paired vmxnet3 device is at the same bus and slot, function 0. */
	pdev_net = pci_get_slot(pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
	if (!pdev_net) {
		dev_err(&pdev->dev, "failed to find paired net device\n");
		ret = -ENODEV;
		goto err_free_cq_ring;
	}

	if (pdev_net->vendor != PCI_VENDOR_ID_VMWARE ||
	    pdev_net->device != PCI_DEVICE_ID_VMWARE_VMXNET3) {
		dev_err(&pdev->dev, "failed to find paired vmxnet3 device\n");
		pci_dev_put(pdev_net);
		ret = -ENODEV;
		goto err_free_cq_ring;
	}

	dev->netdev = pci_get_drvdata(pdev_net);
	pci_dev_put(pdev_net);
	if (!dev->netdev) {
		dev_err(&pdev->dev, "failed to get vmxnet3 device\n");
		ret = -ENODEV;
		goto err_free_cq_ring;
	}

	dev_info(&pdev->dev, "paired device to %s\n", dev->netdev->name);

	/* Interrupt setup */
	ret = pvrdma_alloc_intrs(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to allocate interrupts\n");
		ret = -ENOMEM;
		goto err_free_cq_ring;
	}

	/* Allocate UAR table. */
	ret = pvrdma_uar_table_init(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to allocate UAR table\n");
		ret = -ENOMEM;
		goto err_free_intrs;
	}

	/* Allocate GID table */
	dev->sgid_tbl = kcalloc(dev->dsr->caps.gid_tbl_len,
				sizeof(union ib_gid), GFP_KERNEL);
	if (!dev->sgid_tbl) {
		ret = -ENOMEM;
		goto err_free_uar_table;
	}
	dev_dbg(&pdev->dev, "gid table len %d\n", dev->dsr->caps.gid_tbl_len);

	pvrdma_enable_intrs(dev);

	/* Activate pvrdma device */
	pvrdma_write_reg(dev, PVRDMA_REG_CTL, PVRDMA_DEVICE_CTL_ACTIVATE);

	/* Make sure the write is complete before reading status. */
	mb();

	/* Check if device was successfully activated */
	ret = pvrdma_read_reg(dev, PVRDMA_REG_ERR);
	if (ret != 0) {
		dev_err(&pdev->dev, "failed to activate device\n");
		ret = -EFAULT;
		goto err_disable_intr;
	}

	/* Register IB device */
	ret = pvrdma_register_device(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register IB device\n");
		goto err_disable_intr;
	}

	dev->nb_netdev.notifier_call = pvrdma_netdevice_event;
	ret = register_netdevice_notifier(&dev->nb_netdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register netdevice events\n");
		goto err_unreg_ibdev;
	}

	dev_info(&pdev->dev, "attached to device\n");
	return 0;

err_unreg_ibdev:
	ib_unregister_device(&dev->ib_dev);
err_disable_intr:
	pvrdma_disable_intrs(dev);
	kfree(dev->sgid_tbl);
err_free_uar_table:
	pvrdma_uar_table_cleanup(dev);
err_free_intrs:
	pvrdma_free_irq(dev);
	pci_free_irq_vectors(pdev);
err_free_cq_ring:
	pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
err_free_async_ring:
	pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
err_free_slots:
	pvrdma_free_slots(dev);
err_free_dsr:
	dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr,
			  dev->dsrbase);
err_uar_unmap:
	iounmap(dev->driver_uar.map);
err_unmap_regs:
	iounmap(dev->regs);
err_free_resource:
	pci_release_regions(pdev);
err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
err_free_device:
	mutex_lock(&pvrdma_device_list_lock);
	list_del(&dev->device_link);
	mutex_unlock(&pvrdma_device_list_lock);
	ib_dealloc_device(&dev->ib_dev);
	return ret;
}

static void pvrdma_pci_remove(struct pci_dev *pdev)
{
	struct pvrdma_dev *dev = pci_get_drvdata(pdev);

	if (!dev)
		return;

	dev_info(&pdev->dev, "detaching from device\n");

	unregister_netdevice_notifier(&dev->nb_netdev);
	dev->nb_netdev.notifier_call = NULL;

	flush_workqueue(event_wq);

	/* Unregister ib device */
	ib_unregister_device(&dev->ib_dev);

	mutex_lock(&pvrdma_device_list_lock);
	list_del(&dev->device_link);
	mutex_unlock(&pvrdma_device_list_lock);

	pvrdma_disable_intrs(dev);
	pvrdma_free_irq(dev);
	pci_free_irq_vectors(pdev);

	/* Deactivate pvrdma device */
	pvrdma_write_reg(dev, PVRDMA_REG_CTL, PVRDMA_DEVICE_CTL_RESET);
	pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
	pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
	pvrdma_free_slots(dev);

	iounmap(dev->regs);
	kfree(dev->sgid_tbl);
	kfree(dev->cq_tbl);
	kfree(dev->srq_tbl);
	kfree(dev->qp_tbl);
	pvrdma_uar_table_cleanup(dev);
	iounmap(dev->driver_uar.map);

	ib_dealloc_device(&dev->ib_dev);

	/* Free pci resources */
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static const struct pci_device_id pvrdma_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_PVRDMA), },
	{ 0 },
};

MODULE_DEVICE_TABLE(pci, pvrdma_pci_table);

static struct pci_driver pvrdma_driver = {
	.name		= DRV_NAME,
	.id_table	= pvrdma_pci_table,
	.probe		= pvrdma_pci_probe,
	.remove		= pvrdma_pci_remove,
};

static int __init pvrdma_init(void)
{
	int err;

	event_wq = alloc_ordered_workqueue("pvrdma_event_wq", WQ_MEM_RECLAIM);
	if (!event_wq)
		return -ENOMEM;

	err = pci_register_driver(&pvrdma_driver);
	if (err)
		destroy_workqueue(event_wq);

	return err;
}

static void __exit pvrdma_cleanup(void)
{
	pci_unregister_driver(&pvrdma_driver);

	destroy_workqueue(event_wq);
}

module_init(pvrdma_init);
module_exit(pvrdma_cleanup);

MODULE_AUTHOR("VMware, Inc");
MODULE_DESCRIPTION("VMware Paravirtual RDMA driver");
MODULE_LICENSE("Dual BSD/GPL");