/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Main component of the bnxt_re driver
 */

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <net/dcbnl.h>
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <linux/if_ether.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>

#include "bnxt_ulp.h"
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include "qplib_rcfw.h"
#include "bnxt_re.h"
#include "ib_verbs.h"
#include <rdma/bnxt_re-abi.h>
#include "bnxt.h"
#include "hw_counters.h"

static char version[] =
		BNXT_RE_DESC "\n";

MODULE_AUTHOR("Eddie Wai <eddie.wai@broadcom.com>");
MODULE_DESCRIPTION(BNXT_RE_DESC " Driver");
MODULE_LICENSE("Dual BSD/GPL");

/* globals */
static struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list);
/* Mutex to protect the list of bnxt_re devices added */
static DEFINE_MUTEX(bnxt_re_dev_lock);
static struct workqueue_struct *bnxt_re_wq;
static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev);

/* SR-IOV helper functions */

static void bnxt_re_get_sriov_func_type(struct bnxt_re_dev *rdev)
{
	struct bnxt *bp;

	bp = netdev_priv(rdev->en_dev->net);
	if (BNXT_VF(bp))
		rdev->is_virtfn = 1;
}

/* Set the maximum number of each resource that the driver actually wants
 * to allocate. This may be up to the maximum number the firmware has
 * reserved for the function. The driver may choose to allocate fewer
 * resources than the firmware maximum.
 */
static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
{
	u32 vf_qps = 0, vf_srqs = 0, vf_cqs = 0, vf_mrws = 0, vf_gids = 0;
	u32 i;
	u32 vf_pct;
	u32 num_vfs;
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;

	rdev->qplib_ctx.qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT,
					  dev_attr->max_qp);

	rdev->qplib_ctx.mrw_count = BNXT_RE_MAX_MRW_COUNT_256K;
	/* Use max_mr from fw since max_mrw does not get set */
	rdev->qplib_ctx.mrw_count = min_t(u32, rdev->qplib_ctx.mrw_count,
					  dev_attr->max_mr);
	rdev->qplib_ctx.srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT,
					   dev_attr->max_srq);
	rdev->qplib_ctx.cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT,
					 dev_attr->max_cq);

	for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
		rdev->qplib_ctx.tqm_count[i] =
			rdev->dev_attr.tqm_alloc_reqs[i];

	if (rdev->num_vfs) {
		/*
		 * Reserve a set of resources for the PF. Divide the remaining
		 * resources among the VFs
		 */
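		/*
		 * Illustrative arithmetic (assuming BNXT_RE_PCT_RSVD_FOR_PF
		 * is 10): with 4 VFs, vf_qps = (qpc_count * 90) / (100 * 4),
		 * i.e. each VF gets 22.5% of the QP contexts while the PF
		 * keeps the reserved share plus any rounding remainder.
		 */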
		vf_pct = 100 - BNXT_RE_PCT_RSVD_FOR_PF;
		num_vfs = 100 * rdev->num_vfs;
		vf_qps = (rdev->qplib_ctx.qpc_count * vf_pct) / num_vfs;
		vf_srqs = (rdev->qplib_ctx.srqc_count * vf_pct) / num_vfs;
		vf_cqs = (rdev->qplib_ctx.cq_count * vf_pct) / num_vfs;
		/*
		 * The driver allows many more MRs than other resources. If the
		 * firmware does also, then reserve a fixed amount for the PF
		 * and divide the rest among VFs. VFs may use many MRs for NFS
		 * mounts, ISER, NVME applications, etc. If the firmware
		 * severely restricts the number of MRs, then let PF have
		 * half and divide the rest among VFs, as for the other
		 * resource types.
		 */
		if (rdev->qplib_ctx.mrw_count < BNXT_RE_MAX_MRW_COUNT_64K)
			vf_mrws = rdev->qplib_ctx.mrw_count * vf_pct / num_vfs;
		else
			vf_mrws = (rdev->qplib_ctx.mrw_count -
				   BNXT_RE_RESVD_MR_FOR_PF) / rdev->num_vfs;
		vf_gids = BNXT_RE_MAX_GID_PER_VF;
	}
	rdev->qplib_ctx.vf_res.max_mrw_per_vf = vf_mrws;
	rdev->qplib_ctx.vf_res.max_gid_per_vf = vf_gids;
	rdev->qplib_ctx.vf_res.max_qp_per_vf = vf_qps;
	rdev->qplib_ctx.vf_res.max_srq_per_vf = vf_srqs;
	rdev->qplib_ctx.vf_res.max_cq_per_vf = vf_cqs;
}

/* Placeholder handlers for bnxt_en callbacks; intentionally empty for now */
static void bnxt_re_stop(void *p)
{
}

static void bnxt_re_start(void *p)
{
}

static void bnxt_re_sriov_config(void *p, int num_vfs)
{
	struct bnxt_re_dev *rdev = p;

	if (!rdev)
		return;

	rdev->num_vfs = num_vfs;
	bnxt_re_set_resource_limits(rdev);
	bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
				      &rdev->qplib_ctx);
}

static void bnxt_re_shutdown(void *p)
{
	struct bnxt_re_dev *rdev = p;

	if (!rdev)
		return;

	bnxt_re_ib_unreg(rdev);
}

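/* Quiesce RoCE interrupt delivery without releasing the vectors: stop the
 * per-completion-vector NQs first, then the rcfw (CREQ) channel, so that
 * bnxt_re_start_irq() can re-arm everything after an MSI-X reconfiguration.
 */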
static void bnxt_re_stop_irq(void *handle)
{
	struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
	struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
	struct bnxt_qplib_nq *nq;
	int indx;

	for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
		nq = &rdev->nq[indx - 1];
		bnxt_qplib_nq_stop_irq(nq, false);
	}

	bnxt_qplib_rcfw_stop_irq(rcfw, false);
}

static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
{
	struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
	struct bnxt_msix_entry *msix_ent = rdev->msix_entries;
	struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
	struct bnxt_qplib_nq *nq;
	int indx, rc;

	if (!ent) {
		/* Not setting the f/w timeout bit in rcfw here. During
		 * driver unload the first command to f/w will time out
		 * and set the timeout bit for us.
		 */
		dev_err(rdev_to_dev(rdev), "Failed to re-start IRQs\n");
		return;
	}

	/* Vectors may change after restart, so update with the new
	 * vectors in the device structure.
	 */
	for (indx = 0; indx < rdev->num_msix; indx++)
		rdev->msix_entries[indx].vector = ent[indx].vector;

	bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
				  false);
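	/* Vector BNXT_RE_AEQ_IDX serves the rcfw channel; the remaining
	 * vectors map onto the NQs, hence nq[indx - 1] below.
	 */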
	for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
		nq = &rdev->nq[indx - 1];
		rc = bnxt_qplib_nq_start_irq(nq, indx - 1,
					     msix_ent[indx].vector, false);
		if (rc)
			dev_warn(rdev_to_dev(rdev),
				 "Failed to reinit NQ index %d\n", indx - 1);
	}
}

static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
	.ulp_async_notifier = NULL,
	.ulp_stop = bnxt_re_stop,
	.ulp_start = bnxt_re_start,
	.ulp_sriov_config = bnxt_re_sriov_config,
	.ulp_shutdown = bnxt_re_shutdown,
	.ulp_irq_stop = bnxt_re_stop_irq,
	.ulp_irq_restart = bnxt_re_start_irq
};

/* RoCE -> Net driver */

/* Driver registration routines used to let the networking driver (bnxt_en)
 * know that the RoCE driver is now installed
 */
static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev;
	int rc;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	rc = en_dev->en_ops->bnxt_unregister_device(rdev->en_dev,
						    BNXT_ROCE_ULP);
	return rc;
}

static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev;
	int rc = 0;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	rc = en_dev->en_ops->bnxt_register_device(en_dev, BNXT_ROCE_ULP,
						  &bnxt_re_ulp_ops, rdev);
	return rc;
}

static int bnxt_re_free_msix(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev;
	int rc;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	rc = en_dev->en_ops->bnxt_free_msix(rdev->en_dev, BNXT_ROCE_ULP);

	return rc;
}

static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
{
	int rc = 0, num_msix_want = BNXT_RE_MAX_MSIX, num_msix_got;
	struct bnxt_en_dev *en_dev;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	num_msix_want = min_t(u32, BNXT_RE_MAX_MSIX, num_online_cpus());

	num_msix_got = en_dev->en_ops->bnxt_request_msix(en_dev, BNXT_ROCE_ULP,
							 rdev->msix_entries,
							 num_msix_want);
	if (num_msix_got < BNXT_RE_MIN_MSIX) {
		rc = -EINVAL;
		goto done;
	}
	if (num_msix_got != num_msix_want) {
		dev_warn(rdev_to_dev(rdev),
			 "Requested %d MSI-X vectors, got %d\n",
			 num_msix_want, num_msix_got);
	}
	rdev->num_msix = num_msix_got;
done:
	return rc;
}

static void bnxt_re_init_hwrm_hdr(struct bnxt_re_dev *rdev, struct input *hdr,
				  u16 opcd, u16 crid, u16 trid)
{
	hdr->req_type = cpu_to_le16(opcd);
	hdr->cmpl_ring = cpu_to_le16(crid);
	hdr->target_id = cpu_to_le16(trid);
}

static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg,
				int msg_len, void *resp, int resp_max_len,
				int timeout)
{
	fw_msg->msg = msg;
	fw_msg->msg_len = msg_len;
	fw_msg->resp = resp;
	fw_msg->resp_max_len = resp_max_len;
	fw_msg->timeout = timeout;
}

static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_ring_free_input req = {0};
	struct hwrm_ring_free_output resp;
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));

	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1);
	req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
	req.ring_id = cpu_to_le16(fw_ring_id);
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc)
		dev_err(rdev_to_dev(rdev),
			"Failed to free HW ring:%d :%#x", req.ring_id, rc);
	return rc;
}

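/* Ask firmware for a completion-type ring to back an NQ or CREQ; ring_mask
 * is the ring size minus one, so req.length below carries the entry count.
 */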
static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr,
				  int pages, int type, u32 ring_mask,
				  u32 map_index, u16 *fw_ring_id)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_ring_alloc_input req = {0};
	struct hwrm_ring_alloc_output resp;
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));
	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1);
	req.enables = 0;
	req.page_tbl_addr = cpu_to_le64(dma_arr[0]);
	if (pages > 1) {
		/* Page size is in log2 units */
		req.page_size = BNXT_PAGE_SHIFT;
		req.page_tbl_depth = 1;
	}
	req.fbo = 0;
	/* Association of ring index with doorbell index and MSIX number */
	req.logical_id = cpu_to_le16(map_index);
	req.length = cpu_to_le32(ring_mask + 1);
	req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
	req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (!rc)
		*fw_ring_id = le16_to_cpu(resp.ring_id);

	return rc;
}

static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
				      u32 fw_stats_ctx_id)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_stat_ctx_free_input req = {0};
	struct hwrm_stat_ctx_free_output resp;
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));

	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1);
	req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id);
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc)
		dev_err(rdev_to_dev(rdev),
			"Failed to free HW stats context %#x", rc);

	return rc;
}

static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
				       dma_addr_t dma_map,
				       u32 *fw_stats_ctx_id)
{
	struct hwrm_stat_ctx_alloc_output resp = {0};
	struct hwrm_stat_ctx_alloc_input req = {0};
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	*fw_stats_ctx_id = INVALID_STATS_CTX_ID;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));

	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
	req.update_period_ms = cpu_to_le32(1000);
	req.stats_dma_addr = cpu_to_le64(dma_map);
	req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (!rc)
		*fw_stats_ctx_id = le32_to_cpu(resp.stat_ctx_id);

	return rc;
}

/* Device */

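/* A netdev qualifies only if it is driven by bnxt_en; match on the
 * ethtool driver name since there is no direct type marker to test.
 */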
static bool is_bnxt_re_dev(struct net_device *netdev)
{
	struct ethtool_drvinfo drvinfo;

	if (netdev->ethtool_ops && netdev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		netdev->ethtool_ops->get_drvinfo(netdev, &drvinfo);

		if (strcmp(drvinfo.driver, "bnxt_en"))
			return false;
		return true;
	}
	return false;
}

static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev)
{
	struct bnxt_re_dev *rdev;

	rcu_read_lock();
	list_for_each_entry_rcu(rdev, &bnxt_re_dev_list, list) {
		if (rdev->netdev == netdev) {
			rcu_read_unlock();
			return rdev;
		}
	}
	rcu_read_unlock();
	return NULL;
}

static void bnxt_re_dev_unprobe(struct net_device *netdev,
				struct bnxt_en_dev *en_dev)
{
	dev_put(netdev);
	module_put(en_dev->pdev->driver->driver.owner);
}

static struct bnxt_en_dev *bnxt_re_dev_probe(struct net_device *netdev)
{
	struct bnxt *bp = netdev_priv(netdev);
	struct bnxt_en_dev *en_dev;
	struct pci_dev *pdev;

	/* Call bnxt_en's RoCE probe via indirect API */
	if (!bp->ulp_probe)
		return ERR_PTR(-EINVAL);

	en_dev = bp->ulp_probe(netdev);
	if (IS_ERR(en_dev))
		return en_dev;

	pdev = en_dev->pdev;
	if (!pdev)
		return ERR_PTR(-EINVAL);

	if (!(en_dev->flags & BNXT_EN_FLAG_ROCE_CAP)) {
		dev_info(&pdev->dev,
			"%s: probe error: RoCE is not supported on this device",
			ROCE_DRV_MODULE_NAME);
		return ERR_PTR(-ENODEV);
	}

	/* Pin the bnxt_en module and the net device while we use them */
	if (!try_module_get(pdev->driver->driver.owner))
		return ERR_PTR(-ENODEV);

	dev_hold(netdev);

	return en_dev;
}

static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(device, ibdev.dev);

	return scnprintf(buf, PAGE_SIZE, "0x%x\n", rdev->en_dev->pdev->vendor);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(device, ibdev.dev);

	return scnprintf(buf, PAGE_SIZE, "%s\n", rdev->ibdev.node_desc);
}
static DEVICE_ATTR_RO(hca_type);

static struct attribute *bnxt_re_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	NULL
};

static const struct attribute_group bnxt_re_dev_attr_group = {
	.attrs = bnxt_re_attributes,
};

static void bnxt_re_unregister_ib(struct bnxt_re_dev *rdev)
{
	ib_unregister_device(&rdev->ibdev);
}

static const struct ib_device_ops bnxt_re_dev_ops = {
	.add_gid = bnxt_re_add_gid,
	.alloc_hw_stats = bnxt_re_ib_alloc_hw_stats,
	.alloc_mr = bnxt_re_alloc_mr,
	.alloc_pd = bnxt_re_alloc_pd,
	.alloc_ucontext = bnxt_re_alloc_ucontext,
	.create_ah = bnxt_re_create_ah,
	.create_cq = bnxt_re_create_cq,
	.create_qp = bnxt_re_create_qp,
	.create_srq = bnxt_re_create_srq,
	.dealloc_pd = bnxt_re_dealloc_pd,
	.dealloc_ucontext = bnxt_re_dealloc_ucontext,
	.del_gid = bnxt_re_del_gid,
	.dereg_mr = bnxt_re_dereg_mr,
	.destroy_ah = bnxt_re_destroy_ah,
	.destroy_cq = bnxt_re_destroy_cq,
	.destroy_qp = bnxt_re_destroy_qp,
	.destroy_srq = bnxt_re_destroy_srq,
	.get_dev_fw_str = bnxt_re_query_fw_str,
	.get_dma_mr = bnxt_re_get_dma_mr,
	.get_hw_stats = bnxt_re_ib_get_hw_stats,
	.get_link_layer = bnxt_re_get_link_layer,
	.get_netdev = bnxt_re_get_netdev,
	.get_port_immutable = bnxt_re_get_port_immutable,
	.map_mr_sg = bnxt_re_map_mr_sg,
	.mmap = bnxt_re_mmap,
	.modify_ah = bnxt_re_modify_ah,
	.modify_device = bnxt_re_modify_device,
	.modify_qp = bnxt_re_modify_qp,
	.modify_srq = bnxt_re_modify_srq,
	.poll_cq = bnxt_re_poll_cq,
	.post_recv = bnxt_re_post_recv,
	.post_send = bnxt_re_post_send,
	.post_srq_recv = bnxt_re_post_srq_recv,
	.query_ah = bnxt_re_query_ah,
	.query_device = bnxt_re_query_device,
	.query_pkey = bnxt_re_query_pkey,
	.query_port = bnxt_re_query_port,
	.query_qp = bnxt_re_query_qp,
	.query_srq = bnxt_re_query_srq,
	.reg_user_mr = bnxt_re_reg_user_mr,
	.req_notify_cq = bnxt_re_req_notify_cq,
};

static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
{
	struct ib_device *ibdev = &rdev->ibdev;

	/* ib device init */
	ibdev->owner = THIS_MODULE;
	ibdev->node_type = RDMA_NODE_IB_CA;
	strlcpy(ibdev->node_desc, BNXT_RE_DESC " HCA",
		strlen(BNXT_RE_DESC) + 5);
	ibdev->phys_port_cnt = 1;

	bnxt_qplib_get_guid(rdev->netdev->dev_addr, (u8 *)&ibdev->node_guid);

	ibdev->num_comp_vectors = 1;
	ibdev->dev.parent = &rdev->en_dev->pdev->dev;
	ibdev->local_dma_lkey = BNXT_QPLIB_RSVD_LKEY;

	/* User space */
	ibdev->uverbs_abi_ver = BNXT_RE_ABI_VERSION;
	ibdev->uverbs_cmd_mask =
			(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
			(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
			(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
			(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
			(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
			(1ull << IB_USER_VERBS_CMD_REG_MR)		|
			(1ull << IB_USER_VERBS_CMD_REREG_MR)		|
			(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
			(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
			(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
			(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
			(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
			(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
			(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
			(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
			(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
			(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
			(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
			(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
			(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)		|
			(1ull << IB_USER_VERBS_CMD_CREATE_AH)		|
			(1ull << IB_USER_VERBS_CMD_MODIFY_AH)		|
			(1ull << IB_USER_VERBS_CMD_QUERY_AH)		|
			(1ull << IB_USER_VERBS_CMD_DESTROY_AH);
	/* POLL_CQ and REQ_NOTIFY_CQ are handled directly in libbnxt_re */

	rdma_set_device_sysfs_group(ibdev, &bnxt_re_dev_attr_group);
	ibdev->driver_id = RDMA_DRIVER_BNXT_RE;
	ib_set_device_ops(ibdev, &bnxt_re_dev_ops);
	return ib_register_device(ibdev, "bnxt_re%d", NULL);
}

static void bnxt_re_dev_remove(struct bnxt_re_dev *rdev)
{
	dev_put(rdev->netdev);
	rdev->netdev = NULL;

	mutex_lock(&bnxt_re_dev_lock);
	list_del_rcu(&rdev->list);
	mutex_unlock(&bnxt_re_dev_lock);

	synchronize_rcu();

	ib_dealloc_device(&rdev->ibdev);
	/* rdev is gone */
}

static struct bnxt_re_dev *bnxt_re_dev_add(struct net_device *netdev,
					   struct bnxt_en_dev *en_dev)
{
	struct bnxt_re_dev *rdev;

	/* Allocate bnxt_re_dev instance here */
	rdev = (struct bnxt_re_dev *)ib_alloc_device(sizeof(*rdev));
	if (!rdev) {
		dev_err(NULL, "%s: bnxt_re_dev allocation failure!",
			ROCE_DRV_MODULE_NAME);
		return NULL;
	}
	/* Default values */
	rdev->netdev = netdev;
	dev_hold(rdev->netdev);
	rdev->en_dev = en_dev;
	rdev->id = rdev->en_dev->pdev->devfn;
	INIT_LIST_HEAD(&rdev->qp_list);
	mutex_init(&rdev->qp_lock);
	atomic_set(&rdev->qp_count, 0);
	atomic_set(&rdev->cq_count, 0);
	atomic_set(&rdev->srq_count, 0);
	atomic_set(&rdev->mr_count, 0);
	atomic_set(&rdev->mw_count, 0);
	rdev->cosq[0] = 0xFFFF;
	rdev->cosq[1] = 0xFFFF;

	mutex_lock(&bnxt_re_dev_lock);
	list_add_tail_rcu(&rdev->list, &bnxt_re_dev_list);
	mutex_unlock(&bnxt_re_dev_lock);
	return rdev;
}

static int bnxt_re_handle_unaffi_async_event(struct creq_func_event
					     *unaffi_async)
{
	switch (unaffi_async->event) {
	case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

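/* On a QP error notification, move the QP onto the flush list and, for
 * SRQ-attached QPs, relay IB_EVENT_QP_LAST_WQE_REACHED so the consumer
 * can reclaim outstanding SRQ WQEs.
 */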
static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
					 struct bnxt_re_qp *qp)
{
	struct ib_event event;
	unsigned int flags;

	if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		flags = bnxt_re_lock_cqs(qp);
		bnxt_qplib_add_flush_qp(&qp->qplib_qp);
		bnxt_re_unlock_cqs(qp, flags);
	}

	memset(&event, 0, sizeof(event));
	if (qp->qplib_qp.srq) {
		event.device = &qp->rdev->ibdev;
		event.element.qp = &qp->ib_qp;
		event.event = IB_EVENT_QP_LAST_WQE_REACHED;
	}

	if (event.device && qp->ib_qp.event_handler)
		qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);

	return 0;
}

static int bnxt_re_handle_affi_async_event(struct creq_qp_event *affi_async,
					   void *obj)
{
	int rc = 0;
	u8 event;

	if (!obj)
		return rc; /* QP was already dead, still return success */

	event = affi_async->event;
	if (event == CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION) {
		struct bnxt_qplib_qp *lib_qp = obj;
		struct bnxt_re_qp *qp = container_of(lib_qp, struct bnxt_re_qp,
						     qplib_qp);

		rc = bnxt_re_handle_qp_async_event(affi_async, qp);
	}
	return rc;
}

static int bnxt_re_aeq_handler(struct bnxt_qplib_rcfw *rcfw,
			       void *aeqe, void *obj)
{
	struct creq_qp_event *affi_async;
	struct creq_func_event *unaffi_async;
	u8 type;
	int rc;

	type = ((struct creq_base *)aeqe)->type;
	if (type == CREQ_BASE_TYPE_FUNC_EVENT) {
		unaffi_async = aeqe;
		rc = bnxt_re_handle_unaffi_async_event(unaffi_async);
	} else {
		affi_async = aeqe;
		rc = bnxt_re_handle_affi_async_event(affi_async, obj);
	}

	return rc;
}

static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq,
				struct bnxt_qplib_srq *handle, u8 event)
{
	struct bnxt_re_srq *srq = container_of(handle, struct bnxt_re_srq,
					       qplib_srq);
	struct ib_event ib_event;
	int rc = 0;

	if (!srq) {
		dev_err(NULL, "%s: SRQ is NULL, SRQN not handled",
			ROCE_DRV_MODULE_NAME);
		rc = -EINVAL;
		goto done;
	}
	ib_event.device = &srq->rdev->ibdev;
	ib_event.element.srq = &srq->ib_srq;
	if (event == NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT)
		ib_event.event = IB_EVENT_SRQ_LIMIT_REACHED;
	else
		ib_event.event = IB_EVENT_SRQ_ERR;

	if (srq->ib_srq.event_handler) {
		/* Lock event_handler? */
		(*srq->ib_srq.event_handler)(&ib_event,
					     srq->ib_srq.srq_context);
	}
done:
	return rc;
}

static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
			       struct bnxt_qplib_cq *handle)
{
	struct bnxt_re_cq *cq = container_of(handle, struct bnxt_re_cq,
					     qplib_cq);

	if (!cq) {
		dev_err(NULL, "%s: CQ is NULL, CQN not handled",
			ROCE_DRV_MODULE_NAME);
		return -EINVAL;
	}
	if (cq->ib_cq.comp_handler) {
		/* Lock comp_handler? */
		(*cq->ib_cq.comp_handler)(&cq->ib_cq, cq->ib_cq.cq_context);
	}

	return 0;
}

static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
{
	int i;

	for (i = 1; i < rdev->num_msix; i++)
		bnxt_qplib_disable_nq(&rdev->nq[i - 1]);

	if (rdev->qplib_res.rcfw)
		bnxt_qplib_cleanup_res(&rdev->qplib_res);
}

static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
{
	int rc = 0, i;
	int num_vec_enabled = 0;

	bnxt_qplib_init_res(&rdev->qplib_res);

	for (i = 1; i < rdev->num_msix; i++) {
		rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1],
					  i - 1, rdev->msix_entries[i].vector,
					  rdev->msix_entries[i].db_offset,
					  &bnxt_re_cqn_handler,
					  &bnxt_re_srqn_handler);
		if (rc) {
			dev_err(rdev_to_dev(rdev),
				"Failed to enable NQ with rc = 0x%x", rc);
			goto fail;
		}
		num_vec_enabled++;
	}
	return 0;
fail:
	for (i = num_vec_enabled - 1; i >= 0; i--)
		bnxt_qplib_disable_nq(&rdev->nq[i]);

	return rc;
}

static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev)
{
	int i;

	for (i = 0; i < rdev->num_msix - 1; i++) {
		bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id);
		bnxt_qplib_free_nq(&rdev->nq[i]);
	}
}

static void bnxt_re_free_res(struct bnxt_re_dev *rdev)
{
	bnxt_re_free_nq_res(rdev);

	if (rdev->qplib_res.dpi_tbl.max) {
		bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
				       &rdev->qplib_res.dpi_tbl,
				       &rdev->dpi_privileged);
	}
	if (rdev->qplib_res.rcfw) {
		bnxt_qplib_free_res(&rdev->qplib_res);
		rdev->qplib_res.rcfw = NULL;
	}
}

static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
{
	int rc = 0, i;
	int num_vec_created = 0;

	/* Configure and allocate resources for qplib */
	rdev->qplib_res.rcfw = &rdev->rcfw;
	rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr,
				     rdev->is_virtfn);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_res(&rdev->qplib_res, rdev->en_dev->pdev,
				  rdev->netdev, &rdev->dev_attr);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
				  &rdev->dpi_privileged,
				  rdev);
	if (rc)
		goto dealloc_res;

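	/* One NQ per completion vector; each is sized to absorb a
	 * notification from every possible CQ and SRQ, plus slack.
	 */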
	for (i = 0; i < rdev->num_msix - 1; i++) {
		rdev->nq[i].hwq.max_elements = BNXT_RE_MAX_CQ_COUNT +
			BNXT_RE_MAX_SRQC_COUNT + 2;
		rc = bnxt_qplib_alloc_nq(rdev->en_dev->pdev, &rdev->nq[i]);
		if (rc) {
			dev_err(rdev_to_dev(rdev), "Alloc Failed NQ%d rc:%#x",
				i, rc);
			goto free_nq;
		}
		rc = bnxt_re_net_ring_alloc
			(rdev, rdev->nq[i].hwq.pbl[PBL_LVL_0].pg_map_arr,
			 rdev->nq[i].hwq.pbl[rdev->nq[i].hwq.level].pg_count,
			 HWRM_RING_ALLOC_CMPL,
			 BNXT_QPLIB_NQE_MAX_CNT - 1,
			 rdev->msix_entries[i + 1].ring_idx,
			 &rdev->nq[i].ring_id);
		if (rc) {
			dev_err(rdev_to_dev(rdev),
				"Failed to allocate NQ fw id with rc = 0x%x",
				rc);
			bnxt_qplib_free_nq(&rdev->nq[i]);
			goto free_nq;
		}
		num_vec_created++;
	}
	return 0;
free_nq:
	for (i = num_vec_created - 1; i >= 0; i--) {
		bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id);
		bnxt_qplib_free_nq(&rdev->nq[i]);
	}
	bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
			       &rdev->qplib_res.dpi_tbl,
			       &rdev->dpi_privileged);
dealloc_res:
	bnxt_qplib_free_res(&rdev->qplib_res);

fail:
	rdev->qplib_res.rcfw = NULL;
	return rc;
}

static void bnxt_re_dispatch_event(struct ib_device *ibdev, struct ib_qp *qp,
				   u8 port_num, enum ib_event_type event)
{
	struct ib_event ib_event;

	ib_event.device = ibdev;
	if (qp) {
		ib_event.element.qp = qp;
		ib_event.event = event;
		if (qp->event_handler)
			qp->event_handler(&ib_event, qp->qp_context);

	} else {
		ib_event.element.port_num = port_num;
		ib_event.event = event;
		ib_dispatch_event(&ib_event);
	}
}

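/* Fetch the firmware's priority-to-CoS-queue mapping; the eight
 * per-priority queue ids come back packed into *cid_map, one byte each.
 */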
#define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN      0x02
static int bnxt_re_query_hwrm_pri2cos(struct bnxt_re_dev *rdev, u8 dir,
				      u64 *cid_map)
{
	struct hwrm_queue_pri2cos_qcfg_input req = {0};
	struct bnxt *bp = netdev_priv(rdev->netdev);
	struct hwrm_queue_pri2cos_qcfg_output resp;
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct bnxt_fw_msg fw_msg;
	u32 flags = 0;
	u8 *qcfgmap, *tmp_map;
	int rc = 0, i;

	if (!cid_map)
		return -EINVAL;

	memset(&fw_msg, 0, sizeof(fw_msg));
	bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
			      HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
	flags |= (dir & 0x01);
	flags |= HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN;
	req.flags = cpu_to_le32(flags);
	req.port_id = bp->pf.port_id;

	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc)
		return rc;

	if (resp.queue_cfg_info)
		dev_warn(rdev_to_dev(rdev),
			 "Asymmetric cos queue configuration detected on device, QoS may not be fully functional\n");
	qcfgmap = &resp.pri0_cos_queue_id;
	tmp_map = (u8 *)cid_map;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		tmp_map[i] = qcfgmap[i];

	return rc;
}

static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev,
					struct bnxt_re_qp *qp)
{
	return (qp->ib_qp.qp_type == IB_QPT_GSI) || (qp == rdev->qp1_sqp);
}

static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
{
	int mask = IB_QP_STATE;
	struct ib_qp_attr qp_attr;
	struct bnxt_re_qp *qp;

	qp_attr.qp_state = IB_QPS_ERR;
	mutex_lock(&rdev->qp_lock);
	list_for_each_entry(qp, &rdev->qp_list, list) {
		/* Modify the state of all QPs except QP1/Shadow QP */
		if (!bnxt_re_is_qp1_or_shadow_qp(rdev, qp)) {
			if (qp->qplib_qp.state !=
			    CMDQ_MODIFY_QP_NEW_STATE_RESET &&
			    qp->qplib_qp.state !=
			    CMDQ_MODIFY_QP_NEW_STATE_ERR) {
				bnxt_re_dispatch_event(&rdev->ibdev, &qp->ib_qp,
						       1, IB_EVENT_QP_FATAL);
				bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, mask,
						  NULL);
			}
		}
	}
	mutex_unlock(&rdev->qp_lock);
}

static int bnxt_re_update_gid(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
	struct bnxt_qplib_gid gid;
	u16 gid_idx, index;
	int rc = 0;

	if (!test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
		return 0;

	if (!sgid_tbl) {
		dev_err(rdev_to_dev(rdev), "QPLIB: SGID table not allocated");
		return -EINVAL;
	}

	for (index = 0; index < sgid_tbl->active; index++) {
		gid_idx = sgid_tbl->hw_id[index];

		if (!memcmp(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
			    sizeof(bnxt_qplib_gid_zero)))
			continue;
		/* Only the VLAN-enable setting of non-VLAN GIDs needs to be
		 * updated here; for VLAN GIDs it is set when the GID is added.
		 */
		if (sgid_tbl->vlan[index])
			continue;

		memcpy(&gid, &sgid_tbl->tbl[index], sizeof(gid));

		rc = bnxt_qplib_update_sgid(sgid_tbl, &gid, gid_idx,
					    rdev->qplib_res.netdev->dev_addr);
	}

	return rc;
}

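/* Union of the DCB application-table priorities registered for RoCE v1
 * (ethertype ETH_P_IBOE) and RoCE v2 (UDP port ROCE_V2_UDP_DPORT).
 */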
static u32 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev)
{
	u32 prio_map = 0, tmp_map = 0;
	struct net_device *netdev;
	struct dcb_app app;

	netdev = rdev->netdev;

	memset(&app, 0, sizeof(app));
	app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
	app.protocol = ETH_P_IBOE;
	tmp_map = dcb_ieee_getapp_mask(netdev, &app);
	prio_map = tmp_map;

	app.selector = IEEE_8021QAZ_APP_SEL_DGRAM;
	app.protocol = ROCE_V2_UDP_DPORT;
	tmp_map = dcb_ieee_getapp_mask(netdev, &app);
	prio_map |= tmp_map;

	return prio_map;
}

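/* Pick CoS queue ids for the priorities set in prio_map, at most two.
 * For example, prio_map == 0x28 (priorities 3 and 5) selects cid_map[3]
 * and cid_map[5].
 */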
static void bnxt_re_parse_cid_map(u8 prio_map, u8 *cid_map, u16 *cosq)
{
	u16 prio;
	u8 id;

	for (prio = 0, id = 0; prio < 8; prio++) {
		if (prio_map & (1 << prio)) {
			cosq[id] = cid_map[prio];
			id++;
			if (id == 2) /* Max 2 TCs supported */
				break;
		}
	}
}

static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
{
	u8 prio_map = 0;
	u64 cid_map;
	int rc;

	/* Get the priority mask configured for RoCE */
	prio_map = bnxt_re_get_priority_mask(rdev);

	if (prio_map == rdev->cur_prio_map)
		return 0;
	rdev->cur_prio_map = prio_map;
	/* Get cosq id for this priority */
	rc = bnxt_re_query_hwrm_pri2cos(rdev, 0, &cid_map);
	if (rc) {
		dev_warn(rdev_to_dev(rdev), "no cos for p_mask %x\n", prio_map);
		return rc;
	}
	/* Parse CoS IDs for app priority */
	bnxt_re_parse_cid_map(prio_map, (u8 *)&cid_map, rdev->cosq);

	/* Config BONO. */
	rc = bnxt_qplib_map_tc2cos(&rdev->qplib_res, rdev->cosq);
	if (rc) {
		dev_warn(rdev_to_dev(rdev), "no tc for cos{%x, %x}\n",
			 rdev->cosq[0], rdev->cosq[1]);
		return rc;
	}

	/* Actual priorities are not programmed as they are already
	 * done by L2 driver; just enable or disable priority vlan tagging
	 */
	if ((prio_map == 0 && rdev->qplib_res.prio) ||
	    (prio_map != 0 && !rdev->qplib_res.prio)) {
		rdev->qplib_res.prio = prio_map ? true : false;

		bnxt_re_update_gid(rdev);
	}

	return 0;
}

static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_ver_get_output resp = {0};
	struct hwrm_ver_get_input req = {0};
	struct bnxt_fw_msg fw_msg;
	int rc = 0;

	memset(&fw_msg, 0, sizeof(fw_msg));
	bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
			      HWRM_VER_GET, -1, -1);
	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc) {
		dev_err(rdev_to_dev(rdev),
			"Failed to query HW version, rc = 0x%x", rc);
		return;
	}
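	/* Pack the four 16-bit interface version fields into one u64,
	 * major in the most significant word: major.minor.build.patch.
	 */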
	rdev->qplib_ctx.hwrm_intf_ver =
		(u64)resp.hwrm_intf_major << 48 |
		(u64)resp.hwrm_intf_minor << 32 |
		(u64)resp.hwrm_intf_build << 16 |
		resp.hwrm_intf_patch;
}

static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev)
{
	int rc;

	if (test_and_clear_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) {
		/* Cleanup ib dev */
		bnxt_re_unregister_ib(rdev);
	}
	if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
		cancel_delayed_work_sync(&rdev->worker);

	if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED,
			       &rdev->flags))
		bnxt_re_cleanup_res(rdev);
	if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags))
		bnxt_re_free_res(rdev);

	if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) {
		rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw);
		if (rc)
			dev_warn(rdev_to_dev(rdev),
				 "Failed to deinitialize RCFW: %#x", rc);
		bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
		bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx);
		bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
		bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id);
		bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
	}
	if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags)) {
		rc = bnxt_re_free_msix(rdev);
		if (rc)
			dev_warn(rdev_to_dev(rdev),
				 "Failed to free MSI-X vectors: %#x", rc);
	}
	if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) {
		rc = bnxt_re_unregister_netdev(rdev);
		if (rc)
			dev_warn(rdev_to_dev(rdev),
				 "Failed to unregister with netdev: %#x", rc);
	}
}

/* Worker for polling periodic events; currently used for QoS programming */
static void bnxt_re_worker(struct work_struct *work)
{
	struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
						worker.work);

	bnxt_re_setup_qos(rdev);
	schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
}

static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
{
	int rc;
	bool locked;

	/* Hold the rtnl lock throughout this function */
	rtnl_lock();
	locked = true;

	/* Register the RoCE device instance with the netdev (bnxt_en) */
	rc = bnxt_re_register_netdev(rdev);
	if (rc) {
		rtnl_unlock();
		pr_err("Failed to register with netdev: %#x\n", rc);
		return -EINVAL;
	}
	set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);

	/* Check whether VF or PF */
	bnxt_re_get_sriov_func_type(rdev);

	rc = bnxt_re_request_msix(rdev);
	if (rc) {
		pr_err("Failed to get MSI-X vectors: %#x\n", rc);
		rc = -EINVAL;
		goto fail;
	}
	set_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags);

	bnxt_re_query_hwrm_intf_version(rdev);

	/* Establish RCFW Communication Channel to initialize the context
	 * memory for the function and all child VFs
	 */
	rc = bnxt_qplib_alloc_rcfw_channel(rdev->en_dev->pdev, &rdev->rcfw,
					   &rdev->qplib_ctx,
					   BNXT_RE_MAX_QPC_COUNT);
	if (rc) {
		pr_err("Failed to allocate RCFW Channel: %#x\n", rc);
		goto fail;
	}
	rc = bnxt_re_net_ring_alloc
			(rdev, rdev->rcfw.creq.pbl[PBL_LVL_0].pg_map_arr,
			 rdev->rcfw.creq.pbl[rdev->rcfw.creq.level].pg_count,
			 HWRM_RING_ALLOC_CMPL, BNXT_QPLIB_CREQE_MAX_CNT - 1,
			 rdev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx,
			 &rdev->rcfw.creq_ring_id);
	if (rc) {
		pr_err("Failed to allocate CREQ: %#x\n", rc);
		goto free_rcfw;
	}
	rc = bnxt_qplib_enable_rcfw_channel
				(rdev->en_dev->pdev, &rdev->rcfw,
				 rdev->msix_entries[BNXT_RE_AEQ_IDX].vector,
				 rdev->msix_entries[BNXT_RE_AEQ_IDX].db_offset,
				 rdev->is_virtfn, &bnxt_re_aeq_handler);
	if (rc) {
		pr_err("Failed to enable RCFW channel: %#x\n", rc);
		goto free_ring;
	}

	rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr,
				     rdev->is_virtfn);
	if (rc)
		goto disable_rcfw;
	if (!rdev->is_virtfn)
		bnxt_re_set_resource_limits(rdev);

	rc = bnxt_qplib_alloc_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx, 0);
	if (rc) {
		pr_err("Failed to allocate QPLIB context: %#x\n", rc);
		goto disable_rcfw;
	}
	rc = bnxt_re_net_stats_ctx_alloc(rdev,
					 rdev->qplib_ctx.stats.dma_map,
					 &rdev->qplib_ctx.stats.fw_id);
	if (rc) {
		pr_err("Failed to allocate stats context: %#x\n", rc);
		goto free_ctx;
	}

	rc = bnxt_qplib_init_rcfw(&rdev->rcfw, &rdev->qplib_ctx,
				  rdev->is_virtfn);
	if (rc) {
		pr_err("Failed to initialize RCFW: %#x\n", rc);
		goto free_sctx;
	}
	set_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags);

	/* Resources based on the 'new' device caps */
	rc = bnxt_re_alloc_res(rdev);
	if (rc) {
		pr_err("Failed to allocate resources: %#x\n", rc);
		goto fail;
	}
	set_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags);
	rc = bnxt_re_init_res(rdev);
	if (rc) {
		pr_err("Failed to initialize resources: %#x\n", rc);
		goto fail;
	}

	set_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED, &rdev->flags);

	if (!rdev->is_virtfn) {
		rc = bnxt_re_setup_qos(rdev);
		if (rc)
			pr_info("RoCE priority not yet configured\n");

		INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker);
		set_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags);
		schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
	}

	rtnl_unlock();
	locked = false;

	/* Register ib dev */
	rc = bnxt_re_register_ib(rdev);
	if (rc) {
		pr_err("Failed to register with IB: %#x\n", rc);
		goto fail;
	}
	set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
	dev_info(rdev_to_dev(rdev), "Device registered successfully");
	ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
			 &rdev->active_width);
	set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);
	bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE);
	bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_GID_CHANGE);

	return 0;
free_sctx:
	bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
free_ctx:
	bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx);
disable_rcfw:
	bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
free_ring:
	bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id);
free_rcfw:
	bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
fail:
	if (!locked)
		rtnl_lock();
	bnxt_re_ib_unreg(rdev);
	rtnl_unlock();

	return rc;
}

static void bnxt_re_dev_unreg(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct net_device *netdev = rdev->netdev;

	bnxt_re_dev_remove(rdev);

	if (netdev)
		bnxt_re_dev_unprobe(netdev, en_dev);
}

static int bnxt_re_dev_reg(struct bnxt_re_dev **rdev, struct net_device *netdev)
{
	struct bnxt_en_dev *en_dev;
	int rc = 0;

	if (!is_bnxt_re_dev(netdev))
		return -ENODEV;

	en_dev = bnxt_re_dev_probe(netdev);
	if (IS_ERR(en_dev)) {
		if (en_dev != ERR_PTR(-ENODEV))
			pr_err("%s: Failed to probe\n", ROCE_DRV_MODULE_NAME);
		rc = PTR_ERR(en_dev);
		goto exit;
	}
	*rdev = bnxt_re_dev_add(netdev, en_dev);
	if (!*rdev) {
		rc = -ENOMEM;
		bnxt_re_dev_unprobe(netdev, en_dev);
		goto exit;
	}
exit:
	return rc;
}

static void bnxt_re_remove_one(struct bnxt_re_dev *rdev)
{
	pci_dev_put(rdev->en_dev->pdev);
}

/* Handle all deferred netevents tasks */
static void bnxt_re_task(struct work_struct *work)
{
	struct bnxt_re_work *re_work;
	struct bnxt_re_dev *rdev;
	int rc = 0;

	re_work = container_of(work, struct bnxt_re_work, work);
	rdev = re_work->rdev;

	if (re_work->event != NETDEV_REGISTER &&
	    !test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) {
		atomic_dec(&rdev->sched_count);
		goto exit;
	}

	switch (re_work->event) {
	case NETDEV_REGISTER:
		rc = bnxt_re_ib_reg(rdev);
		if (rc) {
			dev_err(rdev_to_dev(rdev),
				"Failed to register with IB: %#x", rc);
			bnxt_re_remove_one(rdev);
			bnxt_re_dev_unreg(rdev);
			goto exit;
		}
		break;
	case NETDEV_UP:
		bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
				       IB_EVENT_PORT_ACTIVE);
		break;
	case NETDEV_DOWN:
		bnxt_re_dev_stop(rdev);
		break;
	case NETDEV_CHANGE:
		if (!netif_carrier_ok(rdev->netdev))
			bnxt_re_dev_stop(rdev);
		else if (netif_carrier_ok(rdev->netdev))
			bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
					       IB_EVENT_PORT_ACTIVE);
		ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
				 &rdev->active_width);
		break;
	default:
		break;
	}
	smp_mb__before_atomic();
	atomic_dec(&rdev->sched_count);
exit:
	kfree(re_work);
}

static void bnxt_re_init_one(struct bnxt_re_dev *rdev)
{
	pci_dev_get(rdev->en_dev->pdev);
}

/*
 * "Notifier chain callback can be invoked for the same chain from
 * different CPUs at the same time".
 *
 * For cases when the netdev is already present, our call to the
 * register_netdevice_notifier() will actually get the rtnl_lock()
 * before sending NETDEV_REGISTER and (if up) NETDEV_UP
 * events.
 *
 * But for cases when the netdev is not already present, the notifier
 * chain may be invoked from different CPUs simultaneously.
 *
 * This is protected by the netdev_mutex.
 */
static int bnxt_re_netdev_event(struct notifier_block *notifier,
				unsigned long event, void *ptr)
{
	struct net_device *real_dev, *netdev = netdev_notifier_info_to_dev(ptr);
	struct bnxt_re_work *re_work;
	struct bnxt_re_dev *rdev;
	int rc = 0;
	bool sch_work = false;

	real_dev = rdma_vlan_dev_real_dev(netdev);
	if (!real_dev)
		real_dev = netdev;

	rdev = bnxt_re_from_netdev(real_dev);
	if (!rdev && event != NETDEV_REGISTER)
		goto exit;
	if (real_dev != netdev)
		goto exit;

	switch (event) {
	case NETDEV_REGISTER:
		if (rdev)
			break;
		rc = bnxt_re_dev_reg(&rdev, real_dev);
		if (rc == -ENODEV)
			break;
		if (rc) {
			pr_err("Failed to register with the device %s: %#x\n",
			       real_dev->name, rc);
			break;
		}
		bnxt_re_init_one(rdev);
		sch_work = true;
		break;

	case NETDEV_UNREGISTER:
		/* netdev notifier will call NETDEV_UNREGISTER again later since
		 * we are still holding the reference to the netdev
		 */
		if (atomic_read(&rdev->sched_count) > 0)
			goto exit;
		bnxt_re_ib_unreg(rdev);
		bnxt_re_remove_one(rdev);
		bnxt_re_dev_unreg(rdev);
		break;

	default:
		sch_work = true;
		break;
	}
	if (sch_work) {
		/* Allocate for the deferred task */
		re_work = kzalloc(sizeof(*re_work), GFP_ATOMIC);
		if (re_work) {
			re_work->rdev = rdev;
			re_work->event = event;
			re_work->vlan_dev = (real_dev == netdev ?
					     NULL : netdev);
			INIT_WORK(&re_work->work, bnxt_re_task);
			atomic_inc(&rdev->sched_count);
			queue_work(bnxt_re_wq, &re_work->work);
		}
	}

exit:
	return NOTIFY_DONE;
}

static struct notifier_block bnxt_re_netdev_notifier = {
	.notifier_call = bnxt_re_netdev_event
};

static int __init bnxt_re_mod_init(void)
{
	int rc = 0;

	pr_info("%s: %s", ROCE_DRV_MODULE_NAME, version);

	bnxt_re_wq = create_singlethread_workqueue("bnxt_re");
	if (!bnxt_re_wq)
		return -ENOMEM;

	INIT_LIST_HEAD(&bnxt_re_dev_list);

	rc = register_netdevice_notifier(&bnxt_re_netdev_notifier);
	if (rc) {
		pr_err("%s: Cannot register the netdevice notifier",
		       ROCE_DRV_MODULE_NAME);
		goto err_netdev;
	}
	return 0;

err_netdev:
	destroy_workqueue(bnxt_re_wq);

	return rc;
}

static void __exit bnxt_re_mod_exit(void)
{
	struct bnxt_re_dev *rdev, *next;
	LIST_HEAD(to_be_deleted);

	mutex_lock(&bnxt_re_dev_lock);
	/* Free all adapter allocated resources */
	if (!list_empty(&bnxt_re_dev_list))
		list_splice_init(&bnxt_re_dev_list, &to_be_deleted);
	mutex_unlock(&bnxt_re_dev_lock);
	/*
	 * Clean up the devices in reverse order so that the VF device
	 * cleanup is done before PF cleanup
	 */
	list_for_each_entry_safe_reverse(rdev, next, &to_be_deleted, list) {
		dev_info(rdev_to_dev(rdev), "Unregistering Device");
		/*
		 * Flush out any scheduled tasks before destroying the
		 * resources
		 */
		flush_workqueue(bnxt_re_wq);
		bnxt_re_dev_stop(rdev);
		/* Acquire the rtnl_lock as the L2 resources are freed here */
		rtnl_lock();
		bnxt_re_ib_unreg(rdev);
		rtnl_unlock();
		bnxt_re_remove_one(rdev);
		bnxt_re_dev_unreg(rdev);
	}
	unregister_netdevice_notifier(&bnxt_re_netdev_notifier);
	if (bnxt_re_wq)
		destroy_workqueue(bnxt_re_wq);
}

module_init(bnxt_re_mod_init);
module_exit(bnxt_re_mod_exit);