xref: /openbmc/linux/drivers/infiniband/hw/qedr/main.c (revision b8265621)
/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_mad.h>
#include <linux/netdevice.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <net/addrconf.h>

#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
#include "qedr_iw_cm.h"

MODULE_DESCRIPTION("QLogic 40G/100G RoCE Driver");
MODULE_AUTHOR("QLogic Corporation");
MODULE_LICENSE("Dual BSD/GPL");

#define QEDR_WQ_MULTIPLIER_DFT	(3)

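/* Build an ib_event for the given port and hand it to the IB core, which
 * dispatches it to every registered event handler. qedr uses this below for
 * link state (PORT_ACTIVE/PORT_ERR) and GID change notifications.
 */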
static void qedr_ib_dispatch_event(struct qedr_dev *dev, u8 port_num,
				   enum ib_event_type type)
{
	struct ib_event ibev;

	ibev.device = &dev->ibdev;
	ibev.element.port_num = port_num;
	ibev.event = type;

	ib_dispatch_event(&ibev);
}

static enum rdma_link_layer qedr_link_layer(struct ib_device *device,
					    u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

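/* The qed core reports the firmware version packed one byte per component
 * into a u32 (most significant byte first); unpack it into the canonical
 * dotted string reported through the IB core.
 */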
static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str)
{
	struct qedr_dev *qedr = get_qedr_dev(ibdev);
	u32 fw_ver = (u32)qedr->attr.fw_ver;

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
		 (fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF,
		 (fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
}

static int qedr_roce_port_immutable(struct ib_device *ibdev, u8 port_num,
				    struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = qedr_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
	    RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

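/* iWARP runs over TCP/IP rather than the InfiniBand transport: a single
 * P_Key and GID entry is exposed for the core's benefit, and there is no
 * MAD service, hence max_mad_size = 0.
 */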
static int qedr_iw_port_immutable(struct ib_device *ibdev, u8 port_num,
				  struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = qedr_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = 1;
	immutable->gid_tbl_len = 1;
	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
	immutable->max_mad_size = 0;

	return 0;
}

/* QEDR sysfs interface */
static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct qedr_dev *dev =
		rdma_device_to_drv_device(device, struct qedr_dev, ibdev);

	return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->attr.hw_ver);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct qedr_dev *dev =
		rdma_device_to_drv_device(device, struct qedr_dev, ibdev);

	return scnprintf(buf, PAGE_SIZE, "FastLinQ QL%x %s\n",
			 dev->pdev->device,
			 rdma_protocol_iwarp(&dev->ibdev, 1) ?
			 "iWARP" : "RoCE");
}
static DEVICE_ATTR_RO(hca_type);

static struct attribute *qedr_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	NULL
};

static const struct attribute_group qedr_attr_group = {
	.attrs = qedr_attributes,
};

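/* The ib_device_ops tables are layered: qedr_dev_ops further below holds the
 * verbs common to both transports, while these per-transport tables supply
 * only what differs (CM callbacks and GID handling for iWARP, immutable port
 * caps for RoCE). ib_set_device_ops() merges a table into the device without
 * overwriting ops that are already set.
 */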
static const struct ib_device_ops qedr_iw_dev_ops = {
	.get_port_immutable = qedr_iw_port_immutable,
	.iw_accept = qedr_iw_accept,
	.iw_add_ref = qedr_iw_qp_add_ref,
	.iw_connect = qedr_iw_connect,
	.iw_create_listen = qedr_iw_create_listen,
	.iw_destroy_listen = qedr_iw_destroy_listen,
	.iw_get_qp = qedr_iw_get_qp,
	.iw_reject = qedr_iw_reject,
	.iw_rem_ref = qedr_iw_qp_rem_ref,
	.query_gid = qedr_iw_query_gid,
};

static int qedr_iw_register_device(struct qedr_dev *dev)
{
	dev->ibdev.node_type = RDMA_NODE_RNIC;

	ib_set_device_ops(&dev->ibdev, &qedr_iw_dev_ops);

	memcpy(dev->ibdev.iw_ifname,
	       dev->ndev->name, sizeof(dev->ibdev.iw_ifname));

	return 0;
}

static const struct ib_device_ops qedr_roce_dev_ops = {
	.get_port_immutable = qedr_roce_port_immutable,
};

static void qedr_roce_register_device(struct qedr_dev *dev)
{
	dev->ibdev.node_type = RDMA_NODE_IB_CA;

	ib_set_device_ops(&dev->ibdev, &qedr_roce_dev_ops);
}

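/* The INIT_RDMA_OBJ_SIZE() entries at the end of this table tell the IB core
 * the size of each driver-private object and the offset of the embedded
 * ib_* structure within it, so the core can allocate AHs, CQs, PDs, SRQs and
 * ucontexts itself before invoking the corresponding create/alloc callback.
 */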
static const struct ib_device_ops qedr_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_QEDR,
	.uverbs_abi_ver = QEDR_ABI_VERSION,

	.alloc_mr = qedr_alloc_mr,
	.alloc_pd = qedr_alloc_pd,
	.alloc_ucontext = qedr_alloc_ucontext,
	.create_ah = qedr_create_ah,
	.create_cq = qedr_create_cq,
	.create_qp = qedr_create_qp,
	.create_srq = qedr_create_srq,
	.dealloc_pd = qedr_dealloc_pd,
	.dealloc_ucontext = qedr_dealloc_ucontext,
	.dereg_mr = qedr_dereg_mr,
	.destroy_ah = qedr_destroy_ah,
	.destroy_cq = qedr_destroy_cq,
	.destroy_qp = qedr_destroy_qp,
	.destroy_srq = qedr_destroy_srq,
	.get_dev_fw_str = qedr_get_dev_fw_str,
	.get_dma_mr = qedr_get_dma_mr,
	.get_link_layer = qedr_link_layer,
	.map_mr_sg = qedr_map_mr_sg,
	.mmap = qedr_mmap,
	.mmap_free = qedr_mmap_free,
	.modify_qp = qedr_modify_qp,
	.modify_srq = qedr_modify_srq,
	.poll_cq = qedr_poll_cq,
	.post_recv = qedr_post_recv,
	.post_send = qedr_post_send,
	.post_srq_recv = qedr_post_srq_recv,
	.process_mad = qedr_process_mad,
	.query_device = qedr_query_device,
	.query_pkey = qedr_query_pkey,
	.query_port = qedr_query_port,
	.query_qp = qedr_query_qp,
	.query_srq = qedr_query_srq,
	.reg_user_mr = qedr_reg_user_mr,
	.req_notify_cq = qedr_arm_cq,
	.resize_cq = qedr_resize_cq,

	INIT_RDMA_OBJ_SIZE(ib_ah, qedr_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, qedr_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_pd, qedr_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_srq, qedr_srq, ibsrq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, qedr_ucontext, ibucontext),
};

static int qedr_register_device(struct qedr_dev *dev)
{
	int rc;

	dev->ibdev.node_guid = dev->attr.node_guid;
	memcpy(dev->ibdev.node_desc, QEDR_NODE_DESC, sizeof(QEDR_NODE_DESC));

	dev->ibdev.uverbs_cmd_mask = QEDR_UVERBS(GET_CONTEXT) |
				     QEDR_UVERBS(QUERY_DEVICE) |
				     QEDR_UVERBS(QUERY_PORT) |
				     QEDR_UVERBS(ALLOC_PD) |
				     QEDR_UVERBS(DEALLOC_PD) |
				     QEDR_UVERBS(CREATE_COMP_CHANNEL) |
				     QEDR_UVERBS(CREATE_CQ) |
				     QEDR_UVERBS(RESIZE_CQ) |
				     QEDR_UVERBS(DESTROY_CQ) |
				     QEDR_UVERBS(REQ_NOTIFY_CQ) |
				     QEDR_UVERBS(CREATE_QP) |
				     QEDR_UVERBS(MODIFY_QP) |
				     QEDR_UVERBS(QUERY_QP) |
				     QEDR_UVERBS(DESTROY_QP) |
				     QEDR_UVERBS(CREATE_SRQ) |
				     QEDR_UVERBS(DESTROY_SRQ) |
				     QEDR_UVERBS(QUERY_SRQ) |
				     QEDR_UVERBS(MODIFY_SRQ) |
				     QEDR_UVERBS(POST_SRQ_RECV) |
				     QEDR_UVERBS(REG_MR) |
				     QEDR_UVERBS(DEREG_MR) |
				     QEDR_UVERBS(POLL_CQ) |
				     QEDR_UVERBS(POST_SEND) |
				     QEDR_UVERBS(POST_RECV);

	if (IS_IWARP(dev)) {
		rc = qedr_iw_register_device(dev);
		if (rc)
			return rc;
	} else {
		qedr_roce_register_device(dev);
	}

	dev->ibdev.phys_port_cnt = 1;
	dev->ibdev.num_comp_vectors = dev->num_cnq;
	dev->ibdev.dev.parent = &dev->pdev->dev;

	rdma_set_device_sysfs_group(&dev->ibdev, &qedr_attr_group);
	ib_set_device_ops(&dev->ibdev, &qedr_dev_ops);

	rc = ib_device_set_netdev(&dev->ibdev, dev->ndev, 1);
	if (rc)
		return rc;

	return ib_register_device(&dev->ibdev, "qedr%d");
}

/* This function allocates fast-path status block memory */
static int qedr_alloc_mem_sb(struct qedr_dev *dev,
			     struct qed_sb_info *sb_info, u16 sb_id)
{
	struct status_block_e4 *sb_virt;
	dma_addr_t sb_phys;
	int rc;

	sb_virt = dma_alloc_coherent(&dev->pdev->dev,
				     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
	if (!sb_virt)
		return -ENOMEM;

	rc = dev->ops->common->sb_init(dev->cdev, sb_info,
				       sb_virt, sb_phys, sb_id,
				       QED_SB_TYPE_CNQ);
	if (rc) {
		pr_err("Status block initialization failed\n");
		dma_free_coherent(&dev->pdev->dev, sizeof(*sb_virt),
				  sb_virt, sb_phys);
		return rc;
	}

	return 0;
}

static void qedr_free_mem_sb(struct qedr_dev *dev,
			     struct qed_sb_info *sb_info, int sb_id)
{
	if (sb_info->sb_virt) {
		dev->ops->common->sb_release(dev->cdev, sb_info, sb_id,
					     QED_SB_TYPE_CNQ);
		dma_free_coherent(&dev->pdev->dev, sizeof(*sb_info->sb_virt),
				  (void *)sb_info->sb_virt, sb_info->sb_phys);
	}
}

static void qedr_free_resources(struct qedr_dev *dev)
{
	int i;

	if (IS_IWARP(dev))
		destroy_workqueue(dev->iwarp_wq);

	for (i = 0; i < dev->num_cnq; i++) {
		qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
		dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
	}

	kfree(dev->cnq_array);
	kfree(dev->sb_array);
	kfree(dev->sgid_tbl);
}

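/* Allocate the per-device software state: the source GID table, the iWARP QP
 * xarray and workqueue, and one fast-path status block plus completion
 * notification queue (CNQ) PBL chain per vector. The err labels below unwind
 * whatever has been set up so far, in reverse order.
 */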
static int qedr_alloc_resources(struct qedr_dev *dev)
{
	struct qed_chain_init_params params = {
		.mode		= QED_CHAIN_MODE_PBL,
		.intended_use	= QED_CHAIN_USE_TO_CONSUME,
		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
		.elem_size	= sizeof(struct regpair *),
	};
	struct qedr_cnq *cnq;
	__le16 *cons_pi;
	int i, rc;

	dev->sgid_tbl = kcalloc(QEDR_MAX_SGID, sizeof(union ib_gid),
				GFP_KERNEL);
	if (!dev->sgid_tbl)
		return -ENOMEM;

	spin_lock_init(&dev->sgid_lock);
	xa_init_flags(&dev->srqs, XA_FLAGS_LOCK_IRQ);

	if (IS_IWARP(dev)) {
		xa_init(&dev->qps);
		dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq");
		if (!dev->iwarp_wq) {
			rc = -ENOMEM;
			goto err1;
		}
	}

	/* Allocate Status blocks for CNQ */
	dev->sb_array = kcalloc(dev->num_cnq, sizeof(*dev->sb_array),
				GFP_KERNEL);
	if (!dev->sb_array) {
		rc = -ENOMEM;
		goto err1;
	}

	dev->cnq_array = kcalloc(dev->num_cnq,
				 sizeof(*dev->cnq_array), GFP_KERNEL);
	if (!dev->cnq_array) {
		rc = -ENOMEM;
		goto err2;
	}

	dev->sb_start = dev->ops->rdma_get_start_sb(dev->cdev);

	/* Allocate CNQ PBLs */
	params.num_elems = min_t(u32, QED_RDMA_MAX_CNQ_SIZE,
				 QEDR_ROCE_MAX_CNQ_SIZE);

	for (i = 0; i < dev->num_cnq; i++) {
		cnq = &dev->cnq_array[i];

		rc = qedr_alloc_mem_sb(dev, &dev->sb_array[i],
				       dev->sb_start + i);
		if (rc)
			goto err3;

		rc = dev->ops->common->chain_alloc(dev->cdev, &cnq->pbl,
						   &params);
		if (rc)
			goto err4;

		cnq->dev = dev;
		cnq->sb = &dev->sb_array[i];
		cons_pi = dev->sb_array[i].sb_virt->pi_array;
		cnq->hw_cons_ptr = &cons_pi[QED_ROCE_PROTOCOL_INDEX];
		cnq->index = i;
		sprintf(cnq->name, "qedr%d@pci:%s", i, pci_name(dev->pdev));

		DP_DEBUG(dev, QEDR_MSG_INIT, "cnq[%d].cons=%d\n",
			 i, qed_chain_get_cons_idx(&cnq->pbl));
	}

	return 0;
err4:
	qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
err3:
	for (--i; i >= 0; i--) {
		dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
		qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
	}
	kfree(dev->cnq_array);
err2:
	kfree(dev->sb_array);
err1:
	if (IS_IWARP(dev) && dev->iwarp_wq)
		destroy_workqueue(dev->iwarp_wq);
	kfree(dev->sgid_tbl);
	return rc;
}

static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev)
{
	int rc = pci_enable_atomic_ops_to_root(pdev,
					       PCI_EXP_DEVCAP2_ATOMIC_COMP64);

	if (rc) {
		dev->atomic_cap = IB_ATOMIC_NONE;
		DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability disabled\n");
	} else {
		dev->atomic_cap = IB_ATOMIC_GLOB;
		DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability enabled\n");
	}
}

static const struct qed_rdma_ops *qed_ops;

#define HILO_U64(hi, lo)		((((u64)(hi)) << 32) + (lo))

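/* CNQ interrupt handler. Each element consumed from the CNQ chain is a
 * regpair carrying the two halves of the kernel pointer to the completed CQ
 * (registered with the firmware when the CQ was created); HILO_U64()
 * reassembles it. The chain is drained up to the hardware producer index,
 * each CQ's completion handler is invoked, and the new consumer index is
 * written back before the IGU line is re-enabled.
 */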
static irqreturn_t qedr_irq_handler(int irq, void *handle)
{
	u16 hw_comp_cons, sw_comp_cons;
	struct qedr_cnq *cnq = handle;
	struct regpair *cq_handle;
	struct qedr_cq *cq;

	qed_sb_ack(cnq->sb, IGU_INT_DISABLE, 0);

	qed_sb_update_sb_idx(cnq->sb);

	hw_comp_cons = le16_to_cpu(*cnq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);

	/* Align protocol-index and chain reads */
	rmb();

	while (sw_comp_cons != hw_comp_cons) {
		cq_handle = (struct regpair *)qed_chain_consume(&cnq->pbl);
		cq = (struct qedr_cq *)(uintptr_t)HILO_U64(cq_handle->hi,
				cq_handle->lo);

		if (!cq) {
			DP_ERR(cnq->dev,
			       "Received NULL CQ cq_handle->hi=%d cq_handle->lo=%d sw_comp_cons=%d hw_comp_cons=%d\n",
			       cq_handle->hi, cq_handle->lo, sw_comp_cons,
			       hw_comp_cons);

			break;
		}

		if (cq->sig != QEDR_CQ_MAGIC_NUMBER) {
			DP_ERR(cnq->dev,
			       "Problem with cq signature, cq_handle->hi=%d cq_handle->lo=%d cq=%p\n",
			       cq_handle->hi, cq_handle->lo, cq);
			break;
		}

		cq->arm_flags = 0;

		if (!cq->destroyed && cq->ibcq.comp_handler)
			(*cq->ibcq.comp_handler)
				(&cq->ibcq, cq->ibcq.cq_context);

		/* The CQ's CNQ notification counter is checked before
		 * destroying the CQ in a busy-wait loop that waits for all of
		 * the CQ's CNQ interrupts to be processed. It is increased
		 * here, only after the completion handler, to ensure that
		 * the handler is not running when the CQ is destroyed.
		 */
		cq->cnq_notif++;

		sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);

		cnq->n_comp++;
	}

	qed_ops->rdma_cnq_prod_update(cnq->dev->rdma_ctx, cnq->index,
				      sw_comp_cons);

	qed_sb_ack(cnq->sb, IGU_INT_ENABLE, 1);

	return IRQ_HANDLED;
}

static void qedr_sync_free_irqs(struct qedr_dev *dev)
{
	u32 vector;
	u16 idx;
	int i;

	for (i = 0; i < dev->int_info.used_cnt; i++) {
		if (dev->int_info.msix_cnt) {
			idx = i * dev->num_hwfns + dev->affin_hwfn_idx;
			vector = dev->int_info.msix[idx].vector;
			synchronize_irq(vector);
			free_irq(vector, &dev->cnq_array[i]);
		}
	}

	dev->int_info.used_cnt = 0;
}

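/* The MSI-X vector table is indexed per hardware function: the vector for
 * CNQ i of this device's affined hwfn sits at i * num_hwfns +
 * affin_hwfn_idx (relevant on CMT, i.e. two-engine, devices).
 * qedr_sync_free_irqs() above uses the same indexing when releasing the
 * vectors.
 */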
static int qedr_req_msix_irqs(struct qedr_dev *dev)
{
	int i, rc = 0;
	u16 idx;

	if (dev->num_cnq > dev->int_info.msix_cnt) {
		DP_ERR(dev,
		       "Interrupt mismatch: %d CNQ queues > %d MSI-X vectors\n",
		       dev->num_cnq, dev->int_info.msix_cnt);
		return -EINVAL;
	}

	for (i = 0; i < dev->num_cnq; i++) {
		idx = i * dev->num_hwfns + dev->affin_hwfn_idx;
		rc = request_irq(dev->int_info.msix[idx].vector,
				 qedr_irq_handler, 0, dev->cnq_array[i].name,
				 &dev->cnq_array[i]);
		if (rc) {
			DP_ERR(dev, "Request cnq %d irq failed\n", i);
			qedr_sync_free_irqs(dev);
		} else {
			DP_DEBUG(dev, QEDR_MSG_INIT,
				 "Requested cnq irq for %s [entry %d]. Cookie is at %p\n",
				 dev->cnq_array[i].name, i,
				 &dev->cnq_array[i]);
			dev->int_info.used_cnt++;
		}
	}

	return rc;
}

static int qedr_setup_irqs(struct qedr_dev *dev)
{
	int rc;

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs\n");

	/* Learn Interrupt configuration */
	rc = dev->ops->rdma_set_rdma_int(dev->cdev, dev->num_cnq);
	if (rc < 0)
		return rc;

	rc = dev->ops->rdma_get_rdma_int(dev->cdev, &dev->int_info);
	if (rc) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "get_rdma_int failed\n");
		return rc;
	}

	if (dev->int_info.msix_cnt) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "rdma msix_cnt = %d\n",
			 dev->int_info.msix_cnt);
		rc = qedr_req_msix_irqs(dev);
		if (rc)
			return rc;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs succeeded\n");

	return 0;
}

static int qedr_set_device_attr(struct qedr_dev *dev)
{
	struct qed_rdma_device *qed_attr;
	struct qedr_device_attr *attr;
	u32 page_size;

	/* Part 1 - query core capabilities */
	qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx);

	/* Part 2 - check capabilities */
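	/* page_size_caps is a bitmask of the page sizes the device supports.
	 * Assuming the supported sizes form a contiguous mask up from the
	 * minimum, ~caps + 1 (i.e. -caps in two's complement) isolates the
	 * lowest set bit and thus yields the smallest supported page size,
	 * which must not exceed the kernel's PAGE_SIZE.
	 */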
	page_size = ~qed_attr->page_size_caps + 1;
	if (page_size > PAGE_SIZE) {
		DP_ERR(dev,
		       "Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n",
		       PAGE_SIZE, page_size);
		return -ENODEV;
	}

	/* Part 3 - copy and update capabilities */
	attr = &dev->attr;
	attr->vendor_id = qed_attr->vendor_id;
	attr->vendor_part_id = qed_attr->vendor_part_id;
	attr->hw_ver = qed_attr->hw_ver;
	attr->fw_ver = qed_attr->fw_ver;
	attr->node_guid = qed_attr->node_guid;
	attr->sys_image_guid = qed_attr->sys_image_guid;
	attr->max_cnq = qed_attr->max_cnq;
	attr->max_sge = qed_attr->max_sge;
	attr->max_inline = qed_attr->max_inline;
	attr->max_sqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_SQE);
	attr->max_rqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_RQE);
	attr->max_qp_resp_rd_atomic_resc = qed_attr->max_qp_resp_rd_atomic_resc;
	attr->max_qp_req_rd_atomic_resc = qed_attr->max_qp_req_rd_atomic_resc;
	attr->max_dev_resp_rd_atomic_resc =
	    qed_attr->max_dev_resp_rd_atomic_resc;
	attr->max_cq = qed_attr->max_cq;
	attr->max_qp = qed_attr->max_qp;
	attr->max_mr = qed_attr->max_mr;
	attr->max_mr_size = qed_attr->max_mr_size;
	attr->max_cqe = min_t(u64, qed_attr->max_cqe, QEDR_MAX_CQES);
	attr->max_mw = qed_attr->max_mw;
	attr->max_mr_mw_fmr_pbl = qed_attr->max_mr_mw_fmr_pbl;
	attr->max_mr_mw_fmr_size = qed_attr->max_mr_mw_fmr_size;
	attr->max_pd = qed_attr->max_pd;
	attr->max_ah = qed_attr->max_ah;
	attr->max_pkey = qed_attr->max_pkey;
	attr->max_srq = qed_attr->max_srq;
	attr->max_srq_wr = qed_attr->max_srq_wr;
	attr->dev_caps = qed_attr->dev_caps;
	attr->page_size_caps = qed_attr->page_size_caps;
	attr->dev_ack_delay = qed_attr->dev_ack_delay;
	attr->reserved_lkey = qed_attr->reserved_lkey;
	attr->bad_pkey_counter = qed_attr->bad_pkey_counter;
	attr->max_stats_queues = qed_attr->max_stats_queues;

	return 0;
}

static void qedr_unaffiliated_event(void *context, u8 event_code)
{
	pr_err("unaffiliated event not implemented yet\n");
}

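/* Affiliated asynchronous events arrive with a firmware handle whose
 * meaning depends on the event type: for CQ and QP events it carries the
 * two halves of the kernel pointer that was registered with the firmware,
 * while for SRQ events only the low 16 bits are meaningful and hold the
 * SRQ id, which is resolved through the dev->srqs xarray under its lock.
 */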
static void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
{
#define EVENT_TYPE_NOT_DEFINED	0
#define EVENT_TYPE_CQ		1
#define EVENT_TYPE_QP		2
#define EVENT_TYPE_SRQ		3
	struct qedr_dev *dev = (struct qedr_dev *)context;
	struct regpair *async_handle = (struct regpair *)fw_handle;
	u64 roce_handle64 = ((u64) async_handle->hi << 32) + async_handle->lo;
	u8 event_type = EVENT_TYPE_NOT_DEFINED;
	struct ib_event event;
	struct ib_srq *ibsrq;
	struct qedr_srq *srq;
	unsigned long flags;
	struct ib_cq *ibcq;
	struct ib_qp *ibqp;
	struct qedr_cq *cq;
	struct qedr_qp *qp;
	u16 srq_id;

	if (IS_ROCE(dev)) {
		switch (e_code) {
		case ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR:
			event.event = IB_EVENT_CQ_ERR;
			event_type = EVENT_TYPE_CQ;
			break;
		case ROCE_ASYNC_EVENT_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR:
			event.event = IB_EVENT_QP_FATAL;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR:
			event.event = IB_EVENT_QP_REQ_ERR;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_SRQ_LIMIT:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			event_type = EVENT_TYPE_SRQ;
			break;
		case ROCE_ASYNC_EVENT_SRQ_EMPTY:
			event.event = IB_EVENT_SRQ_ERR;
			event_type = EVENT_TYPE_SRQ;
			break;
		default:
			DP_ERR(dev, "unsupported event %d on handle=%llx\n",
			       e_code, roce_handle64);
		}
	} else {
		switch (e_code) {
		case QED_IWARP_EVENT_SRQ_LIMIT:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			event_type = EVENT_TYPE_SRQ;
			break;
		case QED_IWARP_EVENT_SRQ_EMPTY:
			event.event = IB_EVENT_SRQ_ERR;
			event_type = EVENT_TYPE_SRQ;
			break;
		default:
			DP_ERR(dev, "unsupported event %d on handle=%llx\n",
			       e_code, roce_handle64);
		}
	}

	switch (event_type) {
	case EVENT_TYPE_CQ:
		cq = (struct qedr_cq *)(uintptr_t)roce_handle64;
		if (cq) {
			ibcq = &cq->ibcq;
			if (ibcq->event_handler) {
				event.device = ibcq->device;
				event.element.cq = ibcq;
				ibcq->event_handler(&event, ibcq->cq_context);
			}
		} else {
			WARN(1,
			     "Error: CQ event with NULL pointer ibcq. Handle=%llx\n",
			     roce_handle64);
		}
		DP_ERR(dev, "CQ event %d on handle %p\n", e_code, cq);
		break;
	case EVENT_TYPE_QP:
		qp = (struct qedr_qp *)(uintptr_t)roce_handle64;
		if (qp) {
			ibqp = &qp->ibqp;
			if (ibqp->event_handler) {
				event.device = ibqp->device;
				event.element.qp = ibqp;
				ibqp->event_handler(&event, ibqp->qp_context);
			}
		} else {
			WARN(1,
			     "Error: QP event with NULL pointer ibqp. Handle=%llx\n",
			     roce_handle64);
		}
		DP_ERR(dev, "QP event %d on handle %p\n", e_code, qp);
		break;
	case EVENT_TYPE_SRQ:
		srq_id = (u16)roce_handle64;
		xa_lock_irqsave(&dev->srqs, flags);
		srq = xa_load(&dev->srqs, srq_id);
		if (srq) {
			ibsrq = &srq->ibsrq;
			if (ibsrq->event_handler) {
				event.device = ibsrq->device;
				event.element.srq = ibsrq;
				ibsrq->event_handler(&event,
						     ibsrq->srq_context);
			}
		} else {
			DP_NOTICE(dev,
				  "SRQ event with NULL pointer ibsrq. Handle=%llx\n",
				  roce_handle64);
		}
		xa_unlock_irqrestore(&dev->srqs, flags);
		DP_NOTICE(dev, "SRQ event %d on handle %p\n", e_code, srq);
		break;
	default:
		break;
	}
}

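/* Bring up the RDMA engine: pass the firmware the PBL of every CNQ, start
 * the qed rdma layer, then register as a user of it, which returns the
 * doorbell page (DPI) mapping used for kernel and user doorbells. Device
 * attributes are snapshotted last, once the firmware can be queried.
 */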
static int qedr_init_hw(struct qedr_dev *dev)
{
	struct qed_rdma_add_user_out_params out_params;
	struct qed_rdma_start_in_params *in_params;
	struct qed_rdma_cnq_params *cur_pbl;
	struct qed_rdma_events events;
	dma_addr_t p_phys_table;
	u32 page_cnt;
	int rc = 0;
	int i;

	in_params = kzalloc(sizeof(*in_params), GFP_KERNEL);
	if (!in_params) {
		rc = -ENOMEM;
		goto out;
	}

	in_params->desired_cnq = dev->num_cnq;
	for (i = 0; i < dev->num_cnq; i++) {
		cur_pbl = &in_params->cnq_pbl_list[i];

		page_cnt = qed_chain_get_page_cnt(&dev->cnq_array[i].pbl);
		cur_pbl->num_pbl_pages = page_cnt;

		p_phys_table = qed_chain_get_pbl_phys(&dev->cnq_array[i].pbl);
		cur_pbl->pbl_ptr = (u64)p_phys_table;
	}

	events.affiliated_event = qedr_affiliated_event;
	events.unaffiliated_event = qedr_unaffiliated_event;
	events.context = dev;

	in_params->events = &events;
	in_params->cq_mode = QED_RDMA_CQ_MODE_32_BITS;
	in_params->max_mtu = dev->ndev->mtu;
	dev->iwarp_max_mtu = dev->ndev->mtu;
	ether_addr_copy(&in_params->mac_addr[0], dev->ndev->dev_addr);

	rc = dev->ops->rdma_init(dev->cdev, in_params);
	if (rc)
		goto out;

	rc = dev->ops->rdma_add_user(dev->rdma_ctx, &out_params);
	if (rc)
		goto out;

	dev->db_addr = out_params.dpi_addr;
	dev->db_phys_addr = out_params.dpi_phys_addr;
	dev->db_size = out_params.dpi_size;
	dev->dpi = out_params.dpi;

	rc = qedr_set_device_attr(dev);
out:
	kfree(in_params);
	if (rc)
		DP_ERR(dev, "Init HW Failed rc = %d\n", rc);

	return rc;
}

static void qedr_stop_hw(struct qedr_dev *dev)
{
	dev->ops->rdma_remove_user(dev->rdma_ctx, dev->dpi);
	dev->ops->rdma_stop(dev->rdma_ctx);
}

static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
				 struct net_device *ndev)
{
	struct qed_dev_rdma_info dev_info;
	struct qedr_dev *dev;
	int rc = 0;

	dev = ib_alloc_device(qedr_dev, ibdev);
	if (!dev) {
		pr_err("Unable to allocate ib device\n");
		return NULL;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr add device called\n");

	dev->pdev = pdev;
	dev->ndev = ndev;
	dev->cdev = cdev;

	qed_ops = qed_get_rdma_ops();
	if (!qed_ops) {
		DP_ERR(dev, "Failed to get qed roce operations\n");
		goto init_err;
	}

	dev->ops = qed_ops;
	rc = qed_ops->fill_dev_info(cdev, &dev_info);
	if (rc)
		goto init_err;

	dev->user_dpm_enabled = dev_info.user_dpm_enabled;
	dev->rdma_type = dev_info.rdma_type;
	dev->num_hwfns = dev_info.common.num_hwfns;

	if (IS_IWARP(dev) && QEDR_IS_CMT(dev)) {
		rc = dev->ops->iwarp_set_engine_affin(cdev, false);
		if (rc) {
			DP_ERR(dev, "iWARP is disabled over a 100g device. Enabling it may impact L2 performance. To enable it run devlink dev param set <dev> name iwarp_cmt value true cmode runtime\n");
			goto init_err;
		}
	}
	dev->affin_hwfn_idx = dev->ops->common->get_affin_hwfn_idx(cdev);

	dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev);

	dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev);
	if (!dev->num_cnq) {
		DP_ERR(dev, "Failed. At least one CNQ is required.\n");
		rc = -ENOMEM;
		goto init_err;
	}

	dev->wq_multiplier = QEDR_WQ_MULTIPLIER_DFT;

	qedr_pci_set_atomic(dev, pdev);

	rc = qedr_alloc_resources(dev);
	if (rc)
		goto init_err;

	rc = qedr_init_hw(dev);
	if (rc)
		goto alloc_err;

	rc = qedr_setup_irqs(dev);
	if (rc)
		goto irq_err;

	rc = qedr_register_device(dev);
	if (rc) {
		DP_ERR(dev, "Unable to register device\n");
		goto reg_err;
	}

	if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
	return dev;

reg_err:
	qedr_sync_free_irqs(dev);
irq_err:
	qedr_stop_hw(dev);
alloc_err:
	qedr_free_resources(dev);
init_err:
	/* Log before ib_dealloc_device(): it frees the qedr_dev that DP_ERR
	 * dereferences.
	 */
	DP_ERR(dev, "qedr driver load failed rc=%d\n", rc);
	ib_dealloc_device(&dev->ibdev);

	return NULL;
}

static void qedr_remove(struct qedr_dev *dev)
{
	/* First unregister with stack to stop all the active traffic
	 * of the registered clients.
	 */
	ib_unregister_device(&dev->ibdev);

	qedr_stop_hw(dev);
	qedr_sync_free_irqs(dev);
	qedr_free_resources(dev);

	if (IS_IWARP(dev) && QEDR_IS_CMT(dev))
		dev->ops->iwarp_set_engine_affin(dev->cdev, true);

	ib_dealloc_device(&dev->ibdev);
}

static void qedr_close(struct qedr_dev *dev)
{
	if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR);
}

static void qedr_shutdown(struct qedr_dev *dev)
{
	qedr_close(dev);
	qedr_remove(dev);
}

static void qedr_open(struct qedr_dev *dev)
{
	if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
}

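/* Re-derive the default RoCE GID after a MAC address change: the GID is the
 * IPv6 link-local address formed from the MAC's EUI-64 (universal/local bit
 * flipped via the ^ 2 below, 0xff:0xfe inserted in the middle, fe80::/64
 * prefix prepended). The LL2 MAC filter used for GSI (QP1) traffic is
 * updated alongside.
 */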
static void qedr_mac_address_change(struct qedr_dev *dev)
{
	union ib_gid *sgid = &dev->sgid_tbl[0];
	u8 guid[8], mac_addr[6];
	int rc;

	/* Update SGID */
	ether_addr_copy(&mac_addr[0], dev->ndev->dev_addr);
	guid[0] = mac_addr[0] ^ 2;
	guid[1] = mac_addr[1];
	guid[2] = mac_addr[2];
	guid[3] = 0xff;
	guid[4] = 0xfe;
	guid[5] = mac_addr[3];
	guid[6] = mac_addr[4];
	guid[7] = mac_addr[5];
	sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	memcpy(&sgid->raw[8], guid, sizeof(guid));

	/* Update LL2 */
	rc = dev->ops->ll2_set_mac_filter(dev->cdev,
					  dev->gsi_ll2_mac_address,
					  dev->ndev->dev_addr);

	ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);

	qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE);

	if (rc)
		DP_ERR(dev, "Error updating mac filter\n");
}

/* Event handling via the NIC driver ensures that all NIC-specific
 * initialization is complete before the RoCE driver notifies the stack of
 * the event.
 */
static void qedr_notify(struct qedr_dev *dev, enum qede_rdma_event event)
{
	switch (event) {
	case QEDE_UP:
		qedr_open(dev);
		break;
	case QEDE_DOWN:
		qedr_close(dev);
		break;
	case QEDE_CLOSE:
		qedr_shutdown(dev);
		break;
	case QEDE_CHANGE_ADDR:
		qedr_mac_address_change(dev);
		break;
	default:
		pr_err("Event not supported\n");
	}
}

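/* qedr does not bind to the PCI device itself; it registers with the qede
 * Ethernet driver, which invokes .add/.remove for each RDMA-capable port
 * and forwards link and address changes through .notify.
 */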
static struct qedr_driver qedr_drv = {
	.name = "qedr_driver",
	.add = qedr_add,
	.remove = qedr_remove,
	.notify = qedr_notify,
};

static int __init qedr_init_module(void)
{
	return qede_rdma_register_driver(&qedr_drv);
}

static void __exit qedr_exit_module(void)
{
	qede_rdma_unregister_driver(&qedr_drv);
}

module_init(qedr_init_module);
module_exit(qedr_exit_module);