xref: /openbmc/linux/drivers/infiniband/hw/qedr/main.c (revision 113094f7)
/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_mad.h>
#include <linux/netdevice.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <net/addrconf.h>

#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
#include "qedr_iw_cm.h"

MODULE_DESCRIPTION("QLogic 40G/100G ROCE Driver");
MODULE_AUTHOR("QLogic Corporation");
MODULE_LICENSE("Dual BSD/GPL");

#define QEDR_WQ_MULTIPLIER_DFT	(3)

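/* Build an ib_event for the given port and deliver it synchronously to every
 * registered ULP client via the IB core.
 */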
static void qedr_ib_dispatch_event(struct qedr_dev *dev, u8 port_num,
				   enum ib_event_type type)
{
	struct ib_event ibev;

	ibev.device = &dev->ibdev;
	ibev.element.port_num = port_num;
	ibev.event = type;

	ib_dispatch_event(&ibev);
}

static enum rdma_link_layer qedr_link_layer(struct ib_device *device,
					    u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

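/* Render the 32-bit firmware version as four dotted byte fields, most
 * significant byte first; e.g. fw_ver 0x08251000 prints as "8. 37. 16. 0".
 */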
static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str)
{
	struct qedr_dev *qedr = get_qedr_dev(ibdev);
	u32 fw_ver = (u32)qedr->attr.fw_ver;

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d. %d. %d. %d",
		 (fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF,
		 (fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
}

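/* Port attributes the IB core may cache. The RoCE variant exposes the real
 * pkey/gid table sizes and advertises both RoCE v1 and RoCE v2 (UDP encap);
 * the iWARP variant below pins both tables to a single entry and carries no
 * MADs.
 */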
static int qedr_roce_port_immutable(struct ib_device *ibdev, u8 port_num,
				    struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = qedr_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
	    RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static int qedr_iw_port_immutable(struct ib_device *ibdev, u8 port_num,
				  struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = qedr_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = 1;
	immutable->gid_tbl_len = 1;
	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
	immutable->max_mad_size = 0;

	return 0;
}

/* QEDR sysfs interface */
static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct qedr_dev *dev =
		rdma_device_to_drv_device(device, struct qedr_dev, ibdev);

	return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->pdev->vendor);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%s\n", "HCA_TYPE_TO_SET");
}
static DEVICE_ATTR_RO(hca_type);

static struct attribute *qedr_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	NULL
};

static const struct attribute_group qedr_attr_group = {
	.attrs = qedr_attributes,
};

static const struct ib_device_ops qedr_iw_dev_ops = {
	.get_port_immutable = qedr_iw_port_immutable,
	.iw_accept = qedr_iw_accept,
	.iw_add_ref = qedr_iw_qp_add_ref,
	.iw_connect = qedr_iw_connect,
	.iw_create_listen = qedr_iw_create_listen,
	.iw_destroy_listen = qedr_iw_destroy_listen,
	.iw_get_qp = qedr_iw_get_qp,
	.iw_reject = qedr_iw_reject,
	.iw_rem_ref = qedr_iw_qp_rem_ref,
	.query_gid = qedr_iw_query_gid,
};

static int qedr_iw_register_device(struct qedr_dev *dev)
{
	dev->ibdev.node_type = RDMA_NODE_RNIC;

	ib_set_device_ops(&dev->ibdev, &qedr_iw_dev_ops);

	memcpy(dev->ibdev.iw_ifname,
	       dev->ndev->name, sizeof(dev->ibdev.iw_ifname));

	return 0;
}

static const struct ib_device_ops qedr_roce_dev_ops = {
	.get_port_immutable = qedr_roce_port_immutable,
};

static void qedr_roce_register_device(struct qedr_dev *dev)
{
	dev->ibdev.node_type = RDMA_NODE_IB_CA;

	ib_set_device_ops(&dev->ibdev, &qedr_roce_dev_ops);
}

static const struct ib_device_ops qedr_dev_ops = {
	.alloc_mr = qedr_alloc_mr,
	.alloc_pd = qedr_alloc_pd,
	.alloc_ucontext = qedr_alloc_ucontext,
	.create_ah = qedr_create_ah,
	.create_cq = qedr_create_cq,
	.create_qp = qedr_create_qp,
	.create_srq = qedr_create_srq,
	.dealloc_pd = qedr_dealloc_pd,
	.dealloc_ucontext = qedr_dealloc_ucontext,
	.dereg_mr = qedr_dereg_mr,
	.destroy_ah = qedr_destroy_ah,
	.destroy_cq = qedr_destroy_cq,
	.destroy_qp = qedr_destroy_qp,
	.destroy_srq = qedr_destroy_srq,
	.get_dev_fw_str = qedr_get_dev_fw_str,
	.get_dma_mr = qedr_get_dma_mr,
	.get_link_layer = qedr_link_layer,
	.map_mr_sg = qedr_map_mr_sg,
	.mmap = qedr_mmap,
	.modify_port = qedr_modify_port,
	.modify_qp = qedr_modify_qp,
	.modify_srq = qedr_modify_srq,
	.poll_cq = qedr_poll_cq,
	.post_recv = qedr_post_recv,
	.post_send = qedr_post_send,
	.post_srq_recv = qedr_post_srq_recv,
	.process_mad = qedr_process_mad,
	.query_device = qedr_query_device,
	.query_pkey = qedr_query_pkey,
	.query_port = qedr_query_port,
	.query_qp = qedr_query_qp,
	.query_srq = qedr_query_srq,
	.reg_user_mr = qedr_reg_user_mr,
	.req_notify_cq = qedr_arm_cq,
	.resize_cq = qedr_resize_cq,

	INIT_RDMA_OBJ_SIZE(ib_ah, qedr_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_pd, qedr_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_srq, qedr_srq, ibsrq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, qedr_ucontext, ibucontext),
};

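/* Populate the ib_device with node/ABI info, the uverbs command mask and the
 * verb ops, bind the transport-specific ops (iWARP vs. RoCE), attach the
 * netdev, and finally register "qedr%d" with the IB core.
 */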
static int qedr_register_device(struct qedr_dev *dev)
{
	int rc;

	dev->ibdev.node_guid = dev->attr.node_guid;
	memcpy(dev->ibdev.node_desc, QEDR_NODE_DESC, sizeof(QEDR_NODE_DESC));
	dev->ibdev.owner = THIS_MODULE;
	dev->ibdev.uverbs_abi_ver = QEDR_ABI_VERSION;

	dev->ibdev.uverbs_cmd_mask = QEDR_UVERBS(GET_CONTEXT) |
				     QEDR_UVERBS(QUERY_DEVICE) |
				     QEDR_UVERBS(QUERY_PORT) |
				     QEDR_UVERBS(ALLOC_PD) |
				     QEDR_UVERBS(DEALLOC_PD) |
				     QEDR_UVERBS(CREATE_COMP_CHANNEL) |
				     QEDR_UVERBS(CREATE_CQ) |
				     QEDR_UVERBS(RESIZE_CQ) |
				     QEDR_UVERBS(DESTROY_CQ) |
				     QEDR_UVERBS(REQ_NOTIFY_CQ) |
				     QEDR_UVERBS(CREATE_QP) |
				     QEDR_UVERBS(MODIFY_QP) |
				     QEDR_UVERBS(QUERY_QP) |
				     QEDR_UVERBS(DESTROY_QP) |
				     QEDR_UVERBS(CREATE_SRQ) |
				     QEDR_UVERBS(DESTROY_SRQ) |
				     QEDR_UVERBS(QUERY_SRQ) |
				     QEDR_UVERBS(MODIFY_SRQ) |
				     QEDR_UVERBS(POST_SRQ_RECV) |
				     QEDR_UVERBS(REG_MR) |
				     QEDR_UVERBS(DEREG_MR) |
				     QEDR_UVERBS(POLL_CQ) |
				     QEDR_UVERBS(POST_SEND) |
				     QEDR_UVERBS(POST_RECV);

	if (IS_IWARP(dev)) {
		rc = qedr_iw_register_device(dev);
		if (rc)
			return rc;
	} else {
		qedr_roce_register_device(dev);
	}

	dev->ibdev.phys_port_cnt = 1;
	dev->ibdev.num_comp_vectors = dev->num_cnq;
	dev->ibdev.dev.parent = &dev->pdev->dev;

	rdma_set_device_sysfs_group(&dev->ibdev, &qedr_attr_group);
	ib_set_device_ops(&dev->ibdev, &qedr_dev_ops);

	dev->ibdev.driver_id = RDMA_DRIVER_QEDR;
	rc = ib_device_set_netdev(&dev->ibdev, dev->ndev, 1);
	if (rc)
		return rc;

	return ib_register_device(&dev->ibdev, "qedr%d");
}

/* This function allocates fast-path status block memory */
static int qedr_alloc_mem_sb(struct qedr_dev *dev,
			     struct qed_sb_info *sb_info, u16 sb_id)
{
	struct status_block_e4 *sb_virt;
	dma_addr_t sb_phys;
	int rc;

	sb_virt = dma_alloc_coherent(&dev->pdev->dev,
				     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
	if (!sb_virt)
		return -ENOMEM;

	rc = dev->ops->common->sb_init(dev->cdev, sb_info,
				       sb_virt, sb_phys, sb_id,
				       QED_SB_TYPE_CNQ);
	if (rc) {
		pr_err("Status block initialization failed\n");
		dma_free_coherent(&dev->pdev->dev, sizeof(*sb_virt),
				  sb_virt, sb_phys);
		return rc;
	}

	return 0;
}

static void qedr_free_mem_sb(struct qedr_dev *dev,
			     struct qed_sb_info *sb_info, int sb_id)
{
	if (sb_info->sb_virt) {
		dev->ops->common->sb_release(dev->cdev, sb_info, sb_id,
					     QED_SB_TYPE_CNQ);
		dma_free_coherent(&dev->pdev->dev, sizeof(*sb_info->sb_virt),
				  (void *)sb_info->sb_virt, sb_info->sb_phys);
	}
}

static void qedr_free_resources(struct qedr_dev *dev)
{
	int i;

	if (IS_IWARP(dev))
		destroy_workqueue(dev->iwarp_wq);

	for (i = 0; i < dev->num_cnq; i++) {
		qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
		dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
	}

	kfree(dev->cnq_array);
	kfree(dev->sb_array);
	kfree(dev->sgid_tbl);
}

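/* Allocate the per-device fast-path resources: the SGID table, one status
 * block and one PBL-backed completion-notification queue (CNQ) chain per
 * vector, plus the iWARP workqueue when applicable. Unwinds in reverse on
 * failure.
 */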
static int qedr_alloc_resources(struct qedr_dev *dev)
{
	struct qedr_cnq *cnq;
	__le16 *cons_pi;
	u16 n_entries;
	int i, rc;

	dev->sgid_tbl = kcalloc(QEDR_MAX_SGID, sizeof(union ib_gid),
				GFP_KERNEL);
	if (!dev->sgid_tbl)
		return -ENOMEM;

	spin_lock_init(&dev->sgid_lock);

	if (IS_IWARP(dev)) {
		xa_init_flags(&dev->qps, XA_FLAGS_LOCK_IRQ);
		dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq");
	}

	/* Allocate Status blocks for CNQ */
	dev->sb_array = kcalloc(dev->num_cnq, sizeof(*dev->sb_array),
				GFP_KERNEL);
	if (!dev->sb_array) {
		rc = -ENOMEM;
		goto err1;
	}

	dev->cnq_array = kcalloc(dev->num_cnq,
				 sizeof(*dev->cnq_array), GFP_KERNEL);
	if (!dev->cnq_array) {
		rc = -ENOMEM;
		goto err2;
	}

	dev->sb_start = dev->ops->rdma_get_start_sb(dev->cdev);

	/* Allocate CNQ PBLs */
	n_entries = min_t(u32, QED_RDMA_MAX_CNQ_SIZE, QEDR_ROCE_MAX_CNQ_SIZE);
	for (i = 0; i < dev->num_cnq; i++) {
		cnq = &dev->cnq_array[i];

		rc = qedr_alloc_mem_sb(dev, &dev->sb_array[i],
				       dev->sb_start + i);
		if (rc)
			goto err3;

		rc = dev->ops->common->chain_alloc(dev->cdev,
						   QED_CHAIN_USE_TO_CONSUME,
						   QED_CHAIN_MODE_PBL,
						   QED_CHAIN_CNT_TYPE_U16,
						   n_entries,
						   sizeof(struct regpair *),
						   &cnq->pbl, NULL);
		if (rc)
			goto err4;

		cnq->dev = dev;
		cnq->sb = &dev->sb_array[i];
		cons_pi = dev->sb_array[i].sb_virt->pi_array;
		cnq->hw_cons_ptr = &cons_pi[QED_ROCE_PROTOCOL_INDEX];
		cnq->index = i;
		sprintf(cnq->name, "qedr%d@pci:%s", i, pci_name(dev->pdev));

		DP_DEBUG(dev, QEDR_MSG_INIT, "cnq[%d].cons=%d\n",
			 i, qed_chain_get_cons_idx(&cnq->pbl));
	}

	return 0;
err4:
	qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
err3:
	for (--i; i >= 0; i--) {
		dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
		qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
	}
	kfree(dev->cnq_array);
err2:
	kfree(dev->sb_array);
err1:
	kfree(dev->sgid_tbl);
	return rc;
}

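/* Probe whether the PCIe fabric up to the root port supports 64-bit AtomicOp
 * completion; report global atomic support to the IB core only if it does.
 */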
static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev)
{
	int rc = pci_enable_atomic_ops_to_root(pdev,
					       PCI_EXP_DEVCAP2_ATOMIC_COMP64);

	if (rc) {
		dev->atomic_cap = IB_ATOMIC_NONE;
		DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability disabled\n");
	} else {
		dev->atomic_cap = IB_ATOMIC_GLOB;
		DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability enabled\n");
	}
}

static const struct qed_rdma_ops *qed_ops;

#define HILO_U64(hi, lo)		((((u64)(hi)) << 32) + (lo))

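/* CNQ interrupt handler. With the status block masked, walk the CNQ chain
 * from the software consumer index up to the hardware producer index; each
 * entry is a regpair holding a 64-bit CQ pointer (reassembled via HILO_U64,
 * e.g. hi=0x1, lo=0x2 -> 0x100000002) whose completion handler is invoked.
 * Finally report the new consumer index back to the device and re-enable
 * the interrupt line.
 */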
static irqreturn_t qedr_irq_handler(int irq, void *handle)
{
	u16 hw_comp_cons, sw_comp_cons;
	struct qedr_cnq *cnq = handle;
	struct regpair *cq_handle;
	struct qedr_cq *cq;

	qed_sb_ack(cnq->sb, IGU_INT_DISABLE, 0);

	qed_sb_update_sb_idx(cnq->sb);

	hw_comp_cons = le16_to_cpu(*cnq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);

	/* Align protocol-index and chain reads */
	rmb();

	while (sw_comp_cons != hw_comp_cons) {
		cq_handle = (struct regpair *)qed_chain_consume(&cnq->pbl);
		cq = (struct qedr_cq *)(uintptr_t)HILO_U64(cq_handle->hi,
				cq_handle->lo);

		if (cq == NULL) {
			DP_ERR(cnq->dev,
			       "Received NULL CQ cq_handle->hi=%d cq_handle->lo=%d sw_comp_cons=%d hw_comp_cons=%d\n",
			       cq_handle->hi, cq_handle->lo, sw_comp_cons,
			       hw_comp_cons);

			break;
		}

		if (cq->sig != QEDR_CQ_MAGIC_NUMBER) {
			DP_ERR(cnq->dev,
			       "Problem with cq signature, cq_handle->hi=%d cq_handle->lo=%d cq=%p\n",
			       cq_handle->hi, cq_handle->lo, cq);
			break;
		}

		cq->arm_flags = 0;

		if (!cq->destroyed && cq->ibcq.comp_handler)
			(*cq->ibcq.comp_handler)
				(&cq->ibcq, cq->ibcq.cq_context);

		/* The CQ's CNQ notification counter is checked before
		 * destroying the CQ in a busy-wait loop that waits for all of
		 * the CQ's CNQ interrupts to be processed. It is increased
		 * here, only after the completion handler, to ensure that
		 * the handler is not running when the CQ is destroyed.
		 */
		cq->cnq_notif++;

		sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);

		cnq->n_comp++;
	}

	qed_ops->rdma_cnq_prod_update(cnq->dev->rdma_ctx, cnq->index,
				      sw_comp_cons);

	qed_sb_ack(cnq->sb, IGU_INT_ENABLE, 1);

	return IRQ_HANDLED;
}

static void qedr_sync_free_irqs(struct qedr_dev *dev)
{
	u32 vector;
	u16 idx;
	int i;

	for (i = 0; i < dev->int_info.used_cnt; i++) {
		if (dev->int_info.msix_cnt) {
			idx = i * dev->num_hwfns + dev->affin_hwfn_idx;
			vector = dev->int_info.msix[idx].vector;
			synchronize_irq(vector);
			free_irq(vector, &dev->cnq_array[i]);
		}
	}

	dev->int_info.used_cnt = 0;
}

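/* Request one MSI-X vector per CNQ. The MSI-X table interleaves entries for
 * all HW functions, so index i * num_hwfns + affin_hwfn_idx selects the
 * vector belonging to the engine this RDMA instance is affined to.
 */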
static int qedr_req_msix_irqs(struct qedr_dev *dev)
{
	int i, rc = 0;
	u16 idx;

	if (dev->num_cnq > dev->int_info.msix_cnt) {
		DP_ERR(dev,
		       "Interrupt mismatch: %d CNQ queues > %d MSI-x vectors\n",
		       dev->num_cnq, dev->int_info.msix_cnt);
		return -EINVAL;
	}

	for (i = 0; i < dev->num_cnq; i++) {
		idx = i * dev->num_hwfns + dev->affin_hwfn_idx;
		rc = request_irq(dev->int_info.msix[idx].vector,
				 qedr_irq_handler, 0, dev->cnq_array[i].name,
				 &dev->cnq_array[i]);
		if (rc) {
			DP_ERR(dev, "Request cnq %d irq failed\n", i);
			qedr_sync_free_irqs(dev);
		} else {
			DP_DEBUG(dev, QEDR_MSG_INIT,
				 "Requested cnq irq for %s [entry %d]. Cookie is at %p\n",
				 dev->cnq_array[i].name, i,
				 &dev->cnq_array[i]);
			dev->int_info.used_cnt++;
		}
	}

	return rc;
}

static int qedr_setup_irqs(struct qedr_dev *dev)
{
	int rc;

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs\n");

	/* Learn Interrupt configuration */
	rc = dev->ops->rdma_set_rdma_int(dev->cdev, dev->num_cnq);
	if (rc < 0)
		return rc;

	rc = dev->ops->rdma_get_rdma_int(dev->cdev, &dev->int_info);
	if (rc) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "get_rdma_int failed\n");
		return rc;
	}

	if (dev->int_info.msix_cnt) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "rdma msix_cnt = %d\n",
			 dev->int_info.msix_cnt);
		rc = qedr_req_msix_irqs(dev);
		if (rc)
			return rc;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs succeeded\n");

	return 0;
}

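/* Pull the device capabilities from the qed core, verify that the kernel's
 * PAGE_SIZE is covered by the device's supported page sizes, and cache a
 * (partly clamped) copy in dev->attr for later query_device calls.
 */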
static int qedr_set_device_attr(struct qedr_dev *dev)
{
	struct qed_rdma_device *qed_attr;
	struct qedr_device_attr *attr;
	u32 page_size;

	/* Part 1 - query core capabilities */
	qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx);

	/* Part 2 - check capabilities */
	page_size = ~dev->attr.page_size_caps + 1;
	if (page_size > PAGE_SIZE) {
		DP_ERR(dev,
		       "Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n",
		       PAGE_SIZE, page_size);
		return -ENODEV;
	}

	/* Part 3 - copy and update capabilities */
	attr = &dev->attr;
	attr->vendor_id = qed_attr->vendor_id;
	attr->vendor_part_id = qed_attr->vendor_part_id;
	attr->hw_ver = qed_attr->hw_ver;
	attr->fw_ver = qed_attr->fw_ver;
	attr->node_guid = qed_attr->node_guid;
	attr->sys_image_guid = qed_attr->sys_image_guid;
	attr->max_cnq = qed_attr->max_cnq;
	attr->max_sge = qed_attr->max_sge;
	attr->max_inline = qed_attr->max_inline;
	attr->max_sqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_SQE);
	attr->max_rqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_RQE);
	attr->max_qp_resp_rd_atomic_resc = qed_attr->max_qp_resp_rd_atomic_resc;
	attr->max_qp_req_rd_atomic_resc = qed_attr->max_qp_req_rd_atomic_resc;
	attr->max_dev_resp_rd_atomic_resc =
	    qed_attr->max_dev_resp_rd_atomic_resc;
	attr->max_cq = qed_attr->max_cq;
	attr->max_qp = qed_attr->max_qp;
	attr->max_mr = qed_attr->max_mr;
	attr->max_mr_size = qed_attr->max_mr_size;
	attr->max_cqe = min_t(u64, qed_attr->max_cqe, QEDR_MAX_CQES);
	attr->max_mw = qed_attr->max_mw;
	attr->max_fmr = qed_attr->max_fmr;
	attr->max_mr_mw_fmr_pbl = qed_attr->max_mr_mw_fmr_pbl;
	attr->max_mr_mw_fmr_size = qed_attr->max_mr_mw_fmr_size;
	attr->max_pd = qed_attr->max_pd;
	attr->max_ah = qed_attr->max_ah;
	attr->max_pkey = qed_attr->max_pkey;
	attr->max_srq = qed_attr->max_srq;
	attr->max_srq_wr = qed_attr->max_srq_wr;
	attr->dev_caps = qed_attr->dev_caps;
	attr->page_size_caps = qed_attr->page_size_caps;
	attr->dev_ack_delay = qed_attr->dev_ack_delay;
	attr->reserved_lkey = qed_attr->reserved_lkey;
	attr->bad_pkey_counter = qed_attr->bad_pkey_counter;
	attr->max_stats_queues = qed_attr->max_stats_queues;

	return 0;
}

static void qedr_unaffiliated_event(void *context, u8 event_code)
{
	pr_err("unaffiliated event not implemented yet\n");
}

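/* FW async (affiliated) event callback: translate the RoCE or iWARP event
 * code into an ib_event type, recover the object from the opaque FW handle
 * (a CQ/QP pointer, or an SRQ id looked up in dev->srqs), and forward it to
 * the ULP's registered event handler.
 */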
static void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
{
#define EVENT_TYPE_NOT_DEFINED	0
#define EVENT_TYPE_CQ		1
#define EVENT_TYPE_QP		2
#define EVENT_TYPE_SRQ		3
	struct qedr_dev *dev = (struct qedr_dev *)context;
	struct regpair *async_handle = (struct regpair *)fw_handle;
	u64 roce_handle64 = ((u64) async_handle->hi << 32) + async_handle->lo;
	u8 event_type = EVENT_TYPE_NOT_DEFINED;
	struct ib_event event;
	struct ib_srq *ibsrq;
	struct qedr_srq *srq;
	unsigned long flags;
	struct ib_cq *ibcq;
	struct ib_qp *ibqp;
	struct qedr_cq *cq;
	struct qedr_qp *qp;
	u16 srq_id;

	if (IS_ROCE(dev)) {
		switch (e_code) {
		case ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR:
			event.event = IB_EVENT_CQ_ERR;
			event_type = EVENT_TYPE_CQ;
			break;
		case ROCE_ASYNC_EVENT_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR:
			event.event = IB_EVENT_QP_FATAL;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR:
			event.event = IB_EVENT_QP_REQ_ERR;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_SRQ_LIMIT:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			event_type = EVENT_TYPE_SRQ;
			break;
		case ROCE_ASYNC_EVENT_SRQ_EMPTY:
			event.event = IB_EVENT_SRQ_ERR;
			event_type = EVENT_TYPE_SRQ;
			break;
		default:
			DP_ERR(dev, "unsupported event %d on handle=%llx\n",
			       e_code, roce_handle64);
		}
	} else {
		switch (e_code) {
		case QED_IWARP_EVENT_SRQ_LIMIT:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			event_type = EVENT_TYPE_SRQ;
			break;
		case QED_IWARP_EVENT_SRQ_EMPTY:
			event.event = IB_EVENT_SRQ_ERR;
			event_type = EVENT_TYPE_SRQ;
			break;
		default:
			DP_ERR(dev, "unsupported event %d on handle=%llx\n",
			       e_code, roce_handle64);
		}
	}
	switch (event_type) {
	case EVENT_TYPE_CQ:
		cq = (struct qedr_cq *)(uintptr_t)roce_handle64;
		if (cq) {
			ibcq = &cq->ibcq;
			if (ibcq->event_handler) {
				event.device = ibcq->device;
				event.element.cq = ibcq;
				ibcq->event_handler(&event, ibcq->cq_context);
			}
		} else {
			WARN(1,
			     "Error: CQ event with NULL pointer ibcq. Handle=%llx\n",
			     roce_handle64);
		}
		DP_ERR(dev, "CQ event %d on handle %p\n", e_code, cq);
		break;
	case EVENT_TYPE_QP:
		qp = (struct qedr_qp *)(uintptr_t)roce_handle64;
		if (qp) {
			ibqp = &qp->ibqp;
			if (ibqp->event_handler) {
				event.device = ibqp->device;
				event.element.qp = ibqp;
				ibqp->event_handler(&event, ibqp->qp_context);
			}
		} else {
			WARN(1,
			     "Error: QP event with NULL pointer ibqp. Handle=%llx\n",
			     roce_handle64);
		}
		DP_ERR(dev, "QP event %d on handle %p\n", e_code, qp);
		break;
	case EVENT_TYPE_SRQ:
		srq_id = (u16)roce_handle64;
		xa_lock_irqsave(&dev->srqs, flags);
		srq = xa_load(&dev->srqs, srq_id);
		if (srq) {
			ibsrq = &srq->ibsrq;
			if (ibsrq->event_handler) {
				event.device = ibsrq->device;
				event.element.srq = ibsrq;
				ibsrq->event_handler(&event,
						     ibsrq->srq_context);
			}
		} else {
			DP_NOTICE(dev,
				  "SRQ event with NULL pointer ibsrq. Handle=%llx\n",
				  roce_handle64);
		}
		xa_unlock_irqrestore(&dev->srqs, flags);
		DP_NOTICE(dev, "SRQ event %d on handle %p\n", e_code, srq);
		break;
	default:
		break;
	}
}

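/* Hand the qed core everything it needs to start RDMA: the PBL address and
 * page count of every CNQ chain, the async event callbacks, CQ mode, MTU and
 * MAC, then add a user context and record the returned doorbell (DPI)
 * region. Device attributes are cached as the final step.
 */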
static int qedr_init_hw(struct qedr_dev *dev)
{
	struct qed_rdma_add_user_out_params out_params;
	struct qed_rdma_start_in_params *in_params;
	struct qed_rdma_cnq_params *cur_pbl;
	struct qed_rdma_events events;
	dma_addr_t p_phys_table;
	u32 page_cnt;
	int rc = 0;
	int i;

	in_params = kzalloc(sizeof(*in_params), GFP_KERNEL);
	if (!in_params) {
		rc = -ENOMEM;
		goto out;
	}

	in_params->desired_cnq = dev->num_cnq;
	for (i = 0; i < dev->num_cnq; i++) {
		cur_pbl = &in_params->cnq_pbl_list[i];

		page_cnt = qed_chain_get_page_cnt(&dev->cnq_array[i].pbl);
		cur_pbl->num_pbl_pages = page_cnt;

		p_phys_table = qed_chain_get_pbl_phys(&dev->cnq_array[i].pbl);
		cur_pbl->pbl_ptr = (u64)p_phys_table;
	}

	events.affiliated_event = qedr_affiliated_event;
	events.unaffiliated_event = qedr_unaffiliated_event;
	events.context = dev;

	in_params->events = &events;
	in_params->cq_mode = QED_RDMA_CQ_MODE_32_BITS;
	in_params->max_mtu = dev->ndev->mtu;
	dev->iwarp_max_mtu = dev->ndev->mtu;
	ether_addr_copy(&in_params->mac_addr[0], dev->ndev->dev_addr);

	rc = dev->ops->rdma_init(dev->cdev, in_params);
	if (rc)
		goto out;

	rc = dev->ops->rdma_add_user(dev->rdma_ctx, &out_params);
	if (rc)
		goto out;

	dev->db_addr = (void __iomem *)(uintptr_t)out_params.dpi_addr;
	dev->db_phys_addr = out_params.dpi_phys_addr;
	dev->db_size = out_params.dpi_size;
	dev->dpi = out_params.dpi;

	rc = qedr_set_device_attr(dev);
out:
	kfree(in_params);
	if (rc)
		DP_ERR(dev, "Init HW Failed rc = %d\n", rc);

	return rc;
}

static void qedr_stop_hw(struct qedr_dev *dev)
{
	dev->ops->rdma_remove_user(dev->rdma_ctx, dev->dpi);
	dev->ops->rdma_stop(dev->rdma_ctx);
}

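/* Probe-time entry point, invoked via the qede RDMA notification framework
 * for each RDMA-capable port: allocate the ib_device, bind the qed RDMA ops,
 * resolve engine affinity (iWARP on 100g CMT devices needs a dedicated
 * engine), allocate resources, init the HW, request IRQs and register with
 * the IB core, then signal PORT_ACTIVE.
 */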
static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
				 struct net_device *ndev)
{
	struct qed_dev_rdma_info dev_info;
	struct qedr_dev *dev;
	int rc = 0;

	dev = ib_alloc_device(qedr_dev, ibdev);
	if (!dev) {
		pr_err("Unable to allocate ib device\n");
		return NULL;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr add device called\n");

	dev->pdev = pdev;
	dev->ndev = ndev;
	dev->cdev = cdev;

	qed_ops = qed_get_rdma_ops();
	if (!qed_ops) {
		DP_ERR(dev, "Failed to get qed roce operations\n");
		goto init_err;
	}

	dev->ops = qed_ops;
	rc = qed_ops->fill_dev_info(cdev, &dev_info);
	if (rc)
		goto init_err;

	dev->user_dpm_enabled = dev_info.user_dpm_enabled;
	dev->rdma_type = dev_info.rdma_type;
	dev->num_hwfns = dev_info.common.num_hwfns;

	if (IS_IWARP(dev) && QEDR_IS_CMT(dev)) {
		rc = dev->ops->iwarp_set_engine_affin(cdev, false);
		if (rc) {
			DP_ERR(dev, "iWARP is disabled over a 100g device. Enabling it may impact L2 performance. To enable it run devlink dev param set <dev> name iwarp_cmt value true cmode runtime\n");
			goto init_err;
		}
	}
	dev->affin_hwfn_idx = dev->ops->common->get_affin_hwfn_idx(cdev);

	dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev);

	dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev);
	if (!dev->num_cnq) {
		DP_ERR(dev, "Failed. At least one CNQ is required.\n");
		rc = -ENOMEM;
		goto init_err;
	}

	dev->wq_multiplier = QEDR_WQ_MULTIPLIER_DFT;

	qedr_pci_set_atomic(dev, pdev);

	rc = qedr_alloc_resources(dev);
	if (rc)
		goto init_err;

	rc = qedr_init_hw(dev);
	if (rc)
		goto alloc_err;

	rc = qedr_setup_irqs(dev);
	if (rc)
		goto irq_err;

	rc = qedr_register_device(dev);
	if (rc) {
		DP_ERR(dev, "Unable to register device\n");
		goto reg_err;
	}

	if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
	return dev;

reg_err:
	qedr_sync_free_irqs(dev);
irq_err:
	qedr_stop_hw(dev);
alloc_err:
	qedr_free_resources(dev);
init_err:
	/* Log while dev is still valid; ib_dealloc_device() may free it */
	DP_ERR(dev, "qedr driver load failed rc=%d\n", rc);
	ib_dealloc_device(&dev->ibdev);

	return NULL;
}

static void qedr_remove(struct qedr_dev *dev)
{
	/* Unregister with the IB stack first to stop all active traffic
	 * from the registered clients.
	 */
	ib_unregister_device(&dev->ibdev);

	qedr_stop_hw(dev);
	qedr_sync_free_irqs(dev);
	qedr_free_resources(dev);

	if (IS_IWARP(dev) && QEDR_IS_CMT(dev))
		dev->ops->iwarp_set_engine_affin(dev->cdev, true);

	ib_dealloc_device(&dev->ibdev);
}

static void qedr_close(struct qedr_dev *dev)
{
	if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR);
}

static void qedr_shutdown(struct qedr_dev *dev)
{
	qedr_close(dev);
	qedr_remove(dev);
}

static void qedr_open(struct qedr_dev *dev)
{
	if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
}

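/* Rebuild the default SGID as a link-local EUI-64 GID derived from the new
 * MAC (U/L bit flipped, ff:fe inserted in the middle): e.g. MAC
 * 00:0e:1e:01:02:03 yields GID fe80::020e:1eff:fe01:0203. Then refresh the
 * GSI LL2 MAC filter and tell ULPs the GID table changed.
 */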
static void qedr_mac_address_change(struct qedr_dev *dev)
{
	union ib_gid *sgid = &dev->sgid_tbl[0];
	u8 guid[8], mac_addr[6];
	int rc;

	/* Update SGID */
	ether_addr_copy(&mac_addr[0], dev->ndev->dev_addr);
	guid[0] = mac_addr[0] ^ 2;
	guid[1] = mac_addr[1];
	guid[2] = mac_addr[2];
	guid[3] = 0xff;
	guid[4] = 0xfe;
	guid[5] = mac_addr[3];
	guid[6] = mac_addr[4];
	guid[7] = mac_addr[5];
	sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	memcpy(&sgid->raw[8], guid, sizeof(guid));

	/* Update LL2 */
	rc = dev->ops->ll2_set_mac_filter(dev->cdev,
					  dev->gsi_ll2_mac_address,
					  dev->ndev->dev_addr);

	ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);

	qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE);

	if (rc)
		DP_ERR(dev, "Error updating mac filter\n");
}

/* Event handling via the NIC driver ensures that all NIC-specific
 * initialization is complete before the RoCE driver notifies the stack of
 * the event.
 */
static void qedr_notify(struct qedr_dev *dev, enum qede_rdma_event event)
{
	switch (event) {
	case QEDE_UP:
		qedr_open(dev);
		break;
	case QEDE_DOWN:
		qedr_close(dev);
		break;
	case QEDE_CLOSE:
		qedr_shutdown(dev);
		break;
	case QEDE_CHANGE_ADDR:
		qedr_mac_address_change(dev);
		break;
	default:
		pr_err("Event not supported\n");
	}
}

static struct qedr_driver qedr_drv = {
	.name = "qedr_driver",
	.add = qedr_add,
	.remove = qedr_remove,
	.notify = qedr_notify,
};

static int __init qedr_init_module(void)
{
	return qede_rdma_register_driver(&qedr_drv);
}

static void __exit qedr_exit_module(void)
{
	qede_rdma_unregister_driver(&qedr_drv);
}

module_init(qedr_init_module);
module_exit(qedr_exit_module);