xref: /openbmc/linux/drivers/infiniband/hw/qedr/main.c (revision fb09a1ed5c6e507499a9da54bfd34f71a2673961)
/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_mad.h>
#include <linux/netdevice.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <net/addrconf.h>

#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
#include "qedr_iw_cm.h"

MODULE_DESCRIPTION("QLogic 40G/100G RoCE Driver");
MODULE_AUTHOR("QLogic Corporation");
MODULE_LICENSE("Dual BSD/GPL");

#define QEDR_WQ_MULTIPLIER_DFT	(3)

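/* Dispatch a port-level event (e.g. port active / port error) for
 * @port_num to the IB core.
 */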
static void qedr_ib_dispatch_event(struct qedr_dev *dev, u32 port_num,
				   enum ib_event_type type)
{
	struct ib_event ibev;

	ibev.device = &dev->ibdev;
	ibev.element.port_num = port_num;
	ibev.event = type;

	ib_dispatch_event(&ibev);
}

static enum rdma_link_layer qedr_link_layer(struct ib_device *device,
					    u32 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str)
{
	struct qedr_dev *qedr = get_qedr_dev(ibdev);
	u32 fw_ver = (u32)qedr->attr.fw_ver;

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
		 (fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF,
		 (fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
}

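/* Report the immutable port attributes of a RoCE port: pkey/gid table
 * sizes taken from qedr_query_port(), RoCE v1/v2 core capability flags
 * and the management datagram size.
 */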
static int qedr_roce_port_immutable(struct ib_device *ibdev, u32 port_num,
				    struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = qedr_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
	    RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

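/* Report the immutable port attributes of an iWARP port: a single GID
 * table entry, the iWARP core capability flag and no MAD support.
 */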
static int qedr_iw_port_immutable(struct ib_device *ibdev, u32 port_num,
				  struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = qedr_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->gid_tbl_len = 1;
	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
	immutable->max_mad_size = 0;

	return 0;
}

/* QEDR sysfs interface */
static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct qedr_dev *dev =
		rdma_device_to_drv_device(device, struct qedr_dev, ibdev);

	return sysfs_emit(buf, "0x%x\n", dev->attr.hw_ver);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct qedr_dev *dev =
		rdma_device_to_drv_device(device, struct qedr_dev, ibdev);

	return sysfs_emit(buf, "FastLinQ QL%x %s\n", dev->pdev->device,
			  rdma_protocol_iwarp(&dev->ibdev, 1) ? "iWARP" :
								"RoCE");
}
static DEVICE_ATTR_RO(hca_type);

static struct attribute *qedr_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	NULL
};

static const struct attribute_group qedr_attr_group = {
	.attrs = qedr_attributes,
};

static const struct ib_device_ops qedr_iw_dev_ops = {
	.get_port_immutable = qedr_iw_port_immutable,
	.iw_accept = qedr_iw_accept,
	.iw_add_ref = qedr_iw_qp_add_ref,
	.iw_connect = qedr_iw_connect,
	.iw_create_listen = qedr_iw_create_listen,
	.iw_destroy_listen = qedr_iw_destroy_listen,
	.iw_get_qp = qedr_iw_get_qp,
	.iw_reject = qedr_iw_reject,
	.iw_rem_ref = qedr_iw_qp_rem_ref,
	.query_gid = qedr_iw_query_gid,
};

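/* Set up the iWARP-specific parts of the ib_device: node type, the
 * iw_cm callbacks from qedr_iw_dev_ops and the name of the underlying
 * netdev.
 */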
static int qedr_iw_register_device(struct qedr_dev *dev)
{
	dev->ibdev.node_type = RDMA_NODE_RNIC;

	ib_set_device_ops(&dev->ibdev, &qedr_iw_dev_ops);

	memcpy(dev->ibdev.iw_ifname,
	       dev->ndev->name, sizeof(dev->ibdev.iw_ifname));

	return 0;
}

static const struct ib_device_ops qedr_roce_dev_ops = {
	.alloc_xrcd = qedr_alloc_xrcd,
	.dealloc_xrcd = qedr_dealloc_xrcd,
	.get_port_immutable = qedr_roce_port_immutable,
	.query_pkey = qedr_query_pkey,
};

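/* Set up the RoCE-specific parts of the ib_device: node type and the
 * RoCE-only verbs from qedr_roce_dev_ops (XRCD, pkey and port
 * immutable callbacks).
 */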
static void qedr_roce_register_device(struct qedr_dev *dev)
{
	dev->ibdev.node_type = RDMA_NODE_IB_CA;

	ib_set_device_ops(&dev->ibdev, &qedr_roce_dev_ops);
}

static const struct ib_device_ops qedr_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_QEDR,
	.uverbs_abi_ver = QEDR_ABI_VERSION,

	.alloc_mr = qedr_alloc_mr,
	.alloc_pd = qedr_alloc_pd,
	.alloc_ucontext = qedr_alloc_ucontext,
	.create_ah = qedr_create_ah,
	.create_cq = qedr_create_cq,
	.create_qp = qedr_create_qp,
	.create_srq = qedr_create_srq,
	.dealloc_pd = qedr_dealloc_pd,
	.dealloc_ucontext = qedr_dealloc_ucontext,
	.dereg_mr = qedr_dereg_mr,
	.destroy_ah = qedr_destroy_ah,
	.destroy_cq = qedr_destroy_cq,
	.destroy_qp = qedr_destroy_qp,
	.destroy_srq = qedr_destroy_srq,
	.device_group = &qedr_attr_group,
	.get_dev_fw_str = qedr_get_dev_fw_str,
	.get_dma_mr = qedr_get_dma_mr,
	.get_link_layer = qedr_link_layer,
	.map_mr_sg = qedr_map_mr_sg,
	.mmap = qedr_mmap,
	.mmap_free = qedr_mmap_free,
	.modify_qp = qedr_modify_qp,
	.modify_srq = qedr_modify_srq,
	.poll_cq = qedr_poll_cq,
	.post_recv = qedr_post_recv,
	.post_send = qedr_post_send,
	.post_srq_recv = qedr_post_srq_recv,
	.process_mad = qedr_process_mad,
	.query_device = qedr_query_device,
	.query_port = qedr_query_port,
	.query_qp = qedr_query_qp,
	.query_srq = qedr_query_srq,
	.reg_user_mr = qedr_reg_user_mr,
	.req_notify_cq = qedr_arm_cq,
	.resize_cq = qedr_resize_cq,

	INIT_RDMA_OBJ_SIZE(ib_ah, qedr_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, qedr_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_pd, qedr_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_qp, qedr_qp, ibqp),
	INIT_RDMA_OBJ_SIZE(ib_srq, qedr_srq, ibsrq),
	INIT_RDMA_OBJ_SIZE(ib_xrcd, qedr_xrcd, ibxrcd),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, qedr_ucontext, ibucontext),
};

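/* Populate the common ib_device fields, attach the protocol-specific
 * ops (iWARP or RoCE), bind the netdev and register the device with
 * the RDMA core as "qedr%d".
 */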
static int qedr_register_device(struct qedr_dev *dev)
{
	int rc;

	dev->ibdev.node_guid = dev->attr.node_guid;
	memcpy(dev->ibdev.node_desc, QEDR_NODE_DESC, sizeof(QEDR_NODE_DESC));

	if (IS_IWARP(dev)) {
		rc = qedr_iw_register_device(dev);
		if (rc)
			return rc;
	} else {
		qedr_roce_register_device(dev);
	}

	dev->ibdev.phys_port_cnt = 1;
	dev->ibdev.num_comp_vectors = dev->num_cnq;
	dev->ibdev.dev.parent = &dev->pdev->dev;

	ib_set_device_ops(&dev->ibdev, &qedr_dev_ops);

	rc = ib_device_set_netdev(&dev->ibdev, dev->ndev, 1);
	if (rc)
		return rc;

	dma_set_max_seg_size(&dev->pdev->dev, UINT_MAX);
	return ib_register_device(&dev->ibdev, "qedr%d", &dev->pdev->dev);
}

/* This function allocates fast-path status block memory */
static int qedr_alloc_mem_sb(struct qedr_dev *dev,
			     struct qed_sb_info *sb_info, u16 sb_id)
{
	struct status_block *sb_virt;
	dma_addr_t sb_phys;
	int rc;

	sb_virt = dma_alloc_coherent(&dev->pdev->dev,
				     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
	if (!sb_virt)
		return -ENOMEM;

	rc = dev->ops->common->sb_init(dev->cdev, sb_info,
				       sb_virt, sb_phys, sb_id,
				       QED_SB_TYPE_CNQ);
	if (rc) {
		pr_err("Status block initialization failed\n");
		dma_free_coherent(&dev->pdev->dev, sizeof(*sb_virt),
				  sb_virt, sb_phys);
		return rc;
	}

	return 0;
}

static void qedr_free_mem_sb(struct qedr_dev *dev,
			     struct qed_sb_info *sb_info, int sb_id)
{
	if (sb_info->sb_virt) {
		dev->ops->common->sb_release(dev->cdev, sb_info, sb_id,
					     QED_SB_TYPE_CNQ);
		dma_free_coherent(&dev->pdev->dev, sizeof(*sb_info->sb_virt),
				  (void *)sb_info->sb_virt, sb_info->sb_phys);
	}
}

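/* Release everything allocated by qedr_alloc_resources(): the iWARP
 * workqueue, the per-CNQ status blocks and PBL chains, and the CNQ,
 * status block and SGID tables.
 */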
static void qedr_free_resources(struct qedr_dev *dev)
{
	int i;

	if (IS_IWARP(dev))
		destroy_workqueue(dev->iwarp_wq);

	for (i = 0; i < dev->num_cnq; i++) {
		qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
		dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
	}

	kfree(dev->cnq_array);
	kfree(dev->sb_array);
	kfree(dev->sgid_tbl);
}

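/* Allocate the software resources needed before starting the RDMA
 * engine: the SGID table, the per-CNQ status blocks and PBL chains
 * and, for iWARP, the QP xarray and work queue.
 */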
static int qedr_alloc_resources(struct qedr_dev *dev)
{
	struct qed_chain_init_params params = {
		.mode		= QED_CHAIN_MODE_PBL,
		.intended_use	= QED_CHAIN_USE_TO_CONSUME,
		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
		.elem_size	= sizeof(struct regpair *),
	};
	struct qedr_cnq *cnq;
	__le16 *cons_pi;
	int i, rc;

	dev->sgid_tbl = kcalloc(QEDR_MAX_SGID, sizeof(union ib_gid),
				GFP_KERNEL);
	if (!dev->sgid_tbl)
		return -ENOMEM;

	spin_lock_init(&dev->sgid_lock);
	xa_init_flags(&dev->srqs, XA_FLAGS_LOCK_IRQ);

	if (IS_IWARP(dev)) {
		xa_init(&dev->qps);
		dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq");
	}

	/* Allocate Status blocks for CNQ */
	dev->sb_array = kcalloc(dev->num_cnq, sizeof(*dev->sb_array),
				GFP_KERNEL);
	if (!dev->sb_array) {
		rc = -ENOMEM;
		goto err1;
	}

	dev->cnq_array = kcalloc(dev->num_cnq,
				 sizeof(*dev->cnq_array), GFP_KERNEL);
	if (!dev->cnq_array) {
		rc = -ENOMEM;
		goto err2;
	}

	dev->sb_start = dev->ops->rdma_get_start_sb(dev->cdev);

	/* Allocate CNQ PBLs */
	params.num_elems = min_t(u32, QED_RDMA_MAX_CNQ_SIZE,
				 QEDR_ROCE_MAX_CNQ_SIZE);

	for (i = 0; i < dev->num_cnq; i++) {
		cnq = &dev->cnq_array[i];

		rc = qedr_alloc_mem_sb(dev, &dev->sb_array[i],
				       dev->sb_start + i);
		if (rc)
			goto err3;

		rc = dev->ops->common->chain_alloc(dev->cdev, &cnq->pbl,
						   &params);
		if (rc)
			goto err4;

		cnq->dev = dev;
		cnq->sb = &dev->sb_array[i];
		cons_pi = dev->sb_array[i].sb_virt->pi_array;
		cnq->hw_cons_ptr = &cons_pi[QED_ROCE_PROTOCOL_INDEX];
		cnq->index = i;
		sprintf(cnq->name, "qedr%d@pci:%s", i, pci_name(dev->pdev));

		DP_DEBUG(dev, QEDR_MSG_INIT, "cnq[%d].cons=%d\n",
			 i, qed_chain_get_cons_idx(&cnq->pbl));
	}

	return 0;
err4:
	qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
err3:
	for (--i; i >= 0; i--) {
		dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
		qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
	}
	kfree(dev->cnq_array);
err2:
	kfree(dev->sb_array);
err1:
	kfree(dev->sgid_tbl);
	return rc;
}

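/* Probe whether PCIe atomic operations can be routed to the root
 * complex and record the resulting IB atomic capability (global or
 * none).
 */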
static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev)
{
	int rc = pci_enable_atomic_ops_to_root(pdev,
					       PCI_EXP_DEVCAP2_ATOMIC_COMP64);

	if (rc) {
		dev->atomic_cap = IB_ATOMIC_NONE;
		DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability disabled\n");
	} else {
		dev->atomic_cap = IB_ATOMIC_GLOB;
		DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability enabled\n");
	}
}

static const struct qed_rdma_ops *qed_ops;

#define HILO_U64(hi, lo)		((((u64)(hi)) << 32) + (lo))

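/* CNQ interrupt handler: walk the completion notification queue,
 * invoke the completion handler of every CQ referenced by a consumed
 * element, then report the updated consumer index back to qed and
 * re-enable the status block interrupt.
 */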
static irqreturn_t qedr_irq_handler(int irq, void *handle)
{
	u16 hw_comp_cons, sw_comp_cons;
	struct qedr_cnq *cnq = handle;
	struct regpair *cq_handle;
	struct qedr_cq *cq;

	qed_sb_ack(cnq->sb, IGU_INT_DISABLE, 0);

	qed_sb_update_sb_idx(cnq->sb);

	hw_comp_cons = le16_to_cpu(*cnq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);

	/* Align protocol-index and chain reads */
	rmb();

	while (sw_comp_cons != hw_comp_cons) {
		cq_handle = (struct regpair *)qed_chain_consume(&cnq->pbl);
		cq = (struct qedr_cq *)(uintptr_t)HILO_U64(cq_handle->hi,
				cq_handle->lo);

		if (cq == NULL) {
			DP_ERR(cnq->dev,
			       "Received NULL CQ cq_handle->hi=%d cq_handle->lo=%d sw_comp_cons=%d hw_comp_cons=%d\n",
			       cq_handle->hi, cq_handle->lo, sw_comp_cons,
			       hw_comp_cons);

			break;
		}

		if (cq->sig != QEDR_CQ_MAGIC_NUMBER) {
			DP_ERR(cnq->dev,
			       "Problem with cq signature, cq_handle->hi=%d cq_handle->lo=%d cq=%p\n",
			       cq_handle->hi, cq_handle->lo, cq);
			break;
		}

		cq->arm_flags = 0;

		if (!cq->destroyed && cq->ibcq.comp_handler)
			(*cq->ibcq.comp_handler)
				(&cq->ibcq, cq->ibcq.cq_context);

		/* The CQ's CNQ notification counter is checked before
		 * destroying the CQ in a busy-wait loop that waits for all of
		 * the CQ's CNQ interrupts to be processed. It is increased
		 * here, only after the completion handler, to ensure that
		 * the handler is not running when the CQ is destroyed.
		 */
		cq->cnq_notif++;

		sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);

		cnq->n_comp++;
	}

	qed_ops->rdma_cnq_prod_update(cnq->dev->rdma_ctx, cnq->index,
				      sw_comp_cons);

	qed_sb_ack(cnq->sb, IGU_INT_ENABLE, 1);

	return IRQ_HANDLED;
}

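/* Synchronize and free the MSI-X vectors that were requested for the
 * CNQs.
 */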
static void qedr_sync_free_irqs(struct qedr_dev *dev)
{
	u32 vector;
	u16 idx;
	int i;

	for (i = 0; i < dev->int_info.used_cnt; i++) {
		if (dev->int_info.msix_cnt) {
			idx = i * dev->num_hwfns + dev->affin_hwfn_idx;
			vector = dev->int_info.msix[idx].vector;
			synchronize_irq(vector);
			free_irq(vector, &dev->cnq_array[i]);
		}
	}

	dev->int_info.used_cnt = 0;
}

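/* Request one MSI-X vector per CNQ, mapping each CNQ to the vector
 * that belongs to the hwfn this device is affinitized to.
 */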
static int qedr_req_msix_irqs(struct qedr_dev *dev)
{
	int i, rc = 0;
	u16 idx;

	if (dev->num_cnq > dev->int_info.msix_cnt) {
		DP_ERR(dev,
		       "Interrupt mismatch: %d CNQ queues > %d MSI-x vectors\n",
		       dev->num_cnq, dev->int_info.msix_cnt);
		return -EINVAL;
	}

	for (i = 0; i < dev->num_cnq; i++) {
		idx = i * dev->num_hwfns + dev->affin_hwfn_idx;
		rc = request_irq(dev->int_info.msix[idx].vector,
				 qedr_irq_handler, 0, dev->cnq_array[i].name,
				 &dev->cnq_array[i]);
		if (rc) {
			DP_ERR(dev, "Request cnq %d irq failed\n", i);
			qedr_sync_free_irqs(dev);
		} else {
			DP_DEBUG(dev, QEDR_MSG_INIT,
				 "Requested cnq irq for %s [entry %d]. Cookie is at %p\n",
				 dev->cnq_array[i].name, i,
				 &dev->cnq_array[i]);
			dev->int_info.used_cnt++;
		}
	}

	return rc;
}

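/* Learn the interrupt configuration from qed and request the CNQ
 * MSI-X interrupts.
 */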
static int qedr_setup_irqs(struct qedr_dev *dev)
{
	int rc;

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs\n");

	/* Learn Interrupt configuration */
	rc = dev->ops->rdma_set_rdma_int(dev->cdev, dev->num_cnq);
	if (rc < 0)
		return rc;

	rc = dev->ops->rdma_get_rdma_int(dev->cdev, &dev->int_info);
	if (rc) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "get_rdma_int failed\n");
		return rc;
	}

	if (dev->int_info.msix_cnt) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "rdma msix_cnt = %d\n",
			 dev->int_info.msix_cnt);
		rc = qedr_req_msix_irqs(dev);
		if (rc)
			return rc;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs succeeded\n");

	return 0;
}

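/* Query the device capabilities from qed, verify that the minimum
 * supported page size does not exceed the kernel PAGE_SIZE and copy
 * the attributes into dev->attr.
 */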
static int qedr_set_device_attr(struct qedr_dev *dev)
{
	struct qed_rdma_device *qed_attr;
	struct qedr_device_attr *attr;
	u32 page_size;

	/* Part 1 - query core capabilities */
	qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx);

	/* Part 2 - check capabilities */
	page_size = ~qed_attr->page_size_caps + 1;
	if (page_size > PAGE_SIZE) {
		DP_ERR(dev,
		       "Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n",
		       PAGE_SIZE, page_size);
		return -ENODEV;
	}

	/* Part 3 - copy and update capabilities */
	attr = &dev->attr;
	attr->vendor_id = qed_attr->vendor_id;
	attr->vendor_part_id = qed_attr->vendor_part_id;
	attr->hw_ver = qed_attr->hw_ver;
	attr->fw_ver = qed_attr->fw_ver;
	attr->node_guid = qed_attr->node_guid;
	attr->sys_image_guid = qed_attr->sys_image_guid;
	attr->max_cnq = qed_attr->max_cnq;
	attr->max_sge = qed_attr->max_sge;
	attr->max_inline = qed_attr->max_inline;
	attr->max_sqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_SQE);
	attr->max_rqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_RQE);
	attr->max_qp_resp_rd_atomic_resc = qed_attr->max_qp_resp_rd_atomic_resc;
	attr->max_qp_req_rd_atomic_resc = qed_attr->max_qp_req_rd_atomic_resc;
	attr->max_dev_resp_rd_atomic_resc =
	    qed_attr->max_dev_resp_rd_atomic_resc;
	attr->max_cq = qed_attr->max_cq;
	attr->max_qp = qed_attr->max_qp;
	attr->max_mr = qed_attr->max_mr;
	attr->max_mr_size = qed_attr->max_mr_size;
	attr->max_cqe = min_t(u64, qed_attr->max_cqe, QEDR_MAX_CQES);
	attr->max_mw = qed_attr->max_mw;
	attr->max_mr_mw_fmr_pbl = qed_attr->max_mr_mw_fmr_pbl;
	attr->max_mr_mw_fmr_size = qed_attr->max_mr_mw_fmr_size;
	attr->max_pd = qed_attr->max_pd;
	attr->max_ah = qed_attr->max_ah;
	attr->max_pkey = qed_attr->max_pkey;
	attr->max_srq = qed_attr->max_srq;
	attr->max_srq_wr = qed_attr->max_srq_wr;
	attr->dev_caps = qed_attr->dev_caps;
	attr->page_size_caps = qed_attr->page_size_caps;
	attr->dev_ack_delay = qed_attr->dev_ack_delay;
	attr->reserved_lkey = qed_attr->reserved_lkey;
	attr->bad_pkey_counter = qed_attr->bad_pkey_counter;
	attr->max_stats_queues = qed_attr->max_stats_queues;

	return 0;
}

static void qedr_unaffiliated_event(void *context, u8 event_code)
{
	pr_err("unaffiliated event not implemented yet\n");
}

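/* Affiliated asynchronous event handler: translate a firmware event
 * code into an ib_event and deliver it to the event handler of the
 * CQ, QP or SRQ identified by the firmware handle.
 */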
static void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
{
#define EVENT_TYPE_NOT_DEFINED	0
#define EVENT_TYPE_CQ		1
#define EVENT_TYPE_QP		2
#define EVENT_TYPE_SRQ		3
	struct qedr_dev *dev = (struct qedr_dev *)context;
	struct regpair *async_handle = (struct regpair *)fw_handle;
	u64 roce_handle64 = ((u64) async_handle->hi << 32) + async_handle->lo;
	u8 event_type = EVENT_TYPE_NOT_DEFINED;
	struct ib_event event;
	struct ib_srq *ibsrq;
	struct qedr_srq *srq;
	unsigned long flags;
	struct ib_cq *ibcq;
	struct ib_qp *ibqp;
	struct qedr_cq *cq;
	struct qedr_qp *qp;
	u16 srq_id;

	if (IS_ROCE(dev)) {
		switch (e_code) {
		case ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR:
			event.event = IB_EVENT_CQ_ERR;
			event_type = EVENT_TYPE_CQ;
			break;
		case ROCE_ASYNC_EVENT_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR:
			event.event = IB_EVENT_QP_FATAL;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR:
			event.event = IB_EVENT_QP_REQ_ERR;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_SRQ_LIMIT:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			event_type = EVENT_TYPE_SRQ;
			break;
		case ROCE_ASYNC_EVENT_SRQ_EMPTY:
			event.event = IB_EVENT_SRQ_ERR;
			event_type = EVENT_TYPE_SRQ;
			break;
		case ROCE_ASYNC_EVENT_XRC_DOMAIN_ERR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_INVALID_XRCETH_ERR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_XRC_SRQ_CATASTROPHIC_ERR:
			event.event = IB_EVENT_CQ_ERR;
			event_type = EVENT_TYPE_CQ;
			break;
		default:
			DP_ERR(dev, "unsupported event %d on handle=%llx\n",
			       e_code, roce_handle64);
		}
	} else {
		switch (e_code) {
		case QED_IWARP_EVENT_SRQ_LIMIT:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			event_type = EVENT_TYPE_SRQ;
			break;
		case QED_IWARP_EVENT_SRQ_EMPTY:
			event.event = IB_EVENT_SRQ_ERR;
			event_type = EVENT_TYPE_SRQ;
			break;
		default:
			DP_ERR(dev, "unsupported event %d on handle=%llx\n",
			       e_code, roce_handle64);
		}
	}
	switch (event_type) {
	case EVENT_TYPE_CQ:
		cq = (struct qedr_cq *)(uintptr_t)roce_handle64;
		if (cq) {
			ibcq = &cq->ibcq;
			if (ibcq->event_handler) {
				event.device = ibcq->device;
				event.element.cq = ibcq;
				ibcq->event_handler(&event, ibcq->cq_context);
			}
		} else {
			WARN(1,
			     "Error: CQ event with NULL pointer ibcq. Handle=%llx\n",
			     roce_handle64);
		}
		DP_ERR(dev, "CQ event %d on handle %p\n", e_code, cq);
		break;
	case EVENT_TYPE_QP:
		qp = (struct qedr_qp *)(uintptr_t)roce_handle64;
		if (qp) {
			ibqp = &qp->ibqp;
			if (ibqp->event_handler) {
				event.device = ibqp->device;
				event.element.qp = ibqp;
				ibqp->event_handler(&event, ibqp->qp_context);
			}
		} else {
			WARN(1,
			     "Error: QP event with NULL pointer ibqp. Handle=%llx\n",
			     roce_handle64);
		}
		DP_ERR(dev, "QP event %d on handle %p\n", e_code, qp);
		break;
	case EVENT_TYPE_SRQ:
		srq_id = (u16)roce_handle64;
		xa_lock_irqsave(&dev->srqs, flags);
		srq = xa_load(&dev->srqs, srq_id);
		if (srq) {
			ibsrq = &srq->ibsrq;
			if (ibsrq->event_handler) {
				event.device = ibsrq->device;
				event.element.srq = ibsrq;
				ibsrq->event_handler(&event,
						     ibsrq->srq_context);
			}
		} else {
			DP_NOTICE(dev,
				  "SRQ event with NULL pointer ibsrq. Handle=%llx\n",
				  roce_handle64);
		}
		xa_unlock_irqrestore(&dev->srqs, flags);
		DP_NOTICE(dev, "SRQ event %d on handle %p\n", e_code, srq);
		break;
	default:
		break;
	}
}

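/* Start the RDMA engine: describe the CNQ PBLs to qed, register the
 * asynchronous event callbacks, add a user context and record the
 * returned doorbell (DPI) information, then query the device
 * attributes.
 */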
static int qedr_init_hw(struct qedr_dev *dev)
{
	struct qed_rdma_add_user_out_params out_params;
	struct qed_rdma_start_in_params *in_params;
	struct qed_rdma_cnq_params *cur_pbl;
	struct qed_rdma_events events;
	dma_addr_t p_phys_table;
	u32 page_cnt;
	int rc = 0;
	int i;

	in_params = kzalloc(sizeof(*in_params), GFP_KERNEL);
	if (!in_params) {
		rc = -ENOMEM;
		goto out;
	}

	in_params->desired_cnq = dev->num_cnq;
	for (i = 0; i < dev->num_cnq; i++) {
		cur_pbl = &in_params->cnq_pbl_list[i];

		page_cnt = qed_chain_get_page_cnt(&dev->cnq_array[i].pbl);
		cur_pbl->num_pbl_pages = page_cnt;

		p_phys_table = qed_chain_get_pbl_phys(&dev->cnq_array[i].pbl);
		cur_pbl->pbl_ptr = (u64)p_phys_table;
	}

	events.affiliated_event = qedr_affiliated_event;
	events.unaffiliated_event = qedr_unaffiliated_event;
	events.context = dev;

	in_params->events = &events;
	in_params->cq_mode = QED_RDMA_CQ_MODE_32_BITS;
	in_params->max_mtu = dev->ndev->mtu;
	dev->iwarp_max_mtu = dev->ndev->mtu;
	ether_addr_copy(&in_params->mac_addr[0], dev->ndev->dev_addr);

	rc = dev->ops->rdma_init(dev->cdev, in_params);
	if (rc)
		goto out;

	rc = dev->ops->rdma_add_user(dev->rdma_ctx, &out_params);
	if (rc)
		goto out;

	dev->db_addr = out_params.dpi_addr;
	dev->db_phys_addr = out_params.dpi_phys_addr;
	dev->db_size = out_params.dpi_size;
	dev->dpi = out_params.dpi;

	rc = qedr_set_device_attr(dev);
out:
	kfree(in_params);
	if (rc)
		DP_ERR(dev, "Init HW Failed rc = %d\n", rc);

	return rc;
}

static void qedr_stop_hw(struct qedr_dev *dev)
{
	dev->ops->rdma_remove_user(dev->rdma_ctx, dev->dpi);
	dev->ops->rdma_stop(dev->rdma_ctx);
}

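/* Probe entry point called by qede for each RDMA-capable port:
 * allocate the ib_device, bind the qed RDMA ops, allocate resources,
 * start the hardware, request interrupts and register with the RDMA
 * core.
 */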
static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
				 struct net_device *ndev)
{
	struct qed_dev_rdma_info dev_info;
	struct qedr_dev *dev;
	int rc = 0;

	dev = ib_alloc_device(qedr_dev, ibdev);
	if (!dev) {
		pr_err("Unable to allocate ib device\n");
		return NULL;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr add device called\n");

	dev->pdev = pdev;
	dev->ndev = ndev;
	dev->cdev = cdev;

	qed_ops = qed_get_rdma_ops();
	if (!qed_ops) {
		DP_ERR(dev, "Failed to get qed roce operations\n");
		goto init_err;
	}

	dev->ops = qed_ops;
	rc = qed_ops->fill_dev_info(cdev, &dev_info);
	if (rc)
		goto init_err;

	dev->user_dpm_enabled = dev_info.user_dpm_enabled;
	dev->rdma_type = dev_info.rdma_type;
	dev->num_hwfns = dev_info.common.num_hwfns;

	if (IS_IWARP(dev) && QEDR_IS_CMT(dev)) {
		rc = dev->ops->iwarp_set_engine_affin(cdev, false);
		if (rc) {
			DP_ERR(dev, "iWARP is disabled over a 100G device. Enabling it may impact L2 performance. To enable it, run: devlink dev param set <dev> name iwarp_cmt value true cmode runtime\n");
			goto init_err;
		}
	}
	dev->affin_hwfn_idx = dev->ops->common->get_affin_hwfn_idx(cdev);

	dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev);

	dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev);
	if (!dev->num_cnq) {
		DP_ERR(dev, "Failed. At least one CNQ is required.\n");
		rc = -ENOMEM;
		goto init_err;
	}

	dev->wq_multiplier = QEDR_WQ_MULTIPLIER_DFT;

	qedr_pci_set_atomic(dev, pdev);

	rc = qedr_alloc_resources(dev);
	if (rc)
		goto init_err;

	rc = qedr_init_hw(dev);
	if (rc)
		goto alloc_err;

	rc = qedr_setup_irqs(dev);
	if (rc)
		goto irq_err;

	rc = qedr_register_device(dev);
	if (rc) {
		DP_ERR(dev, "Unable to register device\n");
		goto reg_err;
	}

	if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
	return dev;

reg_err:
	qedr_sync_free_irqs(dev);
irq_err:
	qedr_stop_hw(dev);
alloc_err:
	qedr_free_resources(dev);
init_err:
	ib_dealloc_device(&dev->ibdev);
	DP_ERR(dev, "qedr driver load failed rc=%d\n", rc);

	return NULL;
}

static void qedr_remove(struct qedr_dev *dev)
{
	/* First unregister with the stack to stop all the active traffic
	 * of the registered clients.
	 */
	ib_unregister_device(&dev->ibdev);

	qedr_stop_hw(dev);
	qedr_sync_free_irqs(dev);
	qedr_free_resources(dev);

	if (IS_IWARP(dev) && QEDR_IS_CMT(dev))
		dev->ops->iwarp_set_engine_affin(dev->cdev, true);

	ib_dealloc_device(&dev->ibdev);
}

static void qedr_close(struct qedr_dev *dev)
{
	if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR);
}

static void qedr_shutdown(struct qedr_dev *dev)
{
	qedr_close(dev);
	qedr_remove(dev);
}

static void qedr_open(struct qedr_dev *dev)
{
	if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
}

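/* Handle a MAC address change on the underlying netdev: rebuild the
 * default SGID from the new MAC, update the LL2 MAC filter and report
 * an IB_EVENT_GID_CHANGE to the stack.
 */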
static void qedr_mac_address_change(struct qedr_dev *dev)
{
	union ib_gid *sgid = &dev->sgid_tbl[0];
	u8 guid[8], mac_addr[6];
	int rc;

	/* Update SGID */
	ether_addr_copy(&mac_addr[0], dev->ndev->dev_addr);
	guid[0] = mac_addr[0] ^ 2;
	guid[1] = mac_addr[1];
	guid[2] = mac_addr[2];
	guid[3] = 0xff;
	guid[4] = 0xfe;
	guid[5] = mac_addr[3];
	guid[6] = mac_addr[4];
	guid[7] = mac_addr[5];
	sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	memcpy(&sgid->raw[8], guid, sizeof(guid));

	/* Update LL2 */
	rc = dev->ops->ll2_set_mac_filter(dev->cdev,
					  dev->gsi_ll2_mac_address,
					  dev->ndev->dev_addr);

	ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);

	qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE);

	if (rc)
		DP_ERR(dev, "Error updating mac filter\n");
}

/* Event handling via the NIC driver ensures that all NIC-specific
 * initialization is done before the RoCE driver notifies the event
 * to the stack.
 */
static void qedr_notify(struct qedr_dev *dev, enum qede_rdma_event event)
{
	switch (event) {
	case QEDE_UP:
		qedr_open(dev);
		break;
	case QEDE_DOWN:
		qedr_close(dev);
		break;
	case QEDE_CLOSE:
		qedr_shutdown(dev);
		break;
	case QEDE_CHANGE_ADDR:
		qedr_mac_address_change(dev);
		break;
	case QEDE_CHANGE_MTU:
		if (rdma_protocol_iwarp(&dev->ibdev, 1))
			if (dev->ndev->mtu != dev->iwarp_max_mtu)
				DP_NOTICE(dev,
					  "MTU was changed from %d to %d. This will not take effect for iWARP until qedr is reloaded\n",
					  dev->iwarp_max_mtu, dev->ndev->mtu);
		break;
	default:
		pr_err("Event not supported\n");
	}
}

static struct qedr_driver qedr_drv = {
	.name = "qedr_driver",
	.add = qedr_add,
	.remove = qedr_remove,
	.notify = qedr_notify,
};

static int __init qedr_init_module(void)
{
	return qede_rdma_register_driver(&qedr_drv);
}

static void __exit qedr_exit_module(void)
{
	qede_rdma_unregister_driver(&qedr_drv);
}

module_init(qedr_init_module);
module_exit(qedr_exit_module);