xref: /openbmc/linux/drivers/infiniband/hw/mlx4/main.c (revision d2999e1b)
1 /*
2  * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 
34 #include <linux/module.h>
35 #include <linux/init.h>
36 #include <linux/slab.h>
37 #include <linux/errno.h>
38 #include <linux/netdevice.h>
39 #include <linux/inetdevice.h>
40 #include <linux/rtnetlink.h>
41 #include <linux/if_vlan.h>
42 #include <net/ipv6.h>
43 #include <net/addrconf.h>
44 
45 #include <rdma/ib_smi.h>
46 #include <rdma/ib_user_verbs.h>
47 #include <rdma/ib_addr.h>
48 
49 #include <linux/mlx4/driver.h>
50 #include <linux/mlx4/cmd.h>
51 #include <linux/mlx4/qp.h>
52 
53 #include "mlx4_ib.h"
54 #include "user.h"
55 
56 #define DRV_NAME	MLX4_IB_DRV_NAME
57 #define DRV_VERSION	"2.2-1"
58 #define DRV_RELDATE	"Feb 2014"
59 
60 #define MLX4_IB_FLOW_MAX_PRIO 0xFFF
61 #define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
62 
63 MODULE_AUTHOR("Roland Dreier");
64 MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
65 MODULE_LICENSE("Dual BSD/GPL");
66 MODULE_VERSION(DRV_VERSION);
67 
68 int mlx4_ib_sm_guid_assign = 1;
69 module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
70 MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 1)");
71 
72 static const char mlx4_ib_version[] =
73 	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
74 	DRV_VERSION " (" DRV_RELDATE ")\n";
75 
76 struct update_gid_work {
77 	struct work_struct	work;
78 	union ib_gid		gids[128];
79 	struct mlx4_ib_dev     *dev;
80 	int			port;
81 };
82 
83 static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
84 
85 static struct workqueue_struct *wq;
86 
87 static void init_query_mad(struct ib_smp *mad)
88 {
89 	mad->base_version  = 1;
90 	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
91 	mad->class_version = 1;
92 	mad->method	   = IB_MGMT_METHOD_GET;
93 }
94 
95 static union ib_gid zgid;
96 
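/*
 * Returns nonzero when device managed flow steering (DMFS) is usable:
 * the device must be in MLX4_STEERING_MODE_DEVICE_MANAGED, every present
 * link layer needs its capability bit (FS_EN for Ethernet ports,
 * DMFS_IPOIB for IB ports), and IB ports are not supported together with
 * a multifunction (SR-IOV) configuration.
 */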
97 static int check_flow_steering_support(struct mlx4_dev *dev)
98 {
99 	int eth_num_ports = 0;
100 	int ib_num_ports = 0;
101 
102 	int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;
103 
104 	if (dmfs) {
105 		int i;
106 		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
107 			eth_num_ports++;
108 		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
109 			ib_num_ports++;
110 		dmfs &= (!ib_num_ports ||
111 			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) &&
112 			(!eth_num_ports ||
113 			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN));
114 		if (ib_num_ports && mlx4_is_mfunc(dev)) {
115 			pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n");
116 			dmfs = 0;
117 		}
118 	}
119 	return dmfs;
120 }
121 
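/*
 * Fill struct ib_device_attr for ib_query_device(): vendor and hardware
 * identification comes from a NODE_INFO subnet management MAD issued via
 * mlx4_MAD_IFC(), the capability flags are derived from
 * dev->caps.flags/bmme_flags, and the resource limits come from the caps
 * and quotas of the underlying mlx4 core device.
 */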
122 static int mlx4_ib_query_device(struct ib_device *ibdev,
123 				struct ib_device_attr *props)
124 {
125 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
126 	struct ib_smp *in_mad  = NULL;
127 	struct ib_smp *out_mad = NULL;
128 	int err = -ENOMEM;
129 
130 	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
131 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
132 	if (!in_mad || !out_mad)
133 		goto out;
134 
135 	init_query_mad(in_mad);
136 	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
137 
138 	err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
139 			   1, NULL, NULL, in_mad, out_mad);
140 	if (err)
141 		goto out;
142 
143 	memset(props, 0, sizeof *props);
144 
145 	props->fw_ver = dev->dev->caps.fw_ver;
146 	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
147 		IB_DEVICE_PORT_ACTIVE_EVENT		|
148 		IB_DEVICE_SYS_IMAGE_GUID		|
149 		IB_DEVICE_RC_RNR_NAK_GEN		|
150 		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
151 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
152 		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
153 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
154 		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
155 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM)
156 		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
157 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
158 		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
159 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
160 		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
161 	if (dev->dev->caps.max_gso_sz && dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)
162 		props->device_cap_flags |= IB_DEVICE_UD_TSO;
163 	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
164 		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
165 	if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
166 	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
167 	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
168 		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
169 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
170 		props->device_cap_flags |= IB_DEVICE_XRC;
171 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
172 		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW;
173 	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
174 		if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B)
175 			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
176 		else
177 			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
178 	}
179 	if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
180 		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
181 
182 	props->vendor_id	   = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
183 		0xffffff;
184 	props->vendor_part_id	   = dev->dev->pdev->device;
185 	props->hw_ver		   = be32_to_cpup((__be32 *) (out_mad->data + 32));
186 	memcpy(&props->sys_image_guid, out_mad->data +	4, 8);
187 
188 	props->max_mr_size	   = ~0ull;
189 	props->page_size_cap	   = dev->dev->caps.page_size_cap;
190 	props->max_qp		   = dev->dev->quotas.qp;
191 	props->max_qp_wr	   = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
192 	props->max_sge		   = min(dev->dev->caps.max_sq_sg,
193 					 dev->dev->caps.max_rq_sg);
194 	props->max_cq		   = dev->dev->quotas.cq;
195 	props->max_cqe		   = dev->dev->caps.max_cqes;
196 	props->max_mr		   = dev->dev->quotas.mpt;
197 	props->max_pd		   = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
198 	props->max_qp_rd_atom	   = dev->dev->caps.max_qp_dest_rdma;
199 	props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
200 	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
201 	props->max_srq		   = dev->dev->quotas.srq;
202 	props->max_srq_wr	   = dev->dev->caps.max_srq_wqes - 1;
203 	props->max_srq_sge	   = dev->dev->caps.max_srq_sge;
204 	props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
205 	props->local_ca_ack_delay  = dev->dev->caps.local_ca_ack_delay;
206 	props->atomic_cap	   = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
207 		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
208 	props->masked_atomic_cap   = props->atomic_cap;
209 	props->max_pkeys	   = dev->dev->caps.pkey_table_len[1];
210 	props->max_mcast_grp	   = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
211 	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
212 	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
213 					   props->max_mcast_grp;
214 	props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
215 
216 out:
217 	kfree(in_mad);
218 	kfree(out_mad);
219 
220 	return err;
221 }
222 
223 static enum rdma_link_layer
224 mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
225 {
226 	struct mlx4_dev *dev = to_mdev(device)->dev;
227 
228 	return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
229 		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
230 }
231 
232 static int ib_link_query_port(struct ib_device *ibdev, u8 port,
233 			      struct ib_port_attr *props, int netw_view)
234 {
235 	struct ib_smp *in_mad  = NULL;
236 	struct ib_smp *out_mad = NULL;
237 	int ext_active_speed;
238 	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
239 	int err = -ENOMEM;
240 
241 	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
242 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
243 	if (!in_mad || !out_mad)
244 		goto out;
245 
246 	init_query_mad(in_mad);
247 	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
248 	in_mad->attr_mod = cpu_to_be32(port);
249 
250 	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
251 		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
252 
253 	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
254 				in_mad, out_mad);
255 	if (err)
256 		goto out;
257 
258 
259 	props->lid		= be16_to_cpup((__be16 *) (out_mad->data + 16));
260 	props->lmc		= out_mad->data[34] & 0x7;
261 	props->sm_lid		= be16_to_cpup((__be16 *) (out_mad->data + 18));
262 	props->sm_sl		= out_mad->data[36] & 0xf;
263 	props->state		= out_mad->data[32] & 0xf;
264 	props->phys_state	= out_mad->data[33] >> 4;
265 	props->port_cap_flags	= be32_to_cpup((__be32 *) (out_mad->data + 20));
266 	if (netw_view)
267 		props->gid_tbl_len = out_mad->data[50];
268 	else
269 		props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
270 	props->max_msg_sz	= to_mdev(ibdev)->dev->caps.max_msg_sz;
271 	props->pkey_tbl_len	= to_mdev(ibdev)->dev->caps.pkey_table_len[port];
272 	props->bad_pkey_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 46));
273 	props->qkey_viol_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 48));
274 	props->active_width	= out_mad->data[31] & 0xf;
275 	props->active_speed	= out_mad->data[35] >> 4;
276 	props->max_mtu		= out_mad->data[41] & 0xf;
277 	props->active_mtu	= out_mad->data[36] >> 4;
278 	props->subnet_timeout	= out_mad->data[51] & 0x1f;
279 	props->max_vl_num	= out_mad->data[37] >> 4;
280 	props->init_type_reply	= out_mad->data[41] >> 4;
281 
282 	/* Check if extended speeds (EDR/FDR/...) are supported */
283 	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
284 		ext_active_speed = out_mad->data[62] >> 4;
285 
286 		switch (ext_active_speed) {
287 		case 1:
288 			props->active_speed = IB_SPEED_FDR;
289 			break;
290 		case 2:
291 			props->active_speed = IB_SPEED_EDR;
292 			break;
293 		}
294 	}
295 
296 	/* If the reported active speed is QDR, check if it is FDR-10 */
297 	if (props->active_speed == IB_SPEED_QDR) {
298 		init_query_mad(in_mad);
299 		in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
300 		in_mad->attr_mod = cpu_to_be32(port);
301 
302 		err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
303 				   NULL, NULL, in_mad, out_mad);
304 		if (err)
305 			goto out;
306 
307 		/* Checking LinkSpeedActive for FDR-10 */
308 		if (out_mad->data[15] & 0x1)
309 			props->active_speed = IB_SPEED_FDR10;
310 	}
311 
312 	/* Avoid wrong speed value returned by FW if the IB link is down. */
313 	if (props->state == IB_PORT_DOWN)
314 		 props->active_speed = IB_SPEED_SDR;
315 
316 out:
317 	kfree(in_mad);
318 	kfree(out_mad);
319 	return err;
320 }
321 
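/*
 * Map the logical port state to the PortInfo PortPhysicalState field;
 * the values follow the IB encoding, where 5 means LinkUp and 3 means
 * Disabled.
 */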
322 static u8 state_to_phys_state(enum ib_port_state state)
323 {
324 	return state == IB_PORT_ACTIVE ? 5 : 3;
325 }
326 
327 static int eth_link_query_port(struct ib_device *ibdev, u8 port,
328 			       struct ib_port_attr *props, int netw_view)
329 {
330 
331 	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
332 	struct mlx4_ib_iboe *iboe = &mdev->iboe;
333 	struct net_device *ndev;
334 	enum ib_mtu tmp;
335 	struct mlx4_cmd_mailbox *mailbox;
336 	int err = 0;
337 
338 	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
339 	if (IS_ERR(mailbox))
340 		return PTR_ERR(mailbox);
341 
342 	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
343 			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
344 			   MLX4_CMD_WRAPPED);
345 	if (err)
346 		goto out;
347 
348 	props->active_width	=  (((u8 *)mailbox->buf)[5] == 0x40) ?
349 						IB_WIDTH_4X : IB_WIDTH_1X;
350 	props->active_speed	= IB_SPEED_QDR;
351 	props->port_cap_flags	= IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
352 	props->gid_tbl_len	= mdev->dev->caps.gid_table_len[port];
353 	props->max_msg_sz	= mdev->dev->caps.max_msg_sz;
354 	props->pkey_tbl_len	= 1;
355 	props->max_mtu		= IB_MTU_4096;
356 	props->max_vl_num	= 2;
357 	props->state		= IB_PORT_DOWN;
358 	props->phys_state	= state_to_phys_state(props->state);
359 	props->active_mtu	= IB_MTU_256;
360 	spin_lock(&iboe->lock);
361 	ndev = iboe->netdevs[port - 1];
362 	if (!ndev)
363 		goto out_unlock;
364 
365 	tmp = iboe_get_mtu(ndev->mtu);
366 	props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;
367 
368 	props->state		= (netif_running(ndev) && netif_carrier_ok(ndev)) ?
369 					IB_PORT_ACTIVE : IB_PORT_DOWN;
370 	props->phys_state	= state_to_phys_state(props->state);
371 out_unlock:
372 	spin_unlock(&iboe->lock);
373 out:
374 	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
375 	return err;
376 }
377 
378 int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
379 			 struct ib_port_attr *props, int netw_view)
380 {
381 	int err;
382 
383 	memset(props, 0, sizeof *props);
384 
385 	err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
386 		ib_link_query_port(ibdev, port, props, netw_view) :
387 				eth_link_query_port(ibdev, port, props, netw_view);
388 
389 	return err;
390 }
391 
392 static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
393 			      struct ib_port_attr *props)
394 {
395 	/* returns host view */
396 	return __mlx4_ib_query_port(ibdev, port, props, 0);
397 }
398 
399 int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
400 			union ib_gid *gid, int netw_view)
401 {
402 	struct ib_smp *in_mad  = NULL;
403 	struct ib_smp *out_mad = NULL;
404 	int err = -ENOMEM;
405 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
406 	int clear = 0;
407 	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
408 
409 	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
410 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
411 	if (!in_mad || !out_mad)
412 		goto out;
413 
414 	init_query_mad(in_mad);
415 	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
416 	in_mad->attr_mod = cpu_to_be32(port);
417 
418 	if (mlx4_is_mfunc(dev->dev) && netw_view)
419 		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
420 
421 	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
422 	if (err)
423 		goto out;
424 
425 	memcpy(gid->raw, out_mad->data + 8, 8);
426 
427 	if (mlx4_is_mfunc(dev->dev) && !netw_view) {
428 		if (index) {
429 			/* For any index > 0, return the null guid */
430 			err = 0;
431 			clear = 1;
432 			goto out;
433 		}
434 	}
435 
436 	init_query_mad(in_mad);
437 	in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
438 	in_mad->attr_mod = cpu_to_be32(index / 8);
439 
440 	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
441 			   NULL, NULL, in_mad, out_mad);
442 	if (err)
443 		goto out;
444 
445 	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
446 
447 out:
448 	if (clear)
449 		memset(gid->raw + 8, 0, 8);
450 	kfree(in_mad);
451 	kfree(out_mad);
452 	return err;
453 }
454 
455 static int iboe_query_gid(struct ib_device *ibdev, u8 port, int index,
456 			  union ib_gid *gid)
457 {
458 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
459 
460 	*gid = dev->iboe.gid_table[port - 1][index];
461 
462 	return 0;
463 }
464 
465 static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
466 			     union ib_gid *gid)
467 {
468 	if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
469 		return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);
470 	else
471 		return iboe_query_gid(ibdev, port, index, gid);
472 }
473 
474 int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
475 			 u16 *pkey, int netw_view)
476 {
477 	struct ib_smp *in_mad  = NULL;
478 	struct ib_smp *out_mad = NULL;
479 	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
480 	int err = -ENOMEM;
481 
482 	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
483 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
484 	if (!in_mad || !out_mad)
485 		goto out;
486 
487 	init_query_mad(in_mad);
488 	in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
489 	in_mad->attr_mod = cpu_to_be32(index / 32);
490 
491 	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
492 		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
493 
494 	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
495 			   in_mad, out_mad);
496 	if (err)
497 		goto out;
498 
499 	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);
500 
501 out:
502 	kfree(in_mad);
503 	kfree(out_mad);
504 	return err;
505 }
506 
507 static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
508 {
509 	return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
510 }
511 
512 static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
513 				 struct ib_device_modify *props)
514 {
515 	struct mlx4_cmd_mailbox *mailbox;
516 	unsigned long flags;
517 
518 	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
519 		return -EOPNOTSUPP;
520 
521 	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
522 		return 0;
523 
524 	if (mlx4_is_slave(to_mdev(ibdev)->dev))
525 		return -EOPNOTSUPP;
526 
527 	spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
528 	memcpy(ibdev->node_desc, props->node_desc, 64);
529 	spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);
530 
531 	/*
532 	 * If possible, pass node desc to FW, so it can generate
533 	 * a trap 144.  If the cmd fails, just ignore.
534 	 */
535 	mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
536 	if (IS_ERR(mailbox))
537 		return 0;
538 
539 	memcpy(mailbox->buf, props->node_desc, 64);
540 	mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
541 		 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
542 
543 	mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);
544 
545 	return 0;
546 }
547 
548 static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
549 			    u32 cap_mask)
550 {
551 	struct mlx4_cmd_mailbox *mailbox;
552 	int err;
553 
554 	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
555 	if (IS_ERR(mailbox))
556 		return PTR_ERR(mailbox);
557 
558 	if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
559 		*(u8 *) mailbox->buf	     = !!reset_qkey_viols << 6;
560 		((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
561 	} else {
562 		((u8 *) mailbox->buf)[3]     = !!reset_qkey_viols;
563 		((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
564 	}
565 
566 	err = mlx4_cmd(dev->dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
567 		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
568 
569 	mlx4_free_cmd_mailbox(dev->dev, mailbox);
570 	return err;
571 }
572 
573 static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
574 			       struct ib_port_modify *props)
575 {
576 	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
577 	u8 is_eth = mdev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
578 	struct ib_port_attr attr;
579 	u32 cap_mask;
580 	int err;
581 
582 	/* return OK if this is RoCE. CM calls ib_modify_port() regardless
583 	 * of whether port link layer is ETH or IB. For ETH ports, qkey
584 	 * violations and port capabilities are not meaningful.
585 	 */
586 	if (is_eth)
587 		return 0;
588 
589 	mutex_lock(&mdev->cap_mask_mutex);
590 
591 	err = mlx4_ib_query_port(ibdev, port, &attr);
592 	if (err)
593 		goto out;
594 
595 	cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
596 		~props->clr_port_cap_mask;
597 
598 	err = mlx4_ib_SET_PORT(mdev, port,
599 			       !!(mask & IB_PORT_RESET_QKEY_CNTR),
600 			       cap_mask);
601 
602 out:
603 	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
604 	return err;
605 }
606 
607 static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
608 						  struct ib_udata *udata)
609 {
610 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
611 	struct mlx4_ib_ucontext *context;
612 	struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
613 	struct mlx4_ib_alloc_ucontext_resp resp;
614 	int err;
615 
616 	if (!dev->ib_active)
617 		return ERR_PTR(-EAGAIN);
618 
619 	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
620 		resp_v3.qp_tab_size      = dev->dev->caps.num_qps;
621 		resp_v3.bf_reg_size      = dev->dev->caps.bf_reg_size;
622 		resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
623 	} else {
624 		resp.dev_caps	      = dev->dev->caps.userspace_caps;
625 		resp.qp_tab_size      = dev->dev->caps.num_qps;
626 		resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
627 		resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
628 		resp.cqe_size	      = dev->dev->caps.cqe_size;
629 	}
630 
631 	context = kmalloc(sizeof *context, GFP_KERNEL);
632 	if (!context)
633 		return ERR_PTR(-ENOMEM);
634 
635 	err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
636 	if (err) {
637 		kfree(context);
638 		return ERR_PTR(err);
639 	}
640 
641 	INIT_LIST_HEAD(&context->db_page_list);
642 	mutex_init(&context->db_page_mutex);
643 
644 	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
645 		err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
646 	else
647 		err = ib_copy_to_udata(udata, &resp, sizeof(resp));
648 
649 	if (err) {
650 		mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
651 		kfree(context);
652 		return ERR_PTR(-EFAULT);
653 	}
654 
655 	return &context->ibucontext;
656 }
657 
658 static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
659 {
660 	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
661 
662 	mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
663 	kfree(context);
664 
665 	return 0;
666 }
667 
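/*
 * mmap the doorbell pages of a user context: page offset 0 is the UAR
 * page (mapped non-cached), page offset 1 is presumably the BlueFlame
 * register page that follows the UAR region (mapped write-combining and
 * only valid when bf_reg_size is nonzero).  Any other offset is rejected.
 */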
668 static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
669 {
670 	struct mlx4_ib_dev *dev = to_mdev(context->device);
671 
672 	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
673 		return -EINVAL;
674 
675 	if (vma->vm_pgoff == 0) {
676 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
677 
678 		if (io_remap_pfn_range(vma, vma->vm_start,
679 				       to_mucontext(context)->uar.pfn,
680 				       PAGE_SIZE, vma->vm_page_prot))
681 			return -EAGAIN;
682 	} else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
683 		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
684 
685 		if (io_remap_pfn_range(vma, vma->vm_start,
686 				       to_mucontext(context)->uar.pfn +
687 				       dev->dev->caps.num_uars,
688 				       PAGE_SIZE, vma->vm_page_prot))
689 			return -EAGAIN;
690 	} else
691 		return -EINVAL;
692 
693 	return 0;
694 }
695 
696 static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
697 				      struct ib_ucontext *context,
698 				      struct ib_udata *udata)
699 {
700 	struct mlx4_ib_pd *pd;
701 	int err;
702 
703 	pd = kmalloc(sizeof *pd, GFP_KERNEL);
704 	if (!pd)
705 		return ERR_PTR(-ENOMEM);
706 
707 	err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
708 	if (err) {
709 		kfree(pd);
710 		return ERR_PTR(err);
711 	}
712 
713 	if (context)
714 		if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
715 			mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
716 			kfree(pd);
717 			return ERR_PTR(-EFAULT);
718 		}
719 
720 	return &pd->ibpd;
721 }
722 
723 static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
724 {
725 	mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
726 	kfree(pd);
727 
728 	return 0;
729 }
730 
731 static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
732 					  struct ib_ucontext *context,
733 					  struct ib_udata *udata)
734 {
735 	struct mlx4_ib_xrcd *xrcd;
736 	int err;
737 
738 	if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
739 		return ERR_PTR(-ENOSYS);
740 
741 	xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
742 	if (!xrcd)
743 		return ERR_PTR(-ENOMEM);
744 
745 	err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
746 	if (err)
747 		goto err1;
748 
749 	xrcd->pd = ib_alloc_pd(ibdev);
750 	if (IS_ERR(xrcd->pd)) {
751 		err = PTR_ERR(xrcd->pd);
752 		goto err2;
753 	}
754 
755 	xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, 1, 0);
756 	if (IS_ERR(xrcd->cq)) {
757 		err = PTR_ERR(xrcd->cq);
758 		goto err3;
759 	}
760 
761 	return &xrcd->ibxrcd;
762 
763 err3:
764 	ib_dealloc_pd(xrcd->pd);
765 err2:
766 	mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
767 err1:
768 	kfree(xrcd);
769 	return ERR_PTR(err);
770 }
771 
772 static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
773 {
774 	ib_destroy_cq(to_mxrcd(xrcd)->cq);
775 	ib_dealloc_pd(to_mxrcd(xrcd)->pd);
776 	mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
777 	kfree(xrcd);
778 
779 	return 0;
780 }
781 
782 static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
783 {
784 	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
785 	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
786 	struct mlx4_ib_gid_entry *ge;
787 
788 	ge = kzalloc(sizeof *ge, GFP_KERNEL);
789 	if (!ge)
790 		return -ENOMEM;
791 
792 	ge->gid = *gid;
793 	if (mlx4_ib_add_mc(mdev, mqp, gid)) {
794 		ge->port = mqp->port;
795 		ge->added = 1;
796 	}
797 
798 	mutex_lock(&mqp->mutex);
799 	list_add_tail(&ge->list, &mqp->gid_list);
800 	mutex_unlock(&mqp->mutex);
801 
802 	return 0;
803 }
804 
805 int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
806 		   union ib_gid *gid)
807 {
808 	struct net_device *ndev;
809 	int ret = 0;
810 
811 	if (!mqp->port)
812 		return 0;
813 
814 	spin_lock(&mdev->iboe.lock);
815 	ndev = mdev->iboe.netdevs[mqp->port - 1];
816 	if (ndev)
817 		dev_hold(ndev);
818 	spin_unlock(&mdev->iboe.lock);
819 
820 	if (ndev) {
821 		ret = 1;
822 		dev_put(ndev);
823 	}
824 
825 	return ret;
826 }
827 
828 struct mlx4_ib_steering {
829 	struct list_head list;
830 	u64 reg_id;
831 	union ib_gid gid;
832 };
833 
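/*
 * Translate one union ib_flow_spec (ETH/IB/IPV4/TCP/UDP) into the
 * hardware _rule_hw layout: copy the value/mask pairs, fill in the
 * hardware steering id and size, and return the rule size in bytes,
 * or -EINVAL for an unsupported spec type.
 */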
834 static int parse_flow_attr(struct mlx4_dev *dev,
835 			   u32 qp_num,
836 			   union ib_flow_spec *ib_spec,
837 			   struct _rule_hw *mlx4_spec)
838 {
839 	enum mlx4_net_trans_rule_id type;
840 
841 	switch (ib_spec->type) {
842 	case IB_FLOW_SPEC_ETH:
843 		type = MLX4_NET_TRANS_RULE_ID_ETH;
844 		memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
845 		       ETH_ALEN);
846 		memcpy(mlx4_spec->eth.dst_mac_msk, ib_spec->eth.mask.dst_mac,
847 		       ETH_ALEN);
848 		mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
849 		mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
850 		break;
851 	case IB_FLOW_SPEC_IB:
852 		type = MLX4_NET_TRANS_RULE_ID_IB;
853 		mlx4_spec->ib.l3_qpn =
854 			cpu_to_be32(qp_num);
855 		mlx4_spec->ib.qpn_mask =
856 			cpu_to_be32(MLX4_IB_FLOW_QPN_MASK);
857 		break;
858 
859 
860 	case IB_FLOW_SPEC_IPV4:
861 		type = MLX4_NET_TRANS_RULE_ID_IPV4;
862 		mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
863 		mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
864 		mlx4_spec->ipv4.dst_ip = ib_spec->ipv4.val.dst_ip;
865 		mlx4_spec->ipv4.dst_ip_msk = ib_spec->ipv4.mask.dst_ip;
866 		break;
867 
868 	case IB_FLOW_SPEC_TCP:
869 	case IB_FLOW_SPEC_UDP:
870 		type = ib_spec->type == IB_FLOW_SPEC_TCP ?
871 					MLX4_NET_TRANS_RULE_ID_TCP :
872 					MLX4_NET_TRANS_RULE_ID_UDP;
873 		mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port;
874 		mlx4_spec->tcp_udp.dst_port_msk = ib_spec->tcp_udp.mask.dst_port;
875 		mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port;
876 		mlx4_spec->tcp_udp.src_port_msk = ib_spec->tcp_udp.mask.src_port;
877 		break;
878 
879 	default:
880 		return -EINVAL;
881 	}
882 	if (mlx4_map_sw_to_hw_steering_id(dev, type) < 0 ||
883 	    mlx4_hw_rule_sz(dev, type) < 0)
884 		return -EINVAL;
885 	mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type));
886 	mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2;
887 	return mlx4_hw_rule_sz(dev, type);
888 }
889 
890 struct default_rules {
891 	__u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
892 	__u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
893 	__u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS];
894 	__u8  link_layer;
895 };
896 static const struct default_rules default_table[] = {
897 	{
898 		.mandatory_fields = {IB_FLOW_SPEC_IPV4},
899 		.mandatory_not_fields = {IB_FLOW_SPEC_ETH},
900 		.rules_create_list = {IB_FLOW_SPEC_IB},
901 		.link_layer = IB_LINK_LAYER_INFINIBAND
902 	}
903 };
904 
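/*
 * Check whether a user supplied flow_attr matches an entry of
 * default_table for the port's link layer: every mandatory field must
 * appear with exactly the listed spec type and none of the
 * mandatory_not_fields may be present.  Returns the matching table
 * index, or -1 when nothing matches.
 */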
905 static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
906 					 struct ib_flow_attr *flow_attr)
907 {
908 	int i, j, k;
909 	void *ib_flow;
910 	const struct default_rules *pdefault_rules = default_table;
911 	u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);
912 
913 	for (i = 0; i < sizeof(default_table)/sizeof(default_table[0]); i++,
914 	     pdefault_rules++) {
915 		__u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];
916 		memset(&field_types, 0, sizeof(field_types));
917 
918 		if (link_layer != pdefault_rules->link_layer)
919 			continue;
920 
921 		ib_flow = flow_attr + 1;
922 		/* we assume the specs are sorted */
923 		for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS &&
924 		     j < flow_attr->num_of_specs; k++) {
925 			union ib_flow_spec *current_flow =
926 				(union ib_flow_spec *)ib_flow;
927 
928 			/* same layer but different type */
929 			if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) ==
930 			     (pdefault_rules->mandatory_fields[k] &
931 			      IB_FLOW_SPEC_LAYER_MASK)) &&
932 			    (current_flow->type !=
933 			     pdefault_rules->mandatory_fields[k]))
934 				goto out;
935 
936 			/* same layer, try match next one */
937 			if (current_flow->type ==
938 			    pdefault_rules->mandatory_fields[k]) {
939 				j++;
940 				ib_flow +=
941 					((union ib_flow_spec *)ib_flow)->size;
942 			}
943 		}
944 
945 		ib_flow = flow_attr + 1;
946 		for (j = 0; j < flow_attr->num_of_specs;
947 		     j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size)
948 			for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++)
949 				/* same layer and same type */
950 				if (((union ib_flow_spec *)ib_flow)->type ==
951 				    pdefault_rules->mandatory_not_fields[k])
952 					goto out;
953 
954 		return i;
955 	}
956 out:
957 	return -1;
958 }
959 
960 static int __mlx4_ib_create_default_rules(
961 		struct mlx4_ib_dev *mdev,
962 		struct ib_qp *qp,
963 		const struct default_rules *pdefault_rules,
964 		struct _rule_hw *mlx4_spec) {
965 	int size = 0;
966 	int i;
967 
968 	for (i = 0; i < sizeof(pdefault_rules->rules_create_list)/
969 			sizeof(pdefault_rules->rules_create_list[0]); i++) {
970 		int ret;
971 		union ib_flow_spec ib_spec;
972 		switch (pdefault_rules->rules_create_list[i]) {
973 		case 0:
974 			/* no rule */
975 			continue;
976 		case IB_FLOW_SPEC_IB:
977 			ib_spec.type = IB_FLOW_SPEC_IB;
978 			ib_spec.size = sizeof(struct ib_flow_spec_ib);
979 
980 			break;
981 		default:
982 			/* invalid rule */
983 			return -EINVAL;
984 		}
985 		/* We must put an empty rule here; the qpn is ignored */
986 		ret = parse_flow_attr(mdev->dev, 0, &ib_spec,
987 				      mlx4_spec);
988 		if (ret < 0) {
989 			pr_info("invalid parsing\n");
990 			return -EINVAL;
991 		}
992 
993 		mlx4_spec = (void *)mlx4_spec + ret;
994 		size += ret;
995 	}
996 	return size;
997 }
998 
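/*
 * Build a complete steering rule in a command mailbox (the
 * mlx4_net_trans_rule_hw_ctrl header, any default rules implied by
 * default_table, then one parsed entry per user spec) and attach it with
 * MLX4_QP_FLOW_STEERING_ATTACH.  On success the firmware handle is
 * returned through *reg_id for a later detach.
 */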
999 static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
1000 			  int domain,
1001 			  enum mlx4_net_trans_promisc_mode flow_type,
1002 			  u64 *reg_id)
1003 {
1004 	int ret, i;
1005 	int size = 0;
1006 	void *ib_flow;
1007 	struct mlx4_ib_dev *mdev = to_mdev(qp->device);
1008 	struct mlx4_cmd_mailbox *mailbox;
1009 	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
1010 	int default_flow;
1011 
1012 	static const u16 __mlx4_domain[] = {
1013 		[IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
1014 		[IB_FLOW_DOMAIN_ETHTOOL] = MLX4_DOMAIN_ETHTOOL,
1015 		[IB_FLOW_DOMAIN_RFS] = MLX4_DOMAIN_RFS,
1016 		[IB_FLOW_DOMAIN_NIC] = MLX4_DOMAIN_NIC,
1017 	};
1018 
1019 	if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
1020 		pr_err("Invalid priority value %d\n", flow_attr->priority);
1021 		return -EINVAL;
1022 	}
1023 
1024 	if (domain >= IB_FLOW_DOMAIN_NUM) {
1025 		pr_err("Invalid domain value %d\n", domain);
1026 		return -EINVAL;
1027 	}
1028 
1029 	if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
1030 		return -EINVAL;
1031 
1032 	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
1033 	if (IS_ERR(mailbox))
1034 		return PTR_ERR(mailbox);
1035 	ctrl = mailbox->buf;
1036 
1037 	ctrl->prio = cpu_to_be16(__mlx4_domain[domain] |
1038 				 flow_attr->priority);
1039 	ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
1040 	ctrl->port = flow_attr->port;
1041 	ctrl->qpn = cpu_to_be32(qp->qp_num);
1042 
1043 	ib_flow = flow_attr + 1;
1044 	size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
1045 	/* Add default flows */
1046 	default_flow = __mlx4_ib_default_rules_match(qp, flow_attr);
1047 	if (default_flow >= 0) {
1048 		ret = __mlx4_ib_create_default_rules(
1049 				mdev, qp, default_table + default_flow,
1050 				mailbox->buf + size);
1051 		if (ret < 0) {
1052 			mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1053 			return -EINVAL;
1054 		}
1055 		size += ret;
1056 	}
1057 	for (i = 0; i < flow_attr->num_of_specs; i++) {
1058 		ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow,
1059 				      mailbox->buf + size);
1060 		if (ret < 0) {
1061 			mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1062 			return -EINVAL;
1063 		}
1064 		ib_flow += ((union ib_flow_spec *) ib_flow)->size;
1065 		size += ret;
1066 	}
1067 
1068 	ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
1069 			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
1070 			   MLX4_CMD_NATIVE);
1071 	if (ret == -ENOMEM)
1072 		pr_err("mcg table is full. Failed to register network rule.\n");
1073 	else if (ret == -ENXIO)
1074 		pr_err("Device managed flow steering is disabled. Failed to register network rule.\n");
1075 	else if (ret)
1076 		pr_err("Invalid argument. Failed to register network rule.\n");
1077 
1078 	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1079 	return ret;
1080 }
1081 
1082 static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
1083 {
1084 	int err;
1085 	err = mlx4_cmd(dev, reg_id, 0, 0,
1086 		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
1087 		       MLX4_CMD_NATIVE);
1088 	if (err)
1089 		pr_err("Failed to detach network rule, registration id = 0x%llx\n",
1090 		       reg_id);
1091 	return err;
1092 }
1093 
1094 static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
1095 				    struct ib_flow_attr *flow_attr,
1096 				    int domain)
1097 {
1098 	int err = 0, i = 0;
1099 	struct mlx4_ib_flow *mflow;
1100 	enum mlx4_net_trans_promisc_mode type[2];
1101 
1102 	memset(type, 0, sizeof(type));
1103 
1104 	mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
1105 	if (!mflow) {
1106 		err = -ENOMEM;
1107 		goto err_free;
1108 	}
1109 
1110 	switch (flow_attr->type) {
1111 	case IB_FLOW_ATTR_NORMAL:
1112 		type[0] = MLX4_FS_REGULAR;
1113 		break;
1114 
1115 	case IB_FLOW_ATTR_ALL_DEFAULT:
1116 		type[0] = MLX4_FS_ALL_DEFAULT;
1117 		break;
1118 
1119 	case IB_FLOW_ATTR_MC_DEFAULT:
1120 		type[0] = MLX4_FS_MC_DEFAULT;
1121 		break;
1122 
1123 	case IB_FLOW_ATTR_SNIFFER:
1124 		type[0] = MLX4_FS_UC_SNIFFER;
1125 		type[1] = MLX4_FS_MC_SNIFFER;
1126 		break;
1127 
1128 	default:
1129 		err = -EINVAL;
1130 		goto err_free;
1131 	}
1132 
1133 	while (i < ARRAY_SIZE(type) && type[i]) {
1134 		err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
1135 					    &mflow->reg_id[i]);
1136 		if (err)
1137 			goto err_free;
1138 		i++;
1139 	}
1140 
1141 	return &mflow->ibflow;
1142 
1143 err_free:
1144 	kfree(mflow);
1145 	return ERR_PTR(err);
1146 }
1147 
1148 static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
1149 {
1150 	int err, ret = 0;
1151 	int i = 0;
1152 	struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
1153 	struct mlx4_ib_flow *mflow = to_mflow(flow_id);
1154 
1155 	while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i]) {
1156 		err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i]);
1157 		if (err)
1158 			ret = err;
1159 		i++;
1160 	}
1161 
1162 	kfree(mflow);
1163 	return ret;
1164 }
1165 
1166 static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1167 {
1168 	int err;
1169 	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
1170 	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1171 	u64 reg_id;
1172 	struct mlx4_ib_steering *ib_steering = NULL;
1173 	enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
1174 		MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
1175 
1176 	if (mdev->dev->caps.steering_mode ==
1177 	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
1178 		ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
1179 		if (!ib_steering)
1180 			return -ENOMEM;
1181 	}
1182 
1183 	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
1184 				    !!(mqp->flags &
1185 				       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
1186 				    prot, &reg_id);
1187 	if (err)
1188 		goto err_malloc;
1189 
1190 	err = add_gid_entry(ibqp, gid);
1191 	if (err)
1192 		goto err_add;
1193 
1194 	if (ib_steering) {
1195 		memcpy(ib_steering->gid.raw, gid->raw, 16);
1196 		ib_steering->reg_id = reg_id;
1197 		mutex_lock(&mqp->mutex);
1198 		list_add(&ib_steering->list, &mqp->steering_rules);
1199 		mutex_unlock(&mqp->mutex);
1200 	}
1201 	return 0;
1202 
1203 err_add:
1204 	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1205 			      prot, reg_id);
1206 err_malloc:
1207 	kfree(ib_steering);
1208 
1209 	return err;
1210 }
1211 
1212 static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
1213 {
1214 	struct mlx4_ib_gid_entry *ge;
1215 	struct mlx4_ib_gid_entry *tmp;
1216 	struct mlx4_ib_gid_entry *ret = NULL;
1217 
1218 	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
1219 		if (!memcmp(raw, ge->gid.raw, 16)) {
1220 			ret = ge;
1221 			break;
1222 		}
1223 	}
1224 
1225 	return ret;
1226 }
1227 
1228 static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1229 {
1230 	int err;
1231 	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
1232 	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1233 	struct net_device *ndev;
1234 	struct mlx4_ib_gid_entry *ge;
1235 	u64 reg_id = 0;
1236 	enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
1237 		MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
1238 
1239 	if (mdev->dev->caps.steering_mode ==
1240 	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
1241 		struct mlx4_ib_steering *ib_steering;
1242 
1243 		mutex_lock(&mqp->mutex);
1244 		list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
1245 			if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
1246 				list_del(&ib_steering->list);
1247 				break;
1248 			}
1249 		}
1250 		mutex_unlock(&mqp->mutex);
1251 		if (&ib_steering->list == &mqp->steering_rules) {
1252 			pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
1253 			return -EINVAL;
1254 		}
1255 		reg_id = ib_steering->reg_id;
1256 		kfree(ib_steering);
1257 	}
1258 
1259 	err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1260 				    prot, reg_id);
1261 	if (err)
1262 		return err;
1263 
1264 	mutex_lock(&mqp->mutex);
1265 	ge = find_gid_entry(mqp, gid->raw);
1266 	if (ge) {
1267 		spin_lock(&mdev->iboe.lock);
1268 		ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
1269 		if (ndev)
1270 			dev_hold(ndev);
1271 		spin_unlock(&mdev->iboe.lock);
1272 		if (ndev)
1273 			dev_put(ndev);
1274 		list_del(&ge->list);
1275 		kfree(ge);
1276 	} else
1277 		pr_warn("could not find mgid entry\n");
1278 
1279 	mutex_unlock(&mqp->mutex);
1280 
1281 	return 0;
1282 }
1283 
1284 static int init_node_data(struct mlx4_ib_dev *dev)
1285 {
1286 	struct ib_smp *in_mad  = NULL;
1287 	struct ib_smp *out_mad = NULL;
1288 	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
1289 	int err = -ENOMEM;
1290 
1291 	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
1292 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
1293 	if (!in_mad || !out_mad)
1294 		goto out;
1295 
1296 	init_query_mad(in_mad);
1297 	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
1298 	if (mlx4_is_master(dev->dev))
1299 		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
1300 
1301 	err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
1302 	if (err)
1303 		goto out;
1304 
1305 	memcpy(dev->ib_dev.node_desc, out_mad->data, 64);
1306 
1307 	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
1308 
1309 	err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
1310 	if (err)
1311 		goto out;
1312 
1313 	dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
1314 	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
1315 
1316 out:
1317 	kfree(in_mad);
1318 	kfree(out_mad);
1319 	return err;
1320 }
1321 
1322 static ssize_t show_hca(struct device *device, struct device_attribute *attr,
1323 			char *buf)
1324 {
1325 	struct mlx4_ib_dev *dev =
1326 		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
1327 	return sprintf(buf, "MT%d\n", dev->dev->pdev->device);
1328 }
1329 
1330 static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
1331 			   char *buf)
1332 {
1333 	struct mlx4_ib_dev *dev =
1334 		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
1335 	return sprintf(buf, "%d.%d.%d\n", (int) (dev->dev->caps.fw_ver >> 32),
1336 		       (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
1337 		       (int) dev->dev->caps.fw_ver & 0xffff);
1338 }
1339 
1340 static ssize_t show_rev(struct device *device, struct device_attribute *attr,
1341 			char *buf)
1342 {
1343 	struct mlx4_ib_dev *dev =
1344 		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
1345 	return sprintf(buf, "%x\n", dev->dev->rev_id);
1346 }
1347 
1348 static ssize_t show_board(struct device *device, struct device_attribute *attr,
1349 			  char *buf)
1350 {
1351 	struct mlx4_ib_dev *dev =
1352 		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
1353 	return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
1354 		       dev->dev->board_id);
1355 }
1356 
1357 static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
1358 static DEVICE_ATTR(fw_ver,   S_IRUGO, show_fw_ver, NULL);
1359 static DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
1360 static DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);
1361 
1362 static struct device_attribute *mlx4_class_attributes[] = {
1363 	&dev_attr_hw_rev,
1364 	&dev_attr_fw_ver,
1365 	&dev_attr_hca_type,
1366 	&dev_attr_board_id
1367 };
1368 
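/*
 * Build the low 64 bits (interface id) of a RoCE GID from the netdev MAC
 * address in modified EUI-64 form: insert 0xfffe (or the VLAN id when
 * one is set) between the OUI and NIC halves of the MAC and flip the
 * universal/local bit.  For example, MAC 00:02:c9:01:02:03 with no VLAN
 * yields the interface id 02:02:c9:ff:fe:01:02:03.
 */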
1369 static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id,
1370 				     struct net_device *dev)
1371 {
1372 	memcpy(eui, dev->dev_addr, 3);
1373 	memcpy(eui + 5, dev->dev_addr + 3, 3);
1374 	if (vlan_id < 0x1000) {
1375 		eui[3] = vlan_id >> 8;
1376 		eui[4] = vlan_id & 0xff;
1377 	} else {
1378 		eui[3] = 0xff;
1379 		eui[4] = 0xfe;
1380 	}
1381 	eui[0] ^= 2;
1382 }
1383 
1384 static void update_gids_task(struct work_struct *work)
1385 {
1386 	struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
1387 	struct mlx4_cmd_mailbox *mailbox;
1388 	union ib_gid *gids;
1389 	int err;
1390 	struct mlx4_dev	*dev = gw->dev->dev;
1391 
1392 	mailbox = mlx4_alloc_cmd_mailbox(dev);
1393 	if (IS_ERR(mailbox)) {
1394 		pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox));
1395 		return;
1396 	}
1397 
1398 	gids = mailbox->buf;
1399 	memcpy(gids, gw->gids, sizeof gw->gids);
1400 
1401 	err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
1402 		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
1403 		       MLX4_CMD_WRAPPED);
1404 	if (err)
1405 		pr_warn("set port command failed\n");
1406 	else
1407 		mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE);
1408 
1409 	mlx4_free_cmd_mailbox(dev, mailbox);
1410 	kfree(gw);
1411 }
1412 
1413 static void reset_gids_task(struct work_struct *work)
1414 {
1415 	struct update_gid_work *gw =
1416 			container_of(work, struct update_gid_work, work);
1417 	struct mlx4_cmd_mailbox *mailbox;
1418 	union ib_gid *gids;
1419 	int err;
1420 	struct mlx4_dev	*dev = gw->dev->dev;
1421 
1422 	mailbox = mlx4_alloc_cmd_mailbox(dev);
1423 	if (IS_ERR(mailbox)) {
1424 		pr_warn("reset gid table failed\n");
1425 		goto free;
1426 	}
1427 
1428 	gids = mailbox->buf;
1429 	memcpy(gids, gw->gids, sizeof(gw->gids));
1430 
1431 	if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, gw->port) ==
1432 				    IB_LINK_LAYER_ETHERNET) {
1433 		err = mlx4_cmd(dev, mailbox->dma,
1434 			       MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
1435 			       1, MLX4_CMD_SET_PORT,
1436 			       MLX4_CMD_TIME_CLASS_B,
1437 			       MLX4_CMD_WRAPPED);
1438 		if (err)
1439 			pr_warn("set port %d command failed\n",
1440 				gw->port);
1441 	}
1442 
1443 	mlx4_free_cmd_mailbox(dev, mailbox);
1444 free:
1445 	kfree(gw);
1446 }
1447 
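/*
 * Update the software copy of the RoCE GID table for a port: the default
 * GID always occupies slot 0, other GIDs are added to the first free
 * slot or, when 'clear' is set, replaced by the zero GID.  If anything
 * changed, the whole table is pushed to firmware asynchronously through
 * update_gids_task on the driver workqueue.
 */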
1448 static int update_gid_table(struct mlx4_ib_dev *dev, int port,
1449 			    union ib_gid *gid, int clear,
1450 			    int default_gid)
1451 {
1452 	struct update_gid_work *work;
1453 	int i;
1454 	int need_update = 0;
1455 	int free = -1;
1456 	int found = -1;
1457 	int max_gids;
1458 
1459 	if (default_gid) {
1460 		free = 0;
1461 	} else {
1462 		max_gids = dev->dev->caps.gid_table_len[port];
1463 		for (i = 1; i < max_gids; ++i) {
1464 			if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid,
1465 				    sizeof(*gid)))
1466 				found = i;
1467 
1468 			if (clear) {
1469 				if (found >= 0) {
1470 					need_update = 1;
1471 					dev->iboe.gid_table[port - 1][found] =
1472 						zgid;
1473 					break;
1474 				}
1475 			} else {
1476 				if (found >= 0)
1477 					break;
1478 
1479 				if (free < 0 &&
1480 				    !memcmp(&dev->iboe.gid_table[port - 1][i],
1481 					    &zgid, sizeof(*gid)))
1482 					free = i;
1483 			}
1484 		}
1485 	}
1486 
1487 	if (found == -1 && !clear && free >= 0) {
1488 		dev->iboe.gid_table[port - 1][free] = *gid;
1489 		need_update = 1;
1490 	}
1491 
1492 	if (!need_update)
1493 		return 0;
1494 
1495 	work = kzalloc(sizeof(*work), GFP_ATOMIC);
1496 	if (!work)
1497 		return -ENOMEM;
1498 
1499 	memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof(work->gids));
1500 	INIT_WORK(&work->work, update_gids_task);
1501 	work->port = port;
1502 	work->dev = dev;
1503 	queue_work(wq, &work->work);
1504 
1505 	return 0;
1506 }
1507 
1508 static void mlx4_make_default_gid(struct  net_device *dev, union ib_gid *gid)
1509 {
1510 	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
1511 	mlx4_addrconf_ifid_eui48(&gid->raw[8], 0xffff, dev);
1512 }
1513 
1514 
1515 static int reset_gid_table(struct mlx4_ib_dev *dev, u8 port)
1516 {
1517 	struct update_gid_work *work;
1518 
1519 	work = kzalloc(sizeof(*work), GFP_ATOMIC);
1520 	if (!work)
1521 		return -ENOMEM;
1522 
1523 	memset(dev->iboe.gid_table[port - 1], 0, sizeof(work->gids));
1524 	memset(work->gids, 0, sizeof(work->gids));
1525 	INIT_WORK(&work->work, reset_gids_task);
1526 	work->dev = dev;
1527 	work->port = port;
1528 	queue_work(wq, &work->work);
1529 	return 0;
1530 }
1531 
1532 static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
1533 			      struct mlx4_ib_dev *ibdev, union ib_gid *gid)
1534 {
1535 	struct mlx4_ib_iboe *iboe;
1536 	int port = 0;
1537 	struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ?
1538 				rdma_vlan_dev_real_dev(event_netdev) :
1539 				event_netdev;
1540 	union ib_gid default_gid;
1541 
1542 	mlx4_make_default_gid(real_dev, &default_gid);
1543 
1544 	if (!memcmp(gid, &default_gid, sizeof(*gid)))
1545 		return 0;
1546 
1547 	if (event != NETDEV_DOWN && event != NETDEV_UP)
1548 		return 0;
1549 
1550 	if ((real_dev != event_netdev) &&
1551 	    (event == NETDEV_DOWN) &&
1552 	    rdma_link_local_addr((struct in6_addr *)gid))
1553 		return 0;
1554 
1555 	iboe = &ibdev->iboe;
1556 	spin_lock(&iboe->lock);
1557 
1558 	for (port = 1; port <= ibdev->dev->caps.num_ports; ++port)
1559 		if ((netif_is_bond_master(real_dev) &&
1560 		     (real_dev == iboe->masters[port - 1])) ||
1561 		     (!netif_is_bond_master(real_dev) &&
1562 		     (real_dev == iboe->netdevs[port - 1])))
1563 			update_gid_table(ibdev, port, gid,
1564 					 event == NETDEV_DOWN, 0);
1565 
1566 	spin_unlock(&iboe->lock);
1567 	return 0;
1568 
1569 }
1570 
1571 static u8 mlx4_ib_get_dev_port(struct net_device *dev,
1572 			       struct mlx4_ib_dev *ibdev)
1573 {
1574 	u8 port = 0;
1575 	struct mlx4_ib_iboe *iboe;
1576 	struct net_device *real_dev = rdma_vlan_dev_real_dev(dev) ?
1577 				rdma_vlan_dev_real_dev(dev) : dev;
1578 
1579 	iboe = &ibdev->iboe;
1580 
1581 	for (port = 1; port <= ibdev->dev->caps.num_ports; ++port)
1582 		if ((netif_is_bond_master(real_dev) &&
1583 		     (real_dev == iboe->masters[port - 1])) ||
1584 		     (!netif_is_bond_master(real_dev) &&
1585 		     (real_dev == iboe->netdevs[port - 1])))
1586 			break;
1587 
1588 	if ((port == 0) || (port > ibdev->dev->caps.num_ports))
1589 		return 0;
1590 	else
1591 		return port;
1592 }
1593 
1594 static int mlx4_ib_inet_event(struct notifier_block *this, unsigned long event,
1595 				void *ptr)
1596 {
1597 	struct mlx4_ib_dev *ibdev;
1598 	struct in_ifaddr *ifa = ptr;
1599 	union ib_gid gid;
1600 	struct net_device *event_netdev = ifa->ifa_dev->dev;
1601 
1602 	ipv6_addr_set_v4mapped(ifa->ifa_address, (struct in6_addr *)&gid);
1603 
1604 	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb_inet);
1605 
1606 	mlx4_ib_addr_event(event, event_netdev, ibdev, &gid);
1607 	return NOTIFY_DONE;
1608 }
1609 
1610 #if IS_ENABLED(CONFIG_IPV6)
1611 static int mlx4_ib_inet6_event(struct notifier_block *this, unsigned long event,
1612 				void *ptr)
1613 {
1614 	struct mlx4_ib_dev *ibdev;
1615 	struct inet6_ifaddr *ifa = ptr;
1616 	union  ib_gid *gid = (union ib_gid *)&ifa->addr;
1617 	struct net_device *event_netdev = ifa->idev->dev;
1618 
1619 	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb_inet6);
1620 
1621 	mlx4_ib_addr_event(event, event_netdev, ibdev, gid);
1622 	return NOTIFY_DONE;
1623 }
1624 #endif
1625 
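/*
 * When the MAC address of a RoCE port's netdev changes, register the new
 * source MAC and switch the port's proxy QP1 to it via
 * mlx4_update_qp(MLX4_UPDATE_QP_SMAC), then unregister whichever MAC is
 * no longer needed.  MLX4_IB_INVALID_MAC marks "nothing to release".
 */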
1626 #define MLX4_IB_INVALID_MAC	((u64)-1)
1627 static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
1628 			       struct net_device *dev,
1629 			       int port)
1630 {
1631 	u64 new_smac = 0;
1632 	u64 release_mac = MLX4_IB_INVALID_MAC;
1633 	struct mlx4_ib_qp *qp;
1634 
1635 	read_lock(&dev_base_lock);
1636 	new_smac = mlx4_mac_to_u64(dev->dev_addr);
1637 	read_unlock(&dev_base_lock);
1638 
1639 	mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
1640 	qp = ibdev->qp1_proxy[port - 1];
1641 	if (qp) {
1642 		int new_smac_index;
1643 		u64 old_smac = qp->pri.smac;
1644 		struct mlx4_update_qp_params update_params;
1645 
1646 		if (new_smac == old_smac)
1647 			goto unlock;
1648 
1649 		new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
1650 
1651 		if (new_smac_index < 0)
1652 			goto unlock;
1653 
1654 		update_params.smac_index = new_smac_index;
1655 		if (mlx4_update_qp(ibdev->dev, &qp->mqp, MLX4_UPDATE_QP_SMAC,
1656 				   &update_params)) {
1657 			release_mac = new_smac;
1658 			goto unlock;
1659 		}
1660 
1661 		qp->pri.smac = new_smac;
1662 		qp->pri.smac_index = new_smac_index;
1663 
1664 		release_mac = old_smac;
1665 	}
1666 
1667 unlock:
1668 	mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
1669 	if (release_mac != MLX4_IB_INVALID_MAC)
1670 		mlx4_unregister_mac(ibdev->dev, port, release_mac);
1671 }
1672 
1673 static void mlx4_ib_get_dev_addr(struct net_device *dev,
1674 				 struct mlx4_ib_dev *ibdev, u8 port)
1675 {
1676 	struct in_device *in_dev;
1677 #if IS_ENABLED(CONFIG_IPV6)
1678 	struct inet6_dev *in6_dev;
1679 	union ib_gid  *pgid;
1680 	struct inet6_ifaddr *ifp;
1681 #endif
1682 	union ib_gid gid;
1683 
1684 
1685 	if ((port == 0) || (port > ibdev->dev->caps.num_ports))
1686 		return;
1687 
1688 	/* IPv4 gids */
1689 	in_dev = in_dev_get(dev);
1690 	if (in_dev) {
1691 		for_ifa(in_dev) {
1692 			/*ifa->ifa_address;*/
1693 			ipv6_addr_set_v4mapped(ifa->ifa_address,
1694 					       (struct in6_addr *)&gid);
1695 			update_gid_table(ibdev, port, &gid, 0, 0);
1696 		}
1697 		endfor_ifa(in_dev);
1698 		in_dev_put(in_dev);
1699 	}
1700 #if IS_ENABLED(CONFIG_IPV6)
1701 	/* IPv6 gids */
1702 	in6_dev = in6_dev_get(dev);
1703 	if (in6_dev) {
1704 		read_lock_bh(&in6_dev->lock);
1705 		list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
1706 			pgid = (union ib_gid *)&ifp->addr;
1707 			update_gid_table(ibdev, port, pgid, 0, 0);
1708 		}
1709 		read_unlock_bh(&in6_dev->lock);
1710 		in6_dev_put(in6_dev);
1711 	}
1712 #endif
1713 }
1714 
1715 static void mlx4_ib_set_default_gid(struct mlx4_ib_dev *ibdev,
1716 				 struct  net_device *dev, u8 port)
1717 {
1718 	union ib_gid gid;
1719 	mlx4_make_default_gid(dev, &gid);
1720 	update_gid_table(ibdev, port, &gid, 0, 1);
1721 }
1722 
1723 static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
1724 {
1725 	struct	net_device *dev;
1726 	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
1727 	int i;
1728 
1729 	for (i = 1; i <= ibdev->num_ports; ++i)
1730 		if (reset_gid_table(ibdev, i))
1731 			return -1;
1732 
1733 	read_lock(&dev_base_lock);
1734 	spin_lock(&iboe->lock);
1735 
1736 	for_each_netdev(&init_net, dev) {
1737 		u8 port = mlx4_ib_get_dev_port(dev, ibdev);
1738 		if (port)
1739 			mlx4_ib_get_dev_addr(dev, ibdev, port);
1740 	}
1741 
1742 	spin_unlock(&iboe->lock);
1743 	read_unlock(&dev_base_lock);
1744 
1745 	return 0;
1746 }
1747 
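/*
 * Called from the netdev notifier: refresh the cached netdev and bond
 * master pointers for every IB-transport port, reinstall the default
 * GID, and rebuild the GID table when a bond master appears or changes
 * or a slave port goes down.  An address change on the port's own netdev
 * additionally triggers mlx4_ib_update_qps() for that port.
 */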
1748 static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
1749 				 struct net_device *dev,
1750 				 unsigned long event)
1751 
1752 {
1753 	struct mlx4_ib_iboe *iboe;
1754 	int update_qps_port = -1;
1755 	int port;
1756 
1757 	iboe = &ibdev->iboe;
1758 
1759 	spin_lock(&iboe->lock);
1760 	mlx4_foreach_ib_transport_port(port, ibdev->dev) {
1761 		enum ib_port_state	port_state = IB_PORT_NOP;
1762 		struct net_device *old_master = iboe->masters[port - 1];
1763 		struct net_device *curr_netdev;
1764 		struct net_device *curr_master;
1765 
1766 		iboe->netdevs[port - 1] =
1767 			mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
1768 		if (iboe->netdevs[port - 1])
1769 			mlx4_ib_set_default_gid(ibdev,
1770 						iboe->netdevs[port - 1], port);
1771 		curr_netdev = iboe->netdevs[port - 1];
1772 
1773 		if (iboe->netdevs[port - 1] &&
1774 		    netif_is_bond_slave(iboe->netdevs[port - 1])) {
1775 			iboe->masters[port - 1] = netdev_master_upper_dev_get(
1776 				iboe->netdevs[port - 1]);
1777 		} else {
1778 			iboe->masters[port - 1] = NULL;
1779 		}
1780 		curr_master = iboe->masters[port - 1];
1781 
1782 		if (dev == iboe->netdevs[port - 1] &&
1783 		    (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
1784 		     event == NETDEV_UP || event == NETDEV_CHANGE))
1785 			update_qps_port = port;
1786 
1787 		if (curr_netdev) {
1788 			port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
1789 						IB_PORT_ACTIVE : IB_PORT_DOWN;
1790 			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
1791 		} else {
1792 			reset_gid_table(ibdev, port);
1793 		}
1794 		/* if using bonding/team and a slave port is down, we don't want the
1795 		 * bond IP based gids in the table since flows that select port by gid
1796 		 * may get the down port.
1797 		 */
1798 		if (curr_master && (port_state == IB_PORT_DOWN)) {
1799 			reset_gid_table(ibdev, port);
1800 			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
1801 		}
1802 		/* if bonding is used it is possible that we add it to masters
1803 		 * only after an IP address is assigned to the bonding
1804 		 * interface.
1805 		 */
1806 		if (curr_master && (old_master != curr_master)) {
1807 			reset_gid_table(ibdev, port);
1808 			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
1809 			mlx4_ib_get_dev_addr(curr_master, ibdev, port);
1810 		}
1811 
1812 		if (!curr_master && (old_master != curr_master)) {
1813 			reset_gid_table(ibdev, port);
1814 			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
1815 			mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
1816 		}
1817 	}
1818 
1819 	spin_unlock(&iboe->lock);
1820 
1821 	if (update_qps_port > 0)
1822 		mlx4_ib_update_qps(ibdev, dev, update_qps_port);
1823 }
1824 
1825 static int mlx4_ib_netdev_event(struct notifier_block *this,
1826 				unsigned long event, void *ptr)
1827 {
1828 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1829 	struct mlx4_ib_dev *ibdev;
1830 
1831 	if (!net_eq(dev_net(dev), &init_net))
1832 		return NOTIFY_DONE;
1833 
1834 	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
1835 	mlx4_ib_scan_netdevs(ibdev, dev, event);
1836 
1837 	return NOTIFY_DONE;
1838 }
1839 
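/*
 * On the master, set up each slave's virt2phys pkey mapping (identity
 * for the master and for index 0, otherwise the last table entry) and
 * seed the physical pkey cache: 0xFFFF at index 0, zero elsewhere.
 */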
1840 static void init_pkeys(struct mlx4_ib_dev *ibdev)
1841 {
1842 	int port;
1843 	int slave;
1844 	int i;
1845 
1846 	if (mlx4_is_master(ibdev->dev)) {
1847 		for (slave = 0; slave <= ibdev->dev->num_vfs; ++slave) {
1848 			for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
1849 				for (i = 0;
1850 				     i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
1851 				     ++i) {
1852 					ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
1853 					/* master has the identity virt2phys pkey mapping */
1854 						(slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
1855 							ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
1856 					mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
1857 							     ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
1858 				}
1859 			}
1860 		}
1861 		/* initialize pkey cache */
1862 		for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
1863 			for (i = 0;
1864 			     i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
1865 			     ++i)
1866 				ibdev->pkeys.phys_pkey_cache[port-1][i] =
1867 					(i) ? 0 : 0xFFFF;
1868 		}
1869 	}
1870 }
1871 
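/*
 * When the completion vector pool is large enough, assign a private,
 * power-of-two share of EQs to each IB port and append the legacy
 * vectors, so consumers see total_eqs completion vectors.
 */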
1872 static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
1873 {
1874 	char name[80];
1875 	int eq_per_port = 0;
1876 	int added_eqs = 0;
1877 	int total_eqs = 0;
1878 	int i, j, eq;
1879 
1880 	/* Legacy mode or comp_pool is not large enough */
1881 	if (dev->caps.comp_pool == 0 ||
1882 	    dev->caps.num_ports > dev->caps.comp_pool)
1883 		return;
1884 
1885 	eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool/
1886 					dev->caps.num_ports);
1887 
1888 	/* Init eq table */
1889 	added_eqs = 0;
1890 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
1891 		added_eqs += eq_per_port;
1892 
1893 	total_eqs = dev->caps.num_comp_vectors + added_eqs;
1894 
1895 	ibdev->eq_table = kzalloc(total_eqs * sizeof(int), GFP_KERNEL);
1896 	if (!ibdev->eq_table)
1897 		return;
1898 
1899 	ibdev->eq_added = added_eqs;
1900 
1901 	eq = 0;
1902 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) {
1903 		for (j = 0; j < eq_per_port; j++) {
1904 			snprintf(name, sizeof(name), "mlx4-ib-%d-%d@%s",
1905 				 i, j, dev->pdev->bus->name);
1906 			/* Set IRQ for specific name (per ring) */
1907 			if (mlx4_assign_eq(dev, name, NULL,
1908 					   &ibdev->eq_table[eq])) {
1909 				/* Use legacy (same as mlx4_en driver) */
1910 				pr_warn("Can't allocate EQ %d; reverting to legacy\n", eq);
1911 				ibdev->eq_table[eq] =
1912 					(eq % dev->caps.num_comp_vectors);
1913 			}
1914 			eq++;
1915 		}
1916 	}
1917 
1918 	/* Fill the rest of the vector with legacy EQs */
1919 	for (i = 0, eq = added_eqs; i < dev->caps.num_comp_vectors; i++)
1920 		ibdev->eq_table[eq++] = i;
1921 
1922 	/* Advertise the new number of EQs to clients */
1923 	ibdev->ib_dev.num_comp_vectors = total_eqs;
1924 }
1925 
1926 static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
1927 {
1928 	int i;
1929 
1930 	/* no additional eqs were added */
1931 	if (!ibdev->eq_table)
1932 		return;
1933 
1934 	/* Reset the advertised EQ number */
1935 	ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
1936 
1937 	/* Free only the added eqs */
1938 	for (i = 0; i < ibdev->eq_added; i++) {
1939 		/* Don't free legacy eqs if used */
1940 		if (ibdev->eq_table[i] <= dev->caps.num_comp_vectors)
1941 			continue;
1942 		mlx4_release_eq(dev, ibdev->eq_table[i]);
1943 	}
1944 
1945 	kfree(ibdev->eq_table);
1946 }
1947 
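/*
 * Main probe path: allocate the IB device, wire up the verbs entry
 * points, set up EQs, counters and the steering QPN range, register
 * with the IB core, then bring up MADs, SR-IOV and the IBoE notifiers.
 */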
1948 static void *mlx4_ib_add(struct mlx4_dev *dev)
1949 {
1950 	struct mlx4_ib_dev *ibdev;
1951 	int num_ports = 0;
1952 	int i, j;
1953 	int err;
1954 	struct mlx4_ib_iboe *iboe;
1955 	int ib_num_ports = 0;
1956 
1957 	pr_info_once("%s", mlx4_ib_version);
1958 
1959 	num_ports = 0;
1960 	mlx4_foreach_ib_transport_port(i, dev)
1961 		num_ports++;
1962 
1963 	/* No point in registering a device with no ports... */
1964 	if (num_ports == 0)
1965 		return NULL;
1966 
1967 	ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
1968 	if (!ibdev) {
1969 		dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
1970 		return NULL;
1971 	}
1972 
1973 	iboe = &ibdev->iboe;
1974 
1975 	if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
1976 		goto err_dealloc;
1977 
1978 	if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
1979 		goto err_pd;
1980 
1981 	ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
1982 				 PAGE_SIZE);
1983 	if (!ibdev->uar_map)
1984 		goto err_uar;
1985 	MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
1986 
1987 	ibdev->dev = dev;
1988 
1989 	strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
1990 	ibdev->ib_dev.owner		= THIS_MODULE;
1991 	ibdev->ib_dev.node_type		= RDMA_NODE_IB_CA;
1992 	ibdev->ib_dev.local_dma_lkey	= dev->caps.reserved_lkey;
1993 	ibdev->num_ports		= num_ports;
1994 	ibdev->ib_dev.phys_port_cnt     = ibdev->num_ports;
1995 	ibdev->ib_dev.num_comp_vectors	= dev->caps.num_comp_vectors;
1996 	ibdev->ib_dev.dma_device	= &dev->pdev->dev;
1997 
1998 	if (dev->caps.userspace_caps)
1999 		ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
2000 	else
2001 		ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
2002 
2003 	ibdev->ib_dev.uverbs_cmd_mask	=
2004 		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
2005 		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
2006 		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
2007 		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
2008 		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
2009 		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
2010 		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
2011 		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
2012 		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
2013 		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
2014 		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
2015 		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
2016 		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
2017 		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
2018 		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
2019 		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)	|
2020 		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST)	|
2021 		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
2022 		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
2023 		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
2024 		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)		|
2025 		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)		|
2026 		(1ull << IB_USER_VERBS_CMD_OPEN_QP);
2027 
2028 	ibdev->ib_dev.query_device	= mlx4_ib_query_device;
2029 	ibdev->ib_dev.query_port	= mlx4_ib_query_port;
2030 	ibdev->ib_dev.get_link_layer	= mlx4_ib_port_link_layer;
2031 	ibdev->ib_dev.query_gid		= mlx4_ib_query_gid;
2032 	ibdev->ib_dev.query_pkey	= mlx4_ib_query_pkey;
2033 	ibdev->ib_dev.modify_device	= mlx4_ib_modify_device;
2034 	ibdev->ib_dev.modify_port	= mlx4_ib_modify_port;
2035 	ibdev->ib_dev.alloc_ucontext	= mlx4_ib_alloc_ucontext;
2036 	ibdev->ib_dev.dealloc_ucontext	= mlx4_ib_dealloc_ucontext;
2037 	ibdev->ib_dev.mmap		= mlx4_ib_mmap;
2038 	ibdev->ib_dev.alloc_pd		= mlx4_ib_alloc_pd;
2039 	ibdev->ib_dev.dealloc_pd	= mlx4_ib_dealloc_pd;
2040 	ibdev->ib_dev.create_ah		= mlx4_ib_create_ah;
2041 	ibdev->ib_dev.query_ah		= mlx4_ib_query_ah;
2042 	ibdev->ib_dev.destroy_ah	= mlx4_ib_destroy_ah;
2043 	ibdev->ib_dev.create_srq	= mlx4_ib_create_srq;
2044 	ibdev->ib_dev.modify_srq	= mlx4_ib_modify_srq;
2045 	ibdev->ib_dev.query_srq		= mlx4_ib_query_srq;
2046 	ibdev->ib_dev.destroy_srq	= mlx4_ib_destroy_srq;
2047 	ibdev->ib_dev.post_srq_recv	= mlx4_ib_post_srq_recv;
2048 	ibdev->ib_dev.create_qp		= mlx4_ib_create_qp;
2049 	ibdev->ib_dev.modify_qp		= mlx4_ib_modify_qp;
2050 	ibdev->ib_dev.query_qp		= mlx4_ib_query_qp;
2051 	ibdev->ib_dev.destroy_qp	= mlx4_ib_destroy_qp;
2052 	ibdev->ib_dev.post_send		= mlx4_ib_post_send;
2053 	ibdev->ib_dev.post_recv		= mlx4_ib_post_recv;
2054 	ibdev->ib_dev.create_cq		= mlx4_ib_create_cq;
2055 	ibdev->ib_dev.modify_cq		= mlx4_ib_modify_cq;
2056 	ibdev->ib_dev.resize_cq		= mlx4_ib_resize_cq;
2057 	ibdev->ib_dev.destroy_cq	= mlx4_ib_destroy_cq;
2058 	ibdev->ib_dev.poll_cq		= mlx4_ib_poll_cq;
2059 	ibdev->ib_dev.req_notify_cq	= mlx4_ib_arm_cq;
2060 	ibdev->ib_dev.get_dma_mr	= mlx4_ib_get_dma_mr;
2061 	ibdev->ib_dev.reg_user_mr	= mlx4_ib_reg_user_mr;
2062 	ibdev->ib_dev.dereg_mr		= mlx4_ib_dereg_mr;
2063 	ibdev->ib_dev.alloc_fast_reg_mr = mlx4_ib_alloc_fast_reg_mr;
2064 	ibdev->ib_dev.alloc_fast_reg_page_list = mlx4_ib_alloc_fast_reg_page_list;
2065 	ibdev->ib_dev.free_fast_reg_page_list  = mlx4_ib_free_fast_reg_page_list;
2066 	ibdev->ib_dev.attach_mcast	= mlx4_ib_mcg_attach;
2067 	ibdev->ib_dev.detach_mcast	= mlx4_ib_mcg_detach;
2068 	ibdev->ib_dev.process_mad	= mlx4_ib_process_mad;
2069 
2070 	if (!mlx4_is_slave(ibdev->dev)) {
2071 		ibdev->ib_dev.alloc_fmr		= mlx4_ib_fmr_alloc;
2072 		ibdev->ib_dev.map_phys_fmr	= mlx4_ib_map_phys_fmr;
2073 		ibdev->ib_dev.unmap_fmr		= mlx4_ib_unmap_fmr;
2074 		ibdev->ib_dev.dealloc_fmr	= mlx4_ib_fmr_dealloc;
2075 	}
2076 
2077 	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
2078 	    dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
2079 		ibdev->ib_dev.alloc_mw = mlx4_ib_alloc_mw;
2080 		ibdev->ib_dev.bind_mw = mlx4_ib_bind_mw;
2081 		ibdev->ib_dev.dealloc_mw = mlx4_ib_dealloc_mw;
2082 
2083 		ibdev->ib_dev.uverbs_cmd_mask |=
2084 			(1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
2085 			(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
2086 	}
2087 
2088 	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
2089 		ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
2090 		ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd;
2091 		ibdev->ib_dev.uverbs_cmd_mask |=
2092 			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
2093 			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
2094 	}
2095 
2096 	if (check_flow_steering_support(dev)) {
2097 		ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
2098 		ibdev->ib_dev.create_flow	= mlx4_ib_create_flow;
2099 		ibdev->ib_dev.destroy_flow	= mlx4_ib_destroy_flow;
2100 
2101 		ibdev->ib_dev.uverbs_ex_cmd_mask	|=
2102 			(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
2103 			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
2104 	}
2105 
2106 	mlx4_ib_alloc_eqs(dev, ibdev);
2107 
2108 	spin_lock_init(&iboe->lock);
2109 
2110 	if (init_node_data(ibdev))
2111 		goto err_map;
2112 
2113 	for (i = 0; i < ibdev->num_ports; ++i) {
2114 		mutex_init(&ibdev->qp1_proxy_lock[i]);
2115 		if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
2116 						IB_LINK_LAYER_ETHERNET) {
2117 			err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]);
2118 			if (err)
2119 				ibdev->counters[i] = -1;
2120 		} else {
2121 			ibdev->counters[i] = -1;
2122 		}
2123 	}
2124 
2125 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2126 		ib_num_ports++;
2127 
2128 	spin_lock_init(&ibdev->sm_lock);
2129 	mutex_init(&ibdev->cap_mask_mutex);
2130 
2131 	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
2132 	    ib_num_ports) {
2133 		ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
2134 		err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
2135 					    MLX4_IB_UC_STEER_QPN_ALIGN,
2136 					    &ibdev->steer_qpn_base);
2137 		if (err)
2138 			goto err_counter;
2139 
2140 		ibdev->ib_uc_qpns_bitmap =
2141 			kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) *
2142 				sizeof(long),
2143 				GFP_KERNEL);
2144 		if (!ibdev->ib_uc_qpns_bitmap) {
2145 			dev_err(&dev->pdev->dev, "bit map alloc failed\n");
2146 			goto err_steer_qp_release;
2147 		}
2148 
2149 		bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count);
2150 
2151 		err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
2152 				dev, ibdev->steer_qpn_base,
2153 				ibdev->steer_qpn_base +
2154 				ibdev->steer_qpn_count - 1);
2155 		if (err)
2156 			goto err_steer_free_bitmap;
2157 	}
2158 
2159 	if (ib_register_device(&ibdev->ib_dev, NULL))
2160 		goto err_steer_free_bitmap;
2161 
2162 	if (mlx4_ib_mad_init(ibdev))
2163 		goto err_reg;
2164 
2165 	if (mlx4_ib_init_sriov(ibdev))
2166 		goto err_mad;
2167 
2168 	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE) {
2169 		if (!iboe->nb.notifier_call) {
2170 			iboe->nb.notifier_call = mlx4_ib_netdev_event;
2171 			err = register_netdevice_notifier(&iboe->nb);
2172 			if (err) {
2173 				iboe->nb.notifier_call = NULL;
2174 				goto err_notif;
2175 			}
2176 		}
2177 		if (!iboe->nb_inet.notifier_call) {
2178 			iboe->nb_inet.notifier_call = mlx4_ib_inet_event;
2179 			err = register_inetaddr_notifier(&iboe->nb_inet);
2180 			if (err) {
2181 				iboe->nb_inet.notifier_call = NULL;
2182 				goto err_notif;
2183 			}
2184 		}
2185 #if IS_ENABLED(CONFIG_IPV6)
2186 		if (!iboe->nb_inet6.notifier_call) {
2187 			iboe->nb_inet6.notifier_call = mlx4_ib_inet6_event;
2188 			err = register_inet6addr_notifier(&iboe->nb_inet6);
2189 			if (err) {
2190 				iboe->nb_inet6.notifier_call = NULL;
2191 				goto err_notif;
2192 			}
2193 		}
2194 #endif
2195 		for (i = 1; i <= ibdev->num_ports; ++i)
2196 			reset_gid_table(ibdev, i);
2197 		rtnl_lock();
2198 		mlx4_ib_scan_netdevs(ibdev, NULL, 0);
2199 		rtnl_unlock();
2200 		mlx4_ib_init_gid_table(ibdev);
2201 	}
2202 
2203 	for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
2204 		if (device_create_file(&ibdev->ib_dev.dev,
2205 				       mlx4_class_attributes[j]))
2206 			goto err_notif;
2207 	}
2208 
2209 	ibdev->ib_active = true;
2210 
2211 	if (mlx4_is_mfunc(ibdev->dev))
2212 		init_pkeys(ibdev);
2213 
2214 	/* create paravirt contexts for any VFs which are active */
2215 	if (mlx4_is_master(ibdev->dev)) {
2216 		for (j = 0; j < MLX4_MFUNC_MAX; j++) {
2217 			if (j == mlx4_master_func_num(ibdev->dev))
2218 				continue;
2219 			if (mlx4_is_slave_active(ibdev->dev, j))
2220 				do_slave_init(ibdev, j, 1);
2221 		}
2222 	}
2223 	return ibdev;
2224 
2225 err_notif:
2226 	if (ibdev->iboe.nb.notifier_call) {
2227 		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2228 			pr_warn("failure unregistering notifier\n");
2229 		ibdev->iboe.nb.notifier_call = NULL;
2230 	}
2231 	if (ibdev->iboe.nb_inet.notifier_call) {
2232 		if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet))
2233 			pr_warn("failure unregistering notifier\n");
2234 		ibdev->iboe.nb_inet.notifier_call = NULL;
2235 	}
2236 #if IS_ENABLED(CONFIG_IPV6)
2237 	if (ibdev->iboe.nb_inet6.notifier_call) {
2238 		if (unregister_inet6addr_notifier(&ibdev->iboe.nb_inet6))
2239 			pr_warn("failure unregistering notifier\n");
2240 		ibdev->iboe.nb_inet6.notifier_call = NULL;
2241 	}
2242 #endif
2243 	flush_workqueue(wq);
2244 
2245 	mlx4_ib_close_sriov(ibdev);
2246 
2247 err_mad:
2248 	mlx4_ib_mad_cleanup(ibdev);
2249 
2250 err_reg:
2251 	ib_unregister_device(&ibdev->ib_dev);
2252 
2253 err_steer_free_bitmap:
2254 	kfree(ibdev->ib_uc_qpns_bitmap);
2255 
2256 err_steer_qp_release:
2257 	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
2258 		mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2259 				      ibdev->steer_qpn_count);
2260 err_counter:
2261 	for (; i; --i)
2262 		if (ibdev->counters[i - 1] != -1)
2263 			mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
2264 
2265 err_map:
2266 	iounmap(ibdev->uar_map);
2267 
2268 err_uar:
2269 	mlx4_uar_free(dev, &ibdev->priv_uar);
2270 
2271 err_pd:
2272 	mlx4_pd_free(dev, ibdev->priv_pdn);
2273 
2274 err_dealloc:
2275 	ib_dealloc_device(&ibdev->ib_dev);
2276 
2277 	return NULL;
2278 }
2279 
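/*
 * Allocate a naturally aligned block of 'count' QPNs from the steering
 * QPN range reserved at probe time, using the ib_uc_qpns_bitmap.
 */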
2280 int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
2281 {
2282 	int offset;
2283 
2284 	WARN_ON(!dev->ib_uc_qpns_bitmap);
2285 
2286 	offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
2287 					 dev->steer_qpn_count,
2288 					 get_count_order(count));
2289 	if (offset < 0)
2290 		return offset;
2291 
2292 	*qpn = dev->steer_qpn_base + offset;
2293 	return 0;
2294 }
2295 
2296 void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
2297 {
2298 	if (!qpn ||
2299 	    dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED)
2300 		return;
2301 
2302 	BUG_ON(qpn < dev->steer_qpn_base);
2303 
2304 	bitmap_release_region(dev->ib_uc_qpns_bitmap,
2305 			      qpn - dev->steer_qpn_base,
2306 			      get_count_order(count));
2307 }
2308 
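/*
 * Attach or detach a catch-all IB L2 flow rule (empty mask) steering
 * traffic to the given QP, identified afterwards by mqp->reg_id.
 */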
2309 int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
2310 			 int is_attach)
2311 {
2312 	int err;
2313 	size_t flow_size;
2314 	struct ib_flow_attr *flow = NULL;
2315 	struct ib_flow_spec_ib *ib_spec;
2316 
2317 	if (is_attach) {
2318 		flow_size = sizeof(struct ib_flow_attr) +
2319 			    sizeof(struct ib_flow_spec_ib);
2320 		flow = kzalloc(flow_size, GFP_KERNEL);
2321 		if (!flow)
2322 			return -ENOMEM;
2323 		flow->port = mqp->port;
2324 		flow->num_of_specs = 1;
2325 		flow->size = flow_size;
2326 		ib_spec = (struct ib_flow_spec_ib *)(flow + 1);
2327 		ib_spec->type = IB_FLOW_SPEC_IB;
2328 		ib_spec->size = sizeof(struct ib_flow_spec_ib);
2329 		/* Add an empty rule for IB L2 */
2330 		memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));
2331 
2332 		err = __mlx4_ib_create_flow(&mqp->ibqp, flow,
2333 					    IB_FLOW_DOMAIN_NIC,
2334 					    MLX4_FS_REGULAR,
2335 					    &mqp->reg_id);
2336 	} else {
2337 		err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
2338 	}
2339 	kfree(flow);
2340 	return err;
2341 }
2342 
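/*
 * Teardown path: undo what mlx4_ib_add() set up -- unregister the
 * notifiers and the IB device, release the steering QPN range, free
 * counters, EQs, the UAR mapping and the PD.
 */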
2343 static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
2344 {
2345 	struct mlx4_ib_dev *ibdev = ibdev_ptr;
2346 	int p;
2347 
2348 	mlx4_ib_close_sriov(ibdev);
2349 	mlx4_ib_mad_cleanup(ibdev);
2350 	ib_unregister_device(&ibdev->ib_dev);
2351 	if (ibdev->iboe.nb.notifier_call) {
2352 		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2353 			pr_warn("failure unregistering notifier\n");
2354 		ibdev->iboe.nb.notifier_call = NULL;
2355 	}
2356 
2357 	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
2358 		mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2359 				      ibdev->steer_qpn_count);
2360 		kfree(ibdev->ib_uc_qpns_bitmap);
2361 	}
2362 
2363 	if (ibdev->iboe.nb_inet.notifier_call) {
2364 		if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet))
2365 			pr_warn("failure unregistering notifier\n");
2366 		ibdev->iboe.nb_inet.notifier_call = NULL;
2367 	}
2368 #if IS_ENABLED(CONFIG_IPV6)
2369 	if (ibdev->iboe.nb_inet6.notifier_call) {
2370 		if (unregister_inet6addr_notifier(&ibdev->iboe.nb_inet6))
2371 			pr_warn("failure unregistering notifier\n");
2372 		ibdev->iboe.nb_inet6.notifier_call = NULL;
2373 	}
2374 #endif
2375 
2376 	iounmap(ibdev->uar_map);
2377 	for (p = 0; p < ibdev->num_ports; ++p)
2378 		if (ibdev->counters[p] != -1)
2379 			mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
2380 	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
2381 		mlx4_CLOSE_PORT(dev, p);
2382 
2383 	mlx4_ib_free_eqs(dev, ibdev);
2384 
2385 	mlx4_uar_free(dev, &ibdev->priv_uar);
2386 	mlx4_pd_free(dev, ibdev->priv_pdn);
2387 	ib_dealloc_device(&ibdev->ib_dev);
2388 }
2389 
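/*
 * On the master, queue tunnel QP setup (or teardown, per do_init) work
 * for each active port of the given slave, unless SR-IOV is already
 * going down.
 */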
2390 static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
2391 {
2392 	struct mlx4_ib_demux_work **dm = NULL;
2393 	struct mlx4_dev *dev = ibdev->dev;
2394 	int i;
2395 	unsigned long flags;
2396 	struct mlx4_active_ports actv_ports;
2397 	unsigned int ports;
2398 	unsigned int first_port;
2399 
2400 	if (!mlx4_is_master(dev))
2401 		return;
2402 
2403 	actv_ports = mlx4_get_active_ports(dev, slave);
2404 	ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
2405 	first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
2406 
2407 	dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
2408 	if (!dm) {
2409 		pr_err("failed to allocate memory for tunneling qp update\n");
2410 		goto out;
2411 	}
2412 
2413 	for (i = 0; i < ports; i++) {
2414 		dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
2415 		if (!dm[i]) {
2416 			pr_err("failed to allocate memory for tunneling qp update work struct\n");
2417 				/* free only the entries allocated so far;
2418 				 * dm[] has just 'ports' slots
2419 				 */
2420 				while (--i >= 0)
2421 					kfree(dm[i]);
2421 			goto out;
2422 		}
2423 	}
2424 	/* initialize or tear down tunnel QPs for the slave */
2425 	for (i = 0; i < ports; i++) {
2426 		INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
2427 		dm[i]->port = first_port + i + 1;
2428 		dm[i]->slave = slave;
2429 		dm[i]->do_init = do_init;
2430 		dm[i]->dev = ibdev;
2431 		spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
2432 		if (!ibdev->sriov.is_going_down)
2433 			queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
2434 		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
2435 	}
2436 out:
2437 	kfree(dm);
2438 	return;
2439 }
2440 
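/*
 * Translate mlx4 core events into IB events (port active/error, device
 * fatal) and handle management and slave lifecycle events, queuing port
 * management changes to the workqueue on the master.
 */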
2441 static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
2442 			  enum mlx4_dev_event event, unsigned long param)
2443 {
2444 	struct ib_event ibev;
2445 	struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
2446 	struct mlx4_eqe *eqe = NULL;
2447 	struct ib_event_work *ew;
2448 	int p = 0;
2449 
2450 	if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
2451 		eqe = (struct mlx4_eqe *)param;
2452 	else
2453 		p = (int) param;
2454 
2455 	switch (event) {
2456 	case MLX4_DEV_EVENT_PORT_UP:
2457 		if (p > ibdev->num_ports)
2458 			return;
2459 		if (mlx4_is_master(dev) &&
2460 		    rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
2461 			IB_LINK_LAYER_INFINIBAND) {
2462 			mlx4_ib_invalidate_all_guid_record(ibdev, p);
2463 		}
2464 		ibev.event = IB_EVENT_PORT_ACTIVE;
2465 		break;
2466 
2467 	case MLX4_DEV_EVENT_PORT_DOWN:
2468 		if (p > ibdev->num_ports)
2469 			return;
2470 		ibev.event = IB_EVENT_PORT_ERR;
2471 		break;
2472 
2473 	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
2474 		ibdev->ib_active = false;
2475 		ibev.event = IB_EVENT_DEVICE_FATAL;
2476 		break;
2477 
2478 	case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
2479 		ew = kmalloc(sizeof *ew, GFP_ATOMIC);
2480 		if (!ew) {
2481 			pr_err("failed to allocate memory for events work\n");
2482 			break;
2483 		}
2484 
2485 		INIT_WORK(&ew->work, handle_port_mgmt_change_event);
2486 		memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
2487 		ew->ib_dev = ibdev;
2488 		/* need to queue only for port owner, which uses GEN_EQE */
2489 		if (mlx4_is_master(dev))
2490 			queue_work(wq, &ew->work);
2491 		else
2492 			handle_port_mgmt_change_event(&ew->work);
2493 		return;
2494 
2495 	case MLX4_DEV_EVENT_SLAVE_INIT:
2496 		/* here, p is the slave id */
2497 		do_slave_init(ibdev, p, 1);
2498 		return;
2499 
2500 	case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
2501 		/* here, p is the slave id */
2502 		do_slave_init(ibdev, p, 0);
2503 		return;
2504 
2505 	default:
2506 		return;
2507 	}
2508 
2509 	ibev.device	      = ibdev_ptr;
2510 	ibev.element.port_num = (u8) p;
2511 
2512 	ib_dispatch_event(&ibev);
2513 }
2514 
2515 static struct mlx4_interface mlx4_ib_interface = {
2516 	.add		= mlx4_ib_add,
2517 	.remove		= mlx4_ib_remove,
2518 	.event		= mlx4_ib_event,
2519 	.protocol	= MLX4_PROT_IB_IPV6
2520 };
2521 
2522 static int __init mlx4_ib_init(void)
2523 {
2524 	int err;
2525 
2526 	wq = create_singlethread_workqueue("mlx4_ib");
2527 	if (!wq)
2528 		return -ENOMEM;
2529 
2530 	err = mlx4_ib_mcg_init();
2531 	if (err)
2532 		goto clean_wq;
2533 
2534 	err = mlx4_register_interface(&mlx4_ib_interface);
2535 	if (err)
2536 		goto clean_mcg;
2537 
2538 	return 0;
2539 
2540 clean_mcg:
2541 	mlx4_ib_mcg_destroy();
2542 
2543 clean_wq:
2544 	destroy_workqueue(wq);
2545 	return err;
2546 }
2547 
2548 static void __exit mlx4_ib_cleanup(void)
2549 {
2550 	mlx4_unregister_interface(&mlx4_ib_interface);
2551 	mlx4_ib_mcg_destroy();
2552 	destroy_workqueue(wq);
2553 }
2554 
2555 module_init(mlx4_ib_init);
2556 module_exit(mlx4_ib_cleanup);
2557