xref: /openbmc/linux/drivers/infiniband/hw/mlx4/main.c (revision 31e67366)
1 /*
2  * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 
34 #include <linux/module.h>
35 #include <linux/init.h>
36 #include <linux/slab.h>
37 #include <linux/errno.h>
38 #include <linux/netdevice.h>
39 #include <linux/inetdevice.h>
40 #include <linux/rtnetlink.h>
41 #include <linux/if_vlan.h>
42 #include <linux/sched/mm.h>
43 #include <linux/sched/task.h>
44 
45 #include <net/ipv6.h>
46 #include <net/addrconf.h>
47 #include <net/devlink.h>
48 
49 #include <rdma/ib_smi.h>
50 #include <rdma/ib_user_verbs.h>
51 #include <rdma/ib_addr.h>
52 #include <rdma/ib_cache.h>
53 
54 #include <net/bonding.h>
55 
56 #include <linux/mlx4/driver.h>
57 #include <linux/mlx4/cmd.h>
58 #include <linux/mlx4/qp.h>
59 
60 #include "mlx4_ib.h"
61 #include <rdma/mlx4-abi.h>
62 
63 #define DRV_NAME	MLX4_IB_DRV_NAME
64 #define DRV_VERSION	"4.0-0"
65 
66 #define MLX4_IB_FLOW_MAX_PRIO 0xFFF
67 #define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
68 #define MLX4_IB_CARD_REV_A0   0xA0
69 
70 MODULE_AUTHOR("Roland Dreier");
71 MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
72 MODULE_LICENSE("Dual BSD/GPL");
73 
74 int mlx4_ib_sm_guid_assign = 0;
75 module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
76 MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 0)");
77 
78 static const char mlx4_ib_version[] =
79 	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
80 	DRV_VERSION "\n";
81 
82 static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
83 static enum rdma_link_layer mlx4_ib_port_link_layer(struct ib_device *device,
84 						    u8 port_num);
85 
86 static struct workqueue_struct *wq;
87 
88 static void init_query_mad(struct ib_smp *mad)
89 {
90 	mad->base_version  = 1;
91 	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
92 	mad->class_version = 1;
93 	mad->method	   = IB_MGMT_METHOD_GET;
94 }
95 
96 static int check_flow_steering_support(struct mlx4_dev *dev)
97 {
98 	int eth_num_ports = 0;
99 	int ib_num_ports = 0;
100 
101 	int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;
102 
103 	if (dmfs) {
104 		int i;
105 		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
106 			eth_num_ports++;
107 		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
108 			ib_num_ports++;
109 		dmfs &= (!ib_num_ports ||
110 			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) &&
111 			(!eth_num_ports ||
112 			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN));
113 		if (ib_num_ports && mlx4_is_mfunc(dev)) {
114 			pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n");
115 			dmfs = 0;
116 		}
117 	}
118 	return dmfs;
119 }
120 
121 static int num_ib_ports(struct mlx4_dev *dev)
122 {
123 	int ib_ports = 0;
124 	int i;
125 
126 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
127 		ib_ports++;
128 
129 	return ib_ports;
130 }
131 
132 static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, u8 port_num)
133 {
134 	struct mlx4_ib_dev *ibdev = to_mdev(device);
135 	struct net_device *dev;
136 
137 	rcu_read_lock();
138 	dev = mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port_num);
139 
140 	if (dev) {
141 		if (mlx4_is_bonded(ibdev->dev)) {
142 			struct net_device *upper = NULL;
143 
144 			upper = netdev_master_upper_dev_get_rcu(dev);
145 			if (upper) {
146 				struct net_device *active;
147 
148 				active = bond_option_active_slave_get_rcu(netdev_priv(upper));
149 				if (active)
150 					dev = active;
151 			}
152 		}
153 	}
154 	if (dev)
155 		dev_hold(dev);
156 
157 	rcu_read_unlock();
158 	return dev;
159 }
160 
161 static int mlx4_ib_update_gids_v1(struct gid_entry *gids,
162 				  struct mlx4_ib_dev *ibdev,
163 				  u8 port_num)
164 {
165 	struct mlx4_cmd_mailbox *mailbox;
166 	int err;
167 	struct mlx4_dev *dev = ibdev->dev;
168 	int i;
169 	union ib_gid *gid_tbl;
170 
171 	mailbox = mlx4_alloc_cmd_mailbox(dev);
172 	if (IS_ERR(mailbox))
173 		return -ENOMEM;
174 
175 	gid_tbl = mailbox->buf;
176 
177 	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
178 		memcpy(&gid_tbl[i], &gids[i].gid, sizeof(union ib_gid));
179 
180 	err = mlx4_cmd(dev, mailbox->dma,
181 		       MLX4_SET_PORT_GID_TABLE << 8 | port_num,
182 		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
183 		       MLX4_CMD_WRAPPED);
184 	if (mlx4_is_bonded(dev))
185 		err += mlx4_cmd(dev, mailbox->dma,
186 				MLX4_SET_PORT_GID_TABLE << 8 | 2,
187 				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
188 				MLX4_CMD_WRAPPED);
189 
190 	mlx4_free_cmd_mailbox(dev, mailbox);
191 	return err;
192 }
193 
194 static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
195 				     struct mlx4_ib_dev *ibdev,
196 				     u8 port_num)
197 {
198 	struct mlx4_cmd_mailbox *mailbox;
199 	int err;
200 	struct mlx4_dev *dev = ibdev->dev;
201 	int i;
202 	struct {
203 		union ib_gid	gid;
204 		__be32		rsrvd1[2];
205 		__be16		rsrvd2;
206 		u8		type;
207 		u8		version;
208 		__be32		rsrvd3;
209 	} *gid_tbl;
210 
211 	mailbox = mlx4_alloc_cmd_mailbox(dev);
212 	if (IS_ERR(mailbox))
213 		return -ENOMEM;
214 
215 	gid_tbl = mailbox->buf;
216 	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
217 		memcpy(&gid_tbl[i].gid, &gids[i].gid, sizeof(union ib_gid));
218 		if (gids[i].gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
219 			gid_tbl[i].version = 2;
220 			if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
221 				gid_tbl[i].type = 1;
222 		}
223 	}
224 
225 	err = mlx4_cmd(dev, mailbox->dma,
226 		       MLX4_SET_PORT_ROCE_ADDR << 8 | port_num,
227 		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
228 		       MLX4_CMD_WRAPPED);
229 	if (mlx4_is_bonded(dev))
230 		err += mlx4_cmd(dev, mailbox->dma,
231 				MLX4_SET_PORT_ROCE_ADDR << 8 | 2,
232 				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
233 				MLX4_CMD_WRAPPED);
234 
235 	mlx4_free_cmd_mailbox(dev, mailbox);
236 	return err;
237 }
238 
239 static int mlx4_ib_update_gids(struct gid_entry *gids,
240 			       struct mlx4_ib_dev *ibdev,
241 			       u8 port_num)
242 {
243 	if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
244 		return mlx4_ib_update_gids_v1_v2(gids, ibdev, port_num);
245 
246 	return mlx4_ib_update_gids_v1(gids, ibdev, port_num);
247 }
248 
249 static void free_gid_entry(struct gid_entry *entry)
250 {
251 	memset(&entry->gid, 0, sizeof(entry->gid));
252 	kfree(entry->ctx);
253 	entry->ctx = NULL;
254 }
255 
256 static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
257 {
258 	struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
259 	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
260 	struct mlx4_port_gid_table   *port_gid_table;
261 	int free = -1, found = -1;
262 	int ret = 0;
263 	int hw_update = 0;
264 	int i;
265 	struct gid_entry *gids = NULL;
266 	u16 vlan_id = 0xffff;
267 	u8 mac[ETH_ALEN];
268 
269 	if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
270 		return -EINVAL;
271 
272 	if (attr->port_num > MLX4_MAX_PORTS)
273 		return -EINVAL;
274 
275 	if (!context)
276 		return -EINVAL;
277 
278 	ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
279 	if (ret)
280 		return ret;
281 	port_gid_table = &iboe->gids[attr->port_num - 1];
282 	spin_lock_bh(&iboe->lock);
283 	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
284 		if (!memcmp(&port_gid_table->gids[i].gid,
285 			    &attr->gid, sizeof(attr->gid)) &&
286 		    port_gid_table->gids[i].gid_type == attr->gid_type &&
287 		    port_gid_table->gids[i].vlan_id == vlan_id)  {
288 			found = i;
289 			break;
290 		}
291 		if (free < 0 && rdma_is_zero_gid(&port_gid_table->gids[i].gid))
292 			free = i; /* HW has space */
293 	}
294 
295 	if (found < 0) {
296 		if (free < 0) {
297 			ret = -ENOSPC;
298 		} else {
299 			port_gid_table->gids[free].ctx = kmalloc(sizeof(*port_gid_table->gids[free].ctx), GFP_ATOMIC);
300 			if (!port_gid_table->gids[free].ctx) {
301 				ret = -ENOMEM;
302 			} else {
303 				*context = port_gid_table->gids[free].ctx;
304 				memcpy(&port_gid_table->gids[free].gid,
305 				       &attr->gid, sizeof(attr->gid));
306 				port_gid_table->gids[free].gid_type = attr->gid_type;
307 				port_gid_table->gids[free].vlan_id = vlan_id;
308 				port_gid_table->gids[free].ctx->real_index = free;
309 				port_gid_table->gids[free].ctx->refcount = 1;
310 				hw_update = 1;
311 			}
312 		}
313 	} else {
314 		struct gid_cache_context *ctx = port_gid_table->gids[found].ctx;
315 		*context = ctx;
316 		ctx->refcount++;
317 	}
318 	if (!ret && hw_update) {
319 		gids = kmalloc_array(MLX4_MAX_PORT_GIDS, sizeof(*gids),
320 				     GFP_ATOMIC);
321 		if (!gids) {
322 			ret = -ENOMEM;
323 			*context = NULL;
324 			free_gid_entry(&port_gid_table->gids[free]);
325 		} else {
326 			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
327 				memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
328 				gids[i].gid_type = port_gid_table->gids[i].gid_type;
329 			}
330 		}
331 	}
332 	spin_unlock_bh(&iboe->lock);
333 
334 	if (!ret && hw_update) {
335 		ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
336 		if (ret) {
337 			spin_lock_bh(&iboe->lock);
338 			*context = NULL;
339 			free_gid_entry(&port_gid_table->gids[free]);
340 			spin_unlock_bh(&iboe->lock);
341 		}
342 		kfree(gids);
343 	}
344 
345 	return ret;
346 }
347 
348 static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context)
349 {
350 	struct gid_cache_context *ctx = *context;
351 	struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
352 	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
353 	struct mlx4_port_gid_table   *port_gid_table;
354 	int ret = 0;
355 	int hw_update = 0;
356 	struct gid_entry *gids = NULL;
357 
358 	if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
359 		return -EINVAL;
360 
361 	if (attr->port_num > MLX4_MAX_PORTS)
362 		return -EINVAL;
363 
364 	port_gid_table = &iboe->gids[attr->port_num - 1];
365 	spin_lock_bh(&iboe->lock);
366 	if (ctx) {
367 		ctx->refcount--;
368 		if (!ctx->refcount) {
369 			unsigned int real_index = ctx->real_index;
370 
371 			free_gid_entry(&port_gid_table->gids[real_index]);
372 			hw_update = 1;
373 		}
374 	}
375 	if (!ret && hw_update) {
376 		int i;
377 
378 		gids = kmalloc_array(MLX4_MAX_PORT_GIDS, sizeof(*gids),
379 				     GFP_ATOMIC);
380 		if (!gids) {
381 			ret = -ENOMEM;
382 		} else {
383 			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
384 				memcpy(&gids[i].gid,
385 				       &port_gid_table->gids[i].gid,
386 				       sizeof(union ib_gid));
387 				gids[i].gid_type =
388 				    port_gid_table->gids[i].gid_type;
389 			}
390 		}
391 	}
392 	spin_unlock_bh(&iboe->lock);
393 
394 	if (!ret && hw_update) {
395 		ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
396 		kfree(gids);
397 	}
398 	return ret;
399 }
400 
401 int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
402 				    const struct ib_gid_attr *attr)
403 {
404 	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
405 	struct gid_cache_context *ctx = NULL;
406 	struct mlx4_port_gid_table   *port_gid_table;
407 	int real_index = -EINVAL;
408 	int i;
409 	unsigned long flags;
410 	u8 port_num = attr->port_num;
411 
412 	if (port_num > MLX4_MAX_PORTS)
413 		return -EINVAL;
414 
415 	if (mlx4_is_bonded(ibdev->dev))
416 		port_num = 1;
417 
418 	if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num))
419 		return attr->index;
420 
421 	spin_lock_irqsave(&iboe->lock, flags);
422 	port_gid_table = &iboe->gids[port_num - 1];
423 
424 	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
425 		if (!memcmp(&port_gid_table->gids[i].gid,
426 			    &attr->gid, sizeof(attr->gid)) &&
427 		    attr->gid_type == port_gid_table->gids[i].gid_type) {
428 			ctx = port_gid_table->gids[i].ctx;
429 			break;
430 		}
431 	if (ctx)
432 		real_index = ctx->real_index;
433 	spin_unlock_irqrestore(&iboe->lock, flags);
434 	return real_index;
435 }
436 
437 static int mlx4_ib_query_device(struct ib_device *ibdev,
438 				struct ib_device_attr *props,
439 				struct ib_udata *uhw)
440 {
441 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
442 	struct ib_smp *in_mad  = NULL;
443 	struct ib_smp *out_mad = NULL;
444 	int err;
445 	int have_ib_ports;
446 	struct mlx4_uverbs_ex_query_device cmd;
447 	struct mlx4_uverbs_ex_query_device_resp resp = {};
448 	struct mlx4_clock_params clock_params;
449 
450 	if (uhw->inlen) {
451 		if (uhw->inlen < sizeof(cmd))
452 			return -EINVAL;
453 
454 		err = ib_copy_from_udata(&cmd, uhw, sizeof(cmd));
455 		if (err)
456 			return err;
457 
458 		if (cmd.comp_mask)
459 			return -EINVAL;
460 
461 		if (cmd.reserved)
462 			return -EINVAL;
463 	}
464 
465 	resp.response_length = offsetof(typeof(resp), response_length) +
466 		sizeof(resp.response_length);
467 	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
468 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
469 	err = -ENOMEM;
470 	if (!in_mad || !out_mad)
471 		goto out;
472 
473 	init_query_mad(in_mad);
474 	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
475 
476 	err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
477 			   1, NULL, NULL, in_mad, out_mad);
478 	if (err)
479 		goto out;
480 
481 	memset(props, 0, sizeof *props);
482 
483 	have_ib_ports = num_ib_ports(dev->dev);
484 
485 	props->fw_ver = dev->dev->caps.fw_ver;
486 	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
487 		IB_DEVICE_PORT_ACTIVE_EVENT		|
488 		IB_DEVICE_SYS_IMAGE_GUID		|
489 		IB_DEVICE_RC_RNR_NAK_GEN		|
490 		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
491 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
492 		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
493 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
494 		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
495 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
496 		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
497 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
498 		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
499 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
500 		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
501 	if (dev->dev->caps.max_gso_sz &&
502 	    (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
503 	    (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
504 		props->device_cap_flags |= IB_DEVICE_UD_TSO;
505 	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
506 		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
507 	if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
508 	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
509 	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
510 		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
511 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
512 		props->device_cap_flags |= IB_DEVICE_XRC;
513 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
514 		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW;
515 	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
516 		if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B)
517 			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
518 		else
519 			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
520 	}
521 	if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
522 		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
523 
524 	props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
525 
526 	props->vendor_id	   = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
527 		0xffffff;
528 	props->vendor_part_id	   = dev->dev->persist->pdev->device;
529 	props->hw_ver		   = be32_to_cpup((__be32 *) (out_mad->data + 32));
530 	memcpy(&props->sys_image_guid, out_mad->data +	4, 8);
531 
532 	props->max_mr_size	   = ~0ull;
533 	props->page_size_cap	   = dev->dev->caps.page_size_cap;
534 	props->max_qp		   = dev->dev->quotas.qp;
535 	props->max_qp_wr	   = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
536 	props->max_send_sge =
537 		min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
538 	props->max_recv_sge =
539 		min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
540 	props->max_sge_rd = MLX4_MAX_SGE_RD;
541 	props->max_cq		   = dev->dev->quotas.cq;
542 	props->max_cqe		   = dev->dev->caps.max_cqes;
543 	props->max_mr		   = dev->dev->quotas.mpt;
544 	props->max_pd		   = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
545 	props->max_qp_rd_atom	   = dev->dev->caps.max_qp_dest_rdma;
546 	props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
547 	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
548 	props->max_srq		   = dev->dev->quotas.srq;
549 	props->max_srq_wr	   = dev->dev->caps.max_srq_wqes - 1;
550 	props->max_srq_sge	   = dev->dev->caps.max_srq_sge;
551 	props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
552 	props->local_ca_ack_delay  = dev->dev->caps.local_ca_ack_delay;
553 	props->atomic_cap	   = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
554 		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
555 	props->masked_atomic_cap   = props->atomic_cap;
556 	props->max_pkeys	   = dev->dev->caps.pkey_table_len[1];
557 	props->max_mcast_grp	   = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
558 	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
559 	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
560 					   props->max_mcast_grp;
561 	props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
562 	props->timestamp_mask = 0xFFFFFFFFFFFFULL;
563 	props->max_ah = INT_MAX;
564 
565 	if (mlx4_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET ||
566 	    mlx4_ib_port_link_layer(ibdev, 2) == IB_LINK_LAYER_ETHERNET) {
567 		if (dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) {
568 			props->rss_caps.max_rwq_indirection_tables =
569 				props->max_qp;
570 			props->rss_caps.max_rwq_indirection_table_size =
571 				dev->dev->caps.max_rss_tbl_sz;
572 			props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
573 			props->max_wq_type_rq = props->max_qp;
574 		}
575 
576 		if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
577 			props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
578 	}
579 
580 	props->cq_caps.max_cq_moderation_count = MLX4_MAX_CQ_COUNT;
581 	props->cq_caps.max_cq_moderation_period = MLX4_MAX_CQ_PERIOD;
582 
583 	if (!mlx4_is_slave(dev->dev))
584 		err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
585 
586 	if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
587 		resp.response_length += sizeof(resp.hca_core_clock_offset);
588 		if (!err && !mlx4_is_slave(dev->dev)) {
589 			resp.comp_mask |= MLX4_IB_QUERY_DEV_RESP_MASK_CORE_CLOCK_OFFSET;
590 			resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
591 		}
592 	}
593 
594 	if (uhw->outlen >= resp.response_length +
595 	    sizeof(resp.max_inl_recv_sz)) {
596 		resp.response_length += sizeof(resp.max_inl_recv_sz);
597 		resp.max_inl_recv_sz  = dev->dev->caps.max_rq_sg *
598 			sizeof(struct mlx4_wqe_data_seg);
599 	}
600 
601 	if (offsetofend(typeof(resp), rss_caps) <= uhw->outlen) {
602 		if (props->rss_caps.supported_qpts) {
603 			resp.rss_caps.rx_hash_function =
604 				MLX4_IB_RX_HASH_FUNC_TOEPLITZ;
605 
606 			resp.rss_caps.rx_hash_fields_mask =
607 				MLX4_IB_RX_HASH_SRC_IPV4 |
608 				MLX4_IB_RX_HASH_DST_IPV4 |
609 				MLX4_IB_RX_HASH_SRC_IPV6 |
610 				MLX4_IB_RX_HASH_DST_IPV6 |
611 				MLX4_IB_RX_HASH_SRC_PORT_TCP |
612 				MLX4_IB_RX_HASH_DST_PORT_TCP |
613 				MLX4_IB_RX_HASH_SRC_PORT_UDP |
614 				MLX4_IB_RX_HASH_DST_PORT_UDP;
615 
616 			if (dev->dev->caps.tunnel_offload_mode ==
617 			    MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
618 				resp.rss_caps.rx_hash_fields_mask |=
619 					MLX4_IB_RX_HASH_INNER;
620 		}
621 		resp.response_length = offsetof(typeof(resp), rss_caps) +
622 				       sizeof(resp.rss_caps);
623 	}
624 
625 	if (offsetofend(typeof(resp), tso_caps) <= uhw->outlen) {
626 		if (dev->dev->caps.max_gso_sz &&
627 		    ((mlx4_ib_port_link_layer(ibdev, 1) ==
628 		    IB_LINK_LAYER_ETHERNET) ||
629 		    (mlx4_ib_port_link_layer(ibdev, 2) ==
630 		    IB_LINK_LAYER_ETHERNET))) {
631 			resp.tso_caps.max_tso = dev->dev->caps.max_gso_sz;
632 			resp.tso_caps.supported_qpts |=
633 				1 << IB_QPT_RAW_PACKET;
634 		}
635 		resp.response_length = offsetof(typeof(resp), tso_caps) +
636 				       sizeof(resp.tso_caps);
637 	}
638 
639 	if (uhw->outlen) {
640 		err = ib_copy_to_udata(uhw, &resp, resp.response_length);
641 		if (err)
642 			goto out;
643 	}
644 out:
645 	kfree(in_mad);
646 	kfree(out_mad);
647 
648 	return err;
649 }
650 
651 static enum rdma_link_layer
652 mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
653 {
654 	struct mlx4_dev *dev = to_mdev(device)->dev;
655 
656 	return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
657 		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
658 }
659 
660 static int ib_link_query_port(struct ib_device *ibdev, u8 port,
661 			      struct ib_port_attr *props, int netw_view)
662 {
663 	struct ib_smp *in_mad  = NULL;
664 	struct ib_smp *out_mad = NULL;
665 	int ext_active_speed;
666 	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
667 	int err = -ENOMEM;
668 
669 	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
670 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
671 	if (!in_mad || !out_mad)
672 		goto out;
673 
674 	init_query_mad(in_mad);
675 	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
676 	in_mad->attr_mod = cpu_to_be32(port);
677 
678 	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
679 		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
680 
681 	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
682 				in_mad, out_mad);
683 	if (err)
684 		goto out;
685 
686 
687 	props->lid		= be16_to_cpup((__be16 *) (out_mad->data + 16));
688 	props->lmc		= out_mad->data[34] & 0x7;
689 	props->sm_lid		= be16_to_cpup((__be16 *) (out_mad->data + 18));
690 	props->sm_sl		= out_mad->data[36] & 0xf;
691 	props->state		= out_mad->data[32] & 0xf;
692 	props->phys_state	= out_mad->data[33] >> 4;
693 	props->port_cap_flags	= be32_to_cpup((__be32 *) (out_mad->data + 20));
694 	if (netw_view)
695 		props->gid_tbl_len = out_mad->data[50];
696 	else
697 		props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
698 	props->max_msg_sz	= to_mdev(ibdev)->dev->caps.max_msg_sz;
699 	props->pkey_tbl_len	= to_mdev(ibdev)->dev->caps.pkey_table_len[port];
700 	props->bad_pkey_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 46));
701 	props->qkey_viol_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 48));
702 	props->active_width	= out_mad->data[31] & 0xf;
703 	props->active_speed	= out_mad->data[35] >> 4;
704 	props->max_mtu		= out_mad->data[41] & 0xf;
705 	props->active_mtu	= out_mad->data[36] >> 4;
706 	props->subnet_timeout	= out_mad->data[51] & 0x1f;
707 	props->max_vl_num	= out_mad->data[37] >> 4;
708 	props->init_type_reply	= out_mad->data[41] >> 4;
709 
710 	/* Check if extended speeds (EDR/FDR/...) are supported */
711 	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
712 		ext_active_speed = out_mad->data[62] >> 4;
713 
714 		switch (ext_active_speed) {
715 		case 1:
716 			props->active_speed = IB_SPEED_FDR;
717 			break;
718 		case 2:
719 			props->active_speed = IB_SPEED_EDR;
720 			break;
721 		}
722 	}
723 
724 	/* If reported active speed is QDR, check if is FDR-10 */
725 	if (props->active_speed == IB_SPEED_QDR) {
726 		init_query_mad(in_mad);
727 		in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
728 		in_mad->attr_mod = cpu_to_be32(port);
729 
730 		err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
731 				   NULL, NULL, in_mad, out_mad);
732 		if (err)
733 			goto out;
734 
735 		/* Checking LinkSpeedActive for FDR-10 */
736 		if (out_mad->data[15] & 0x1)
737 			props->active_speed = IB_SPEED_FDR10;
738 	}
739 
740 	/* Avoid wrong speed value returned by FW if the IB link is down. */
741 	if (props->state == IB_PORT_DOWN)
742 		 props->active_speed = IB_SPEED_SDR;
743 
744 out:
745 	kfree(in_mad);
746 	kfree(out_mad);
747 	return err;
748 }
749 
750 static u8 state_to_phys_state(enum ib_port_state state)
751 {
752 	return state == IB_PORT_ACTIVE ?
753 		IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
754 }
755 
756 static int eth_link_query_port(struct ib_device *ibdev, u8 port,
757 			       struct ib_port_attr *props)
758 {
759 
760 	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
761 	struct mlx4_ib_iboe *iboe = &mdev->iboe;
762 	struct net_device *ndev;
763 	enum ib_mtu tmp;
764 	struct mlx4_cmd_mailbox *mailbox;
765 	int err = 0;
766 	int is_bonded = mlx4_is_bonded(mdev->dev);
767 
768 	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
769 	if (IS_ERR(mailbox))
770 		return PTR_ERR(mailbox);
771 
772 	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
773 			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
774 			   MLX4_CMD_WRAPPED);
775 	if (err)
776 		goto out;
777 
778 	props->active_width	=  (((u8 *)mailbox->buf)[5] == 0x40) ||
779 				   (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
780 					   IB_WIDTH_4X : IB_WIDTH_1X;
781 	props->active_speed	=  (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
782 					   IB_SPEED_FDR : IB_SPEED_QDR;
783 	props->port_cap_flags	= IB_PORT_CM_SUP;
784 	props->ip_gids = true;
785 	props->gid_tbl_len	= mdev->dev->caps.gid_table_len[port];
786 	props->max_msg_sz	= mdev->dev->caps.max_msg_sz;
787 	if (mdev->dev->caps.pkey_table_len[port])
788 		props->pkey_tbl_len = 1;
789 	props->max_mtu		= IB_MTU_4096;
790 	props->max_vl_num	= 2;
791 	props->state		= IB_PORT_DOWN;
792 	props->phys_state	= state_to_phys_state(props->state);
793 	props->active_mtu	= IB_MTU_256;
794 	spin_lock_bh(&iboe->lock);
795 	ndev = iboe->netdevs[port - 1];
796 	if (ndev && is_bonded) {
797 		rcu_read_lock(); /* required to get upper dev */
798 		ndev = netdev_master_upper_dev_get_rcu(ndev);
799 		rcu_read_unlock();
800 	}
801 	if (!ndev)
802 		goto out_unlock;
803 
804 	tmp = iboe_get_mtu(ndev->mtu);
805 	props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;
806 
807 	props->state		= (netif_running(ndev) && netif_carrier_ok(ndev)) ?
808 					IB_PORT_ACTIVE : IB_PORT_DOWN;
809 	props->phys_state	= state_to_phys_state(props->state);
810 out_unlock:
811 	spin_unlock_bh(&iboe->lock);
812 out:
813 	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
814 	return err;
815 }
816 
817 int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
818 			 struct ib_port_attr *props, int netw_view)
819 {
820 	int err;
821 
822 	/* props being zeroed by the caller, avoid zeroing it here */
823 
824 	err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
825 		ib_link_query_port(ibdev, port, props, netw_view) :
826 				eth_link_query_port(ibdev, port, props);
827 
828 	return err;
829 }
830 
831 static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
832 			      struct ib_port_attr *props)
833 {
834 	/* returns host view */
835 	return __mlx4_ib_query_port(ibdev, port, props, 0);
836 }
837 
838 int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
839 			union ib_gid *gid, int netw_view)
840 {
841 	struct ib_smp *in_mad  = NULL;
842 	struct ib_smp *out_mad = NULL;
843 	int err = -ENOMEM;
844 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
845 	int clear = 0;
846 	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
847 
848 	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
849 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
850 	if (!in_mad || !out_mad)
851 		goto out;
852 
853 	init_query_mad(in_mad);
854 	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
855 	in_mad->attr_mod = cpu_to_be32(port);
856 
857 	if (mlx4_is_mfunc(dev->dev) && netw_view)
858 		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
859 
860 	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
861 	if (err)
862 		goto out;
863 
864 	memcpy(gid->raw, out_mad->data + 8, 8);
865 
866 	if (mlx4_is_mfunc(dev->dev) && !netw_view) {
867 		if (index) {
868 			/* For any index > 0, return the null guid */
869 			err = 0;
870 			clear = 1;
871 			goto out;
872 		}
873 	}
874 
875 	init_query_mad(in_mad);
876 	in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
877 	in_mad->attr_mod = cpu_to_be32(index / 8);
878 
879 	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
880 			   NULL, NULL, in_mad, out_mad);
881 	if (err)
882 		goto out;
883 
884 	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
885 
886 out:
887 	if (clear)
888 		memset(gid->raw + 8, 0, 8);
889 	kfree(in_mad);
890 	kfree(out_mad);
891 	return err;
892 }
893 
894 static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
895 			     union ib_gid *gid)
896 {
897 	if (rdma_protocol_ib(ibdev, port))
898 		return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);
899 	return 0;
900 }
901 
902 static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u8 port, u64 *sl2vl_tbl)
903 {
904 	union sl2vl_tbl_to_u64 sl2vl64;
905 	struct ib_smp *in_mad  = NULL;
906 	struct ib_smp *out_mad = NULL;
907 	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
908 	int err = -ENOMEM;
909 	int jj;
910 
911 	if (mlx4_is_slave(to_mdev(ibdev)->dev)) {
912 		*sl2vl_tbl = 0;
913 		return 0;
914 	}
915 
916 	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
917 	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
918 	if (!in_mad || !out_mad)
919 		goto out;
920 
921 	init_query_mad(in_mad);
922 	in_mad->attr_id  = IB_SMP_ATTR_SL_TO_VL_TABLE;
923 	in_mad->attr_mod = 0;
924 
925 	if (mlx4_is_mfunc(to_mdev(ibdev)->dev))
926 		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
927 
928 	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
929 			   in_mad, out_mad);
930 	if (err)
931 		goto out;
932 
933 	for (jj = 0; jj < 8; jj++)
934 		sl2vl64.sl8[jj] = ((struct ib_smp *)out_mad)->data[jj];
935 	*sl2vl_tbl = sl2vl64.sl64;
936 
937 out:
938 	kfree(in_mad);
939 	kfree(out_mad);
940 	return err;
941 }
942 
943 static void mlx4_init_sl2vl_tbl(struct mlx4_ib_dev *mdev)
944 {
945 	u64 sl2vl;
946 	int i;
947 	int err;
948 
949 	for (i = 1; i <= mdev->dev->caps.num_ports; i++) {
950 		if (mdev->dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
951 			continue;
952 		err = mlx4_ib_query_sl2vl(&mdev->ib_dev, i, &sl2vl);
953 		if (err) {
954 			pr_err("Unable to get default sl to vl mapping for port %d.  Using all zeroes (%d)\n",
955 			       i, err);
956 			sl2vl = 0;
957 		}
958 		atomic64_set(&mdev->sl2vl[i - 1], sl2vl);
959 	}
960 }
961 
962 int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
963 			 u16 *pkey, int netw_view)
964 {
965 	struct ib_smp *in_mad  = NULL;
966 	struct ib_smp *out_mad = NULL;
967 	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
968 	int err = -ENOMEM;
969 
970 	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
971 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
972 	if (!in_mad || !out_mad)
973 		goto out;
974 
975 	init_query_mad(in_mad);
976 	in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
977 	in_mad->attr_mod = cpu_to_be32(index / 32);
978 
979 	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
980 		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
981 
982 	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
983 			   in_mad, out_mad);
984 	if (err)
985 		goto out;
986 
987 	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);
988 
989 out:
990 	kfree(in_mad);
991 	kfree(out_mad);
992 	return err;
993 }
994 
995 static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
996 {
997 	return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
998 }
999 
1000 static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
1001 				 struct ib_device_modify *props)
1002 {
1003 	struct mlx4_cmd_mailbox *mailbox;
1004 	unsigned long flags;
1005 
1006 	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
1007 		return -EOPNOTSUPP;
1008 
1009 	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
1010 		return 0;
1011 
1012 	if (mlx4_is_slave(to_mdev(ibdev)->dev))
1013 		return -EOPNOTSUPP;
1014 
1015 	spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
1016 	memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
1017 	spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);
1018 
1019 	/*
1020 	 * If possible, pass node desc to FW, so it can generate
1021 	 * a 144 trap.  If cmd fails, just ignore.
1022 	 */
1023 	mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
1024 	if (IS_ERR(mailbox))
1025 		return 0;
1026 
1027 	memcpy(mailbox->buf, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
1028 	mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
1029 		 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1030 
1031 	mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);
1032 
1033 	return 0;
1034 }
1035 
1036 static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
1037 			    u32 cap_mask)
1038 {
1039 	struct mlx4_cmd_mailbox *mailbox;
1040 	int err;
1041 
1042 	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
1043 	if (IS_ERR(mailbox))
1044 		return PTR_ERR(mailbox);
1045 
1046 	if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
1047 		*(u8 *) mailbox->buf	     = !!reset_qkey_viols << 6;
1048 		((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
1049 	} else {
1050 		((u8 *) mailbox->buf)[3]     = !!reset_qkey_viols;
1051 		((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
1052 	}
1053 
1054 	err = mlx4_cmd(dev->dev, mailbox->dma, port, MLX4_SET_PORT_IB_OPCODE,
1055 		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
1056 		       MLX4_CMD_WRAPPED);
1057 
1058 	mlx4_free_cmd_mailbox(dev->dev, mailbox);
1059 	return err;
1060 }
1061 
1062 static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
1063 			       struct ib_port_modify *props)
1064 {
1065 	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
1066 	u8 is_eth = mdev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
1067 	struct ib_port_attr attr;
1068 	u32 cap_mask;
1069 	int err;
1070 
1071 	/* return OK if this is RoCE. CM calls ib_modify_port() regardless
1072 	 * of whether port link layer is ETH or IB. For ETH ports, qkey
1073 	 * violations and port capabilities are not meaningful.
1074 	 */
1075 	if (is_eth)
1076 		return 0;
1077 
1078 	mutex_lock(&mdev->cap_mask_mutex);
1079 
1080 	err = ib_query_port(ibdev, port, &attr);
1081 	if (err)
1082 		goto out;
1083 
1084 	cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
1085 		~props->clr_port_cap_mask;
1086 
1087 	err = mlx4_ib_SET_PORT(mdev, port,
1088 			       !!(mask & IB_PORT_RESET_QKEY_CNTR),
1089 			       cap_mask);
1090 
1091 out:
1092 	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
1093 	return err;
1094 }
1095 
1096 static int mlx4_ib_alloc_ucontext(struct ib_ucontext *uctx,
1097 				  struct ib_udata *udata)
1098 {
1099 	struct ib_device *ibdev = uctx->device;
1100 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
1101 	struct mlx4_ib_ucontext *context = to_mucontext(uctx);
1102 	struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
1103 	struct mlx4_ib_alloc_ucontext_resp resp;
1104 	int err;
1105 
1106 	if (!dev->ib_active)
1107 		return -EAGAIN;
1108 
1109 	if (ibdev->ops.uverbs_abi_ver ==
1110 	    MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
1111 		resp_v3.qp_tab_size      = dev->dev->caps.num_qps;
1112 		resp_v3.bf_reg_size      = dev->dev->caps.bf_reg_size;
1113 		resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
1114 	} else {
1115 		resp.dev_caps	      = dev->dev->caps.userspace_caps;
1116 		resp.qp_tab_size      = dev->dev->caps.num_qps;
1117 		resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
1118 		resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
1119 		resp.cqe_size	      = dev->dev->caps.cqe_size;
1120 	}
1121 
1122 	err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
1123 	if (err)
1124 		return err;
1125 
1126 	INIT_LIST_HEAD(&context->db_page_list);
1127 	mutex_init(&context->db_page_mutex);
1128 
1129 	INIT_LIST_HEAD(&context->wqn_ranges_list);
1130 	mutex_init(&context->wqn_ranges_mutex);
1131 
1132 	if (ibdev->ops.uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
1133 		err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
1134 	else
1135 		err = ib_copy_to_udata(udata, &resp, sizeof(resp));
1136 
1137 	if (err) {
1138 		mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
1139 		return -EFAULT;
1140 	}
1141 
1142 	return err;
1143 }
1144 
1145 static void mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
1146 {
1147 	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
1148 
1149 	mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
1150 }
1151 
1152 static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
1153 {
1154 }
1155 
1156 static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
1157 {
1158 	struct mlx4_ib_dev *dev = to_mdev(context->device);
1159 
1160 	switch (vma->vm_pgoff) {
1161 	case 0:
1162 		return rdma_user_mmap_io(context, vma,
1163 					 to_mucontext(context)->uar.pfn,
1164 					 PAGE_SIZE,
1165 					 pgprot_noncached(vma->vm_page_prot),
1166 					 NULL);
1167 
1168 	case 1:
1169 		if (dev->dev->caps.bf_reg_size == 0)
1170 			return -EINVAL;
1171 		return rdma_user_mmap_io(
1172 			context, vma,
1173 			to_mucontext(context)->uar.pfn +
1174 				dev->dev->caps.num_uars,
1175 			PAGE_SIZE, pgprot_writecombine(vma->vm_page_prot),
1176 			NULL);
1177 
1178 	case 3: {
1179 		struct mlx4_clock_params params;
1180 		int ret;
1181 
1182 		ret = mlx4_get_internal_clock_params(dev->dev, &params);
1183 		if (ret)
1184 			return ret;
1185 
1186 		return rdma_user_mmap_io(
1187 			context, vma,
1188 			(pci_resource_start(dev->dev->persist->pdev,
1189 					    params.bar) +
1190 			 params.offset) >>
1191 				PAGE_SHIFT,
1192 			PAGE_SIZE, pgprot_noncached(vma->vm_page_prot),
1193 			NULL);
1194 	}
1195 
1196 	default:
1197 		return -EINVAL;
1198 	}
1199 }
1200 
1201 static int mlx4_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
1202 {
1203 	struct mlx4_ib_pd *pd = to_mpd(ibpd);
1204 	struct ib_device *ibdev = ibpd->device;
1205 	int err;
1206 
1207 	err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
1208 	if (err)
1209 		return err;
1210 
1211 	if (udata && ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) {
1212 		mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
1213 		return -EFAULT;
1214 	}
1215 	return 0;
1216 }
1217 
1218 static int mlx4_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
1219 {
1220 	mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
1221 	return 0;
1222 }
1223 
1224 static int mlx4_ib_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
1225 {
1226 	struct mlx4_ib_dev *dev = to_mdev(ibxrcd->device);
1227 	struct mlx4_ib_xrcd *xrcd = to_mxrcd(ibxrcd);
1228 	struct ib_cq_init_attr cq_attr = {};
1229 	int err;
1230 
1231 	if (!(dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
1232 		return -EOPNOTSUPP;
1233 
1234 	err = mlx4_xrcd_alloc(dev->dev, &xrcd->xrcdn);
1235 	if (err)
1236 		return err;
1237 
1238 	xrcd->pd = ib_alloc_pd(ibxrcd->device, 0);
1239 	if (IS_ERR(xrcd->pd)) {
1240 		err = PTR_ERR(xrcd->pd);
1241 		goto err2;
1242 	}
1243 
1244 	cq_attr.cqe = 1;
1245 	xrcd->cq = ib_create_cq(ibxrcd->device, NULL, NULL, xrcd, &cq_attr);
1246 	if (IS_ERR(xrcd->cq)) {
1247 		err = PTR_ERR(xrcd->cq);
1248 		goto err3;
1249 	}
1250 
1251 	return 0;
1252 
1253 err3:
1254 	ib_dealloc_pd(xrcd->pd);
1255 err2:
1256 	mlx4_xrcd_free(dev->dev, xrcd->xrcdn);
1257 	return err;
1258 }
1259 
1260 static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
1261 {
1262 	ib_destroy_cq(to_mxrcd(xrcd)->cq);
1263 	ib_dealloc_pd(to_mxrcd(xrcd)->pd);
1264 	mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
1265 	return 0;
1266 }
1267 
1268 static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
1269 {
1270 	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1271 	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
1272 	struct mlx4_ib_gid_entry *ge;
1273 
1274 	ge = kzalloc(sizeof *ge, GFP_KERNEL);
1275 	if (!ge)
1276 		return -ENOMEM;
1277 
1278 	ge->gid = *gid;
1279 	if (mlx4_ib_add_mc(mdev, mqp, gid)) {
1280 		ge->port = mqp->port;
1281 		ge->added = 1;
1282 	}
1283 
1284 	mutex_lock(&mqp->mutex);
1285 	list_add_tail(&ge->list, &mqp->gid_list);
1286 	mutex_unlock(&mqp->mutex);
1287 
1288 	return 0;
1289 }
1290 
1291 static void mlx4_ib_delete_counters_table(struct mlx4_ib_dev *ibdev,
1292 					  struct mlx4_ib_counters *ctr_table)
1293 {
1294 	struct counter_index *counter, *tmp_count;
1295 
1296 	mutex_lock(&ctr_table->mutex);
1297 	list_for_each_entry_safe(counter, tmp_count, &ctr_table->counters_list,
1298 				 list) {
1299 		if (counter->allocated)
1300 			mlx4_counter_free(ibdev->dev, counter->index);
1301 		list_del(&counter->list);
1302 		kfree(counter);
1303 	}
1304 	mutex_unlock(&ctr_table->mutex);
1305 }
1306 
1307 int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
1308 		   union ib_gid *gid)
1309 {
1310 	struct net_device *ndev;
1311 	int ret = 0;
1312 
1313 	if (!mqp->port)
1314 		return 0;
1315 
1316 	spin_lock_bh(&mdev->iboe.lock);
1317 	ndev = mdev->iboe.netdevs[mqp->port - 1];
1318 	if (ndev)
1319 		dev_hold(ndev);
1320 	spin_unlock_bh(&mdev->iboe.lock);
1321 
1322 	if (ndev) {
1323 		ret = 1;
1324 		dev_put(ndev);
1325 	}
1326 
1327 	return ret;
1328 }
1329 
1330 struct mlx4_ib_steering {
1331 	struct list_head list;
1332 	struct mlx4_flow_reg_id reg_id;
1333 	union ib_gid gid;
1334 };
1335 
1336 #define LAST_ETH_FIELD vlan_tag
1337 #define LAST_IB_FIELD sl
1338 #define LAST_IPV4_FIELD dst_ip
1339 #define LAST_TCP_UDP_FIELD src_port
1340 
1341 /* Field is the last supported field */
1342 #define FIELDS_NOT_SUPPORTED(filter, field)\
1343 	memchr_inv((void *)&filter.field  +\
1344 		   sizeof(filter.field), 0,\
1345 		   sizeof(filter) -\
1346 		   offsetof(typeof(filter), field) -\
1347 		   sizeof(filter.field))
1348 
1349 static int parse_flow_attr(struct mlx4_dev *dev,
1350 			   u32 qp_num,
1351 			   union ib_flow_spec *ib_spec,
1352 			   struct _rule_hw *mlx4_spec)
1353 {
1354 	enum mlx4_net_trans_rule_id type;
1355 
1356 	switch (ib_spec->type) {
1357 	case IB_FLOW_SPEC_ETH:
1358 		if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
1359 			return -ENOTSUPP;
1360 
1361 		type = MLX4_NET_TRANS_RULE_ID_ETH;
1362 		memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
1363 		       ETH_ALEN);
1364 		memcpy(mlx4_spec->eth.dst_mac_msk, ib_spec->eth.mask.dst_mac,
1365 		       ETH_ALEN);
1366 		mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
1367 		mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
1368 		break;
1369 	case IB_FLOW_SPEC_IB:
1370 		if (FIELDS_NOT_SUPPORTED(ib_spec->ib.mask, LAST_IB_FIELD))
1371 			return -ENOTSUPP;
1372 
1373 		type = MLX4_NET_TRANS_RULE_ID_IB;
1374 		mlx4_spec->ib.l3_qpn =
1375 			cpu_to_be32(qp_num);
1376 		mlx4_spec->ib.qpn_mask =
1377 			cpu_to_be32(MLX4_IB_FLOW_QPN_MASK);
1378 		break;
1379 
1380 
1381 	case IB_FLOW_SPEC_IPV4:
1382 		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
1383 			return -ENOTSUPP;
1384 
1385 		type = MLX4_NET_TRANS_RULE_ID_IPV4;
1386 		mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
1387 		mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
1388 		mlx4_spec->ipv4.dst_ip = ib_spec->ipv4.val.dst_ip;
1389 		mlx4_spec->ipv4.dst_ip_msk = ib_spec->ipv4.mask.dst_ip;
1390 		break;
1391 
1392 	case IB_FLOW_SPEC_TCP:
1393 	case IB_FLOW_SPEC_UDP:
1394 		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, LAST_TCP_UDP_FIELD))
1395 			return -ENOTSUPP;
1396 
1397 		type = ib_spec->type == IB_FLOW_SPEC_TCP ?
1398 					MLX4_NET_TRANS_RULE_ID_TCP :
1399 					MLX4_NET_TRANS_RULE_ID_UDP;
1400 		mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port;
1401 		mlx4_spec->tcp_udp.dst_port_msk = ib_spec->tcp_udp.mask.dst_port;
1402 		mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port;
1403 		mlx4_spec->tcp_udp.src_port_msk = ib_spec->tcp_udp.mask.src_port;
1404 		break;
1405 
1406 	default:
1407 		return -EINVAL;
1408 	}
1409 	if (mlx4_map_sw_to_hw_steering_id(dev, type) < 0 ||
1410 	    mlx4_hw_rule_sz(dev, type) < 0)
1411 		return -EINVAL;
1412 	mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type));
1413 	mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2;
1414 	return mlx4_hw_rule_sz(dev, type);
1415 }
1416 
1417 struct default_rules {
1418 	__u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
1419 	__u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
1420 	__u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS];
1421 	__u8  link_layer;
1422 };
1423 static const struct default_rules default_table[] = {
1424 	{
1425 		.mandatory_fields = {IB_FLOW_SPEC_IPV4},
1426 		.mandatory_not_fields = {IB_FLOW_SPEC_ETH},
1427 		.rules_create_list = {IB_FLOW_SPEC_IB},
1428 		.link_layer = IB_LINK_LAYER_INFINIBAND
1429 	}
1430 };
1431 
1432 static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
1433 					 struct ib_flow_attr *flow_attr)
1434 {
1435 	int i, j, k;
1436 	void *ib_flow;
1437 	const struct default_rules *pdefault_rules = default_table;
1438 	u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);
1439 
1440 	for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) {
1441 		__u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];
1442 		memset(&field_types, 0, sizeof(field_types));
1443 
1444 		if (link_layer != pdefault_rules->link_layer)
1445 			continue;
1446 
1447 		ib_flow = flow_attr + 1;
1448 		/* we assume the specs are sorted */
1449 		for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS &&
1450 		     j < flow_attr->num_of_specs; k++) {
1451 			union ib_flow_spec *current_flow =
1452 				(union ib_flow_spec *)ib_flow;
1453 
1454 			/* same layer but different type */
1455 			if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) ==
1456 			     (pdefault_rules->mandatory_fields[k] &
1457 			      IB_FLOW_SPEC_LAYER_MASK)) &&
1458 			    (current_flow->type !=
1459 			     pdefault_rules->mandatory_fields[k]))
1460 				goto out;
1461 
1462 			/* same layer, try match next one */
1463 			if (current_flow->type ==
1464 			    pdefault_rules->mandatory_fields[k]) {
1465 				j++;
1466 				ib_flow +=
1467 					((union ib_flow_spec *)ib_flow)->size;
1468 			}
1469 		}
1470 
1471 		ib_flow = flow_attr + 1;
1472 		for (j = 0; j < flow_attr->num_of_specs;
1473 		     j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size)
1474 			for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++)
1475 				/* same layer and same type */
1476 				if (((union ib_flow_spec *)ib_flow)->type ==
1477 				    pdefault_rules->mandatory_not_fields[k])
1478 					goto out;
1479 
1480 		return i;
1481 	}
1482 out:
1483 	return -1;
1484 }
1485 
1486 static int __mlx4_ib_create_default_rules(
1487 		struct mlx4_ib_dev *mdev,
1488 		struct ib_qp *qp,
1489 		const struct default_rules *pdefault_rules,
1490 		struct _rule_hw *mlx4_spec) {
1491 	int size = 0;
1492 	int i;
1493 
1494 	for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
1495 		union ib_flow_spec ib_spec = {};
1496 		int ret;
1497 
1498 		switch (pdefault_rules->rules_create_list[i]) {
1499 		case 0:
1500 			/* no rule */
1501 			continue;
1502 		case IB_FLOW_SPEC_IB:
1503 			ib_spec.type = IB_FLOW_SPEC_IB;
1504 			ib_spec.size = sizeof(struct ib_flow_spec_ib);
1505 
1506 			break;
1507 		default:
1508 			/* invalid rule */
1509 			return -EINVAL;
1510 		}
1511 		/* We must put empty rule, qpn is being ignored */
1512 		ret = parse_flow_attr(mdev->dev, 0, &ib_spec,
1513 				      mlx4_spec);
1514 		if (ret < 0) {
1515 			pr_info("invalid parsing\n");
1516 			return -EINVAL;
1517 		}
1518 
1519 		mlx4_spec = (void *)mlx4_spec + ret;
1520 		size += ret;
1521 	}
1522 	return size;
1523 }
1524 
1525 static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
1526 			  int domain,
1527 			  enum mlx4_net_trans_promisc_mode flow_type,
1528 			  u64 *reg_id)
1529 {
1530 	int ret, i;
1531 	int size = 0;
1532 	void *ib_flow;
1533 	struct mlx4_ib_dev *mdev = to_mdev(qp->device);
1534 	struct mlx4_cmd_mailbox *mailbox;
1535 	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
1536 	int default_flow;
1537 
1538 	if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
1539 		pr_err("Invalid priority value %d\n", flow_attr->priority);
1540 		return -EINVAL;
1541 	}
1542 
1543 	if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
1544 		return -EINVAL;
1545 
1546 	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
1547 	if (IS_ERR(mailbox))
1548 		return PTR_ERR(mailbox);
1549 	ctrl = mailbox->buf;
1550 
1551 	ctrl->prio = cpu_to_be16(domain | flow_attr->priority);
1552 	ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
1553 	ctrl->port = flow_attr->port;
1554 	ctrl->qpn = cpu_to_be32(qp->qp_num);
1555 
1556 	ib_flow = flow_attr + 1;
1557 	size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
1558 	/* Add default flows */
1559 	default_flow = __mlx4_ib_default_rules_match(qp, flow_attr);
1560 	if (default_flow >= 0) {
1561 		ret = __mlx4_ib_create_default_rules(
1562 				mdev, qp, default_table + default_flow,
1563 				mailbox->buf + size);
1564 		if (ret < 0) {
1565 			mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1566 			return -EINVAL;
1567 		}
1568 		size += ret;
1569 	}
1570 	for (i = 0; i < flow_attr->num_of_specs; i++) {
1571 		ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow,
1572 				      mailbox->buf + size);
1573 		if (ret < 0) {
1574 			mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1575 			return -EINVAL;
1576 		}
1577 		ib_flow += ((union ib_flow_spec *) ib_flow)->size;
1578 		size += ret;
1579 	}
1580 
1581 	if (mlx4_is_master(mdev->dev) && flow_type == MLX4_FS_REGULAR &&
1582 	    flow_attr->num_of_specs == 1) {
1583 		struct _rule_hw *rule_header = (struct _rule_hw *)(ctrl + 1);
1584 		enum ib_flow_spec_type header_spec =
1585 			((union ib_flow_spec *)(flow_attr + 1))->type;
1586 
1587 		if (header_spec == IB_FLOW_SPEC_ETH)
1588 			mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
1589 	}
1590 
1591 	ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
1592 			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
1593 			   MLX4_CMD_NATIVE);
1594 	if (ret == -ENOMEM)
1595 		pr_err("mcg table is full. Fail to register network rule.\n");
1596 	else if (ret == -ENXIO)
1597 		pr_err("Device managed flow steering is disabled. Fail to register network rule.\n");
1598 	else if (ret)
1599 		pr_err("Invalid argument. Fail to register network rule.\n");
1600 
1601 	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1602 	return ret;
1603 }
1604 
1605 static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
1606 {
1607 	int err;
1608 	err = mlx4_cmd(dev, reg_id, 0, 0,
1609 		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
1610 		       MLX4_CMD_NATIVE);
1611 	if (err)
1612 		pr_err("Fail to detach network rule. registration id = 0x%llx\n",
1613 		       reg_id);
1614 	return err;
1615 }
1616 
1617 static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
1618 				    u64 *reg_id)
1619 {
1620 	void *ib_flow;
1621 	union ib_flow_spec *ib_spec;
1622 	struct mlx4_dev	*dev = to_mdev(qp->device)->dev;
1623 	int err = 0;
1624 
1625 	if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
1626 	    dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
1627 		return 0; /* do nothing */
1628 
1629 	ib_flow = flow_attr + 1;
1630 	ib_spec = (union ib_flow_spec *)ib_flow;
1631 
1632 	if (ib_spec->type !=  IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
1633 		return 0; /* do nothing */
1634 
1635 	err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
1636 				    flow_attr->port, qp->qp_num,
1637 				    MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
1638 				    reg_id);
1639 	return err;
1640 }
1641 
1642 static int mlx4_ib_add_dont_trap_rule(struct mlx4_dev *dev,
1643 				      struct ib_flow_attr *flow_attr,
1644 				      enum mlx4_net_trans_promisc_mode *type)
1645 {
1646 	int err = 0;
1647 
1648 	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER) ||
1649 	    (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) ||
1650 	    (flow_attr->num_of_specs > 1) || (flow_attr->priority != 0)) {
1651 		return -EOPNOTSUPP;
1652 	}
1653 
1654 	if (flow_attr->num_of_specs == 0) {
1655 		type[0] = MLX4_FS_MC_SNIFFER;
1656 		type[1] = MLX4_FS_UC_SNIFFER;
1657 	} else {
1658 		union ib_flow_spec *ib_spec;
1659 
1660 		ib_spec = (union ib_flow_spec *)(flow_attr + 1);
1661 		if (ib_spec->type !=  IB_FLOW_SPEC_ETH)
1662 			return -EINVAL;
1663 
1664 		/* if all is zero than MC and UC */
1665 		if (is_zero_ether_addr(ib_spec->eth.mask.dst_mac)) {
1666 			type[0] = MLX4_FS_MC_SNIFFER;
1667 			type[1] = MLX4_FS_UC_SNIFFER;
1668 		} else {
1669 			u8 mac[ETH_ALEN] = {ib_spec->eth.mask.dst_mac[0] ^ 0x01,
1670 					    ib_spec->eth.mask.dst_mac[1],
1671 					    ib_spec->eth.mask.dst_mac[2],
1672 					    ib_spec->eth.mask.dst_mac[3],
1673 					    ib_spec->eth.mask.dst_mac[4],
1674 					    ib_spec->eth.mask.dst_mac[5]};
1675 
1676 			/* Above xor was only on MC bit, non empty mask is valid
1677 			 * only if this bit is set and rest are zero.
1678 			 */
1679 			if (!is_zero_ether_addr(&mac[0]))
1680 				return -EINVAL;
1681 
1682 			if (is_multicast_ether_addr(ib_spec->eth.val.dst_mac))
1683 				type[0] = MLX4_FS_MC_SNIFFER;
1684 			else
1685 				type[0] = MLX4_FS_UC_SNIFFER;
1686 		}
1687 	}
1688 
1689 	return err;
1690 }
1691 
1692 static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
1693 					   struct ib_flow_attr *flow_attr,
1694 					   struct ib_udata *udata)
1695 {
1696 	int err = 0, i = 0, j = 0;
1697 	struct mlx4_ib_flow *mflow;
1698 	enum mlx4_net_trans_promisc_mode type[2];
1699 	struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
1700 	int is_bonded = mlx4_is_bonded(dev);
1701 
1702 	if (!rdma_is_port_valid(qp->device, flow_attr->port))
1703 		return ERR_PTR(-EINVAL);
1704 
1705 	if (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP)
1706 		return ERR_PTR(-EOPNOTSUPP);
1707 
1708 	if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
1709 	    (flow_attr->type != IB_FLOW_ATTR_NORMAL))
1710 		return ERR_PTR(-EOPNOTSUPP);
1711 
1712 	if (udata &&
1713 	    udata->inlen && !ib_is_udata_cleared(udata, 0, udata->inlen))
1714 		return ERR_PTR(-EOPNOTSUPP);
1715 
1716 	memset(type, 0, sizeof(type));
1717 
1718 	mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
1719 	if (!mflow) {
1720 		err = -ENOMEM;
1721 		goto err_free;
1722 	}
1723 
1724 	switch (flow_attr->type) {
1725 	case IB_FLOW_ATTR_NORMAL:
1726 		/* If dont trap flag (continue match) is set, under specific
1727 		 * condition traffic be replicated to given qp,
1728 		 * without stealing it
1729 		 */
1730 		if (unlikely(flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)) {
1731 			err = mlx4_ib_add_dont_trap_rule(dev,
1732 							 flow_attr,
1733 							 type);
1734 			if (err)
1735 				goto err_free;
1736 		} else {
1737 			type[0] = MLX4_FS_REGULAR;
1738 		}
1739 		break;
1740 
1741 	case IB_FLOW_ATTR_ALL_DEFAULT:
1742 		type[0] = MLX4_FS_ALL_DEFAULT;
1743 		break;
1744 
1745 	case IB_FLOW_ATTR_MC_DEFAULT:
1746 		type[0] = MLX4_FS_MC_DEFAULT;
1747 		break;
1748 
1749 	case IB_FLOW_ATTR_SNIFFER:
1750 		type[0] = MLX4_FS_MIRROR_RX_PORT;
1751 		type[1] = MLX4_FS_MIRROR_SX_PORT;
1752 		break;
1753 
1754 	default:
1755 		err = -EINVAL;
1756 		goto err_free;
1757 	}
1758 
1759 	while (i < ARRAY_SIZE(type) && type[i]) {
1760 		err = __mlx4_ib_create_flow(qp, flow_attr, MLX4_DOMAIN_UVERBS,
1761 					    type[i], &mflow->reg_id[i].id);
1762 		if (err)
1763 			goto err_create_flow;
1764 		if (is_bonded) {
1765 			/* Application always sees one port so the mirror rule
1766 			 * must be on port #2
1767 			 */
1768 			flow_attr->port = 2;
1769 			err = __mlx4_ib_create_flow(qp, flow_attr,
1770 						    MLX4_DOMAIN_UVERBS, type[j],
1771 						    &mflow->reg_id[j].mirror);
1772 			flow_attr->port = 1;
1773 			if (err)
1774 				goto err_create_flow;
1775 			j++;
1776 		}
1777 
1778 		i++;
1779 	}
1780 
1781 	if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
1782 		err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
1783 					       &mflow->reg_id[i].id);
1784 		if (err)
1785 			goto err_create_flow;
1786 
1787 		if (is_bonded) {
1788 			flow_attr->port = 2;
1789 			err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
1790 						       &mflow->reg_id[j].mirror);
1791 			flow_attr->port = 1;
1792 			if (err)
1793 				goto err_create_flow;
1794 			j++;
1795 		}
1796 		/* function to create mirror rule */
1797 		i++;
1798 	}
1799 
1800 	return &mflow->ibflow;
1801 
1802 err_create_flow:
1803 	while (i) {
1804 		(void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
1805 					     mflow->reg_id[i].id);
1806 		i--;
1807 	}
1808 
1809 	while (j) {
1810 		(void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
1811 					     mflow->reg_id[j].mirror);
1812 		j--;
1813 	}
1814 err_free:
1815 	kfree(mflow);
1816 	return ERR_PTR(err);
1817 }
1818 
1819 static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
1820 {
1821 	int err, ret = 0;
1822 	int i = 0;
1823 	struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
1824 	struct mlx4_ib_flow *mflow = to_mflow(flow_id);
1825 
1826 	while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) {
1827 		err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id);
1828 		if (err)
1829 			ret = err;
1830 		if (mflow->reg_id[i].mirror) {
1831 			err = __mlx4_ib_destroy_flow(mdev->dev,
1832 						     mflow->reg_id[i].mirror);
1833 			if (err)
1834 				ret = err;
1835 		}
1836 		i++;
1837 	}
1838 
1839 	kfree(mflow);
1840 	return ret;
1841 }
1842 
1843 static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1844 {
1845 	int err;
1846 	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
1847 	struct mlx4_dev	*dev = mdev->dev;
1848 	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1849 	struct mlx4_ib_steering *ib_steering = NULL;
1850 	enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
1851 	struct mlx4_flow_reg_id	reg_id;
1852 
1853 	if (mdev->dev->caps.steering_mode ==
1854 	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
1855 		ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
1856 		if (!ib_steering)
1857 			return -ENOMEM;
1858 	}
1859 
1860 	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
1861 				    !!(mqp->flags &
1862 				       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
1863 				    prot, &reg_id.id);
1864 	if (err) {
1865 		pr_err("multicast attach op failed, err %d\n", err);
1866 		goto err_malloc;
1867 	}
1868 
1869 	reg_id.mirror = 0;
1870 	if (mlx4_is_bonded(dev)) {
1871 		err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
1872 					    (mqp->port == 1) ? 2 : 1,
1873 					    !!(mqp->flags &
1874 					    MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
1875 					    prot, &reg_id.mirror);
1876 		if (err)
1877 			goto err_add;
1878 	}
1879 
1880 	err = add_gid_entry(ibqp, gid);
1881 	if (err)
1882 		goto err_add;
1883 
1884 	if (ib_steering) {
1885 		memcpy(ib_steering->gid.raw, gid->raw, 16);
1886 		ib_steering->reg_id = reg_id;
1887 		mutex_lock(&mqp->mutex);
1888 		list_add(&ib_steering->list, &mqp->steering_rules);
1889 		mutex_unlock(&mqp->mutex);
1890 	}
1891 	return 0;
1892 
1893 err_add:
1894 	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1895 			      prot, reg_id.id);
1896 	if (reg_id.mirror)
1897 		mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1898 				      prot, reg_id.mirror);
1899 err_malloc:
1900 	kfree(ib_steering);
1901 
1902 	return err;
1903 }
1904 
1905 static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
1906 {
1907 	struct mlx4_ib_gid_entry *ge;
1908 	struct mlx4_ib_gid_entry *tmp;
1909 	struct mlx4_ib_gid_entry *ret = NULL;
1910 
1911 	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
1912 		if (!memcmp(raw, ge->gid.raw, 16)) {
1913 			ret = ge;
1914 			break;
1915 		}
1916 	}
1917 
1918 	return ret;
1919 }
1920 
1921 static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1922 {
1923 	int err;
1924 	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
1925 	struct mlx4_dev *dev = mdev->dev;
1926 	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1927 	struct net_device *ndev;
1928 	struct mlx4_ib_gid_entry *ge;
1929 	struct mlx4_flow_reg_id reg_id = {0, 0};
1930 	enum mlx4_protocol prot =  MLX4_PROT_IB_IPV6;
1931 
1932 	if (mdev->dev->caps.steering_mode ==
1933 	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
1934 		struct mlx4_ib_steering *ib_steering;
1935 
1936 		mutex_lock(&mqp->mutex);
1937 		list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
1938 			if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
1939 				list_del(&ib_steering->list);
1940 				break;
1941 			}
1942 		}
1943 		mutex_unlock(&mqp->mutex);
1944 		if (&ib_steering->list == &mqp->steering_rules) {
1945 			pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
1946 			return -EINVAL;
1947 		}
1948 		reg_id = ib_steering->reg_id;
1949 		kfree(ib_steering);
1950 	}
1951 
1952 	err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1953 				    prot, reg_id.id);
1954 	if (err)
1955 		return err;
1956 
1957 	if (mlx4_is_bonded(dev)) {
1958 		err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1959 					    prot, reg_id.mirror);
1960 		if (err)
1961 			return err;
1962 	}
1963 
1964 	mutex_lock(&mqp->mutex);
1965 	ge = find_gid_entry(mqp, gid->raw);
1966 	if (ge) {
1967 		spin_lock_bh(&mdev->iboe.lock);
1968 		ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
1969 		if (ndev)
1970 			dev_hold(ndev);
1971 		spin_unlock_bh(&mdev->iboe.lock);
1972 		if (ndev)
1973 			dev_put(ndev);
1974 		list_del(&ge->list);
1975 		kfree(ge);
1976 	} else
1977 		pr_warn("could not find mgid entry\n");
1978 
1979 	mutex_unlock(&mqp->mutex);
1980 
1981 	return 0;
1982 }
1983 
1984 static int init_node_data(struct mlx4_ib_dev *dev)
1985 {
1986 	struct ib_smp *in_mad  = NULL;
1987 	struct ib_smp *out_mad = NULL;
1988 	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
1989 	int err = -ENOMEM;
1990 
1991 	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
1992 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
1993 	if (!in_mad || !out_mad)
1994 		goto out;
1995 
1996 	init_query_mad(in_mad);
1997 	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
1998 	if (mlx4_is_master(dev->dev))
1999 		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
2000 
2001 	err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
2002 	if (err)
2003 		goto out;
2004 
2005 	memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
2006 
2007 	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
2008 
2009 	err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
2010 	if (err)
2011 		goto out;
2012 
2013 	dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
2014 	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
2015 
2016 out:
2017 	kfree(in_mad);
2018 	kfree(out_mad);
2019 	return err;
2020 }
2021 
2022 static ssize_t hca_type_show(struct device *device,
2023 			     struct device_attribute *attr, char *buf)
2024 {
2025 	struct mlx4_ib_dev *dev =
2026 		rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
2027 
2028 	return sysfs_emit(buf, "MT%d\n", dev->dev->persist->pdev->device);
2029 }
2030 static DEVICE_ATTR_RO(hca_type);
2031 
2032 static ssize_t hw_rev_show(struct device *device,
2033 			   struct device_attribute *attr, char *buf)
2034 {
2035 	struct mlx4_ib_dev *dev =
2036 		rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
2037 
2038 	return sysfs_emit(buf, "%x\n", dev->dev->rev_id);
2039 }
2040 static DEVICE_ATTR_RO(hw_rev);
2041 
2042 static ssize_t board_id_show(struct device *device,
2043 			     struct device_attribute *attr, char *buf)
2044 {
2045 	struct mlx4_ib_dev *dev =
2046 		rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
2047 
2048 	return sysfs_emit(buf, "%.*s\n", MLX4_BOARD_ID_LEN, dev->dev->board_id);
2049 }
2050 static DEVICE_ATTR_RO(board_id);
2051 
2052 static struct attribute *mlx4_class_attributes[] = {
2053 	&dev_attr_hw_rev.attr,
2054 	&dev_attr_hca_type.attr,
2055 	&dev_attr_board_id.attr,
2056 	NULL
2057 };
2058 
2059 static const struct attribute_group mlx4_attr_group = {
2060 	.attrs = mlx4_class_attributes,
2061 };
2062 
2063 struct diag_counter {
2064 	const char *name;
2065 	u32 offset;
2066 };
2067 
2068 #define DIAG_COUNTER(_name, _offset)			\
2069 	{ .name = #_name, .offset = _offset }
2070 
2071 static const struct diag_counter diag_basic[] = {
2072 	DIAG_COUNTER(rq_num_lle, 0x00),
2073 	DIAG_COUNTER(sq_num_lle, 0x04),
2074 	DIAG_COUNTER(rq_num_lqpoe, 0x08),
2075 	DIAG_COUNTER(sq_num_lqpoe, 0x0C),
2076 	DIAG_COUNTER(rq_num_lpe, 0x18),
2077 	DIAG_COUNTER(sq_num_lpe, 0x1C),
2078 	DIAG_COUNTER(rq_num_wrfe, 0x20),
2079 	DIAG_COUNTER(sq_num_wrfe, 0x24),
2080 	DIAG_COUNTER(sq_num_mwbe, 0x2C),
2081 	DIAG_COUNTER(sq_num_bre, 0x34),
2082 	DIAG_COUNTER(sq_num_rire, 0x44),
2083 	DIAG_COUNTER(rq_num_rire, 0x48),
2084 	DIAG_COUNTER(sq_num_rae, 0x4C),
2085 	DIAG_COUNTER(rq_num_rae, 0x50),
2086 	DIAG_COUNTER(sq_num_roe, 0x54),
2087 	DIAG_COUNTER(sq_num_tree, 0x5C),
2088 	DIAG_COUNTER(sq_num_rree, 0x64),
2089 	DIAG_COUNTER(rq_num_rnr, 0x68),
2090 	DIAG_COUNTER(sq_num_rnr, 0x6C),
2091 	DIAG_COUNTER(rq_num_oos, 0x100),
2092 	DIAG_COUNTER(sq_num_oos, 0x104),
2093 };
2094 
2095 static const struct diag_counter diag_ext[] = {
2096 	DIAG_COUNTER(rq_num_dup, 0x130),
2097 	DIAG_COUNTER(sq_num_to, 0x134),
2098 };
2099 
2100 static const struct diag_counter diag_device_only[] = {
2101 	DIAG_COUNTER(num_cqovf, 0x1A0),
2102 	DIAG_COUNTER(rq_num_udsdprd, 0x118),
2103 };
2104 
2105 static struct rdma_hw_stats *mlx4_ib_alloc_hw_stats(struct ib_device *ibdev,
2106 						    u8 port_num)
2107 {
2108 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
2109 	struct mlx4_ib_diag_counters *diag = dev->diag_counters;
2110 
2111 	if (!diag[!!port_num].name)
2112 		return NULL;
2113 
2114 	return rdma_alloc_hw_stats_struct(diag[!!port_num].name,
2115 					  diag[!!port_num].num_counters,
2116 					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
2117 }
2118 
2119 static int mlx4_ib_get_hw_stats(struct ib_device *ibdev,
2120 				struct rdma_hw_stats *stats,
2121 				u8 port, int index)
2122 {
2123 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
2124 	struct mlx4_ib_diag_counters *diag = dev->diag_counters;
2125 	u32 hw_value[ARRAY_SIZE(diag_device_only) +
2126 		ARRAY_SIZE(diag_ext) + ARRAY_SIZE(diag_basic)] = {};
2127 	int ret;
2128 	int i;
2129 
2130 	ret = mlx4_query_diag_counters(dev->dev,
2131 				       MLX4_OP_MOD_QUERY_TRANSPORT_CI_ERRORS,
2132 				       diag[!!port].offset, hw_value,
2133 				       diag[!!port].num_counters, port);
2134 
2135 	if (ret)
2136 		return ret;
2137 
2138 	for (i = 0; i < diag[!!port].num_counters; i++)
2139 		stats->value[i] = hw_value[i];
2140 
2141 	return diag[!!port].num_counters;
2142 }
2143 
2144 static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev,
2145 					 const char ***name,
2146 					 u32 **offset,
2147 					 u32 *num,
2148 					 bool port)
2149 {
2150 	u32 num_counters;
2151 
2152 	num_counters = ARRAY_SIZE(diag_basic);
2153 
2154 	if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT)
2155 		num_counters += ARRAY_SIZE(diag_ext);
2156 
2157 	if (!port)
2158 		num_counters += ARRAY_SIZE(diag_device_only);
2159 
2160 	*name = kcalloc(num_counters, sizeof(**name), GFP_KERNEL);
2161 	if (!*name)
2162 		return -ENOMEM;
2163 
2164 	*offset = kcalloc(num_counters, sizeof(**offset), GFP_KERNEL);
2165 	if (!*offset)
2166 		goto err_name;
2167 
2168 	*num = num_counters;
2169 
2170 	return 0;
2171 
2172 err_name:
2173 	kfree(*name);
2174 	return -ENOMEM;
2175 }
2176 
2177 static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev,
2178 				       const char **name,
2179 				       u32 *offset,
2180 				       bool port)
2181 {
2182 	int i;
2183 	int j;
2184 
2185 	for (i = 0, j = 0; i < ARRAY_SIZE(diag_basic); i++, j++) {
2186 		name[i] = diag_basic[i].name;
2187 		offset[i] = diag_basic[i].offset;
2188 	}
2189 
2190 	if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) {
2191 		for (i = 0; i < ARRAY_SIZE(diag_ext); i++, j++) {
2192 			name[j] = diag_ext[i].name;
2193 			offset[j] = diag_ext[i].offset;
2194 		}
2195 	}
2196 
2197 	if (!port) {
2198 		for (i = 0; i < ARRAY_SIZE(diag_device_only); i++, j++) {
2199 			name[j] = diag_device_only[i].name;
2200 			offset[j] = diag_device_only[i].offset;
2201 		}
2202 	}
2203 }
2204 
2205 static const struct ib_device_ops mlx4_ib_hw_stats_ops = {
2206 	.alloc_hw_stats = mlx4_ib_alloc_hw_stats,
2207 	.get_hw_stats = mlx4_ib_get_hw_stats,
2208 };
2209 
2210 static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
2211 {
2212 	struct mlx4_ib_diag_counters *diag = ibdev->diag_counters;
2213 	int i;
2214 	int ret;
2215 	bool per_port = !!(ibdev->dev->caps.flags2 &
2216 		MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT);
2217 
2218 	if (mlx4_is_slave(ibdev->dev))
2219 		return 0;
2220 
2221 	for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
2222 		/* i == 1 means we are building port counters */
2223 		if (i && !per_port)
2224 			continue;
2225 
2226 		ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].name,
2227 						    &diag[i].offset,
2228 						    &diag[i].num_counters, i);
2229 		if (ret)
2230 			goto err_alloc;
2231 
2232 		mlx4_ib_fill_diag_counters(ibdev, diag[i].name,
2233 					   diag[i].offset, i);
2234 	}
2235 
2236 	ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_hw_stats_ops);
2237 
2238 	return 0;
2239 
2240 err_alloc:
2241 	if (i) {
2242 		kfree(diag[i - 1].name);
2243 		kfree(diag[i - 1].offset);
2244 	}
2245 
2246 	return ret;
2247 }
2248 
2249 static void mlx4_ib_diag_cleanup(struct mlx4_ib_dev *ibdev)
2250 {
2251 	int i;
2252 
2253 	for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
2254 		kfree(ibdev->diag_counters[i].offset);
2255 		kfree(ibdev->diag_counters[i].name);
2256 	}
2257 }
2258 
2259 #define MLX4_IB_INVALID_MAC	((u64)-1)
2260 static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
2261 			       struct net_device *dev,
2262 			       int port)
2263 {
2264 	u64 new_smac = 0;
2265 	u64 release_mac = MLX4_IB_INVALID_MAC;
2266 	struct mlx4_ib_qp *qp;
2267 
2268 	new_smac = mlx4_mac_to_u64(dev->dev_addr);
2269 	atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
2270 
2271 	/* no need for update QP1 and mac registration in non-SRIOV */
2272 	if (!mlx4_is_mfunc(ibdev->dev))
2273 		return;
2274 
2275 	mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
2276 	qp = ibdev->qp1_proxy[port - 1];
2277 	if (qp) {
2278 		int new_smac_index;
2279 		u64 old_smac;
2280 		struct mlx4_update_qp_params update_params;
2281 
2282 		mutex_lock(&qp->mutex);
2283 		old_smac = qp->pri.smac;
2284 		if (new_smac == old_smac)
2285 			goto unlock;
2286 
2287 		new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
2288 
2289 		if (new_smac_index < 0)
2290 			goto unlock;
2291 
2292 		update_params.smac_index = new_smac_index;
2293 		if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
2294 				   &update_params)) {
2295 			release_mac = new_smac;
2296 			goto unlock;
2297 		}
2298 		/* if old port was zero, no mac was yet registered for this QP */
2299 		if (qp->pri.smac_port)
2300 			release_mac = old_smac;
2301 		qp->pri.smac = new_smac;
2302 		qp->pri.smac_port = port;
2303 		qp->pri.smac_index = new_smac_index;
2304 	}
2305 
2306 unlock:
2307 	if (release_mac != MLX4_IB_INVALID_MAC)
2308 		mlx4_unregister_mac(ibdev->dev, port, release_mac);
2309 	if (qp)
2310 		mutex_unlock(&qp->mutex);
2311 	mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
2312 }
2313 
2314 static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
2315 				 struct net_device *dev,
2316 				 unsigned long event)
2317 
2318 {
2319 	struct mlx4_ib_iboe *iboe;
2320 	int update_qps_port = -1;
2321 	int port;
2322 
2323 	ASSERT_RTNL();
2324 
2325 	iboe = &ibdev->iboe;
2326 
2327 	spin_lock_bh(&iboe->lock);
2328 	mlx4_foreach_ib_transport_port(port, ibdev->dev) {
2329 
2330 		iboe->netdevs[port - 1] =
2331 			mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
2332 
2333 		if (dev == iboe->netdevs[port - 1] &&
2334 		    (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
2335 		     event == NETDEV_UP || event == NETDEV_CHANGE))
2336 			update_qps_port = port;
2337 
2338 		if (dev == iboe->netdevs[port - 1] &&
2339 		    (event == NETDEV_UP || event == NETDEV_DOWN)) {
2340 			enum ib_port_state port_state;
2341 			struct ib_event ibev = { };
2342 
2343 			if (ib_get_cached_port_state(&ibdev->ib_dev, port,
2344 						     &port_state))
2345 				continue;
2346 
2347 			if (event == NETDEV_UP &&
2348 			    (port_state != IB_PORT_ACTIVE ||
2349 			     iboe->last_port_state[port - 1] != IB_PORT_DOWN))
2350 				continue;
2351 			if (event == NETDEV_DOWN &&
2352 			    (port_state != IB_PORT_DOWN ||
2353 			     iboe->last_port_state[port - 1] != IB_PORT_ACTIVE))
2354 				continue;
2355 			iboe->last_port_state[port - 1] = port_state;
2356 
2357 			ibev.device = &ibdev->ib_dev;
2358 			ibev.element.port_num = port;
2359 			ibev.event = event == NETDEV_UP ? IB_EVENT_PORT_ACTIVE :
2360 							  IB_EVENT_PORT_ERR;
2361 			ib_dispatch_event(&ibev);
2362 		}
2363 
2364 	}
2365 	spin_unlock_bh(&iboe->lock);
2366 
2367 	if (update_qps_port > 0)
2368 		mlx4_ib_update_qps(ibdev, dev, update_qps_port);
2369 }
2370 
2371 static int mlx4_ib_netdev_event(struct notifier_block *this,
2372 				unsigned long event, void *ptr)
2373 {
2374 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2375 	struct mlx4_ib_dev *ibdev;
2376 
2377 	if (!net_eq(dev_net(dev), &init_net))
2378 		return NOTIFY_DONE;
2379 
2380 	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
2381 	mlx4_ib_scan_netdevs(ibdev, dev, event);
2382 
2383 	return NOTIFY_DONE;
2384 }
2385 
2386 static void init_pkeys(struct mlx4_ib_dev *ibdev)
2387 {
2388 	int port;
2389 	int slave;
2390 	int i;
2391 
2392 	if (mlx4_is_master(ibdev->dev)) {
2393 		for (slave = 0; slave <= ibdev->dev->persist->num_vfs;
2394 		     ++slave) {
2395 			for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
2396 				for (i = 0;
2397 				     i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
2398 				     ++i) {
2399 					ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
2400 					/* master has the identity virt2phys pkey mapping */
2401 						(slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
2402 							ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
2403 					mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
2404 							     ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
2405 				}
2406 			}
2407 		}
2408 		/* initialize pkey cache */
2409 		for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
2410 			for (i = 0;
2411 			     i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
2412 			     ++i)
2413 				ibdev->pkeys.phys_pkey_cache[port-1][i] =
2414 					(i) ? 0 : 0xFFFF;
2415 		}
2416 	}
2417 }
2418 
2419 static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
2420 {
2421 	int i, j, eq = 0, total_eqs = 0;
2422 
2423 	ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors,
2424 				  sizeof(ibdev->eq_table[0]), GFP_KERNEL);
2425 	if (!ibdev->eq_table)
2426 		return;
2427 
2428 	for (i = 1; i <= dev->caps.num_ports; i++) {
2429 		for (j = 0; j < mlx4_get_eqs_per_port(dev, i);
2430 		     j++, total_eqs++) {
2431 			if (i > 1 &&  mlx4_is_eq_shared(dev, total_eqs))
2432 				continue;
2433 			ibdev->eq_table[eq] = total_eqs;
2434 			if (!mlx4_assign_eq(dev, i,
2435 					    &ibdev->eq_table[eq]))
2436 				eq++;
2437 			else
2438 				ibdev->eq_table[eq] = -1;
2439 		}
2440 	}
2441 
2442 	for (i = eq; i < dev->caps.num_comp_vectors;
2443 	     ibdev->eq_table[i++] = -1)
2444 		;
2445 
2446 	/* Advertise the new number of EQs to clients */
2447 	ibdev->ib_dev.num_comp_vectors = eq;
2448 }
2449 
2450 static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
2451 {
2452 	int i;
2453 	int total_eqs = ibdev->ib_dev.num_comp_vectors;
2454 
2455 	/* no eqs were allocated */
2456 	if (!ibdev->eq_table)
2457 		return;
2458 
2459 	/* Reset the advertised EQ number */
2460 	ibdev->ib_dev.num_comp_vectors = 0;
2461 
2462 	for (i = 0; i < total_eqs; i++)
2463 		mlx4_release_eq(dev, ibdev->eq_table[i]);
2464 
2465 	kfree(ibdev->eq_table);
2466 	ibdev->eq_table = NULL;
2467 }
2468 
2469 static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
2470 			       struct ib_port_immutable *immutable)
2471 {
2472 	struct ib_port_attr attr;
2473 	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
2474 	int err;
2475 
2476 	if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND) {
2477 		immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
2478 		immutable->max_mad_size = IB_MGMT_MAD_SIZE;
2479 	} else {
2480 		if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)
2481 			immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
2482 		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
2483 			immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
2484 				RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
2485 		immutable->core_cap_flags |= RDMA_CORE_PORT_RAW_PACKET;
2486 		if (immutable->core_cap_flags & (RDMA_CORE_PORT_IBA_ROCE |
2487 		    RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP))
2488 			immutable->max_mad_size = IB_MGMT_MAD_SIZE;
2489 	}
2490 
2491 	err = ib_query_port(ibdev, port_num, &attr);
2492 	if (err)
2493 		return err;
2494 
2495 	immutable->pkey_tbl_len = attr.pkey_tbl_len;
2496 	immutable->gid_tbl_len = attr.gid_tbl_len;
2497 
2498 	return 0;
2499 }
2500 
2501 static void get_fw_ver_str(struct ib_device *device, char *str)
2502 {
2503 	struct mlx4_ib_dev *dev =
2504 		container_of(device, struct mlx4_ib_dev, ib_dev);
2505 	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d",
2506 		 (int) (dev->dev->caps.fw_ver >> 32),
2507 		 (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
2508 		 (int) dev->dev->caps.fw_ver & 0xffff);
2509 }
2510 
2511 static const struct ib_device_ops mlx4_ib_dev_ops = {
2512 	.owner = THIS_MODULE,
2513 	.driver_id = RDMA_DRIVER_MLX4,
2514 	.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION,
2515 
2516 	.add_gid = mlx4_ib_add_gid,
2517 	.alloc_mr = mlx4_ib_alloc_mr,
2518 	.alloc_pd = mlx4_ib_alloc_pd,
2519 	.alloc_ucontext = mlx4_ib_alloc_ucontext,
2520 	.attach_mcast = mlx4_ib_mcg_attach,
2521 	.create_ah = mlx4_ib_create_ah,
2522 	.create_cq = mlx4_ib_create_cq,
2523 	.create_qp = mlx4_ib_create_qp,
2524 	.create_srq = mlx4_ib_create_srq,
2525 	.dealloc_pd = mlx4_ib_dealloc_pd,
2526 	.dealloc_ucontext = mlx4_ib_dealloc_ucontext,
2527 	.del_gid = mlx4_ib_del_gid,
2528 	.dereg_mr = mlx4_ib_dereg_mr,
2529 	.destroy_ah = mlx4_ib_destroy_ah,
2530 	.destroy_cq = mlx4_ib_destroy_cq,
2531 	.destroy_qp = mlx4_ib_destroy_qp,
2532 	.destroy_srq = mlx4_ib_destroy_srq,
2533 	.detach_mcast = mlx4_ib_mcg_detach,
2534 	.disassociate_ucontext = mlx4_ib_disassociate_ucontext,
2535 	.drain_rq = mlx4_ib_drain_rq,
2536 	.drain_sq = mlx4_ib_drain_sq,
2537 	.get_dev_fw_str = get_fw_ver_str,
2538 	.get_dma_mr = mlx4_ib_get_dma_mr,
2539 	.get_link_layer = mlx4_ib_port_link_layer,
2540 	.get_netdev = mlx4_ib_get_netdev,
2541 	.get_port_immutable = mlx4_port_immutable,
2542 	.map_mr_sg = mlx4_ib_map_mr_sg,
2543 	.mmap = mlx4_ib_mmap,
2544 	.modify_cq = mlx4_ib_modify_cq,
2545 	.modify_device = mlx4_ib_modify_device,
2546 	.modify_port = mlx4_ib_modify_port,
2547 	.modify_qp = mlx4_ib_modify_qp,
2548 	.modify_srq = mlx4_ib_modify_srq,
2549 	.poll_cq = mlx4_ib_poll_cq,
2550 	.post_recv = mlx4_ib_post_recv,
2551 	.post_send = mlx4_ib_post_send,
2552 	.post_srq_recv = mlx4_ib_post_srq_recv,
2553 	.process_mad = mlx4_ib_process_mad,
2554 	.query_ah = mlx4_ib_query_ah,
2555 	.query_device = mlx4_ib_query_device,
2556 	.query_gid = mlx4_ib_query_gid,
2557 	.query_pkey = mlx4_ib_query_pkey,
2558 	.query_port = mlx4_ib_query_port,
2559 	.query_qp = mlx4_ib_query_qp,
2560 	.query_srq = mlx4_ib_query_srq,
2561 	.reg_user_mr = mlx4_ib_reg_user_mr,
2562 	.req_notify_cq = mlx4_ib_arm_cq,
2563 	.rereg_user_mr = mlx4_ib_rereg_user_mr,
2564 	.resize_cq = mlx4_ib_resize_cq,
2565 
2566 	INIT_RDMA_OBJ_SIZE(ib_ah, mlx4_ib_ah, ibah),
2567 	INIT_RDMA_OBJ_SIZE(ib_cq, mlx4_ib_cq, ibcq),
2568 	INIT_RDMA_OBJ_SIZE(ib_pd, mlx4_ib_pd, ibpd),
2569 	INIT_RDMA_OBJ_SIZE(ib_srq, mlx4_ib_srq, ibsrq),
2570 	INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx4_ib_ucontext, ibucontext),
2571 };
2572 
2573 static const struct ib_device_ops mlx4_ib_dev_wq_ops = {
2574 	.create_rwq_ind_table = mlx4_ib_create_rwq_ind_table,
2575 	.create_wq = mlx4_ib_create_wq,
2576 	.destroy_rwq_ind_table = mlx4_ib_destroy_rwq_ind_table,
2577 	.destroy_wq = mlx4_ib_destroy_wq,
2578 	.modify_wq = mlx4_ib_modify_wq,
2579 
2580 	INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, mlx4_ib_rwq_ind_table,
2581 			   ib_rwq_ind_tbl),
2582 };
2583 
2584 static const struct ib_device_ops mlx4_ib_dev_mw_ops = {
2585 	.alloc_mw = mlx4_ib_alloc_mw,
2586 	.dealloc_mw = mlx4_ib_dealloc_mw,
2587 
2588 	INIT_RDMA_OBJ_SIZE(ib_mw, mlx4_ib_mw, ibmw),
2589 };
2590 
2591 static const struct ib_device_ops mlx4_ib_dev_xrc_ops = {
2592 	.alloc_xrcd = mlx4_ib_alloc_xrcd,
2593 	.dealloc_xrcd = mlx4_ib_dealloc_xrcd,
2594 
2595 	INIT_RDMA_OBJ_SIZE(ib_xrcd, mlx4_ib_xrcd, ibxrcd),
2596 };
2597 
2598 static const struct ib_device_ops mlx4_ib_dev_fs_ops = {
2599 	.create_flow = mlx4_ib_create_flow,
2600 	.destroy_flow = mlx4_ib_destroy_flow,
2601 };
2602 
2603 static void *mlx4_ib_add(struct mlx4_dev *dev)
2604 {
2605 	struct mlx4_ib_dev *ibdev;
2606 	int num_ports = 0;
2607 	int i, j;
2608 	int err;
2609 	struct mlx4_ib_iboe *iboe;
2610 	int ib_num_ports = 0;
2611 	int num_req_counters;
2612 	int allocated;
2613 	u32 counter_index;
2614 	struct counter_index *new_counter_index = NULL;
2615 
2616 	pr_info_once("%s", mlx4_ib_version);
2617 
2618 	num_ports = 0;
2619 	mlx4_foreach_ib_transport_port(i, dev)
2620 		num_ports++;
2621 
2622 	/* No point in registering a device with no ports... */
2623 	if (num_ports == 0)
2624 		return NULL;
2625 
2626 	ibdev = ib_alloc_device(mlx4_ib_dev, ib_dev);
2627 	if (!ibdev) {
2628 		dev_err(&dev->persist->pdev->dev,
2629 			"Device struct alloc failed\n");
2630 		return NULL;
2631 	}
2632 
2633 	iboe = &ibdev->iboe;
2634 
2635 	if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
2636 		goto err_dealloc;
2637 
2638 	if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
2639 		goto err_pd;
2640 
2641 	ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
2642 				 PAGE_SIZE);
2643 	if (!ibdev->uar_map)
2644 		goto err_uar;
2645 	MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
2646 
2647 	ibdev->dev = dev;
2648 	ibdev->bond_next_port	= 0;
2649 
2650 	ibdev->ib_dev.node_type		= RDMA_NODE_IB_CA;
2651 	ibdev->ib_dev.local_dma_lkey	= dev->caps.reserved_lkey;
2652 	ibdev->num_ports		= num_ports;
2653 	ibdev->ib_dev.phys_port_cnt     = mlx4_is_bonded(dev) ?
2654 						1 : ibdev->num_ports;
2655 	ibdev->ib_dev.num_comp_vectors	= dev->caps.num_comp_vectors;
2656 	ibdev->ib_dev.dev.parent	= &dev->persist->pdev->dev;
2657 
2658 	ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_ops);
2659 
2660 	if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) &&
2661 	    ((mlx4_ib_port_link_layer(&ibdev->ib_dev, 1) ==
2662 	    IB_LINK_LAYER_ETHERNET) ||
2663 	    (mlx4_ib_port_link_layer(&ibdev->ib_dev, 2) ==
2664 	    IB_LINK_LAYER_ETHERNET)))
2665 		ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_wq_ops);
2666 
2667 	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
2668 	    dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
2669 		ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_mw_ops);
2670 
2671 	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
2672 		ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_xrc_ops);
2673 	}
2674 
2675 	if (check_flow_steering_support(dev)) {
2676 		ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
2677 		ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_fs_ops);
2678 	}
2679 
2680 	if (!dev->caps.userspace_caps)
2681 		ibdev->ib_dev.ops.uverbs_abi_ver =
2682 			MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
2683 
2684 	mlx4_ib_alloc_eqs(dev, ibdev);
2685 
2686 	spin_lock_init(&iboe->lock);
2687 
2688 	if (init_node_data(ibdev))
2689 		goto err_map;
2690 	mlx4_init_sl2vl_tbl(ibdev);
2691 
2692 	for (i = 0; i < ibdev->num_ports; ++i) {
2693 		mutex_init(&ibdev->counters_table[i].mutex);
2694 		INIT_LIST_HEAD(&ibdev->counters_table[i].counters_list);
2695 		iboe->last_port_state[i] = IB_PORT_DOWN;
2696 	}
2697 
2698 	num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
2699 	for (i = 0; i < num_req_counters; ++i) {
2700 		mutex_init(&ibdev->qp1_proxy_lock[i]);
2701 		allocated = 0;
2702 		if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
2703 						IB_LINK_LAYER_ETHERNET) {
2704 			err = mlx4_counter_alloc(ibdev->dev, &counter_index,
2705 						 MLX4_RES_USAGE_DRIVER);
2706 			/* if failed to allocate a new counter, use default */
2707 			if (err)
2708 				counter_index =
2709 					mlx4_get_default_counter_index(dev,
2710 								       i + 1);
2711 			else
2712 				allocated = 1;
2713 		} else { /* IB_LINK_LAYER_INFINIBAND use the default counter */
2714 			counter_index = mlx4_get_default_counter_index(dev,
2715 								       i + 1);
2716 		}
2717 		new_counter_index = kmalloc(sizeof(*new_counter_index),
2718 					    GFP_KERNEL);
2719 		if (!new_counter_index) {
2720 			if (allocated)
2721 				mlx4_counter_free(ibdev->dev, counter_index);
2722 			goto err_counter;
2723 		}
2724 		new_counter_index->index = counter_index;
2725 		new_counter_index->allocated = allocated;
2726 		list_add_tail(&new_counter_index->list,
2727 			      &ibdev->counters_table[i].counters_list);
2728 		ibdev->counters_table[i].default_counter = counter_index;
2729 		pr_info("counter index %d for port %d allocated %d\n",
2730 			counter_index, i + 1, allocated);
2731 	}
2732 	if (mlx4_is_bonded(dev))
2733 		for (i = 1; i < ibdev->num_ports ; ++i) {
2734 			new_counter_index =
2735 					kmalloc(sizeof(struct counter_index),
2736 						GFP_KERNEL);
2737 			if (!new_counter_index)
2738 				goto err_counter;
2739 			new_counter_index->index = counter_index;
2740 			new_counter_index->allocated = 0;
2741 			list_add_tail(&new_counter_index->list,
2742 				      &ibdev->counters_table[i].counters_list);
2743 			ibdev->counters_table[i].default_counter =
2744 								counter_index;
2745 		}
2746 
2747 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2748 		ib_num_ports++;
2749 
2750 	spin_lock_init(&ibdev->sm_lock);
2751 	mutex_init(&ibdev->cap_mask_mutex);
2752 	INIT_LIST_HEAD(&ibdev->qp_list);
2753 	spin_lock_init(&ibdev->reset_flow_resource_lock);
2754 
2755 	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
2756 	    ib_num_ports) {
2757 		ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
2758 		err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
2759 					    MLX4_IB_UC_STEER_QPN_ALIGN,
2760 					    &ibdev->steer_qpn_base, 0,
2761 					    MLX4_RES_USAGE_DRIVER);
2762 		if (err)
2763 			goto err_counter;
2764 
2765 		ibdev->ib_uc_qpns_bitmap =
2766 			kmalloc_array(BITS_TO_LONGS(ibdev->steer_qpn_count),
2767 				      sizeof(long),
2768 				      GFP_KERNEL);
2769 		if (!ibdev->ib_uc_qpns_bitmap)
2770 			goto err_steer_qp_release;
2771 
2772 		if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB) {
2773 			bitmap_zero(ibdev->ib_uc_qpns_bitmap,
2774 				    ibdev->steer_qpn_count);
2775 			err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
2776 					dev, ibdev->steer_qpn_base,
2777 					ibdev->steer_qpn_base +
2778 					ibdev->steer_qpn_count - 1);
2779 			if (err)
2780 				goto err_steer_free_bitmap;
2781 		} else {
2782 			bitmap_fill(ibdev->ib_uc_qpns_bitmap,
2783 				    ibdev->steer_qpn_count);
2784 		}
2785 	}
2786 
2787 	for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
2788 		atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
2789 
2790 	if (mlx4_ib_alloc_diag_counters(ibdev))
2791 		goto err_steer_free_bitmap;
2792 
2793 	rdma_set_device_sysfs_group(&ibdev->ib_dev, &mlx4_attr_group);
2794 	if (ib_register_device(&ibdev->ib_dev, "mlx4_%d",
2795 			       &dev->persist->pdev->dev))
2796 		goto err_diag_counters;
2797 
2798 	if (mlx4_ib_mad_init(ibdev))
2799 		goto err_reg;
2800 
2801 	if (mlx4_ib_init_sriov(ibdev))
2802 		goto err_mad;
2803 
2804 	if (!iboe->nb.notifier_call) {
2805 		iboe->nb.notifier_call = mlx4_ib_netdev_event;
2806 		err = register_netdevice_notifier(&iboe->nb);
2807 		if (err) {
2808 			iboe->nb.notifier_call = NULL;
2809 			goto err_notif;
2810 		}
2811 	}
2812 	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
2813 		err = mlx4_config_roce_v2_port(dev, ROCE_V2_UDP_DPORT);
2814 		if (err)
2815 			goto err_notif;
2816 	}
2817 
2818 	ibdev->ib_active = true;
2819 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2820 		devlink_port_type_ib_set(mlx4_get_devlink_port(dev, i),
2821 					 &ibdev->ib_dev);
2822 
2823 	if (mlx4_is_mfunc(ibdev->dev))
2824 		init_pkeys(ibdev);
2825 
2826 	/* create paravirt contexts for any VFs which are active */
2827 	if (mlx4_is_master(ibdev->dev)) {
2828 		for (j = 0; j < MLX4_MFUNC_MAX; j++) {
2829 			if (j == mlx4_master_func_num(ibdev->dev))
2830 				continue;
2831 			if (mlx4_is_slave_active(ibdev->dev, j))
2832 				do_slave_init(ibdev, j, 1);
2833 		}
2834 	}
2835 	return ibdev;
2836 
2837 err_notif:
2838 	if (ibdev->iboe.nb.notifier_call) {
2839 		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2840 			pr_warn("failure unregistering notifier\n");
2841 		ibdev->iboe.nb.notifier_call = NULL;
2842 	}
2843 	flush_workqueue(wq);
2844 
2845 	mlx4_ib_close_sriov(ibdev);
2846 
2847 err_mad:
2848 	mlx4_ib_mad_cleanup(ibdev);
2849 
2850 err_reg:
2851 	ib_unregister_device(&ibdev->ib_dev);
2852 
2853 err_diag_counters:
2854 	mlx4_ib_diag_cleanup(ibdev);
2855 
2856 err_steer_free_bitmap:
2857 	kfree(ibdev->ib_uc_qpns_bitmap);
2858 
2859 err_steer_qp_release:
2860 	mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2861 			      ibdev->steer_qpn_count);
2862 err_counter:
2863 	for (i = 0; i < ibdev->num_ports; ++i)
2864 		mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);
2865 
2866 err_map:
2867 	mlx4_ib_free_eqs(dev, ibdev);
2868 	iounmap(ibdev->uar_map);
2869 
2870 err_uar:
2871 	mlx4_uar_free(dev, &ibdev->priv_uar);
2872 
2873 err_pd:
2874 	mlx4_pd_free(dev, ibdev->priv_pdn);
2875 
2876 err_dealloc:
2877 	ib_dealloc_device(&ibdev->ib_dev);
2878 
2879 	return NULL;
2880 }
2881 
2882 int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
2883 {
2884 	int offset;
2885 
2886 	WARN_ON(!dev->ib_uc_qpns_bitmap);
2887 
2888 	offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
2889 					 dev->steer_qpn_count,
2890 					 get_count_order(count));
2891 	if (offset < 0)
2892 		return offset;
2893 
2894 	*qpn = dev->steer_qpn_base + offset;
2895 	return 0;
2896 }
2897 
2898 void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
2899 {
2900 	if (!qpn ||
2901 	    dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED)
2902 		return;
2903 
2904 	if (WARN(qpn < dev->steer_qpn_base, "qpn = %u, steer_qpn_base = %u\n",
2905 		 qpn, dev->steer_qpn_base))
2906 		/* not supposed to be here */
2907 		return;
2908 
2909 	bitmap_release_region(dev->ib_uc_qpns_bitmap,
2910 			      qpn - dev->steer_qpn_base,
2911 			      get_count_order(count));
2912 }
2913 
2914 int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
2915 			 int is_attach)
2916 {
2917 	int err;
2918 	size_t flow_size;
2919 	struct ib_flow_attr *flow = NULL;
2920 	struct ib_flow_spec_ib *ib_spec;
2921 
2922 	if (is_attach) {
2923 		flow_size = sizeof(struct ib_flow_attr) +
2924 			    sizeof(struct ib_flow_spec_ib);
2925 		flow = kzalloc(flow_size, GFP_KERNEL);
2926 		if (!flow)
2927 			return -ENOMEM;
2928 		flow->port = mqp->port;
2929 		flow->num_of_specs = 1;
2930 		flow->size = flow_size;
2931 		ib_spec = (struct ib_flow_spec_ib *)(flow + 1);
2932 		ib_spec->type = IB_FLOW_SPEC_IB;
2933 		ib_spec->size = sizeof(struct ib_flow_spec_ib);
2934 		/* Add an empty rule for IB L2 */
2935 		memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));
2936 
2937 		err = __mlx4_ib_create_flow(&mqp->ibqp, flow, MLX4_DOMAIN_NIC,
2938 					    MLX4_FS_REGULAR, &mqp->reg_id);
2939 	} else {
2940 		err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
2941 	}
2942 	kfree(flow);
2943 	return err;
2944 }
2945 
2946 static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
2947 {
2948 	struct mlx4_ib_dev *ibdev = ibdev_ptr;
2949 	int p;
2950 	int i;
2951 
2952 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2953 		devlink_port_type_clear(mlx4_get_devlink_port(dev, i));
2954 	ibdev->ib_active = false;
2955 	flush_workqueue(wq);
2956 
2957 	if (ibdev->iboe.nb.notifier_call) {
2958 		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2959 			pr_warn("failure unregistering notifier\n");
2960 		ibdev->iboe.nb.notifier_call = NULL;
2961 	}
2962 
2963 	mlx4_ib_close_sriov(ibdev);
2964 	mlx4_ib_mad_cleanup(ibdev);
2965 	ib_unregister_device(&ibdev->ib_dev);
2966 	mlx4_ib_diag_cleanup(ibdev);
2967 
2968 	mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2969 			      ibdev->steer_qpn_count);
2970 	kfree(ibdev->ib_uc_qpns_bitmap);
2971 
2972 	iounmap(ibdev->uar_map);
2973 	for (p = 0; p < ibdev->num_ports; ++p)
2974 		mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[p]);
2975 
2976 	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
2977 		mlx4_CLOSE_PORT(dev, p);
2978 
2979 	mlx4_ib_free_eqs(dev, ibdev);
2980 
2981 	mlx4_uar_free(dev, &ibdev->priv_uar);
2982 	mlx4_pd_free(dev, ibdev->priv_pdn);
2983 	ib_dealloc_device(&ibdev->ib_dev);
2984 }
2985 
2986 static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
2987 {
2988 	struct mlx4_ib_demux_work **dm = NULL;
2989 	struct mlx4_dev *dev = ibdev->dev;
2990 	int i;
2991 	unsigned long flags;
2992 	struct mlx4_active_ports actv_ports;
2993 	unsigned int ports;
2994 	unsigned int first_port;
2995 
2996 	if (!mlx4_is_master(dev))
2997 		return;
2998 
2999 	actv_ports = mlx4_get_active_ports(dev, slave);
3000 	ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
3001 	first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
3002 
3003 	dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
3004 	if (!dm)
3005 		return;
3006 
3007 	for (i = 0; i < ports; i++) {
3008 		dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
3009 		if (!dm[i]) {
3010 			while (--i >= 0)
3011 				kfree(dm[i]);
3012 			goto out;
3013 		}
3014 		INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
3015 		dm[i]->port = first_port + i + 1;
3016 		dm[i]->slave = slave;
3017 		dm[i]->do_init = do_init;
3018 		dm[i]->dev = ibdev;
3019 	}
3020 	/* initialize or tear down tunnel QPs for the slave */
3021 	spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
3022 	if (!ibdev->sriov.is_going_down) {
3023 		for (i = 0; i < ports; i++)
3024 			queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
3025 		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
3026 	} else {
3027 		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
3028 		for (i = 0; i < ports; i++)
3029 			kfree(dm[i]);
3030 	}
3031 out:
3032 	kfree(dm);
3033 	return;
3034 }
3035 
3036 static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev)
3037 {
3038 	struct mlx4_ib_qp *mqp;
3039 	unsigned long flags_qp;
3040 	unsigned long flags_cq;
3041 	struct mlx4_ib_cq *send_mcq, *recv_mcq;
3042 	struct list_head    cq_notify_list;
3043 	struct mlx4_cq *mcq;
3044 	unsigned long flags;
3045 
3046 	pr_warn("mlx4_ib_handle_catas_error was started\n");
3047 	INIT_LIST_HEAD(&cq_notify_list);
3048 
3049 	/* Go over qp list reside on that ibdev, sync with create/destroy qp.*/
3050 	spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
3051 
3052 	list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
3053 		spin_lock_irqsave(&mqp->sq.lock, flags_qp);
3054 		if (mqp->sq.tail != mqp->sq.head) {
3055 			send_mcq = to_mcq(mqp->ibqp.send_cq);
3056 			spin_lock_irqsave(&send_mcq->lock, flags_cq);
3057 			if (send_mcq->mcq.comp &&
3058 			    mqp->ibqp.send_cq->comp_handler) {
3059 				if (!send_mcq->mcq.reset_notify_added) {
3060 					send_mcq->mcq.reset_notify_added = 1;
3061 					list_add_tail(&send_mcq->mcq.reset_notify,
3062 						      &cq_notify_list);
3063 				}
3064 			}
3065 			spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
3066 		}
3067 		spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
3068 		/* Now, handle the QP's receive queue */
3069 		spin_lock_irqsave(&mqp->rq.lock, flags_qp);
3070 		/* no handling is needed for SRQ */
3071 		if (!mqp->ibqp.srq) {
3072 			if (mqp->rq.tail != mqp->rq.head) {
3073 				recv_mcq = to_mcq(mqp->ibqp.recv_cq);
3074 				spin_lock_irqsave(&recv_mcq->lock, flags_cq);
3075 				if (recv_mcq->mcq.comp &&
3076 				    mqp->ibqp.recv_cq->comp_handler) {
3077 					if (!recv_mcq->mcq.reset_notify_added) {
3078 						recv_mcq->mcq.reset_notify_added = 1;
3079 						list_add_tail(&recv_mcq->mcq.reset_notify,
3080 							      &cq_notify_list);
3081 					}
3082 				}
3083 				spin_unlock_irqrestore(&recv_mcq->lock,
3084 						       flags_cq);
3085 			}
3086 		}
3087 		spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
3088 	}
3089 
3090 	list_for_each_entry(mcq, &cq_notify_list, reset_notify) {
3091 		mcq->comp(mcq);
3092 	}
3093 	spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
3094 	pr_warn("mlx4_ib_handle_catas_error ended\n");
3095 }
3096 
3097 static void handle_bonded_port_state_event(struct work_struct *work)
3098 {
3099 	struct ib_event_work *ew =
3100 		container_of(work, struct ib_event_work, work);
3101 	struct mlx4_ib_dev *ibdev = ew->ib_dev;
3102 	enum ib_port_state bonded_port_state = IB_PORT_NOP;
3103 	int i;
3104 	struct ib_event ibev;
3105 
3106 	kfree(ew);
3107 	spin_lock_bh(&ibdev->iboe.lock);
3108 	for (i = 0; i < MLX4_MAX_PORTS; ++i) {
3109 		struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
3110 		enum ib_port_state curr_port_state;
3111 
3112 		if (!curr_netdev)
3113 			continue;
3114 
3115 		curr_port_state =
3116 			(netif_running(curr_netdev) &&
3117 			 netif_carrier_ok(curr_netdev)) ?
3118 			IB_PORT_ACTIVE : IB_PORT_DOWN;
3119 
3120 		bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ?
3121 			curr_port_state : IB_PORT_ACTIVE;
3122 	}
3123 	spin_unlock_bh(&ibdev->iboe.lock);
3124 
3125 	ibev.device = &ibdev->ib_dev;
3126 	ibev.element.port_num = 1;
3127 	ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ?
3128 		IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
3129 
3130 	ib_dispatch_event(&ibev);
3131 }
3132 
3133 void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port)
3134 {
3135 	u64 sl2vl;
3136 	int err;
3137 
3138 	err = mlx4_ib_query_sl2vl(&mdev->ib_dev, port, &sl2vl);
3139 	if (err) {
3140 		pr_err("Unable to get current sl to vl mapping for port %d.  Using all zeroes (%d)\n",
3141 		       port, err);
3142 		sl2vl = 0;
3143 	}
3144 	atomic64_set(&mdev->sl2vl[port - 1], sl2vl);
3145 }
3146 
3147 static void ib_sl2vl_update_work(struct work_struct *work)
3148 {
3149 	struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
3150 	struct mlx4_ib_dev *mdev = ew->ib_dev;
3151 	int port = ew->port;
3152 
3153 	mlx4_ib_sl2vl_update(mdev, port);
3154 
3155 	kfree(ew);
3156 }
3157 
3158 void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
3159 				     int port)
3160 {
3161 	struct ib_event_work *ew;
3162 
3163 	ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
3164 	if (ew) {
3165 		INIT_WORK(&ew->work, ib_sl2vl_update_work);
3166 		ew->port = port;
3167 		ew->ib_dev = ibdev;
3168 		queue_work(wq, &ew->work);
3169 	}
3170 }
3171 
3172 static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
3173 			  enum mlx4_dev_event event, unsigned long param)
3174 {
3175 	struct ib_event ibev;
3176 	struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
3177 	struct mlx4_eqe *eqe = NULL;
3178 	struct ib_event_work *ew;
3179 	int p = 0;
3180 
3181 	if (mlx4_is_bonded(dev) &&
3182 	    ((event == MLX4_DEV_EVENT_PORT_UP) ||
3183 	    (event == MLX4_DEV_EVENT_PORT_DOWN))) {
3184 		ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
3185 		if (!ew)
3186 			return;
3187 		INIT_WORK(&ew->work, handle_bonded_port_state_event);
3188 		ew->ib_dev = ibdev;
3189 		queue_work(wq, &ew->work);
3190 		return;
3191 	}
3192 
3193 	if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
3194 		eqe = (struct mlx4_eqe *)param;
3195 	else
3196 		p = (int) param;
3197 
3198 	switch (event) {
3199 	case MLX4_DEV_EVENT_PORT_UP:
3200 		if (p > ibdev->num_ports)
3201 			return;
3202 		if (!mlx4_is_slave(dev) &&
3203 		    rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
3204 			IB_LINK_LAYER_INFINIBAND) {
3205 			if (mlx4_is_master(dev))
3206 				mlx4_ib_invalidate_all_guid_record(ibdev, p);
3207 			if (ibdev->dev->flags & MLX4_FLAG_SECURE_HOST &&
3208 			    !(ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT))
3209 				mlx4_sched_ib_sl2vl_update_work(ibdev, p);
3210 		}
3211 		ibev.event = IB_EVENT_PORT_ACTIVE;
3212 		break;
3213 
3214 	case MLX4_DEV_EVENT_PORT_DOWN:
3215 		if (p > ibdev->num_ports)
3216 			return;
3217 		ibev.event = IB_EVENT_PORT_ERR;
3218 		break;
3219 
3220 	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
3221 		ibdev->ib_active = false;
3222 		ibev.event = IB_EVENT_DEVICE_FATAL;
3223 		mlx4_ib_handle_catas_error(ibdev);
3224 		break;
3225 
3226 	case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
3227 		ew = kmalloc(sizeof *ew, GFP_ATOMIC);
3228 		if (!ew)
3229 			break;
3230 
3231 		INIT_WORK(&ew->work, handle_port_mgmt_change_event);
3232 		memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
3233 		ew->ib_dev = ibdev;
3234 		/* need to queue only for port owner, which uses GEN_EQE */
3235 		if (mlx4_is_master(dev))
3236 			queue_work(wq, &ew->work);
3237 		else
3238 			handle_port_mgmt_change_event(&ew->work);
3239 		return;
3240 
3241 	case MLX4_DEV_EVENT_SLAVE_INIT:
3242 		/* here, p is the slave id */
3243 		do_slave_init(ibdev, p, 1);
3244 		if (mlx4_is_master(dev)) {
3245 			int i;
3246 
3247 			for (i = 1; i <= ibdev->num_ports; i++) {
3248 				if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
3249 					== IB_LINK_LAYER_INFINIBAND)
3250 					mlx4_ib_slave_alias_guid_event(ibdev,
3251 								       p, i,
3252 								       1);
3253 			}
3254 		}
3255 		return;
3256 
3257 	case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
3258 		if (mlx4_is_master(dev)) {
3259 			int i;
3260 
3261 			for (i = 1; i <= ibdev->num_ports; i++) {
3262 				if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
3263 					== IB_LINK_LAYER_INFINIBAND)
3264 					mlx4_ib_slave_alias_guid_event(ibdev,
3265 								       p, i,
3266 								       0);
3267 			}
3268 		}
3269 		/* here, p is the slave id */
3270 		do_slave_init(ibdev, p, 0);
3271 		return;
3272 
3273 	default:
3274 		return;
3275 	}
3276 
3277 	ibev.device	      = ibdev_ptr;
3278 	ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;
3279 
3280 	ib_dispatch_event(&ibev);
3281 }
3282 
3283 static struct mlx4_interface mlx4_ib_interface = {
3284 	.add		= mlx4_ib_add,
3285 	.remove		= mlx4_ib_remove,
3286 	.event		= mlx4_ib_event,
3287 	.protocol	= MLX4_PROT_IB_IPV6,
3288 	.flags		= MLX4_INTFF_BONDING
3289 };
3290 
3291 static int __init mlx4_ib_init(void)
3292 {
3293 	int err;
3294 
3295 	wq = alloc_ordered_workqueue("mlx4_ib", WQ_MEM_RECLAIM);
3296 	if (!wq)
3297 		return -ENOMEM;
3298 
3299 	err = mlx4_ib_mcg_init();
3300 	if (err)
3301 		goto clean_wq;
3302 
3303 	err = mlx4_register_interface(&mlx4_ib_interface);
3304 	if (err)
3305 		goto clean_mcg;
3306 
3307 	return 0;
3308 
3309 clean_mcg:
3310 	mlx4_ib_mcg_destroy();
3311 
3312 clean_wq:
3313 	destroy_workqueue(wq);
3314 	return err;
3315 }
3316 
3317 static void __exit mlx4_ib_cleanup(void)
3318 {
3319 	mlx4_unregister_interface(&mlx4_ib_interface);
3320 	mlx4_ib_mcg_destroy();
3321 	destroy_workqueue(wq);
3322 }
3323 
3324 module_init(mlx4_ib_init);
3325 module_exit(mlx4_ib_cleanup);
3326