xref: /openbmc/linux/drivers/infiniband/hw/mlx4/main.c (revision 7b73a9c8)
1 /*
2  * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 
34 #include <linux/module.h>
35 #include <linux/init.h>
36 #include <linux/slab.h>
37 #include <linux/errno.h>
38 #include <linux/netdevice.h>
39 #include <linux/inetdevice.h>
40 #include <linux/rtnetlink.h>
41 #include <linux/if_vlan.h>
42 #include <linux/sched/mm.h>
43 #include <linux/sched/task.h>
44 
45 #include <net/ipv6.h>
46 #include <net/addrconf.h>
47 #include <net/devlink.h>
48 
49 #include <rdma/ib_smi.h>
50 #include <rdma/ib_user_verbs.h>
51 #include <rdma/ib_addr.h>
52 #include <rdma/ib_cache.h>
53 
54 #include <net/bonding.h>
55 
56 #include <linux/mlx4/driver.h>
57 #include <linux/mlx4/cmd.h>
58 #include <linux/mlx4/qp.h>
59 
60 #include "mlx4_ib.h"
61 #include <rdma/mlx4-abi.h>
62 
63 #define DRV_NAME	MLX4_IB_DRV_NAME
64 #define DRV_VERSION	"4.0-0"
65 
66 #define MLX4_IB_FLOW_MAX_PRIO 0xFFF
67 #define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
68 #define MLX4_IB_CARD_REV_A0   0xA0
69 
70 MODULE_AUTHOR("Roland Dreier");
71 MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
72 MODULE_LICENSE("Dual BSD/GPL");
73 
74 int mlx4_ib_sm_guid_assign = 0;
75 module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
76 MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 0)");
77 
78 static const char mlx4_ib_version[] =
79 	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
80 	DRV_VERSION "\n";
81 
82 static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
83 static enum rdma_link_layer mlx4_ib_port_link_layer(struct ib_device *device,
84 						    u8 port_num);
85 
86 static struct workqueue_struct *wq;
87 
88 static void init_query_mad(struct ib_smp *mad)
89 {
90 	mad->base_version  = 1;
91 	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
92 	mad->class_version = 1;
93 	mad->method	   = IB_MGMT_METHOD_GET;
94 }
95 
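/*
 * Device-managed flow steering (DMFS) is usable only when the device is
 * in MLX4_STEERING_MODE_DEVICE_MANAGED and every configured port type has
 * the matching capability (FS_EN for Ethernet, DMFS_IPOIB for IB); it is
 * also refused for IB ports in a multifunction environment.
 */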
96 static int check_flow_steering_support(struct mlx4_dev *dev)
97 {
98 	int eth_num_ports = 0;
99 	int ib_num_ports = 0;
100 
101 	int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;
102 
103 	if (dmfs) {
104 		int i;
105 		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
106 			eth_num_ports++;
107 		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
108 			ib_num_ports++;
109 		dmfs &= (!ib_num_ports ||
110 			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) &&
111 			(!eth_num_ports ||
112 			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN));
113 		if (ib_num_ports && mlx4_is_mfunc(dev)) {
114 			pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n");
115 			dmfs = 0;
116 		}
117 	}
118 	return dmfs;
119 }
120 
121 static int num_ib_ports(struct mlx4_dev *dev)
122 {
123 	int ib_ports = 0;
124 	int i;
125 
126 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
127 		ib_ports++;
128 
129 	return ib_ports;
130 }
131 
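/*
 * Return the net_device backing an Ethernet port.  When the ports are
 * bonded, the currently active slave of the bond master is returned
 * instead.  A reference is held on the returned device; the caller must
 * drop it with dev_put().
 */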
132 static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, u8 port_num)
133 {
134 	struct mlx4_ib_dev *ibdev = to_mdev(device);
135 	struct net_device *dev;
136 
137 	rcu_read_lock();
138 	dev = mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port_num);
139 
140 	if (dev) {
141 		if (mlx4_is_bonded(ibdev->dev)) {
142 			struct net_device *upper = NULL;
143 
144 			upper = netdev_master_upper_dev_get_rcu(dev);
145 			if (upper) {
146 				struct net_device *active;
147 
148 				active = bond_option_active_slave_get_rcu(netdev_priv(upper));
149 				if (active)
150 					dev = active;
151 			}
152 		}
153 	}
154 	if (dev)
155 		dev_hold(dev);
156 
157 	rcu_read_unlock();
158 	return dev;
159 }
160 
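/*
 * Write the software GID cache of a port to the HCA with SET_PORT.  This
 * v1 variant programs a plain GID table; the v1_v2 variant below also
 * encodes the RoCE version and address type of each entry.  When the
 * ports are bonded, the same table is programmed on port 2 as well.
 */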
161 static int mlx4_ib_update_gids_v1(struct gid_entry *gids,
162 				  struct mlx4_ib_dev *ibdev,
163 				  u8 port_num)
164 {
165 	struct mlx4_cmd_mailbox *mailbox;
166 	int err;
167 	struct mlx4_dev *dev = ibdev->dev;
168 	int i;
169 	union ib_gid *gid_tbl;
170 
171 	mailbox = mlx4_alloc_cmd_mailbox(dev);
172 	if (IS_ERR(mailbox))
173 		return -ENOMEM;
174 
175 	gid_tbl = mailbox->buf;
176 
177 	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
178 		memcpy(&gid_tbl[i], &gids[i].gid, sizeof(union ib_gid));
179 
180 	err = mlx4_cmd(dev, mailbox->dma,
181 		       MLX4_SET_PORT_GID_TABLE << 8 | port_num,
182 		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
183 		       MLX4_CMD_WRAPPED);
184 	if (mlx4_is_bonded(dev))
185 		err += mlx4_cmd(dev, mailbox->dma,
186 				MLX4_SET_PORT_GID_TABLE << 8 | 2,
187 				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
188 				MLX4_CMD_WRAPPED);
189 
190 	mlx4_free_cmd_mailbox(dev, mailbox);
191 	return err;
192 }
193 
194 static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
195 				     struct mlx4_ib_dev *ibdev,
196 				     u8 port_num)
197 {
198 	struct mlx4_cmd_mailbox *mailbox;
199 	int err;
200 	struct mlx4_dev *dev = ibdev->dev;
201 	int i;
202 	struct {
203 		union ib_gid	gid;
204 		__be32		rsrvd1[2];
205 		__be16		rsrvd2;
206 		u8		type;
207 		u8		version;
208 		__be32		rsrvd3;
209 	} *gid_tbl;
210 
211 	mailbox = mlx4_alloc_cmd_mailbox(dev);
212 	if (IS_ERR(mailbox))
213 		return -ENOMEM;
214 
215 	gid_tbl = mailbox->buf;
216 	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
217 		memcpy(&gid_tbl[i].gid, &gids[i].gid, sizeof(union ib_gid));
218 		if (gids[i].gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
219 			gid_tbl[i].version = 2;
220 			if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
221 				gid_tbl[i].type = 1;
222 		}
223 	}
224 
225 	err = mlx4_cmd(dev, mailbox->dma,
226 		       MLX4_SET_PORT_ROCE_ADDR << 8 | port_num,
227 		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
228 		       MLX4_CMD_WRAPPED);
229 	if (mlx4_is_bonded(dev))
230 		err += mlx4_cmd(dev, mailbox->dma,
231 				MLX4_SET_PORT_ROCE_ADDR << 8 | 2,
232 				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
233 				MLX4_CMD_WRAPPED);
234 
235 	mlx4_free_cmd_mailbox(dev, mailbox);
236 	return err;
237 }
238 
239 static int mlx4_ib_update_gids(struct gid_entry *gids,
240 			       struct mlx4_ib_dev *ibdev,
241 			       u8 port_num)
242 {
243 	if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
244 		return mlx4_ib_update_gids_v1_v2(gids, ibdev, port_num);
245 
246 	return mlx4_ib_update_gids_v1(gids, ibdev, port_num);
247 }
248 
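/*
 * add_gid callback for the RoCE GID table.  A GID already present in the
 * per-port software cache only gets its refcount bumped; otherwise it is
 * stored in the first free slot and the whole table is re-written to
 * hardware after dropping the iboe lock.
 */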
249 static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
250 {
251 	struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
252 	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
253 	struct mlx4_port_gid_table   *port_gid_table;
254 	int free = -1, found = -1;
255 	int ret = 0;
256 	int hw_update = 0;
257 	int i;
258 	struct gid_entry *gids = NULL;
259 	u16 vlan_id = 0xffff;
260 	u8 mac[ETH_ALEN];
261 
262 	if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
263 		return -EINVAL;
264 
265 	if (attr->port_num > MLX4_MAX_PORTS)
266 		return -EINVAL;
267 
268 	if (!context)
269 		return -EINVAL;
270 
271 	ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
272 	if (ret)
273 		return ret;
274 	port_gid_table = &iboe->gids[attr->port_num - 1];
275 	spin_lock_bh(&iboe->lock);
276 	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
277 		if (!memcmp(&port_gid_table->gids[i].gid,
278 			    &attr->gid, sizeof(attr->gid)) &&
279 		    port_gid_table->gids[i].gid_type == attr->gid_type &&
280 		    port_gid_table->gids[i].vlan_id == vlan_id)  {
281 			found = i;
282 			break;
283 		}
284 		if (free < 0 && rdma_is_zero_gid(&port_gid_table->gids[i].gid))
285 			free = i; /* HW has space */
286 	}
287 
288 	if (found < 0) {
289 		if (free < 0) {
290 			ret = -ENOSPC;
291 		} else {
292 			port_gid_table->gids[free].ctx = kmalloc(sizeof(*port_gid_table->gids[free].ctx), GFP_ATOMIC);
293 			if (!port_gid_table->gids[free].ctx) {
294 				ret = -ENOMEM;
295 			} else {
296 				*context = port_gid_table->gids[free].ctx;
297 				memcpy(&port_gid_table->gids[free].gid,
298 				       &attr->gid, sizeof(attr->gid));
299 				port_gid_table->gids[free].gid_type = attr->gid_type;
300 				port_gid_table->gids[free].vlan_id = vlan_id;
301 				port_gid_table->gids[free].ctx->real_index = free;
302 				port_gid_table->gids[free].ctx->refcount = 1;
303 				hw_update = 1;
304 			}
305 		}
306 	} else {
307 		struct gid_cache_context *ctx = port_gid_table->gids[found].ctx;
308 		*context = ctx;
309 		ctx->refcount++;
310 	}
311 	if (!ret && hw_update) {
312 		gids = kmalloc_array(MLX4_MAX_PORT_GIDS, sizeof(*gids),
313 				     GFP_ATOMIC);
314 		if (!gids) {
315 			ret = -ENOMEM;
316 		} else {
317 			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
318 				memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
319 				gids[i].gid_type = port_gid_table->gids[i].gid_type;
320 			}
321 		}
322 	}
323 	spin_unlock_bh(&iboe->lock);
324 
325 	if (!ret && hw_update) {
326 		ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
327 		kfree(gids);
328 	}
329 
330 	return ret;
331 }
332 
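/*
 * del_gid callback: drop a reference on the cached entry and, once the
 * refcount reaches zero, clear the slot and re-program the hardware
 * GID table.
 */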
333 static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context)
334 {
335 	struct gid_cache_context *ctx = *context;
336 	struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
337 	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
338 	struct mlx4_port_gid_table   *port_gid_table;
339 	int ret = 0;
340 	int hw_update = 0;
341 	struct gid_entry *gids = NULL;
342 
343 	if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
344 		return -EINVAL;
345 
346 	if (attr->port_num > MLX4_MAX_PORTS)
347 		return -EINVAL;
348 
349 	port_gid_table = &iboe->gids[attr->port_num - 1];
350 	spin_lock_bh(&iboe->lock);
351 	if (ctx) {
352 		ctx->refcount--;
353 		if (!ctx->refcount) {
354 			unsigned int real_index = ctx->real_index;
355 
356 			memset(&port_gid_table->gids[real_index].gid, 0,
357 			       sizeof(port_gid_table->gids[real_index].gid));
358 			kfree(port_gid_table->gids[real_index].ctx);
359 			port_gid_table->gids[real_index].ctx = NULL;
360 			hw_update = 1;
361 		}
362 	}
363 	if (!ret && hw_update) {
364 		int i;
365 
366 		gids = kmalloc_array(MLX4_MAX_PORT_GIDS, sizeof(*gids),
367 				     GFP_ATOMIC);
368 		if (!gids) {
369 			ret = -ENOMEM;
370 		} else {
371 			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
372 				memcpy(&gids[i].gid,
373 				       &port_gid_table->gids[i].gid,
374 				       sizeof(union ib_gid));
375 				gids[i].gid_type =
376 				    port_gid_table->gids[i].gid_type;
377 			}
378 		}
379 	}
380 	spin_unlock_bh(&iboe->lock);
381 
382 	if (!ret && hw_update) {
383 		ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
384 		kfree(gids);
385 	}
386 	return ret;
387 }
388 
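/*
 * Translate a GID cache index into the index actually programmed in
 * hardware.  IB ports use the index as-is; for RoCE ports the software
 * table is searched for a matching GID and GID type.
 */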
389 int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
390 				    const struct ib_gid_attr *attr)
391 {
392 	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
393 	struct gid_cache_context *ctx = NULL;
394 	struct mlx4_port_gid_table   *port_gid_table;
395 	int real_index = -EINVAL;
396 	int i;
397 	unsigned long flags;
398 	u8 port_num = attr->port_num;
399 
400 	if (port_num > MLX4_MAX_PORTS)
401 		return -EINVAL;
402 
403 	if (mlx4_is_bonded(ibdev->dev))
404 		port_num = 1;
405 
406 	if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num))
407 		return attr->index;
408 
409 	spin_lock_irqsave(&iboe->lock, flags);
410 	port_gid_table = &iboe->gids[port_num - 1];
411 
412 	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
413 		if (!memcmp(&port_gid_table->gids[i].gid,
414 			    &attr->gid, sizeof(attr->gid)) &&
415 		    attr->gid_type == port_gid_table->gids[i].gid_type) {
416 			ctx = port_gid_table->gids[i].ctx;
417 			break;
418 		}
419 	if (ctx)
420 		real_index = ctx->real_index;
421 	spin_unlock_irqrestore(&iboe->lock, flags);
422 	return real_index;
423 }
424 
425 #define field_avail(type, fld, sz) (offsetof(type, fld) + \
426 				    sizeof(((type *)0)->fld) <= (sz))
427 
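/*
 * Query device attributes.  Vendor and system image info is read with a
 * MAD to the local SMA; the remaining capabilities come from cached
 * firmware data.  On the extended uverbs path, optional response fields
 * (core clock offset, max inline receive size, RSS and TSO caps) are
 * appended as far as the user buffer allows.
 */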
428 static int mlx4_ib_query_device(struct ib_device *ibdev,
429 				struct ib_device_attr *props,
430 				struct ib_udata *uhw)
431 {
432 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
433 	struct ib_smp *in_mad  = NULL;
434 	struct ib_smp *out_mad = NULL;
435 	int err;
436 	int have_ib_ports;
437 	struct mlx4_uverbs_ex_query_device cmd;
438 	struct mlx4_uverbs_ex_query_device_resp resp = {.comp_mask = 0};
439 	struct mlx4_clock_params clock_params;
440 
441 	if (uhw->inlen) {
442 		if (uhw->inlen < sizeof(cmd))
443 			return -EINVAL;
444 
445 		err = ib_copy_from_udata(&cmd, uhw, sizeof(cmd));
446 		if (err)
447 			return err;
448 
449 		if (cmd.comp_mask)
450 			return -EINVAL;
451 
452 		if (cmd.reserved)
453 			return -EINVAL;
454 	}
455 
456 	resp.response_length = offsetof(typeof(resp), response_length) +
457 		sizeof(resp.response_length);
458 	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
459 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
460 	err = -ENOMEM;
461 	if (!in_mad || !out_mad)
462 		goto out;
463 
464 	init_query_mad(in_mad);
465 	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
466 
467 	err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
468 			   1, NULL, NULL, in_mad, out_mad);
469 	if (err)
470 		goto out;
471 
472 	memset(props, 0, sizeof *props);
473 
474 	have_ib_ports = num_ib_ports(dev->dev);
475 
476 	props->fw_ver = dev->dev->caps.fw_ver;
477 	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
478 		IB_DEVICE_PORT_ACTIVE_EVENT		|
479 		IB_DEVICE_SYS_IMAGE_GUID		|
480 		IB_DEVICE_RC_RNR_NAK_GEN		|
481 		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
482 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
483 		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
484 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
485 		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
486 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
487 		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
488 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
489 		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
490 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
491 		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
492 	if (dev->dev->caps.max_gso_sz &&
493 	    (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
494 	    (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
495 		props->device_cap_flags |= IB_DEVICE_UD_TSO;
496 	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
497 		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
498 	if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
499 	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
500 	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
501 		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
502 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
503 		props->device_cap_flags |= IB_DEVICE_XRC;
504 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
505 		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW;
506 	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
507 		if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B)
508 			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
509 		else
510 			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
511 	}
512 	if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
513 		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
514 
515 	props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
516 
517 	props->vendor_id	   = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
518 		0xffffff;
519 	props->vendor_part_id	   = dev->dev->persist->pdev->device;
520 	props->hw_ver		   = be32_to_cpup((__be32 *) (out_mad->data + 32));
521 	memcpy(&props->sys_image_guid, out_mad->data +	4, 8);
522 
523 	props->max_mr_size	   = ~0ull;
524 	props->page_size_cap	   = dev->dev->caps.page_size_cap;
525 	props->max_qp		   = dev->dev->quotas.qp;
526 	props->max_qp_wr	   = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
527 	props->max_send_sge =
528 		min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
529 	props->max_recv_sge =
530 		min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
531 	props->max_sge_rd = MLX4_MAX_SGE_RD;
532 	props->max_cq		   = dev->dev->quotas.cq;
533 	props->max_cqe		   = dev->dev->caps.max_cqes;
534 	props->max_mr		   = dev->dev->quotas.mpt;
535 	props->max_pd		   = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
536 	props->max_qp_rd_atom	   = dev->dev->caps.max_qp_dest_rdma;
537 	props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
538 	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
539 	props->max_srq		   = dev->dev->quotas.srq;
540 	props->max_srq_wr	   = dev->dev->caps.max_srq_wqes - 1;
541 	props->max_srq_sge	   = dev->dev->caps.max_srq_sge;
542 	props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
543 	props->local_ca_ack_delay  = dev->dev->caps.local_ca_ack_delay;
544 	props->atomic_cap	   = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
545 		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
546 	props->masked_atomic_cap   = props->atomic_cap;
547 	props->max_pkeys	   = dev->dev->caps.pkey_table_len[1];
548 	props->max_mcast_grp	   = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
549 	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
550 	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
551 					   props->max_mcast_grp;
552 	props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
553 	props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
554 	props->timestamp_mask = 0xFFFFFFFFFFFFULL;
555 	props->max_ah = INT_MAX;
556 
557 	if (mlx4_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET ||
558 	    mlx4_ib_port_link_layer(ibdev, 2) == IB_LINK_LAYER_ETHERNET) {
559 		if (dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) {
560 			props->rss_caps.max_rwq_indirection_tables =
561 				props->max_qp;
562 			props->rss_caps.max_rwq_indirection_table_size =
563 				dev->dev->caps.max_rss_tbl_sz;
564 			props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
565 			props->max_wq_type_rq = props->max_qp;
566 		}
567 
568 		if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
569 			props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
570 	}
571 
572 	props->cq_caps.max_cq_moderation_count = MLX4_MAX_CQ_COUNT;
573 	props->cq_caps.max_cq_moderation_period = MLX4_MAX_CQ_PERIOD;
574 
575 	if (!mlx4_is_slave(dev->dev))
576 		err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
577 
578 	if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
579 		resp.response_length += sizeof(resp.hca_core_clock_offset);
580 		if (!err && !mlx4_is_slave(dev->dev)) {
581 			resp.comp_mask |= MLX4_IB_QUERY_DEV_RESP_MASK_CORE_CLOCK_OFFSET;
582 			resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
583 		}
584 	}
585 
586 	if (uhw->outlen >= resp.response_length +
587 	    sizeof(resp.max_inl_recv_sz)) {
588 		resp.response_length += sizeof(resp.max_inl_recv_sz);
589 		resp.max_inl_recv_sz  = dev->dev->caps.max_rq_sg *
590 			sizeof(struct mlx4_wqe_data_seg);
591 	}
592 
593 	if (field_avail(typeof(resp), rss_caps, uhw->outlen)) {
594 		if (props->rss_caps.supported_qpts) {
595 			resp.rss_caps.rx_hash_function =
596 				MLX4_IB_RX_HASH_FUNC_TOEPLITZ;
597 
598 			resp.rss_caps.rx_hash_fields_mask =
599 				MLX4_IB_RX_HASH_SRC_IPV4 |
600 				MLX4_IB_RX_HASH_DST_IPV4 |
601 				MLX4_IB_RX_HASH_SRC_IPV6 |
602 				MLX4_IB_RX_HASH_DST_IPV6 |
603 				MLX4_IB_RX_HASH_SRC_PORT_TCP |
604 				MLX4_IB_RX_HASH_DST_PORT_TCP |
605 				MLX4_IB_RX_HASH_SRC_PORT_UDP |
606 				MLX4_IB_RX_HASH_DST_PORT_UDP;
607 
608 			if (dev->dev->caps.tunnel_offload_mode ==
609 			    MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
610 				resp.rss_caps.rx_hash_fields_mask |=
611 					MLX4_IB_RX_HASH_INNER;
612 		}
613 		resp.response_length = offsetof(typeof(resp), rss_caps) +
614 				       sizeof(resp.rss_caps);
615 	}
616 
617 	if (field_avail(typeof(resp), tso_caps, uhw->outlen)) {
618 		if (dev->dev->caps.max_gso_sz &&
619 		    ((mlx4_ib_port_link_layer(ibdev, 1) ==
620 		    IB_LINK_LAYER_ETHERNET) ||
621 		    (mlx4_ib_port_link_layer(ibdev, 2) ==
622 		    IB_LINK_LAYER_ETHERNET))) {
623 			resp.tso_caps.max_tso = dev->dev->caps.max_gso_sz;
624 			resp.tso_caps.supported_qpts |=
625 				1 << IB_QPT_RAW_PACKET;
626 		}
627 		resp.response_length = offsetof(typeof(resp), tso_caps) +
628 				       sizeof(resp.tso_caps);
629 	}
630 
631 	if (uhw->outlen) {
632 		err = ib_copy_to_udata(uhw, &resp, resp.response_length);
633 		if (err)
634 			goto out;
635 	}
636 out:
637 	kfree(in_mad);
638 	kfree(out_mad);
639 
640 	return err;
641 }
642 
643 static enum rdma_link_layer
644 mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
645 {
646 	struct mlx4_dev *dev = to_mdev(device)->dev;
647 
648 	return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
649 		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
650 }
651 
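/*
 * Query port attributes for an InfiniBand link using a PortInfo MAD,
 * including detection of extended speeds (FDR/EDR) and FDR-10.
 */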
652 static int ib_link_query_port(struct ib_device *ibdev, u8 port,
653 			      struct ib_port_attr *props, int netw_view)
654 {
655 	struct ib_smp *in_mad  = NULL;
656 	struct ib_smp *out_mad = NULL;
657 	int ext_active_speed;
658 	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
659 	int err = -ENOMEM;
660 
661 	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
662 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
663 	if (!in_mad || !out_mad)
664 		goto out;
665 
666 	init_query_mad(in_mad);
667 	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
668 	in_mad->attr_mod = cpu_to_be32(port);
669 
670 	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
671 		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
672 
673 	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
674 				in_mad, out_mad);
675 	if (err)
676 		goto out;
677 
678 
679 	props->lid		= be16_to_cpup((__be16 *) (out_mad->data + 16));
680 	props->lmc		= out_mad->data[34] & 0x7;
681 	props->sm_lid		= be16_to_cpup((__be16 *) (out_mad->data + 18));
682 	props->sm_sl		= out_mad->data[36] & 0xf;
683 	props->state		= out_mad->data[32] & 0xf;
684 	props->phys_state	= out_mad->data[33] >> 4;
685 	props->port_cap_flags	= be32_to_cpup((__be32 *) (out_mad->data + 20));
686 	if (netw_view)
687 		props->gid_tbl_len = out_mad->data[50];
688 	else
689 		props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
690 	props->max_msg_sz	= to_mdev(ibdev)->dev->caps.max_msg_sz;
691 	props->pkey_tbl_len	= to_mdev(ibdev)->dev->caps.pkey_table_len[port];
692 	props->bad_pkey_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 46));
693 	props->qkey_viol_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 48));
694 	props->active_width	= out_mad->data[31] & 0xf;
695 	props->active_speed	= out_mad->data[35] >> 4;
696 	props->max_mtu		= out_mad->data[41] & 0xf;
697 	props->active_mtu	= out_mad->data[36] >> 4;
698 	props->subnet_timeout	= out_mad->data[51] & 0x1f;
699 	props->max_vl_num	= out_mad->data[37] >> 4;
700 	props->init_type_reply	= out_mad->data[41] >> 4;
701 
702 	/* Check if extended speeds (EDR/FDR/...) are supported */
703 	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
704 		ext_active_speed = out_mad->data[62] >> 4;
705 
706 		switch (ext_active_speed) {
707 		case 1:
708 			props->active_speed = IB_SPEED_FDR;
709 			break;
710 		case 2:
711 			props->active_speed = IB_SPEED_EDR;
712 			break;
713 		}
714 	}
715 
716 	/* If reported active speed is QDR, check if is FDR-10 */
717 	if (props->active_speed == IB_SPEED_QDR) {
718 		init_query_mad(in_mad);
719 		in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
720 		in_mad->attr_mod = cpu_to_be32(port);
721 
722 		err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
723 				   NULL, NULL, in_mad, out_mad);
724 		if (err)
725 			goto out;
726 
727 		/* Checking LinkSpeedActive for FDR-10 */
728 		if (out_mad->data[15] & 0x1)
729 			props->active_speed = IB_SPEED_FDR10;
730 	}
731 
732 	/* Avoid wrong speed value returned by FW if the IB link is down. */
733 	if (props->state == IB_PORT_DOWN)
734 		 props->active_speed = IB_SPEED_SDR;
735 
736 out:
737 	kfree(in_mad);
738 	kfree(out_mad);
739 	return err;
740 }
741 
742 static u8 state_to_phys_state(enum ib_port_state state)
743 {
744 	return state == IB_PORT_ACTIVE ?
745 		IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
746 }
747 
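/*
 * Query port attributes for an Ethernet (RoCE) link.  Width and speed
 * come from QUERY_PORT; state and active MTU are derived from the
 * associated net_device (or the bond master when bonded).
 */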
748 static int eth_link_query_port(struct ib_device *ibdev, u8 port,
749 			       struct ib_port_attr *props)
750 {
751 
752 	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
753 	struct mlx4_ib_iboe *iboe = &mdev->iboe;
754 	struct net_device *ndev;
755 	enum ib_mtu tmp;
756 	struct mlx4_cmd_mailbox *mailbox;
757 	int err = 0;
758 	int is_bonded = mlx4_is_bonded(mdev->dev);
759 
760 	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
761 	if (IS_ERR(mailbox))
762 		return PTR_ERR(mailbox);
763 
764 	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
765 			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
766 			   MLX4_CMD_WRAPPED);
767 	if (err)
768 		goto out;
769 
770 	props->active_width	=  (((u8 *)mailbox->buf)[5] == 0x40) ||
771 				   (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
772 					   IB_WIDTH_4X : IB_WIDTH_1X;
773 	props->active_speed	=  (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
774 					   IB_SPEED_FDR : IB_SPEED_QDR;
775 	props->port_cap_flags	= IB_PORT_CM_SUP;
776 	props->ip_gids = true;
777 	props->gid_tbl_len	= mdev->dev->caps.gid_table_len[port];
778 	props->max_msg_sz	= mdev->dev->caps.max_msg_sz;
779 	props->pkey_tbl_len	= 1;
780 	props->max_mtu		= IB_MTU_4096;
781 	props->max_vl_num	= 2;
782 	props->state		= IB_PORT_DOWN;
783 	props->phys_state	= state_to_phys_state(props->state);
784 	props->active_mtu	= IB_MTU_256;
785 	spin_lock_bh(&iboe->lock);
786 	ndev = iboe->netdevs[port - 1];
787 	if (ndev && is_bonded) {
788 		rcu_read_lock(); /* required to get upper dev */
789 		ndev = netdev_master_upper_dev_get_rcu(ndev);
790 		rcu_read_unlock();
791 	}
792 	if (!ndev)
793 		goto out_unlock;
794 
795 	tmp = iboe_get_mtu(ndev->mtu);
796 	props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;
797 
798 	props->state		= (netif_running(ndev) && netif_carrier_ok(ndev)) ?
799 					IB_PORT_ACTIVE : IB_PORT_DOWN;
800 	props->phys_state	= state_to_phys_state(props->state);
801 out_unlock:
802 	spin_unlock_bh(&iboe->lock);
803 out:
804 	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
805 	return err;
806 }
807 
808 int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
809 			 struct ib_port_attr *props, int netw_view)
810 {
811 	int err;
812 
813 	/* props is zeroed by the caller, so avoid zeroing it here */
814 
815 	err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
816 		ib_link_query_port(ibdev, port, props, netw_view) :
817 				eth_link_query_port(ibdev, port, props);
818 
819 	return err;
820 }
821 
822 static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
823 			      struct ib_port_attr *props)
824 {
825 	/* returns host view */
826 	return __mlx4_ib_query_port(ibdev, port, props, 0);
827 }
828 
829 int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
830 			union ib_gid *gid, int netw_view)
831 {
832 	struct ib_smp *in_mad  = NULL;
833 	struct ib_smp *out_mad = NULL;
834 	int err = -ENOMEM;
835 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
836 	int clear = 0;
837 	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
838 
839 	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
840 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
841 	if (!in_mad || !out_mad)
842 		goto out;
843 
844 	init_query_mad(in_mad);
845 	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
846 	in_mad->attr_mod = cpu_to_be32(port);
847 
848 	if (mlx4_is_mfunc(dev->dev) && netw_view)
849 		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
850 
851 	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
852 	if (err)
853 		goto out;
854 
855 	memcpy(gid->raw, out_mad->data + 8, 8);
856 
857 	if (mlx4_is_mfunc(dev->dev) && !netw_view) {
858 		if (index) {
859 			/* For any index > 0, return the null guid */
860 			err = 0;
861 			clear = 1;
862 			goto out;
863 		}
864 	}
865 
866 	init_query_mad(in_mad);
867 	in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
868 	in_mad->attr_mod = cpu_to_be32(index / 8);
869 
870 	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
871 			   NULL, NULL, in_mad, out_mad);
872 	if (err)
873 		goto out;
874 
875 	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
876 
877 out:
878 	if (clear)
879 		memset(gid->raw + 8, 0, 8);
880 	kfree(in_mad);
881 	kfree(out_mad);
882 	return err;
883 }
884 
885 static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
886 			     union ib_gid *gid)
887 {
888 	if (rdma_protocol_ib(ibdev, port))
889 		return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);
890 	return 0;
891 }
892 
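/*
 * Read the SL-to-VL mapping of a port with an SLtoVLMappingTable MAD and
 * pack the eight table bytes into a single u64.  Slaves simply report 0.
 */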
893 static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u8 port, u64 *sl2vl_tbl)
894 {
895 	union sl2vl_tbl_to_u64 sl2vl64;
896 	struct ib_smp *in_mad  = NULL;
897 	struct ib_smp *out_mad = NULL;
898 	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
899 	int err = -ENOMEM;
900 	int jj;
901 
902 	if (mlx4_is_slave(to_mdev(ibdev)->dev)) {
903 		*sl2vl_tbl = 0;
904 		return 0;
905 	}
906 
907 	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
908 	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
909 	if (!in_mad || !out_mad)
910 		goto out;
911 
912 	init_query_mad(in_mad);
913 	in_mad->attr_id  = IB_SMP_ATTR_SL_TO_VL_TABLE;
914 	in_mad->attr_mod = 0;
915 
916 	if (mlx4_is_mfunc(to_mdev(ibdev)->dev))
917 		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
918 
919 	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
920 			   in_mad, out_mad);
921 	if (err)
922 		goto out;
923 
924 	for (jj = 0; jj < 8; jj++)
925 		sl2vl64.sl8[jj] = ((struct ib_smp *)out_mad)->data[jj];
926 	*sl2vl_tbl = sl2vl64.sl64;
927 
928 out:
929 	kfree(in_mad);
930 	kfree(out_mad);
931 	return err;
932 }
933 
934 static void mlx4_init_sl2vl_tbl(struct mlx4_ib_dev *mdev)
935 {
936 	u64 sl2vl;
937 	int i;
938 	int err;
939 
940 	for (i = 1; i <= mdev->dev->caps.num_ports; i++) {
941 		if (mdev->dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
942 			continue;
943 		err = mlx4_ib_query_sl2vl(&mdev->ib_dev, i, &sl2vl);
944 		if (err) {
945 			pr_err("Unable to get default sl to vl mapping for port %d.  Using all zeroes (%d)\n",
946 			       i, err);
947 			sl2vl = 0;
948 		}
949 		atomic64_set(&mdev->sl2vl[i - 1], sl2vl);
950 	}
951 }
952 
953 int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
954 			 u16 *pkey, int netw_view)
955 {
956 	struct ib_smp *in_mad  = NULL;
957 	struct ib_smp *out_mad = NULL;
958 	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
959 	int err = -ENOMEM;
960 
961 	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
962 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
963 	if (!in_mad || !out_mad)
964 		goto out;
965 
966 	init_query_mad(in_mad);
967 	in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
968 	in_mad->attr_mod = cpu_to_be32(index / 32);
969 
970 	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
971 		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
972 
973 	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
974 			   in_mad, out_mad);
975 	if (err)
976 		goto out;
977 
978 	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);
979 
980 out:
981 	kfree(in_mad);
982 	kfree(out_mad);
983 	return err;
984 }
985 
986 static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
987 {
988 	return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
989 }
990 
991 static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
992 				 struct ib_device_modify *props)
993 {
994 	struct mlx4_cmd_mailbox *mailbox;
995 	unsigned long flags;
996 
997 	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
998 		return -EOPNOTSUPP;
999 
1000 	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
1001 		return 0;
1002 
1003 	if (mlx4_is_slave(to_mdev(ibdev)->dev))
1004 		return -EOPNOTSUPP;
1005 
1006 	spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
1007 	memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
1008 	spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);
1009 
1010 	/*
1011 	 * If possible, pass the node desc to FW so it can generate
1012 	 * a trap 144.  If the cmd fails, just ignore it.
1013 	 */
1014 	mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
1015 	if (IS_ERR(mailbox))
1016 		return 0;
1017 
1018 	memcpy(mailbox->buf, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
1019 	mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
1020 		 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1021 
1022 	mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);
1023 
1024 	return 0;
1025 }
1026 
1027 static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
1028 			    u32 cap_mask)
1029 {
1030 	struct mlx4_cmd_mailbox *mailbox;
1031 	int err;
1032 
1033 	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
1034 	if (IS_ERR(mailbox))
1035 		return PTR_ERR(mailbox);
1036 
1037 	if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
1038 		*(u8 *) mailbox->buf	     = !!reset_qkey_viols << 6;
1039 		((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
1040 	} else {
1041 		((u8 *) mailbox->buf)[3]     = !!reset_qkey_viols;
1042 		((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
1043 	}
1044 
1045 	err = mlx4_cmd(dev->dev, mailbox->dma, port, MLX4_SET_PORT_IB_OPCODE,
1046 		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
1047 		       MLX4_CMD_WRAPPED);
1048 
1049 	mlx4_free_cmd_mailbox(dev->dev, mailbox);
1050 	return err;
1051 }
1052 
1053 static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
1054 			       struct ib_port_modify *props)
1055 {
1056 	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
1057 	u8 is_eth = mdev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
1058 	struct ib_port_attr attr;
1059 	u32 cap_mask;
1060 	int err;
1061 
1062 	/* return OK if this is RoCE. CM calls ib_modify_port() regardless
1063 	 * of whether port link layer is ETH or IB. For ETH ports, qkey
1064 	 * violations and port capabilities are not meaningful.
1065 	 */
1066 	if (is_eth)
1067 		return 0;
1068 
1069 	mutex_lock(&mdev->cap_mask_mutex);
1070 
1071 	err = ib_query_port(ibdev, port, &attr);
1072 	if (err)
1073 		goto out;
1074 
1075 	cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
1076 		~props->clr_port_cap_mask;
1077 
1078 	err = mlx4_ib_SET_PORT(mdev, port,
1079 			       !!(mask & IB_PORT_RESET_QKEY_CNTR),
1080 			       cap_mask);
1081 
1082 out:
1083 	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
1084 	return err;
1085 }
1086 
1087 static int mlx4_ib_alloc_ucontext(struct ib_ucontext *uctx,
1088 				  struct ib_udata *udata)
1089 {
1090 	struct ib_device *ibdev = uctx->device;
1091 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
1092 	struct mlx4_ib_ucontext *context = to_mucontext(uctx);
1093 	struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
1094 	struct mlx4_ib_alloc_ucontext_resp resp;
1095 	int err;
1096 
1097 	if (!dev->ib_active)
1098 		return -EAGAIN;
1099 
1100 	if (ibdev->ops.uverbs_abi_ver ==
1101 	    MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
1102 		resp_v3.qp_tab_size      = dev->dev->caps.num_qps;
1103 		resp_v3.bf_reg_size      = dev->dev->caps.bf_reg_size;
1104 		resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
1105 	} else {
1106 		resp.dev_caps	      = dev->dev->caps.userspace_caps;
1107 		resp.qp_tab_size      = dev->dev->caps.num_qps;
1108 		resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
1109 		resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
1110 		resp.cqe_size	      = dev->dev->caps.cqe_size;
1111 	}
1112 
1113 	err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
1114 	if (err)
1115 		return err;
1116 
1117 	INIT_LIST_HEAD(&context->db_page_list);
1118 	mutex_init(&context->db_page_mutex);
1119 
1120 	INIT_LIST_HEAD(&context->wqn_ranges_list);
1121 	mutex_init(&context->wqn_ranges_mutex);
1122 
1123 	if (ibdev->ops.uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
1124 		err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
1125 	else
1126 		err = ib_copy_to_udata(udata, &resp, sizeof(resp));
1127 
1128 	if (err) {
1129 		mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
1130 		return -EFAULT;
1131 	}
1132 
1133 	return err;
1134 }
1135 
1136 static void mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
1137 {
1138 	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
1139 
1140 	mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
1141 }
1142 
1143 static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
1144 {
1145 }
1146 
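/*
 * Map user-accessible device pages: offset 0 is the context's UAR
 * doorbell page, offset 1 its blue-flame page, and offset 3 the internal
 * HCA core clock page (when exposed by firmware).
 */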
1147 static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
1148 {
1149 	struct mlx4_ib_dev *dev = to_mdev(context->device);
1150 
1151 	switch (vma->vm_pgoff) {
1152 	case 0:
1153 		return rdma_user_mmap_io(context, vma,
1154 					 to_mucontext(context)->uar.pfn,
1155 					 PAGE_SIZE,
1156 					 pgprot_noncached(vma->vm_page_prot),
1157 					 NULL);
1158 
1159 	case 1:
1160 		if (dev->dev->caps.bf_reg_size == 0)
1161 			return -EINVAL;
1162 		return rdma_user_mmap_io(
1163 			context, vma,
1164 			to_mucontext(context)->uar.pfn +
1165 				dev->dev->caps.num_uars,
1166 			PAGE_SIZE, pgprot_writecombine(vma->vm_page_prot),
1167 			NULL);
1168 
1169 	case 3: {
1170 		struct mlx4_clock_params params;
1171 		int ret;
1172 
1173 		ret = mlx4_get_internal_clock_params(dev->dev, &params);
1174 		if (ret)
1175 			return ret;
1176 
1177 		return rdma_user_mmap_io(
1178 			context, vma,
1179 			(pci_resource_start(dev->dev->persist->pdev,
1180 					    params.bar) +
1181 			 params.offset) >>
1182 				PAGE_SHIFT,
1183 			PAGE_SIZE, pgprot_noncached(vma->vm_page_prot),
1184 			NULL);
1185 	}
1186 
1187 	default:
1188 		return -EINVAL;
1189 	}
1190 }
1191 
1192 static int mlx4_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
1193 {
1194 	struct mlx4_ib_pd *pd = to_mpd(ibpd);
1195 	struct ib_device *ibdev = ibpd->device;
1196 	int err;
1197 
1198 	err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
1199 	if (err)
1200 		return err;
1201 
1202 	if (udata && ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) {
1203 		mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
1204 		return -EFAULT;
1205 	}
1206 	return 0;
1207 }
1208 
1209 static void mlx4_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
1210 {
1211 	mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
1212 }
1213 
1214 static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
1215 					  struct ib_udata *udata)
1216 {
1217 	struct mlx4_ib_xrcd *xrcd;
1218 	struct ib_cq_init_attr cq_attr = {};
1219 	int err;
1220 
1221 	if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
1222 		return ERR_PTR(-ENOSYS);
1223 
1224 	xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
1225 	if (!xrcd)
1226 		return ERR_PTR(-ENOMEM);
1227 
1228 	err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
1229 	if (err)
1230 		goto err1;
1231 
1232 	xrcd->pd = ib_alloc_pd(ibdev, 0);
1233 	if (IS_ERR(xrcd->pd)) {
1234 		err = PTR_ERR(xrcd->pd);
1235 		goto err2;
1236 	}
1237 
1238 	cq_attr.cqe = 1;
1239 	xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, &cq_attr);
1240 	if (IS_ERR(xrcd->cq)) {
1241 		err = PTR_ERR(xrcd->cq);
1242 		goto err3;
1243 	}
1244 
1245 	return &xrcd->ibxrcd;
1246 
1247 err3:
1248 	ib_dealloc_pd(xrcd->pd);
1249 err2:
1250 	mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
1251 err1:
1252 	kfree(xrcd);
1253 	return ERR_PTR(err);
1254 }
1255 
1256 static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
1257 {
1258 	ib_destroy_cq(to_mxrcd(xrcd)->cq);
1259 	ib_dealloc_pd(to_mxrcd(xrcd)->pd);
1260 	mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
1261 	kfree(xrcd);
1262 
1263 	return 0;
1264 }
1265 
1266 static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
1267 {
1268 	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1269 	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
1270 	struct mlx4_ib_gid_entry *ge;
1271 
1272 	ge = kzalloc(sizeof *ge, GFP_KERNEL);
1273 	if (!ge)
1274 		return -ENOMEM;
1275 
1276 	ge->gid = *gid;
1277 	if (mlx4_ib_add_mc(mdev, mqp, gid)) {
1278 		ge->port = mqp->port;
1279 		ge->added = 1;
1280 	}
1281 
1282 	mutex_lock(&mqp->mutex);
1283 	list_add_tail(&ge->list, &mqp->gid_list);
1284 	mutex_unlock(&mqp->mutex);
1285 
1286 	return 0;
1287 }
1288 
1289 static void mlx4_ib_delete_counters_table(struct mlx4_ib_dev *ibdev,
1290 					  struct mlx4_ib_counters *ctr_table)
1291 {
1292 	struct counter_index *counter, *tmp_count;
1293 
1294 	mutex_lock(&ctr_table->mutex);
1295 	list_for_each_entry_safe(counter, tmp_count, &ctr_table->counters_list,
1296 				 list) {
1297 		if (counter->allocated)
1298 			mlx4_counter_free(ibdev->dev, counter->index);
1299 		list_del(&counter->list);
1300 		kfree(counter);
1301 	}
1302 	mutex_unlock(&ctr_table->mutex);
1303 }
1304 
1305 int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
1306 		   union ib_gid *gid)
1307 {
1308 	struct net_device *ndev;
1309 	int ret = 0;
1310 
1311 	if (!mqp->port)
1312 		return 0;
1313 
1314 	spin_lock_bh(&mdev->iboe.lock);
1315 	ndev = mdev->iboe.netdevs[mqp->port - 1];
1316 	if (ndev)
1317 		dev_hold(ndev);
1318 	spin_unlock_bh(&mdev->iboe.lock);
1319 
1320 	if (ndev) {
1321 		ret = 1;
1322 		dev_put(ndev);
1323 	}
1324 
1325 	return ret;
1326 }
1327 
1328 struct mlx4_ib_steering {
1329 	struct list_head list;
1330 	struct mlx4_flow_reg_id reg_id;
1331 	union ib_gid gid;
1332 };
1333 
1334 #define LAST_ETH_FIELD vlan_tag
1335 #define LAST_IB_FIELD sl
1336 #define LAST_IPV4_FIELD dst_ip
1337 #define LAST_TCP_UDP_FIELD src_port
1338 
1339 /* 'field' is the last supported field; the macro is non-zero if anything beyond it is set */
1340 #define FIELDS_NOT_SUPPORTED(filter, field)\
1341 	memchr_inv((void *)&filter.field  +\
1342 		   sizeof(filter.field), 0,\
1343 		   sizeof(filter) -\
1344 		   offsetof(typeof(filter), field) -\
1345 		   sizeof(filter.field))
1346 
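/*
 * Translate a single ib_flow_spec into the mlx4 hardware steering rule
 * format.  Returns the number of bytes written to the rule buffer, or a
 * negative errno when the spec type or masked fields are not supported.
 */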
1347 static int parse_flow_attr(struct mlx4_dev *dev,
1348 			   u32 qp_num,
1349 			   union ib_flow_spec *ib_spec,
1350 			   struct _rule_hw *mlx4_spec)
1351 {
1352 	enum mlx4_net_trans_rule_id type;
1353 
1354 	switch (ib_spec->type) {
1355 	case IB_FLOW_SPEC_ETH:
1356 		if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
1357 			return -ENOTSUPP;
1358 
1359 		type = MLX4_NET_TRANS_RULE_ID_ETH;
1360 		memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
1361 		       ETH_ALEN);
1362 		memcpy(mlx4_spec->eth.dst_mac_msk, ib_spec->eth.mask.dst_mac,
1363 		       ETH_ALEN);
1364 		mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
1365 		mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
1366 		break;
1367 	case IB_FLOW_SPEC_IB:
1368 		if (FIELDS_NOT_SUPPORTED(ib_spec->ib.mask, LAST_IB_FIELD))
1369 			return -ENOTSUPP;
1370 
1371 		type = MLX4_NET_TRANS_RULE_ID_IB;
1372 		mlx4_spec->ib.l3_qpn =
1373 			cpu_to_be32(qp_num);
1374 		mlx4_spec->ib.qpn_mask =
1375 			cpu_to_be32(MLX4_IB_FLOW_QPN_MASK);
1376 		break;
1377 
1378 
1379 	case IB_FLOW_SPEC_IPV4:
1380 		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
1381 			return -ENOTSUPP;
1382 
1383 		type = MLX4_NET_TRANS_RULE_ID_IPV4;
1384 		mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
1385 		mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
1386 		mlx4_spec->ipv4.dst_ip = ib_spec->ipv4.val.dst_ip;
1387 		mlx4_spec->ipv4.dst_ip_msk = ib_spec->ipv4.mask.dst_ip;
1388 		break;
1389 
1390 	case IB_FLOW_SPEC_TCP:
1391 	case IB_FLOW_SPEC_UDP:
1392 		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, LAST_TCP_UDP_FIELD))
1393 			return -ENOTSUPP;
1394 
1395 		type = ib_spec->type == IB_FLOW_SPEC_TCP ?
1396 					MLX4_NET_TRANS_RULE_ID_TCP :
1397 					MLX4_NET_TRANS_RULE_ID_UDP;
1398 		mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port;
1399 		mlx4_spec->tcp_udp.dst_port_msk = ib_spec->tcp_udp.mask.dst_port;
1400 		mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port;
1401 		mlx4_spec->tcp_udp.src_port_msk = ib_spec->tcp_udp.mask.src_port;
1402 		break;
1403 
1404 	default:
1405 		return -EINVAL;
1406 	}
1407 	if (mlx4_map_sw_to_hw_steering_id(dev, type) < 0 ||
1408 	    mlx4_hw_rule_sz(dev, type) < 0)
1409 		return -EINVAL;
1410 	mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type));
1411 	mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2;
1412 	return mlx4_hw_rule_sz(dev, type);
1413 }
1414 
1415 struct default_rules {
1416 	__u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
1417 	__u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
1418 	__u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS];
1419 	__u8  link_layer;
1420 };
1421 static const struct default_rules default_table[] = {
1422 	{
1423 		.mandatory_fields = {IB_FLOW_SPEC_IPV4},
1424 		.mandatory_not_fields = {IB_FLOW_SPEC_ETH},
1425 		.rules_create_list = {IB_FLOW_SPEC_IB},
1426 		.link_layer = IB_LINK_LAYER_INFINIBAND
1427 	}
1428 };
1429 
1430 static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
1431 					 struct ib_flow_attr *flow_attr)
1432 {
1433 	int i, j, k;
1434 	void *ib_flow;
1435 	const struct default_rules *pdefault_rules = default_table;
1436 	u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);
1437 
1438 	for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) {
1439 		__u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];
1440 		memset(&field_types, 0, sizeof(field_types));
1441 
1442 		if (link_layer != pdefault_rules->link_layer)
1443 			continue;
1444 
1445 		ib_flow = flow_attr + 1;
1446 		/* we assume the specs are sorted */
1447 		for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS &&
1448 		     j < flow_attr->num_of_specs; k++) {
1449 			union ib_flow_spec *current_flow =
1450 				(union ib_flow_spec *)ib_flow;
1451 
1452 			/* same layer but different type */
1453 			if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) ==
1454 			     (pdefault_rules->mandatory_fields[k] &
1455 			      IB_FLOW_SPEC_LAYER_MASK)) &&
1456 			    (current_flow->type !=
1457 			     pdefault_rules->mandatory_fields[k]))
1458 				goto out;
1459 
1460 			/* same layer, try to match the next one */
1461 			if (current_flow->type ==
1462 			    pdefault_rules->mandatory_fields[k]) {
1463 				j++;
1464 				ib_flow +=
1465 					((union ib_flow_spec *)ib_flow)->size;
1466 			}
1467 		}
1468 
1469 		ib_flow = flow_attr + 1;
1470 		for (j = 0; j < flow_attr->num_of_specs;
1471 		     j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size)
1472 			for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++)
1473 				/* same layer and same type */
1474 				if (((union ib_flow_spec *)ib_flow)->type ==
1475 				    pdefault_rules->mandatory_not_fields[k])
1476 					goto out;
1477 
1478 		return i;
1479 	}
1480 out:
1481 	return -1;
1482 }
1483 
1484 static int __mlx4_ib_create_default_rules(
1485 		struct mlx4_ib_dev *mdev,
1486 		struct ib_qp *qp,
1487 		const struct default_rules *pdefault_rules,
1488 		struct _rule_hw *mlx4_spec) {
1489 	int size = 0;
1490 	int i;
1491 
1492 	for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
1493 		int ret;
1494 		union ib_flow_spec ib_spec;
1495 		switch (pdefault_rules->rules_create_list[i]) {
1496 		case 0:
1497 			/* no rule */
1498 			continue;
1499 		case IB_FLOW_SPEC_IB:
1500 			ib_spec.type = IB_FLOW_SPEC_IB;
1501 			ib_spec.size = sizeof(struct ib_flow_spec_ib);
1502 
1503 			break;
1504 		default:
1505 			/* invalid rule */
1506 			return -EINVAL;
1507 		}
1508 		/* We must put an empty rule here; the qpn is ignored */
1509 		ret = parse_flow_attr(mdev->dev, 0, &ib_spec,
1510 				      mlx4_spec);
1511 		if (ret < 0) {
1512 			pr_info("invalid parsing\n");
1513 			return -EINVAL;
1514 		}
1515 
1516 		mlx4_spec = (void *)mlx4_spec + ret;
1517 		size += ret;
1518 	}
1519 	return size;
1520 }
1521 
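/*
 * Build a complete hardware steering rule (control segment, optional
 * default specs and the user-supplied specs) in a command mailbox and
 * attach it with QP_FLOW_STEERING_ATTACH.  The returned reg_id is used
 * later to detach the rule.
 */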
1522 static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
1523 			  int domain,
1524 			  enum mlx4_net_trans_promisc_mode flow_type,
1525 			  u64 *reg_id)
1526 {
1527 	int ret, i;
1528 	int size = 0;
1529 	void *ib_flow;
1530 	struct mlx4_ib_dev *mdev = to_mdev(qp->device);
1531 	struct mlx4_cmd_mailbox *mailbox;
1532 	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
1533 	int default_flow;
1534 
1535 	static const u16 __mlx4_domain[] = {
1536 		[IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
1537 		[IB_FLOW_DOMAIN_ETHTOOL] = MLX4_DOMAIN_ETHTOOL,
1538 		[IB_FLOW_DOMAIN_RFS] = MLX4_DOMAIN_RFS,
1539 		[IB_FLOW_DOMAIN_NIC] = MLX4_DOMAIN_NIC,
1540 	};
1541 
1542 	if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
1543 		pr_err("Invalid priority value %d\n", flow_attr->priority);
1544 		return -EINVAL;
1545 	}
1546 
1547 	if (domain >= IB_FLOW_DOMAIN_NUM) {
1548 		pr_err("Invalid domain value %d\n", domain);
1549 		return -EINVAL;
1550 	}
1551 
1552 	if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
1553 		return -EINVAL;
1554 
1555 	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
1556 	if (IS_ERR(mailbox))
1557 		return PTR_ERR(mailbox);
1558 	ctrl = mailbox->buf;
1559 
1560 	ctrl->prio = cpu_to_be16(__mlx4_domain[domain] |
1561 				 flow_attr->priority);
1562 	ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
1563 	ctrl->port = flow_attr->port;
1564 	ctrl->qpn = cpu_to_be32(qp->qp_num);
1565 
1566 	ib_flow = flow_attr + 1;
1567 	size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
1568 	/* Add default flows */
1569 	default_flow = __mlx4_ib_default_rules_match(qp, flow_attr);
1570 	if (default_flow >= 0) {
1571 		ret = __mlx4_ib_create_default_rules(
1572 				mdev, qp, default_table + default_flow,
1573 				mailbox->buf + size);
1574 		if (ret < 0) {
1575 			mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1576 			return -EINVAL;
1577 		}
1578 		size += ret;
1579 	}
1580 	for (i = 0; i < flow_attr->num_of_specs; i++) {
1581 		ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow,
1582 				      mailbox->buf + size);
1583 		if (ret < 0) {
1584 			mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1585 			return -EINVAL;
1586 		}
1587 		ib_flow += ((union ib_flow_spec *) ib_flow)->size;
1588 		size += ret;
1589 	}
1590 
1591 	if (mlx4_is_master(mdev->dev) && flow_type == MLX4_FS_REGULAR &&
1592 	    flow_attr->num_of_specs == 1) {
1593 		struct _rule_hw *rule_header = (struct _rule_hw *)(ctrl + 1);
1594 		enum ib_flow_spec_type header_spec =
1595 			((union ib_flow_spec *)(flow_attr + 1))->type;
1596 
1597 		if (header_spec == IB_FLOW_SPEC_ETH)
1598 			mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
1599 	}
1600 
1601 	ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
1602 			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
1603 			   MLX4_CMD_NATIVE);
1604 	if (ret == -ENOMEM)
1605 		pr_err("mcg table is full. Failed to register network rule.\n");
1606 	else if (ret == -ENXIO)
1607 		pr_err("Device managed flow steering is disabled. Failed to register network rule.\n");
1608 	else if (ret)
1609 		pr_err("Invalid argument. Failed to register network rule.\n");
1610 
1611 	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1612 	return ret;
1613 }
1614 
1615 static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
1616 {
1617 	int err;
1618 	err = mlx4_cmd(dev, reg_id, 0, 0,
1619 		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
1620 		       MLX4_CMD_NATIVE);
1621 	if (err)
1622 		pr_err("Failed to detach network rule, registration id = 0x%llx\n",
1623 		       reg_id);
1624 	return err;
1625 }
1626 
1627 static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
1628 				    u64 *reg_id)
1629 {
1630 	void *ib_flow;
1631 	union ib_flow_spec *ib_spec;
1632 	struct mlx4_dev	*dev = to_mdev(qp->device)->dev;
1633 	int err = 0;
1634 
1635 	if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
1636 	    dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
1637 		return 0; /* do nothing */
1638 
1639 	ib_flow = flow_attr + 1;
1640 	ib_spec = (union ib_flow_spec *)ib_flow;
1641 
1642 	if (ib_spec->type !=  IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
1643 		return 0; /* do nothing */
1644 
1645 	err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
1646 				    flow_attr->port, qp->qp_num,
1647 				    MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
1648 				    reg_id);
1649 	return err;
1650 }
1651 
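/*
 * Map a "don't trap" flow attribute onto the MC/UC sniffer promiscuous
 * modes, based on the destination MAC mask of the single permitted
 * Ethernet spec (or both modes when no spec is given).
 */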
1652 static int mlx4_ib_add_dont_trap_rule(struct mlx4_dev *dev,
1653 				      struct ib_flow_attr *flow_attr,
1654 				      enum mlx4_net_trans_promisc_mode *type)
1655 {
1656 	int err = 0;
1657 
1658 	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER) ||
1659 	    (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) ||
1660 	    (flow_attr->num_of_specs > 1) || (flow_attr->priority != 0)) {
1661 		return -EOPNOTSUPP;
1662 	}
1663 
1664 	if (flow_attr->num_of_specs == 0) {
1665 		type[0] = MLX4_FS_MC_SNIFFER;
1666 		type[1] = MLX4_FS_UC_SNIFFER;
1667 	} else {
1668 		union ib_flow_spec *ib_spec;
1669 
1670 		ib_spec = (union ib_flow_spec *)(flow_attr + 1);
1671 		if (ib_spec->type !=  IB_FLOW_SPEC_ETH)
1672 			return -EINVAL;
1673 
1674 		/* if the mask is all zero, match both MC and UC */
1675 		if (is_zero_ether_addr(ib_spec->eth.mask.dst_mac)) {
1676 			type[0] = MLX4_FS_MC_SNIFFER;
1677 			type[1] = MLX4_FS_UC_SNIFFER;
1678 		} else {
1679 			u8 mac[ETH_ALEN] = {ib_spec->eth.mask.dst_mac[0] ^ 0x01,
1680 					    ib_spec->eth.mask.dst_mac[1],
1681 					    ib_spec->eth.mask.dst_mac[2],
1682 					    ib_spec->eth.mask.dst_mac[3],
1683 					    ib_spec->eth.mask.dst_mac[4],
1684 					    ib_spec->eth.mask.dst_mac[5]};
1685 
1686 			/* The xor above touched only the MC bit; a non-empty mask
1687 			 * is valid only if that bit is set and the rest are zero.
1688 			 */
1689 			if (!is_zero_ether_addr(&mac[0]))
1690 				return -EINVAL;
1691 
1692 			if (is_multicast_ether_addr(ib_spec->eth.val.dst_mac))
1693 				type[0] = MLX4_FS_MC_SNIFFER;
1694 			else
1695 				type[0] = MLX4_FS_UC_SNIFFER;
1696 		}
1697 	}
1698 
1699 	return err;
1700 }
1701 
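/*
 * create_flow verb: translate the flow attribute type into one or two
 * mlx4 steering modes, create the corresponding rules (plus mirror rules
 * on port 2 when the ports are bonded) and, for normal rules, an
 * optional VXLAN tunnel steering rule.
 */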
1702 static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
1703 				    struct ib_flow_attr *flow_attr,
1704 				    int domain, struct ib_udata *udata)
1705 {
1706 	int err = 0, i = 0, j = 0;
1707 	struct mlx4_ib_flow *mflow;
1708 	enum mlx4_net_trans_promisc_mode type[2];
1709 	struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
1710 	int is_bonded = mlx4_is_bonded(dev);
1711 
1712 	if (flow_attr->port < 1 || flow_attr->port > qp->device->phys_port_cnt)
1713 		return ERR_PTR(-EINVAL);
1714 
1715 	if (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP)
1716 		return ERR_PTR(-EOPNOTSUPP);
1717 
1718 	if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
1719 	    (flow_attr->type != IB_FLOW_ATTR_NORMAL))
1720 		return ERR_PTR(-EOPNOTSUPP);
1721 
1722 	if (udata &&
1723 	    udata->inlen && !ib_is_udata_cleared(udata, 0, udata->inlen))
1724 		return ERR_PTR(-EOPNOTSUPP);
1725 
1726 	memset(type, 0, sizeof(type));
1727 
1728 	mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
1729 	if (!mflow) {
1730 		err = -ENOMEM;
1731 		goto err_free;
1732 	}
1733 
1734 	switch (flow_attr->type) {
1735 	case IB_FLOW_ATTR_NORMAL:
1736 		/* If the don't-trap flag (continue match) is set, then under
1737 		 * specific conditions traffic is replicated to the given qp
1738 		 * without stealing it.
1739 		 */
1740 		if (unlikely(flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)) {
1741 			err = mlx4_ib_add_dont_trap_rule(dev,
1742 							 flow_attr,
1743 							 type);
1744 			if (err)
1745 				goto err_free;
1746 		} else {
1747 			type[0] = MLX4_FS_REGULAR;
1748 		}
1749 		break;
1750 
1751 	case IB_FLOW_ATTR_ALL_DEFAULT:
1752 		type[0] = MLX4_FS_ALL_DEFAULT;
1753 		break;
1754 
1755 	case IB_FLOW_ATTR_MC_DEFAULT:
1756 		type[0] = MLX4_FS_MC_DEFAULT;
1757 		break;
1758 
1759 	case IB_FLOW_ATTR_SNIFFER:
1760 		type[0] = MLX4_FS_MIRROR_RX_PORT;
1761 		type[1] = MLX4_FS_MIRROR_SX_PORT;
1762 		break;
1763 
1764 	default:
1765 		err = -EINVAL;
1766 		goto err_free;
1767 	}
1768 
1769 	while (i < ARRAY_SIZE(type) && type[i]) {
1770 		err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
1771 					    &mflow->reg_id[i].id);
1772 		if (err)
1773 			goto err_create_flow;
1774 		if (is_bonded) {
1775 			/* The application always sees one port, so the mirror
1776 			 * rule must be installed on port #2.
1777 			 */
1778 			flow_attr->port = 2;
1779 			err = __mlx4_ib_create_flow(qp, flow_attr,
1780 						    domain, type[j],
1781 						    &mflow->reg_id[j].mirror);
1782 			flow_attr->port = 1;
1783 			if (err)
1784 				goto err_create_flow;
1785 			j++;
1786 		}
1787 
1788 		i++;
1789 	}
1790 
1791 	if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
1792 		err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
1793 					       &mflow->reg_id[i].id);
1794 		if (err)
1795 			goto err_create_flow;
1796 
1797 		if (is_bonded) {
1798 			flow_attr->port = 2;
1799 			err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
1800 						       &mflow->reg_id[j].mirror);
1801 			flow_attr->port = 1;
1802 			if (err)
1803 				goto err_create_flow;
1804 			j++;
1805 		}
1806 		/* advance past the tunnel steering rule's reg_id entry */
1807 		i++;
1808 	}
1809 
1810 	return &mflow->ibflow;
1811 
1812 err_create_flow:
1813 	while (i) {
1814 		(void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
1815 					     mflow->reg_id[i].id);
1816 		i--;
1817 	}
1818 
1819 	while (j) {
1820 		(void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
1821 					     mflow->reg_id[j].mirror);
1822 		j--;
1823 	}
1824 err_free:
1825 	kfree(mflow);
1826 	return ERR_PTR(err);
1827 }
1828 
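/*
 * Destroy every steering rule (and bonded mirror rule) registered for
 * this flow.
 */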
1829 static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
1830 {
1831 	int err, ret = 0;
1832 	int i = 0;
1833 	struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
1834 	struct mlx4_ib_flow *mflow = to_mflow(flow_id);
1835 
1836 	while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) {
1837 		err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id);
1838 		if (err)
1839 			ret = err;
1840 		if (mflow->reg_id[i].mirror) {
1841 			err = __mlx4_ib_destroy_flow(mdev->dev,
1842 						     mflow->reg_id[i].mirror);
1843 			if (err)
1844 				ret = err;
1845 		}
1846 		i++;
1847 	}
1848 
1849 	kfree(mflow);
1850 	return ret;
1851 }
1852 
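/*
 * Attach the QP to a multicast group. Under device-managed steering the
 * registration id is kept on the QP's steering_rules list for later detach;
 * bonded devices also attach a mirror rule on the other port.
 */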
1853 static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1854 {
1855 	int err;
1856 	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
1857 	struct mlx4_dev	*dev = mdev->dev;
1858 	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1859 	struct mlx4_ib_steering *ib_steering = NULL;
1860 	enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
1861 	struct mlx4_flow_reg_id	reg_id;
1862 
1863 	if (mdev->dev->caps.steering_mode ==
1864 	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
1865 		ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
1866 		if (!ib_steering)
1867 			return -ENOMEM;
1868 	}
1869 
1870 	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
1871 				    !!(mqp->flags &
1872 				       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
1873 				    prot, &reg_id.id);
1874 	if (err) {
1875 		pr_err("multicast attach op failed, err %d\n", err);
1876 		goto err_malloc;
1877 	}
1878 
1879 	reg_id.mirror = 0;
1880 	if (mlx4_is_bonded(dev)) {
1881 		err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
1882 					    (mqp->port == 1) ? 2 : 1,
1883 					    !!(mqp->flags &
1884 					    MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
1885 					    prot, &reg_id.mirror);
1886 		if (err)
1887 			goto err_add;
1888 	}
1889 
1890 	err = add_gid_entry(ibqp, gid);
1891 	if (err)
1892 		goto err_add;
1893 
1894 	if (ib_steering) {
1895 		memcpy(ib_steering->gid.raw, gid->raw, 16);
1896 		ib_steering->reg_id = reg_id;
1897 		mutex_lock(&mqp->mutex);
1898 		list_add(&ib_steering->list, &mqp->steering_rules);
1899 		mutex_unlock(&mqp->mutex);
1900 	}
1901 	return 0;
1902 
1903 err_add:
1904 	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1905 			      prot, reg_id.id);
1906 	if (reg_id.mirror)
1907 		mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1908 				      prot, reg_id.mirror);
1909 err_malloc:
1910 	kfree(ib_steering);
1911 
1912 	return err;
1913 }
1914 
1915 static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
1916 {
1917 	struct mlx4_ib_gid_entry *ge;
1918 	struct mlx4_ib_gid_entry *tmp;
1919 	struct mlx4_ib_gid_entry *ret = NULL;
1920 
1921 	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
1922 		if (!memcmp(raw, ge->gid.raw, 16)) {
1923 			ret = ge;
1924 			break;
1925 		}
1926 	}
1927 
1928 	return ret;
1929 }
1930 
1931 static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1932 {
1933 	int err;
1934 	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
1935 	struct mlx4_dev *dev = mdev->dev;
1936 	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1937 	struct net_device *ndev;
1938 	struct mlx4_ib_gid_entry *ge;
1939 	struct mlx4_flow_reg_id reg_id = {0, 0};
1940 	enum mlx4_protocol prot =  MLX4_PROT_IB_IPV6;
1941 
1942 	if (mdev->dev->caps.steering_mode ==
1943 	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
1944 		struct mlx4_ib_steering *ib_steering;
1945 
1946 		mutex_lock(&mqp->mutex);
1947 		list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
1948 			if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
1949 				list_del(&ib_steering->list);
1950 				break;
1951 			}
1952 		}
1953 		mutex_unlock(&mqp->mutex);
1954 		if (&ib_steering->list == &mqp->steering_rules) {
1955 			pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
1956 			return -EINVAL;
1957 		}
1958 		reg_id = ib_steering->reg_id;
1959 		kfree(ib_steering);
1960 	}
1961 
1962 	err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1963 				    prot, reg_id.id);
1964 	if (err)
1965 		return err;
1966 
1967 	if (mlx4_is_bonded(dev)) {
1968 		err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1969 					    prot, reg_id.mirror);
1970 		if (err)
1971 			return err;
1972 	}
1973 
1974 	mutex_lock(&mqp->mutex);
1975 	ge = find_gid_entry(mqp, gid->raw);
1976 	if (ge) {
1977 		spin_lock_bh(&mdev->iboe.lock);
1978 		ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
1979 		if (ndev)
1980 			dev_hold(ndev);
1981 		spin_unlock_bh(&mdev->iboe.lock);
1982 		if (ndev)
1983 			dev_put(ndev);
1984 		list_del(&ge->list);
1985 		kfree(ge);
1986 	} else
1987 		pr_warn("could not find mgid entry\n");
1988 
1989 	mutex_unlock(&mqp->mutex);
1990 
1991 	return 0;
1992 }
1993 
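/*
 * Query NodeDescription and NodeInfo through MAD_IFC to fill in the IB
 * device's node_desc and node_guid and the HCA revision id.
 */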
1994 static int init_node_data(struct mlx4_ib_dev *dev)
1995 {
1996 	struct ib_smp *in_mad  = NULL;
1997 	struct ib_smp *out_mad = NULL;
1998 	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
1999 	int err = -ENOMEM;
2000 
2001 	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
2002 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
2003 	if (!in_mad || !out_mad)
2004 		goto out;
2005 
2006 	init_query_mad(in_mad);
2007 	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
2008 	if (mlx4_is_master(dev->dev))
2009 		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
2010 
2011 	err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
2012 	if (err)
2013 		goto out;
2014 
2015 	memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
2016 
2017 	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
2018 
2019 	err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
2020 	if (err)
2021 		goto out;
2022 
2023 	dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
2024 	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
2025 
2026 out:
2027 	kfree(in_mad);
2028 	kfree(out_mad);
2029 	return err;
2030 }
2031 
2032 static ssize_t hca_type_show(struct device *device,
2033 			     struct device_attribute *attr, char *buf)
2034 {
2035 	struct mlx4_ib_dev *dev =
2036 		rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
2037 	return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
2038 }
2039 static DEVICE_ATTR_RO(hca_type);
2040 
2041 static ssize_t hw_rev_show(struct device *device,
2042 			   struct device_attribute *attr, char *buf)
2043 {
2044 	struct mlx4_ib_dev *dev =
2045 		rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
2046 	return sprintf(buf, "%x\n", dev->dev->rev_id);
2047 }
2048 static DEVICE_ATTR_RO(hw_rev);
2049 
2050 static ssize_t board_id_show(struct device *device,
2051 			     struct device_attribute *attr, char *buf)
2052 {
2053 	struct mlx4_ib_dev *dev =
2054 		rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
2055 
2056 	return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
2057 		       dev->dev->board_id);
2058 }
2059 static DEVICE_ATTR_RO(board_id);
2060 
2061 static struct attribute *mlx4_class_attributes[] = {
2062 	&dev_attr_hw_rev.attr,
2063 	&dev_attr_hca_type.attr,
2064 	&dev_attr_board_id.attr,
2065 	NULL
2066 };
2067 
2068 static const struct attribute_group mlx4_attr_group = {
2069 	.attrs = mlx4_class_attributes,
2070 };
2071 
2072 struct diag_counter {
2073 	const char *name;
2074 	u32 offset;
2075 };
2076 
2077 #define DIAG_COUNTER(_name, _offset)			\
2078 	{ .name = #_name, .offset = _offset }
2079 
2080 static const struct diag_counter diag_basic[] = {
2081 	DIAG_COUNTER(rq_num_lle, 0x00),
2082 	DIAG_COUNTER(sq_num_lle, 0x04),
2083 	DIAG_COUNTER(rq_num_lqpoe, 0x08),
2084 	DIAG_COUNTER(sq_num_lqpoe, 0x0C),
2085 	DIAG_COUNTER(rq_num_lpe, 0x18),
2086 	DIAG_COUNTER(sq_num_lpe, 0x1C),
2087 	DIAG_COUNTER(rq_num_wrfe, 0x20),
2088 	DIAG_COUNTER(sq_num_wrfe, 0x24),
2089 	DIAG_COUNTER(sq_num_mwbe, 0x2C),
2090 	DIAG_COUNTER(sq_num_bre, 0x34),
2091 	DIAG_COUNTER(sq_num_rire, 0x44),
2092 	DIAG_COUNTER(rq_num_rire, 0x48),
2093 	DIAG_COUNTER(sq_num_rae, 0x4C),
2094 	DIAG_COUNTER(rq_num_rae, 0x50),
2095 	DIAG_COUNTER(sq_num_roe, 0x54),
2096 	DIAG_COUNTER(sq_num_tree, 0x5C),
2097 	DIAG_COUNTER(sq_num_rree, 0x64),
2098 	DIAG_COUNTER(rq_num_rnr, 0x68),
2099 	DIAG_COUNTER(sq_num_rnr, 0x6C),
2100 	DIAG_COUNTER(rq_num_oos, 0x100),
2101 	DIAG_COUNTER(sq_num_oos, 0x104),
2102 };
2103 
2104 static const struct diag_counter diag_ext[] = {
2105 	DIAG_COUNTER(rq_num_dup, 0x130),
2106 	DIAG_COUNTER(sq_num_to, 0x134),
2107 };
2108 
2109 static const struct diag_counter diag_device_only[] = {
2110 	DIAG_COUNTER(num_cqovf, 0x1A0),
2111 	DIAG_COUNTER(rq_num_udsdprd, 0x118),
2112 };
2113 
2114 static struct rdma_hw_stats *mlx4_ib_alloc_hw_stats(struct ib_device *ibdev,
2115 						    u8 port_num)
2116 {
2117 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
2118 	struct mlx4_ib_diag_counters *diag = dev->diag_counters;
2119 
2120 	if (!diag[!!port_num].name)
2121 		return NULL;
2122 
2123 	return rdma_alloc_hw_stats_struct(diag[!!port_num].name,
2124 					  diag[!!port_num].num_counters,
2125 					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
2126 }
2127 
2128 static int mlx4_ib_get_hw_stats(struct ib_device *ibdev,
2129 				struct rdma_hw_stats *stats,
2130 				u8 port, int index)
2131 {
2132 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
2133 	struct mlx4_ib_diag_counters *diag = dev->diag_counters;
2134 	u32 hw_value[ARRAY_SIZE(diag_device_only) +
2135 		ARRAY_SIZE(diag_ext) + ARRAY_SIZE(diag_basic)] = {};
2136 	int ret;
2137 	int i;
2138 
2139 	ret = mlx4_query_diag_counters(dev->dev,
2140 				       MLX4_OP_MOD_QUERY_TRANSPORT_CI_ERRORS,
2141 				       diag[!!port].offset, hw_value,
2142 				       diag[!!port].num_counters, port);
2143 
2144 	if (ret)
2145 		return ret;
2146 
2147 	for (i = 0; i < diag[!!port].num_counters; i++)
2148 		stats->value[i] = hw_value[i];
2149 
2150 	return diag[!!port].num_counters;
2151 }
2152 
2153 static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev,
2154 					 const char ***name,
2155 					 u32 **offset,
2156 					 u32 *num,
2157 					 bool port)
2158 {
2159 	u32 num_counters;
2160 
2161 	num_counters = ARRAY_SIZE(diag_basic);
2162 
2163 	if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT)
2164 		num_counters += ARRAY_SIZE(diag_ext);
2165 
2166 	if (!port)
2167 		num_counters += ARRAY_SIZE(diag_device_only);
2168 
2169 	*name = kcalloc(num_counters, sizeof(**name), GFP_KERNEL);
2170 	if (!*name)
2171 		return -ENOMEM;
2172 
2173 	*offset = kcalloc(num_counters, sizeof(**offset), GFP_KERNEL);
2174 	if (!*offset)
2175 		goto err_name;
2176 
2177 	*num = num_counters;
2178 
2179 	return 0;
2180 
2181 err_name:
2182 	kfree(*name);
2183 	return -ENOMEM;
2184 }
2185 
2186 static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev,
2187 				       const char **name,
2188 				       u32 *offset,
2189 				       bool port)
2190 {
2191 	int i;
2192 	int j;
2193 
2194 	for (i = 0, j = 0; i < ARRAY_SIZE(diag_basic); i++, j++) {
2195 		name[i] = diag_basic[i].name;
2196 		offset[i] = diag_basic[i].offset;
2197 	}
2198 
2199 	if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) {
2200 		for (i = 0; i < ARRAY_SIZE(diag_ext); i++, j++) {
2201 			name[j] = diag_ext[i].name;
2202 			offset[j] = diag_ext[i].offset;
2203 		}
2204 	}
2205 
2206 	if (!port) {
2207 		for (i = 0; i < ARRAY_SIZE(diag_device_only); i++, j++) {
2208 			name[j] = diag_device_only[i].name;
2209 			offset[j] = diag_device_only[i].offset;
2210 		}
2211 	}
2212 }
2213 
2214 static const struct ib_device_ops mlx4_ib_hw_stats_ops = {
2215 	.alloc_hw_stats = mlx4_ib_alloc_hw_stats,
2216 	.get_hw_stats = mlx4_ib_get_hw_stats,
2217 };
2218 
2219 static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
2220 {
2221 	struct mlx4_ib_diag_counters *diag = ibdev->diag_counters;
2222 	int i;
2223 	int ret;
2224 	bool per_port = !!(ibdev->dev->caps.flags2 &
2225 		MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT);
2226 
2227 	if (mlx4_is_slave(ibdev->dev))
2228 		return 0;
2229 
2230 	for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
2231 		/* i == 1 means we are building port counters */
2232 		if (i && !per_port)
2233 			continue;
2234 
2235 		ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].name,
2236 						    &diag[i].offset,
2237 						    &diag[i].num_counters, i);
2238 		if (ret)
2239 			goto err_alloc;
2240 
2241 		mlx4_ib_fill_diag_counters(ibdev, diag[i].name,
2242 					   diag[i].offset, i);
2243 	}
2244 
2245 	ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_hw_stats_ops);
2246 
2247 	return 0;
2248 
2249 err_alloc:
2250 	if (i) {
2251 		kfree(diag[i - 1].name);
2252 		kfree(diag[i - 1].offset);
2253 	}
2254 
2255 	return ret;
2256 }
2257 
2258 static void mlx4_ib_diag_cleanup(struct mlx4_ib_dev *ibdev)
2259 {
2260 	int i;
2261 
2262 	for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
2263 		kfree(ibdev->diag_counters[i].offset);
2264 		kfree(ibdev->diag_counters[i].name);
2265 	}
2266 }
2267 
2268 #define MLX4_IB_INVALID_MAC	((u64)-1)
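/*
 * Propagate a netdev MAC change: cache the new source MAC for the port and,
 * on a multi-function (SR-IOV) device, register it and update the proxy
 * QP1's primary path so it keeps a valid smac index.
 */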
2269 static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
2270 			       struct net_device *dev,
2271 			       int port)
2272 {
2273 	u64 new_smac = 0;
2274 	u64 release_mac = MLX4_IB_INVALID_MAC;
2275 	struct mlx4_ib_qp *qp;
2276 
2277 	read_lock(&dev_base_lock);
2278 	new_smac = mlx4_mac_to_u64(dev->dev_addr);
2279 	read_unlock(&dev_base_lock);
2280 
2281 	atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
2282 
2283 	/* no need to update QP1 or register the MAC when not in SR-IOV mode */
2284 	if (!mlx4_is_mfunc(ibdev->dev))
2285 		return;
2286 
2287 	mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
2288 	qp = ibdev->qp1_proxy[port - 1];
2289 	if (qp) {
2290 		int new_smac_index;
2291 		u64 old_smac;
2292 		struct mlx4_update_qp_params update_params;
2293 
2294 		mutex_lock(&qp->mutex);
2295 		old_smac = qp->pri.smac;
2296 		if (new_smac == old_smac)
2297 			goto unlock;
2298 
2299 		new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
2300 
2301 		if (new_smac_index < 0)
2302 			goto unlock;
2303 
2304 		update_params.smac_index = new_smac_index;
2305 		if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
2306 				   &update_params)) {
2307 			release_mac = new_smac;
2308 			goto unlock;
2309 		}
2310 		/* if the old port was zero, no MAC was registered for this QP yet */
2311 		if (qp->pri.smac_port)
2312 			release_mac = old_smac;
2313 		qp->pri.smac = new_smac;
2314 		qp->pri.smac_port = port;
2315 		qp->pri.smac_index = new_smac_index;
2316 	}
2317 
2318 unlock:
2319 	if (release_mac != MLX4_IB_INVALID_MAC)
2320 		mlx4_unregister_mac(ibdev->dev, port, release_mac);
2321 	if (qp)
2322 		mutex_unlock(&qp->mutex);
2323 	mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
2324 }
2325 
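/*
 * Netdev notifier helper (runs under RTNL): refresh the cached netdev for
 * each IBoE port, dispatch PORT_ACTIVE/PORT_ERR events on link state
 * changes and update QP1's smac when the address may have changed.
 */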
2326 static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
2327 				 struct net_device *dev,
2328 				 unsigned long event)
2329 
2330 {
2331 	struct mlx4_ib_iboe *iboe;
2332 	int update_qps_port = -1;
2333 	int port;
2334 
2335 	ASSERT_RTNL();
2336 
2337 	iboe = &ibdev->iboe;
2338 
2339 	spin_lock_bh(&iboe->lock);
2340 	mlx4_foreach_ib_transport_port(port, ibdev->dev) {
2341 
2342 		iboe->netdevs[port - 1] =
2343 			mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
2344 
2345 		if (dev == iboe->netdevs[port - 1] &&
2346 		    (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
2347 		     event == NETDEV_UP || event == NETDEV_CHANGE))
2348 			update_qps_port = port;
2349 
2350 		if (dev == iboe->netdevs[port - 1] &&
2351 		    (event == NETDEV_UP || event == NETDEV_DOWN)) {
2352 			enum ib_port_state port_state;
2353 			struct ib_event ibev = { };
2354 
2355 			if (ib_get_cached_port_state(&ibdev->ib_dev, port,
2356 						     &port_state))
2357 				continue;
2358 
2359 			if (event == NETDEV_UP &&
2360 			    (port_state != IB_PORT_ACTIVE ||
2361 			     iboe->last_port_state[port - 1] != IB_PORT_DOWN))
2362 				continue;
2363 			if (event == NETDEV_DOWN &&
2364 			    (port_state != IB_PORT_DOWN ||
2365 			     iboe->last_port_state[port - 1] != IB_PORT_ACTIVE))
2366 				continue;
2367 			iboe->last_port_state[port - 1] = port_state;
2368 
2369 			ibev.device = &ibdev->ib_dev;
2370 			ibev.element.port_num = port;
2371 			ibev.event = event == NETDEV_UP ? IB_EVENT_PORT_ACTIVE :
2372 							  IB_EVENT_PORT_ERR;
2373 			ib_dispatch_event(&ibev);
2374 		}
2375 
2376 	}
2377 	spin_unlock_bh(&iboe->lock);
2378 
2379 	if (update_qps_port > 0)
2380 		mlx4_ib_update_qps(ibdev, dev, update_qps_port);
2381 }
2382 
2383 static int mlx4_ib_netdev_event(struct notifier_block *this,
2384 				unsigned long event, void *ptr)
2385 {
2386 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2387 	struct mlx4_ib_dev *ibdev;
2388 
2389 	if (!net_eq(dev_net(dev), &init_net))
2390 		return NOTIFY_DONE;
2391 
2392 	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
2393 	mlx4_ib_scan_netdevs(ibdev, dev, event);
2394 
2395 	return NOTIFY_DONE;
2396 }
2397 
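/*
 * On the SR-IOV master: program each slave's virt2phys pkey mapping
 * (identity for the master function, pkey index 0 only for the others)
 * and seed the physical pkey cache.
 */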
2398 static void init_pkeys(struct mlx4_ib_dev *ibdev)
2399 {
2400 	int port;
2401 	int slave;
2402 	int i;
2403 
2404 	if (mlx4_is_master(ibdev->dev)) {
2405 		for (slave = 0; slave <= ibdev->dev->persist->num_vfs;
2406 		     ++slave) {
2407 			for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
2408 				for (i = 0;
2409 				     i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
2410 				     ++i) {
2411 					ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
2412 					/* master has the identity virt2phys pkey mapping */
2413 						(slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
2414 							ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
2415 					mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
2416 							     ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
2417 				}
2418 			}
2419 		}
2420 		/* initialize pkey cache */
2421 		for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
2422 			for (i = 0;
2423 			     i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
2424 			     ++i)
2425 				ibdev->pkeys.phys_pkey_cache[port-1][i] =
2426 					(i) ? 0 : 0xFFFF;
2427 		}
2428 	}
2429 }
2430 
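/*
 * Try to assign a dedicated set of completion EQs per port (skipping EQs
 * shared with port 1) and advertise the number actually assigned through
 * ib_dev.num_comp_vectors; remaining table slots are set to -1.
 */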
2431 static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
2432 {
2433 	int i, j, eq = 0, total_eqs = 0;
2434 
2435 	ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors,
2436 				  sizeof(ibdev->eq_table[0]), GFP_KERNEL);
2437 	if (!ibdev->eq_table)
2438 		return;
2439 
2440 	for (i = 1; i <= dev->caps.num_ports; i++) {
2441 		for (j = 0; j < mlx4_get_eqs_per_port(dev, i);
2442 		     j++, total_eqs++) {
2443 			if (i > 1 &&  mlx4_is_eq_shared(dev, total_eqs))
2444 				continue;
2445 			ibdev->eq_table[eq] = total_eqs;
2446 			if (!mlx4_assign_eq(dev, i,
2447 					    &ibdev->eq_table[eq]))
2448 				eq++;
2449 			else
2450 				ibdev->eq_table[eq] = -1;
2451 		}
2452 	}
2453 
2454 	for (i = eq; i < dev->caps.num_comp_vectors;
2455 	     ibdev->eq_table[i++] = -1)
2456 		;
2457 
2458 	/* Advertise the new number of EQs to clients */
2459 	ibdev->ib_dev.num_comp_vectors = eq;
2460 }
2461 
2462 static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
2463 {
2464 	int i;
2465 	int total_eqs = ibdev->ib_dev.num_comp_vectors;
2466 
2467 	/* no eqs were allocated */
2468 	if (!ibdev->eq_table)
2469 		return;
2470 
2471 	/* Reset the advertised EQ number */
2472 	ibdev->ib_dev.num_comp_vectors = 0;
2473 
2474 	for (i = 0; i < total_eqs; i++)
2475 		mlx4_release_eq(dev, ibdev->eq_table[i]);
2476 
2477 	kfree(ibdev->eq_table);
2478 	ibdev->eq_table = NULL;
2479 }
2480 
2481 static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
2482 			       struct ib_port_immutable *immutable)
2483 {
2484 	struct ib_port_attr attr;
2485 	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
2486 	int err;
2487 
2488 	if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND) {
2489 		immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
2490 		immutable->max_mad_size = IB_MGMT_MAD_SIZE;
2491 	} else {
2492 		if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)
2493 			immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
2494 		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
2495 			immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
2496 				RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
2497 		immutable->core_cap_flags |= RDMA_CORE_PORT_RAW_PACKET;
2498 		if (immutable->core_cap_flags & (RDMA_CORE_PORT_IBA_ROCE |
2499 		    RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP))
2500 			immutable->max_mad_size = IB_MGMT_MAD_SIZE;
2501 	}
2502 
2503 	err = ib_query_port(ibdev, port_num, &attr);
2504 	if (err)
2505 		return err;
2506 
2507 	immutable->pkey_tbl_len = attr.pkey_tbl_len;
2508 	immutable->gid_tbl_len = attr.gid_tbl_len;
2509 
2510 	return 0;
2511 }
2512 
2513 static void get_fw_ver_str(struct ib_device *device, char *str)
2514 {
2515 	struct mlx4_ib_dev *dev =
2516 		container_of(device, struct mlx4_ib_dev, ib_dev);
2517 	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d",
2518 		 (int) (dev->dev->caps.fw_ver >> 32),
2519 		 (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
2520 		 (int) dev->dev->caps.fw_ver & 0xffff);
2521 }
2522 
2523 static const struct ib_device_ops mlx4_ib_dev_ops = {
2524 	.owner = THIS_MODULE,
2525 	.driver_id = RDMA_DRIVER_MLX4,
2526 	.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION,
2527 
2528 	.add_gid = mlx4_ib_add_gid,
2529 	.alloc_mr = mlx4_ib_alloc_mr,
2530 	.alloc_pd = mlx4_ib_alloc_pd,
2531 	.alloc_ucontext = mlx4_ib_alloc_ucontext,
2532 	.attach_mcast = mlx4_ib_mcg_attach,
2533 	.create_ah = mlx4_ib_create_ah,
2534 	.create_cq = mlx4_ib_create_cq,
2535 	.create_qp = mlx4_ib_create_qp,
2536 	.create_srq = mlx4_ib_create_srq,
2537 	.dealloc_pd = mlx4_ib_dealloc_pd,
2538 	.dealloc_ucontext = mlx4_ib_dealloc_ucontext,
2539 	.del_gid = mlx4_ib_del_gid,
2540 	.dereg_mr = mlx4_ib_dereg_mr,
2541 	.destroy_ah = mlx4_ib_destroy_ah,
2542 	.destroy_cq = mlx4_ib_destroy_cq,
2543 	.destroy_qp = mlx4_ib_destroy_qp,
2544 	.destroy_srq = mlx4_ib_destroy_srq,
2545 	.detach_mcast = mlx4_ib_mcg_detach,
2546 	.disassociate_ucontext = mlx4_ib_disassociate_ucontext,
2547 	.drain_rq = mlx4_ib_drain_rq,
2548 	.drain_sq = mlx4_ib_drain_sq,
2549 	.get_dev_fw_str = get_fw_ver_str,
2550 	.get_dma_mr = mlx4_ib_get_dma_mr,
2551 	.get_link_layer = mlx4_ib_port_link_layer,
2552 	.get_netdev = mlx4_ib_get_netdev,
2553 	.get_port_immutable = mlx4_port_immutable,
2554 	.map_mr_sg = mlx4_ib_map_mr_sg,
2555 	.mmap = mlx4_ib_mmap,
2556 	.modify_cq = mlx4_ib_modify_cq,
2557 	.modify_device = mlx4_ib_modify_device,
2558 	.modify_port = mlx4_ib_modify_port,
2559 	.modify_qp = mlx4_ib_modify_qp,
2560 	.modify_srq = mlx4_ib_modify_srq,
2561 	.poll_cq = mlx4_ib_poll_cq,
2562 	.post_recv = mlx4_ib_post_recv,
2563 	.post_send = mlx4_ib_post_send,
2564 	.post_srq_recv = mlx4_ib_post_srq_recv,
2565 	.process_mad = mlx4_ib_process_mad,
2566 	.query_ah = mlx4_ib_query_ah,
2567 	.query_device = mlx4_ib_query_device,
2568 	.query_gid = mlx4_ib_query_gid,
2569 	.query_pkey = mlx4_ib_query_pkey,
2570 	.query_port = mlx4_ib_query_port,
2571 	.query_qp = mlx4_ib_query_qp,
2572 	.query_srq = mlx4_ib_query_srq,
2573 	.reg_user_mr = mlx4_ib_reg_user_mr,
2574 	.req_notify_cq = mlx4_ib_arm_cq,
2575 	.rereg_user_mr = mlx4_ib_rereg_user_mr,
2576 	.resize_cq = mlx4_ib_resize_cq,
2577 
2578 	INIT_RDMA_OBJ_SIZE(ib_ah, mlx4_ib_ah, ibah),
2579 	INIT_RDMA_OBJ_SIZE(ib_cq, mlx4_ib_cq, ibcq),
2580 	INIT_RDMA_OBJ_SIZE(ib_pd, mlx4_ib_pd, ibpd),
2581 	INIT_RDMA_OBJ_SIZE(ib_srq, mlx4_ib_srq, ibsrq),
2582 	INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx4_ib_ucontext, ibucontext),
2583 };
2584 
2585 static const struct ib_device_ops mlx4_ib_dev_wq_ops = {
2586 	.create_rwq_ind_table = mlx4_ib_create_rwq_ind_table,
2587 	.create_wq = mlx4_ib_create_wq,
2588 	.destroy_rwq_ind_table = mlx4_ib_destroy_rwq_ind_table,
2589 	.destroy_wq = mlx4_ib_destroy_wq,
2590 	.modify_wq = mlx4_ib_modify_wq,
2591 };
2592 
2593 static const struct ib_device_ops mlx4_ib_dev_fmr_ops = {
2594 	.alloc_fmr = mlx4_ib_fmr_alloc,
2595 	.dealloc_fmr = mlx4_ib_fmr_dealloc,
2596 	.map_phys_fmr = mlx4_ib_map_phys_fmr,
2597 	.unmap_fmr = mlx4_ib_unmap_fmr,
2598 };
2599 
2600 static const struct ib_device_ops mlx4_ib_dev_mw_ops = {
2601 	.alloc_mw = mlx4_ib_alloc_mw,
2602 	.dealloc_mw = mlx4_ib_dealloc_mw,
2603 };
2604 
2605 static const struct ib_device_ops mlx4_ib_dev_xrc_ops = {
2606 	.alloc_xrcd = mlx4_ib_alloc_xrcd,
2607 	.dealloc_xrcd = mlx4_ib_dealloc_xrcd,
2608 };
2609 
2610 static const struct ib_device_ops mlx4_ib_dev_fs_ops = {
2611 	.create_flow = mlx4_ib_create_flow,
2612 	.destroy_flow = mlx4_ib_destroy_flow,
2613 };
2614 
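/*
 * mlx4 core "add" callback: allocate and register the IB device, wiring up
 * verbs ops according to device capabilities (WQs, FMR, MW, XRC, flow
 * steering), per-port counters, the UC steering QPN range, diagnostic
 * counters, MAD/SR-IOV support and the netdev notifier.
 */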
2615 static void *mlx4_ib_add(struct mlx4_dev *dev)
2616 {
2617 	struct mlx4_ib_dev *ibdev;
2618 	int num_ports = 0;
2619 	int i, j;
2620 	int err;
2621 	struct mlx4_ib_iboe *iboe;
2622 	int ib_num_ports = 0;
2623 	int num_req_counters;
2624 	int allocated;
2625 	u32 counter_index;
2626 	struct counter_index *new_counter_index = NULL;
2627 
2628 	pr_info_once("%s", mlx4_ib_version);
2629 
2630 	num_ports = 0;
2631 	mlx4_foreach_ib_transport_port(i, dev)
2632 		num_ports++;
2633 
2634 	/* No point in registering a device with no ports... */
2635 	if (num_ports == 0)
2636 		return NULL;
2637 
2638 	ibdev = ib_alloc_device(mlx4_ib_dev, ib_dev);
2639 	if (!ibdev) {
2640 		dev_err(&dev->persist->pdev->dev,
2641 			"Device struct alloc failed\n");
2642 		return NULL;
2643 	}
2644 
2645 	iboe = &ibdev->iboe;
2646 
2647 	if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
2648 		goto err_dealloc;
2649 
2650 	if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
2651 		goto err_pd;
2652 
2653 	ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
2654 				 PAGE_SIZE);
2655 	if (!ibdev->uar_map)
2656 		goto err_uar;
2657 	MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
2658 
2659 	ibdev->dev = dev;
2660 	ibdev->bond_next_port	= 0;
2661 
2662 	ibdev->ib_dev.node_type		= RDMA_NODE_IB_CA;
2663 	ibdev->ib_dev.local_dma_lkey	= dev->caps.reserved_lkey;
2664 	ibdev->num_ports		= num_ports;
2665 	ibdev->ib_dev.phys_port_cnt     = mlx4_is_bonded(dev) ?
2666 						1 : ibdev->num_ports;
2667 	ibdev->ib_dev.num_comp_vectors	= dev->caps.num_comp_vectors;
2668 	ibdev->ib_dev.dev.parent	= &dev->persist->pdev->dev;
2669 
2670 	ibdev->ib_dev.uverbs_cmd_mask	=
2671 		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
2672 		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
2673 		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
2674 		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
2675 		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
2676 		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
2677 		(1ull << IB_USER_VERBS_CMD_REREG_MR)		|
2678 		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
2679 		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
2680 		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
2681 		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
2682 		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
2683 		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
2684 		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
2685 		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
2686 		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
2687 		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)	|
2688 		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST)	|
2689 		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
2690 		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
2691 		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
2692 		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)		|
2693 		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)		|
2694 		(1ull << IB_USER_VERBS_CMD_OPEN_QP);
2695 
2696 	ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_ops);
2697 	ibdev->ib_dev.uverbs_ex_cmd_mask |=
2698 		(1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ) |
2699 		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
2700 		(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
2701 		(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
2702 
2703 	if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) &&
2704 	    ((mlx4_ib_port_link_layer(&ibdev->ib_dev, 1) ==
2705 	    IB_LINK_LAYER_ETHERNET) ||
2706 	    (mlx4_ib_port_link_layer(&ibdev->ib_dev, 2) ==
2707 	    IB_LINK_LAYER_ETHERNET))) {
2708 		ibdev->ib_dev.uverbs_ex_cmd_mask |=
2709 			(1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ)	  |
2710 			(1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ)	  |
2711 			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ)	  |
2712 			(1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
2713 			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
2714 		ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_wq_ops);
2715 	}
2716 
2717 	if (!mlx4_is_slave(ibdev->dev))
2718 		ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_fmr_ops);
2719 
2720 	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
2721 	    dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
2722 		ibdev->ib_dev.uverbs_cmd_mask |=
2723 			(1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
2724 			(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
2725 		ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_mw_ops);
2726 	}
2727 
2728 	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
2729 		ibdev->ib_dev.uverbs_cmd_mask |=
2730 			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
2731 			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
2732 		ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_xrc_ops);
2733 	}
2734 
2735 	if (check_flow_steering_support(dev)) {
2736 		ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
2737 		ibdev->ib_dev.uverbs_ex_cmd_mask	|=
2738 			(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
2739 			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
2740 		ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_fs_ops);
2741 	}
2742 
2743 	if (!dev->caps.userspace_caps)
2744 		ibdev->ib_dev.ops.uverbs_abi_ver =
2745 			MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
2746 
2747 	mlx4_ib_alloc_eqs(dev, ibdev);
2748 
2749 	spin_lock_init(&iboe->lock);
2750 
2751 	if (init_node_data(ibdev))
2752 		goto err_map;
2753 	mlx4_init_sl2vl_tbl(ibdev);
2754 
2755 	for (i = 0; i < ibdev->num_ports; ++i) {
2756 		mutex_init(&ibdev->counters_table[i].mutex);
2757 		INIT_LIST_HEAD(&ibdev->counters_table[i].counters_list);
2758 		iboe->last_port_state[i] = IB_PORT_DOWN;
2759 	}
2760 
2761 	num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
2762 	for (i = 0; i < num_req_counters; ++i) {
2763 		mutex_init(&ibdev->qp1_proxy_lock[i]);
2764 		allocated = 0;
2765 		if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
2766 						IB_LINK_LAYER_ETHERNET) {
2767 			err = mlx4_counter_alloc(ibdev->dev, &counter_index,
2768 						 MLX4_RES_USAGE_DRIVER);
2769 			/* if allocating a new counter failed, use the default one */
2770 			if (err)
2771 				counter_index =
2772 					mlx4_get_default_counter_index(dev,
2773 								       i + 1);
2774 			else
2775 				allocated = 1;
2776 		} else { /* IB_LINK_LAYER_INFINIBAND use the default counter */
2777 			counter_index = mlx4_get_default_counter_index(dev,
2778 								       i + 1);
2779 		}
2780 		new_counter_index = kmalloc(sizeof(*new_counter_index),
2781 					    GFP_KERNEL);
2782 		if (!new_counter_index) {
2783 			if (allocated)
2784 				mlx4_counter_free(ibdev->dev, counter_index);
2785 			goto err_counter;
2786 		}
2787 		new_counter_index->index = counter_index;
2788 		new_counter_index->allocated = allocated;
2789 		list_add_tail(&new_counter_index->list,
2790 			      &ibdev->counters_table[i].counters_list);
2791 		ibdev->counters_table[i].default_counter = counter_index;
2792 		pr_info("counter index %d for port %d allocated %d\n",
2793 			counter_index, i + 1, allocated);
2794 	}
2795 	if (mlx4_is_bonded(dev))
2796 		for (i = 1; i < ibdev->num_ports ; ++i) {
2797 			new_counter_index =
2798 					kmalloc(sizeof(struct counter_index),
2799 						GFP_KERNEL);
2800 			if (!new_counter_index)
2801 				goto err_counter;
2802 			new_counter_index->index = counter_index;
2803 			new_counter_index->allocated = 0;
2804 			list_add_tail(&new_counter_index->list,
2805 				      &ibdev->counters_table[i].counters_list);
2806 			ibdev->counters_table[i].default_counter =
2807 								counter_index;
2808 		}
2809 
2810 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2811 		ib_num_ports++;
2812 
2813 	spin_lock_init(&ibdev->sm_lock);
2814 	mutex_init(&ibdev->cap_mask_mutex);
2815 	INIT_LIST_HEAD(&ibdev->qp_list);
2816 	spin_lock_init(&ibdev->reset_flow_resource_lock);
2817 
2818 	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
2819 	    ib_num_ports) {
2820 		ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
2821 		err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
2822 					    MLX4_IB_UC_STEER_QPN_ALIGN,
2823 					    &ibdev->steer_qpn_base, 0,
2824 					    MLX4_RES_USAGE_DRIVER);
2825 		if (err)
2826 			goto err_counter;
2827 
2828 		ibdev->ib_uc_qpns_bitmap =
2829 			kmalloc_array(BITS_TO_LONGS(ibdev->steer_qpn_count),
2830 				      sizeof(long),
2831 				      GFP_KERNEL);
2832 		if (!ibdev->ib_uc_qpns_bitmap)
2833 			goto err_steer_qp_release;
2834 
2835 		if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB) {
2836 			bitmap_zero(ibdev->ib_uc_qpns_bitmap,
2837 				    ibdev->steer_qpn_count);
2838 			err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
2839 					dev, ibdev->steer_qpn_base,
2840 					ibdev->steer_qpn_base +
2841 					ibdev->steer_qpn_count - 1);
2842 			if (err)
2843 				goto err_steer_free_bitmap;
2844 		} else {
2845 			bitmap_fill(ibdev->ib_uc_qpns_bitmap,
2846 				    ibdev->steer_qpn_count);
2847 		}
2848 	}
2849 
2850 	for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
2851 		atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
2852 
2853 	if (mlx4_ib_alloc_diag_counters(ibdev))
2854 		goto err_steer_free_bitmap;
2855 
2856 	rdma_set_device_sysfs_group(&ibdev->ib_dev, &mlx4_attr_group);
2857 	if (ib_register_device(&ibdev->ib_dev, "mlx4_%d"))
2858 		goto err_diag_counters;
2859 
2860 	if (mlx4_ib_mad_init(ibdev))
2861 		goto err_reg;
2862 
2863 	if (mlx4_ib_init_sriov(ibdev))
2864 		goto err_mad;
2865 
2866 	if (!iboe->nb.notifier_call) {
2867 		iboe->nb.notifier_call = mlx4_ib_netdev_event;
2868 		err = register_netdevice_notifier(&iboe->nb);
2869 		if (err) {
2870 			iboe->nb.notifier_call = NULL;
2871 			goto err_notif;
2872 		}
2873 	}
2874 	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
2875 		err = mlx4_config_roce_v2_port(dev, ROCE_V2_UDP_DPORT);
2876 		if (err)
2877 			goto err_notif;
2878 	}
2879 
2880 	ibdev->ib_active = true;
2881 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2882 		devlink_port_type_ib_set(mlx4_get_devlink_port(dev, i),
2883 					 &ibdev->ib_dev);
2884 
2885 	if (mlx4_is_mfunc(ibdev->dev))
2886 		init_pkeys(ibdev);
2887 
2888 	/* create paravirt contexts for any VFs which are active */
2889 	if (mlx4_is_master(ibdev->dev)) {
2890 		for (j = 0; j < MLX4_MFUNC_MAX; j++) {
2891 			if (j == mlx4_master_func_num(ibdev->dev))
2892 				continue;
2893 			if (mlx4_is_slave_active(ibdev->dev, j))
2894 				do_slave_init(ibdev, j, 1);
2895 		}
2896 	}
2897 	return ibdev;
2898 
2899 err_notif:
2900 	if (ibdev->iboe.nb.notifier_call) {
2901 		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2902 			pr_warn("failure unregistering notifier\n");
2903 		ibdev->iboe.nb.notifier_call = NULL;
2904 	}
2905 	flush_workqueue(wq);
2906 
2907 	mlx4_ib_close_sriov(ibdev);
2908 
2909 err_mad:
2910 	mlx4_ib_mad_cleanup(ibdev);
2911 
2912 err_reg:
2913 	ib_unregister_device(&ibdev->ib_dev);
2914 
2915 err_diag_counters:
2916 	mlx4_ib_diag_cleanup(ibdev);
2917 
2918 err_steer_free_bitmap:
2919 	kfree(ibdev->ib_uc_qpns_bitmap);
2920 
2921 err_steer_qp_release:
2922 	mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2923 			      ibdev->steer_qpn_count);
2924 err_counter:
2925 	for (i = 0; i < ibdev->num_ports; ++i)
2926 		mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);
2927 
2928 err_map:
2929 	mlx4_ib_free_eqs(dev, ibdev);
2930 	iounmap(ibdev->uar_map);
2931 
2932 err_uar:
2933 	mlx4_uar_free(dev, &ibdev->priv_uar);
2934 
2935 err_pd:
2936 	mlx4_pd_free(dev, ibdev->priv_pdn);
2937 
2938 err_dealloc:
2939 	ib_dealloc_device(&ibdev->ib_dev);
2940 
2941 	return NULL;
2942 }
2943 
2944 int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
2945 {
2946 	int offset;
2947 
2948 	WARN_ON(!dev->ib_uc_qpns_bitmap);
2949 
2950 	offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
2951 					 dev->steer_qpn_count,
2952 					 get_count_order(count));
2953 	if (offset < 0)
2954 		return offset;
2955 
2956 	*qpn = dev->steer_qpn_base + offset;
2957 	return 0;
2958 }
2959 
2960 void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
2961 {
2962 	if (!qpn ||
2963 	    dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED)
2964 		return;
2965 
2966 	if (WARN(qpn < dev->steer_qpn_base, "qpn = %u, steer_qpn_base = %u\n",
2967 		 qpn, dev->steer_qpn_base))
2968 		/* not supposed to be here */
2969 		return;
2970 
2971 	bitmap_release_region(dev->ib_uc_qpns_bitmap,
2972 			      qpn - dev->steer_qpn_base,
2973 			      get_count_order(count));
2974 }
2975 
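/*
 * Attach (is_attach != 0) or detach an empty IB L2 steering rule for the QP,
 * remembering the registration id in mqp->reg_id.
 */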
2976 int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
2977 			 int is_attach)
2978 {
2979 	int err;
2980 	size_t flow_size;
2981 	struct ib_flow_attr *flow = NULL;
2982 	struct ib_flow_spec_ib *ib_spec;
2983 
2984 	if (is_attach) {
2985 		flow_size = sizeof(struct ib_flow_attr) +
2986 			    sizeof(struct ib_flow_spec_ib);
2987 		flow = kzalloc(flow_size, GFP_KERNEL);
2988 		if (!flow)
2989 			return -ENOMEM;
2990 		flow->port = mqp->port;
2991 		flow->num_of_specs = 1;
2992 		flow->size = flow_size;
2993 		ib_spec = (struct ib_flow_spec_ib *)(flow + 1);
2994 		ib_spec->type = IB_FLOW_SPEC_IB;
2995 		ib_spec->size = sizeof(struct ib_flow_spec_ib);
2996 		/* Add an empty rule for IB L2 */
2997 		memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));
2998 
2999 		err = __mlx4_ib_create_flow(&mqp->ibqp, flow,
3000 					    IB_FLOW_DOMAIN_NIC,
3001 					    MLX4_FS_REGULAR,
3002 					    &mqp->reg_id);
3003 	} else {
3004 		err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
3005 	}
3006 	kfree(flow);
3007 	return err;
3008 }
3009 
3010 static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
3011 {
3012 	struct mlx4_ib_dev *ibdev = ibdev_ptr;
3013 	int p;
3014 	int i;
3015 
3016 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
3017 		devlink_port_type_clear(mlx4_get_devlink_port(dev, i));
3018 	ibdev->ib_active = false;
3019 	flush_workqueue(wq);
3020 
3021 	if (ibdev->iboe.nb.notifier_call) {
3022 		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
3023 			pr_warn("failure unregistering notifier\n");
3024 		ibdev->iboe.nb.notifier_call = NULL;
3025 	}
3026 
3027 	mlx4_ib_close_sriov(ibdev);
3028 	mlx4_ib_mad_cleanup(ibdev);
3029 	ib_unregister_device(&ibdev->ib_dev);
3030 	mlx4_ib_diag_cleanup(ibdev);
3031 
3032 	mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
3033 			      ibdev->steer_qpn_count);
3034 	kfree(ibdev->ib_uc_qpns_bitmap);
3035 
3036 	iounmap(ibdev->uar_map);
3037 	for (p = 0; p < ibdev->num_ports; ++p)
3038 		mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[p]);
3039 
3040 	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
3041 		mlx4_CLOSE_PORT(dev, p);
3042 
3043 	mlx4_ib_free_eqs(dev, ibdev);
3044 
3045 	mlx4_uar_free(dev, &ibdev->priv_uar);
3046 	mlx4_pd_free(dev, ibdev->priv_pdn);
3047 	ib_dealloc_device(&ibdev->ib_dev);
3048 }
3049 
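/*
 * On the SR-IOV master, queue per-port work items that initialize (do_init)
 * or tear down the paravirtualized tunnel QPs of the given slave.
 */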
3050 static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
3051 {
3052 	struct mlx4_ib_demux_work **dm = NULL;
3053 	struct mlx4_dev *dev = ibdev->dev;
3054 	int i;
3055 	unsigned long flags;
3056 	struct mlx4_active_ports actv_ports;
3057 	unsigned int ports;
3058 	unsigned int first_port;
3059 
3060 	if (!mlx4_is_master(dev))
3061 		return;
3062 
3063 	actv_ports = mlx4_get_active_ports(dev, slave);
3064 	ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
3065 	first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
3066 
3067 	dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
3068 	if (!dm)
3069 		return;
3070 
3071 	for (i = 0; i < ports; i++) {
3072 		dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
3073 		if (!dm[i]) {
3074 			while (--i >= 0)
3075 				kfree(dm[i]);
3076 			goto out;
3077 		}
3078 		INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
3079 		dm[i]->port = first_port + i + 1;
3080 		dm[i]->slave = slave;
3081 		dm[i]->do_init = do_init;
3082 		dm[i]->dev = ibdev;
3083 	}
3084 	/* initialize or tear down tunnel QPs for the slave */
3085 	spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
3086 	if (!ibdev->sriov.is_going_down) {
3087 		for (i = 0; i < ports; i++)
3088 			queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
3089 		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
3090 	} else {
3091 		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
3092 		for (i = 0; i < ports; i++)
3093 			kfree(dm[i]);
3094 	}
3095 out:
3096 	kfree(dm);
3097 	return;
3098 }
3099 
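/*
 * After a catastrophic error, walk all QPs and run the completion handler of
 * every CQ that still has outstanding work so it can be flushed.
 */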
3100 static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev)
3101 {
3102 	struct mlx4_ib_qp *mqp;
3103 	unsigned long flags_qp;
3104 	unsigned long flags_cq;
3105 	struct mlx4_ib_cq *send_mcq, *recv_mcq;
3106 	struct list_head    cq_notify_list;
3107 	struct mlx4_cq *mcq;
3108 	unsigned long flags;
3109 
3110 	pr_warn("mlx4_ib_handle_catas_error was started\n");
3111 	INIT_LIST_HEAD(&cq_notify_list);
3112 
3113 	/* Go over the QP list residing on this ibdev, synchronized with QP create/destroy. */
3114 	spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
3115 
3116 	list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
3117 		spin_lock_irqsave(&mqp->sq.lock, flags_qp);
3118 		if (mqp->sq.tail != mqp->sq.head) {
3119 			send_mcq = to_mcq(mqp->ibqp.send_cq);
3120 			spin_lock_irqsave(&send_mcq->lock, flags_cq);
3121 			if (send_mcq->mcq.comp &&
3122 			    mqp->ibqp.send_cq->comp_handler) {
3123 				if (!send_mcq->mcq.reset_notify_added) {
3124 					send_mcq->mcq.reset_notify_added = 1;
3125 					list_add_tail(&send_mcq->mcq.reset_notify,
3126 						      &cq_notify_list);
3127 				}
3128 			}
3129 			spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
3130 		}
3131 		spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
3132 		/* Now, handle the QP's receive queue */
3133 		spin_lock_irqsave(&mqp->rq.lock, flags_qp);
3134 		/* no handling is needed for SRQ */
3135 		if (!mqp->ibqp.srq) {
3136 			if (mqp->rq.tail != mqp->rq.head) {
3137 				recv_mcq = to_mcq(mqp->ibqp.recv_cq);
3138 				spin_lock_irqsave(&recv_mcq->lock, flags_cq);
3139 				if (recv_mcq->mcq.comp &&
3140 				    mqp->ibqp.recv_cq->comp_handler) {
3141 					if (!recv_mcq->mcq.reset_notify_added) {
3142 						recv_mcq->mcq.reset_notify_added = 1;
3143 						list_add_tail(&recv_mcq->mcq.reset_notify,
3144 							      &cq_notify_list);
3145 					}
3146 				}
3147 				spin_unlock_irqrestore(&recv_mcq->lock,
3148 						       flags_cq);
3149 			}
3150 		}
3151 		spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
3152 	}
3153 
3154 	list_for_each_entry(mcq, &cq_notify_list, reset_notify) {
3155 		mcq->comp(mcq);
3156 	}
3157 	spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
3158 	pr_warn("mlx4_ib_handle_catas_error ended\n");
3159 }
3160 
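/*
 * For a bonded device report the single exposed port as ACTIVE if any member
 * netdev is running with carrier, otherwise as DOWN, and dispatch the
 * matching IB port event.
 */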
3161 static void handle_bonded_port_state_event(struct work_struct *work)
3162 {
3163 	struct ib_event_work *ew =
3164 		container_of(work, struct ib_event_work, work);
3165 	struct mlx4_ib_dev *ibdev = ew->ib_dev;
3166 	enum ib_port_state bonded_port_state = IB_PORT_NOP;
3167 	int i;
3168 	struct ib_event ibev;
3169 
3170 	kfree(ew);
3171 	spin_lock_bh(&ibdev->iboe.lock);
3172 	for (i = 0; i < MLX4_MAX_PORTS; ++i) {
3173 		struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
3174 		enum ib_port_state curr_port_state;
3175 
3176 		if (!curr_netdev)
3177 			continue;
3178 
3179 		curr_port_state =
3180 			(netif_running(curr_netdev) &&
3181 			 netif_carrier_ok(curr_netdev)) ?
3182 			IB_PORT_ACTIVE : IB_PORT_DOWN;
3183 
3184 		bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ?
3185 			curr_port_state : IB_PORT_ACTIVE;
3186 	}
3187 	spin_unlock_bh(&ibdev->iboe.lock);
3188 
3189 	ibev.device = &ibdev->ib_dev;
3190 	ibev.element.port_num = 1;
3191 	ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ?
3192 		IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
3193 
3194 	ib_dispatch_event(&ibev);
3195 }
3196 
3197 void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port)
3198 {
3199 	u64 sl2vl;
3200 	int err;
3201 
3202 	err = mlx4_ib_query_sl2vl(&mdev->ib_dev, port, &sl2vl);
3203 	if (err) {
3204 		pr_err("Unable to get current sl to vl mapping for port %d.  Using all zeroes (%d)\n",
3205 		       port, err);
3206 		sl2vl = 0;
3207 	}
3208 	atomic64_set(&mdev->sl2vl[port - 1], sl2vl);
3209 }
3210 
3211 static void ib_sl2vl_update_work(struct work_struct *work)
3212 {
3213 	struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
3214 	struct mlx4_ib_dev *mdev = ew->ib_dev;
3215 	int port = ew->port;
3216 
3217 	mlx4_ib_sl2vl_update(mdev, port);
3218 
3219 	kfree(ew);
3220 }
3221 
3222 void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
3223 				     int port)
3224 {
3225 	struct ib_event_work *ew;
3226 
3227 	ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
3228 	if (ew) {
3229 		INIT_WORK(&ew->work, ib_sl2vl_update_work);
3230 		ew->port = port;
3231 		ew->ib_dev = ibdev;
3232 		queue_work(wq, &ew->work);
3233 	}
3234 }
3235 
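/*
 * mlx4 core event callback: translate device events (port up/down,
 * catastrophic error, port management change, slave init/shutdown) into IB
 * events or deferred work items.
 */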
3236 static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
3237 			  enum mlx4_dev_event event, unsigned long param)
3238 {
3239 	struct ib_event ibev;
3240 	struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
3241 	struct mlx4_eqe *eqe = NULL;
3242 	struct ib_event_work *ew;
3243 	int p = 0;
3244 
3245 	if (mlx4_is_bonded(dev) &&
3246 	    ((event == MLX4_DEV_EVENT_PORT_UP) ||
3247 	    (event == MLX4_DEV_EVENT_PORT_DOWN))) {
3248 		ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
3249 		if (!ew)
3250 			return;
3251 		INIT_WORK(&ew->work, handle_bonded_port_state_event);
3252 		ew->ib_dev = ibdev;
3253 		queue_work(wq, &ew->work);
3254 		return;
3255 	}
3256 
3257 	if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
3258 		eqe = (struct mlx4_eqe *)param;
3259 	else
3260 		p = (int) param;
3261 
3262 	switch (event) {
3263 	case MLX4_DEV_EVENT_PORT_UP:
3264 		if (p > ibdev->num_ports)
3265 			return;
3266 		if (!mlx4_is_slave(dev) &&
3267 		    rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
3268 			IB_LINK_LAYER_INFINIBAND) {
3269 			if (mlx4_is_master(dev))
3270 				mlx4_ib_invalidate_all_guid_record(ibdev, p);
3271 			if (ibdev->dev->flags & MLX4_FLAG_SECURE_HOST &&
3272 			    !(ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT))
3273 				mlx4_sched_ib_sl2vl_update_work(ibdev, p);
3274 		}
3275 		ibev.event = IB_EVENT_PORT_ACTIVE;
3276 		break;
3277 
3278 	case MLX4_DEV_EVENT_PORT_DOWN:
3279 		if (p > ibdev->num_ports)
3280 			return;
3281 		ibev.event = IB_EVENT_PORT_ERR;
3282 		break;
3283 
3284 	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
3285 		ibdev->ib_active = false;
3286 		ibev.event = IB_EVENT_DEVICE_FATAL;
3287 		mlx4_ib_handle_catas_error(ibdev);
3288 		break;
3289 
3290 	case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
3291 		ew = kmalloc(sizeof *ew, GFP_ATOMIC);
3292 		if (!ew)
3293 			break;
3294 
3295 		INIT_WORK(&ew->work, handle_port_mgmt_change_event);
3296 		memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
3297 		ew->ib_dev = ibdev;
3298 		/* need to queue only for port owner, which uses GEN_EQE */
3299 		if (mlx4_is_master(dev))
3300 			queue_work(wq, &ew->work);
3301 		else
3302 			handle_port_mgmt_change_event(&ew->work);
3303 		return;
3304 
3305 	case MLX4_DEV_EVENT_SLAVE_INIT:
3306 		/* here, p is the slave id */
3307 		do_slave_init(ibdev, p, 1);
3308 		if (mlx4_is_master(dev)) {
3309 			int i;
3310 
3311 			for (i = 1; i <= ibdev->num_ports; i++) {
3312 				if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
3313 					== IB_LINK_LAYER_INFINIBAND)
3314 					mlx4_ib_slave_alias_guid_event(ibdev,
3315 								       p, i,
3316 								       1);
3317 			}
3318 		}
3319 		return;
3320 
3321 	case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
3322 		if (mlx4_is_master(dev)) {
3323 			int i;
3324 
3325 			for (i = 1; i <= ibdev->num_ports; i++) {
3326 				if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
3327 					== IB_LINK_LAYER_INFINIBAND)
3328 					mlx4_ib_slave_alias_guid_event(ibdev,
3329 								       p, i,
3330 								       0);
3331 			}
3332 		}
3333 		/* here, p is the slave id */
3334 		do_slave_init(ibdev, p, 0);
3335 		return;
3336 
3337 	default:
3338 		return;
3339 	}
3340 
3341 	ibev.device	      = ibdev_ptr;
3342 	ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;
3343 
3344 	ib_dispatch_event(&ibev);
3345 }
3346 
3347 static struct mlx4_interface mlx4_ib_interface = {
3348 	.add		= mlx4_ib_add,
3349 	.remove		= mlx4_ib_remove,
3350 	.event		= mlx4_ib_event,
3351 	.protocol	= MLX4_PROT_IB_IPV6,
3352 	.flags		= MLX4_INTFF_BONDING
3353 };
3354 
3355 static int __init mlx4_ib_init(void)
3356 {
3357 	int err;
3358 
3359 	wq = alloc_ordered_workqueue("mlx4_ib", WQ_MEM_RECLAIM);
3360 	if (!wq)
3361 		return -ENOMEM;
3362 
3363 	err = mlx4_ib_mcg_init();
3364 	if (err)
3365 		goto clean_wq;
3366 
3367 	err = mlx4_register_interface(&mlx4_ib_interface);
3368 	if (err)
3369 		goto clean_mcg;
3370 
3371 	return 0;
3372 
3373 clean_mcg:
3374 	mlx4_ib_mcg_destroy();
3375 
3376 clean_wq:
3377 	destroy_workqueue(wq);
3378 	return err;
3379 }
3380 
3381 static void __exit mlx4_ib_cleanup(void)
3382 {
3383 	mlx4_unregister_interface(&mlx4_ib_interface);
3384 	mlx4_ib_mcg_destroy();
3385 	destroy_workqueue(wq);
3386 }
3387 
3388 module_init(mlx4_ib_init);
3389 module_exit(mlx4_ib_cleanup);
3390