xref: /openbmc/linux/drivers/infiniband/hw/mlx5/main.c (revision 4e95bc26)
1 /*
2  * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/debugfs.h>
34 #include <linux/highmem.h>
35 #include <linux/module.h>
36 #include <linux/init.h>
37 #include <linux/errno.h>
38 #include <linux/pci.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/bitmap.h>
42 #if defined(CONFIG_X86)
43 #include <asm/pat.h>
44 #endif
45 #include <linux/sched.h>
46 #include <linux/sched/mm.h>
47 #include <linux/sched/task.h>
48 #include <linux/delay.h>
49 #include <rdma/ib_user_verbs.h>
50 #include <rdma/ib_addr.h>
51 #include <rdma/ib_cache.h>
52 #include <linux/mlx5/port.h>
53 #include <linux/mlx5/vport.h>
54 #include <linux/mlx5/fs.h>
55 #include <linux/list.h>
56 #include <rdma/ib_smi.h>
57 #include <rdma/ib_umem.h>
58 #include <linux/in.h>
59 #include <linux/etherdevice.h>
60 #include "mlx5_ib.h"
61 #include "ib_rep.h"
62 #include "cmd.h"
63 #include "srq.h"
64 #include <linux/mlx5/fs_helpers.h>
65 #include <linux/mlx5/accel.h>
66 #include <rdma/uverbs_std_types.h>
67 #include <rdma/mlx5_user_ioctl_verbs.h>
68 #include <rdma/mlx5_user_ioctl_cmds.h>
69 
70 #define UVERBS_MODULE_NAME mlx5_ib
71 #include <rdma/uverbs_named_ioctl.h>
72 
73 #define DRIVER_NAME "mlx5_ib"
74 #define DRIVER_VERSION "5.0-0"
75 
76 MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
77 MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
78 MODULE_LICENSE("Dual BSD/GPL");
79 
80 static char mlx5_version[] =
81 	DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
82 	DRIVER_VERSION "\n";
83 
84 struct mlx5_ib_event_work {
85 	struct work_struct	work;
86 	union {
87 		struct mlx5_ib_dev	      *dev;
88 		struct mlx5_ib_multiport_info *mpi;
89 	};
90 	bool			is_slave;
91 	unsigned int		event;
92 	void			*param;
93 };
94 
95 enum {
96 	MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
97 };
98 
99 static struct workqueue_struct *mlx5_ib_event_wq;
100 static LIST_HEAD(mlx5_ib_unaffiliated_port_list);
101 static LIST_HEAD(mlx5_ib_dev_list);
102 /*
103  * This mutex should be held when accessing either of the above lists
104  */
105 static DEFINE_MUTEX(mlx5_ib_multiport_mutex);
106 
107 /* We can't use an array for xlt_emergency_page because dma_map_single
108  * doesn't work on kernel module memory.
109  */
110 static unsigned long xlt_emergency_page;
111 static struct mutex xlt_emergency_page_mutex;
112 
113 struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi)
114 {
115 	struct mlx5_ib_dev *dev;
116 
117 	mutex_lock(&mlx5_ib_multiport_mutex);
118 	dev = mpi->ibdev;
119 	mutex_unlock(&mlx5_ib_multiport_mutex);
120 	return dev;
121 }
122 
123 static enum rdma_link_layer
124 mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
125 {
126 	switch (port_type_cap) {
127 	case MLX5_CAP_PORT_TYPE_IB:
128 		return IB_LINK_LAYER_INFINIBAND;
129 	case MLX5_CAP_PORT_TYPE_ETH:
130 		return IB_LINK_LAYER_ETHERNET;
131 	default:
132 		return IB_LINK_LAYER_UNSPECIFIED;
133 	}
134 }
135 
136 static enum rdma_link_layer
137 mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num)
138 {
139 	struct mlx5_ib_dev *dev = to_mdev(device);
140 	int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);
141 
142 	return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
143 }
144 
145 static int get_port_state(struct ib_device *ibdev,
146 			  u8 port_num,
147 			  enum ib_port_state *state)
148 {
149 	struct ib_port_attr attr;
150 	int ret;
151 
152 	memset(&attr, 0, sizeof(attr));
153 	ret = ibdev->ops.query_port(ibdev, port_num, &attr);
154 	if (!ret)
155 		*state = attr.state;
156 	return ret;
157 }
158 
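/*
 * For switchdev representors: find the port whose representor netdev is
 * @ndev.  On success *port_num is set to the matching (1-based) IB port
 * number and a pointer to that port's RoCE state is returned; otherwise
 * NULL is returned.
 */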
159 static struct mlx5_roce *mlx5_get_rep_roce(struct mlx5_ib_dev *dev,
160 					   struct net_device *ndev,
161 					   u8 *port_num)
162 {
163 	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
164 	struct net_device *rep_ndev;
165 	struct mlx5_ib_port *port;
166 	int i;
167 
168 	for (i = 0; i < dev->num_ports; i++) {
169 		port  = &dev->port[i];
170 		if (!port->rep)
171 			continue;
172 
173 		read_lock(&port->roce.netdev_lock);
174 		rep_ndev = mlx5_ib_get_rep_netdev(esw,
175 						  port->rep->vport);
176 		if (rep_ndev == ndev) {
177 			read_unlock(&port->roce.netdev_lock);
178 			*port_num = i + 1;
179 			return &port->roce;
180 		}
181 		read_unlock(&port->roce.netdev_lock);
182 	}
183 
184 	return NULL;
185 }
186 
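/*
 * Netdev notifier callback for RoCE ports.  NETDEV_REGISTER/UNREGISTER
 * keep roce->netdev in sync with the underlying Ethernet device, while
 * NETDEV_CHANGE/UP/DOWN are translated into IB_EVENT_PORT_ACTIVE or
 * IB_EVENT_PORT_ERR (de-duplicated via roce->last_port_state).  When LAG
 * is active the bond master device is considered instead of the slave.
 */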
187 static int mlx5_netdev_event(struct notifier_block *this,
188 			     unsigned long event, void *ptr)
189 {
190 	struct mlx5_roce *roce = container_of(this, struct mlx5_roce, nb);
191 	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
192 	u8 port_num = roce->native_port_num;
193 	struct mlx5_core_dev *mdev;
194 	struct mlx5_ib_dev *ibdev;
195 
196 	ibdev = roce->dev;
197 	mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
198 	if (!mdev)
199 		return NOTIFY_DONE;
200 
201 	switch (event) {
202 	case NETDEV_REGISTER:
203 		/* Should already be registered during the load */
204 		if (ibdev->is_rep)
205 			break;
206 		write_lock(&roce->netdev_lock);
207 		if (ndev->dev.parent == mdev->device)
208 			roce->netdev = ndev;
209 		write_unlock(&roce->netdev_lock);
210 		break;
211 
212 	case NETDEV_UNREGISTER:
213 		/* In case of reps, ib device goes away before the netdevs */
214 		write_lock(&roce->netdev_lock);
215 		if (roce->netdev == ndev)
216 			roce->netdev = NULL;
217 		write_unlock(&roce->netdev_lock);
218 		break;
219 
220 	case NETDEV_CHANGE:
221 	case NETDEV_UP:
222 	case NETDEV_DOWN: {
223 		struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(mdev);
224 		struct net_device *upper = NULL;
225 
226 		if (lag_ndev) {
227 			upper = netdev_master_upper_dev_get(lag_ndev);
228 			dev_put(lag_ndev);
229 		}
230 
231 		if (ibdev->is_rep)
232 			roce = mlx5_get_rep_roce(ibdev, ndev, &port_num);
233 		if (!roce)
234 			return NOTIFY_DONE;
235 		if ((upper == ndev || (!upper && ndev == roce->netdev))
236 		    && ibdev->ib_active) {
237 			struct ib_event ibev = { };
238 			enum ib_port_state port_state;
239 
240 			if (get_port_state(&ibdev->ib_dev, port_num,
241 					   &port_state))
242 				goto done;
243 
244 			if (roce->last_port_state == port_state)
245 				goto done;
246 
247 			roce->last_port_state = port_state;
248 			ibev.device = &ibdev->ib_dev;
249 			if (port_state == IB_PORT_DOWN)
250 				ibev.event = IB_EVENT_PORT_ERR;
251 			else if (port_state == IB_PORT_ACTIVE)
252 				ibev.event = IB_EVENT_PORT_ACTIVE;
253 			else
254 				goto done;
255 
256 			ibev.element.port_num = port_num;
257 			ib_dispatch_event(&ibev);
258 		}
259 		break;
260 	}
261 
262 	default:
263 		break;
264 	}
265 done:
266 	mlx5_ib_put_native_port_mdev(ibdev, port_num);
267 	return NOTIFY_DONE;
268 }
269 
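/*
 * Return the net_device backing @port_num with a reference held (the
 * caller must dev_put() it).  The LAG RoCE netdev is preferred when LAG
 * is active; otherwise the per-port roce.netdev is used.
 */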
270 static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
271 					     u8 port_num)
272 {
273 	struct mlx5_ib_dev *ibdev = to_mdev(device);
274 	struct net_device *ndev;
275 	struct mlx5_core_dev *mdev;
276 
277 	mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
278 	if (!mdev)
279 		return NULL;
280 
281 	ndev = mlx5_lag_get_roce_netdev(mdev);
282 	if (ndev)
283 		goto out;
284 
285 	/* Ensure ndev does not disappear before we invoke dev_hold()
286 	 */
287 	read_lock(&ibdev->port[port_num - 1].roce.netdev_lock);
288 	ndev = ibdev->port[port_num - 1].roce.netdev;
289 	if (ndev)
290 		dev_hold(ndev);
291 	read_unlock(&ibdev->port[port_num - 1].roce.netdev_lock);
292 
293 out:
294 	mlx5_ib_put_native_port_mdev(ibdev, port_num);
295 	return ndev;
296 }
297 
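/*
 * mlx5_ib_get_native_port_mdev() - map an IB port number to the
 * mlx5_core_dev that natively owns it.
 *
 * For non-multiport setups (or IB link layer) this is simply ibdev->mdev.
 * For an affiliated multiport slave, the slave's mdev is returned and a
 * reference is taken on the multiport info, so every successful call must
 * be paired with mlx5_ib_put_native_port_mdev().  NULL is returned when
 * the port is not (yet) affiliated.  Illustrative caller pattern, as in
 * mlx5_query_port_roce() below:
 *
 *	mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, &mdev_port_num);
 *	if (!mdev)
 *		return -ENODEV;
 *	...
 *	mlx5_ib_put_native_port_mdev(ibdev, port_num);
 */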
298 struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
299 						   u8 ib_port_num,
300 						   u8 *native_port_num)
301 {
302 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
303 							  ib_port_num);
304 	struct mlx5_core_dev *mdev = NULL;
305 	struct mlx5_ib_multiport_info *mpi;
306 	struct mlx5_ib_port *port;
307 
308 	if (!mlx5_core_mp_enabled(ibdev->mdev) ||
309 	    ll != IB_LINK_LAYER_ETHERNET) {
310 		if (native_port_num)
311 			*native_port_num = ib_port_num;
312 		return ibdev->mdev;
313 	}
314 
315 	if (native_port_num)
316 		*native_port_num = 1;
317 
318 	port = &ibdev->port[ib_port_num - 1];
319 	if (!port)
320 		return NULL;
321 
322 	spin_lock(&port->mp.mpi_lock);
323 	mpi = ibdev->port[ib_port_num - 1].mp.mpi;
324 	if (mpi && !mpi->unaffiliate) {
325 		mdev = mpi->mdev;
326 		/* If it's the master there is no need to refcount; it'll
327 		 * exist as long as the ib_dev exists.
328 		 */
329 		if (!mpi->is_master)
330 			mpi->mdev_refcnt++;
331 	}
332 	spin_unlock(&port->mp.mpi_lock);
333 
334 	return mdev;
335 }
336 
337 void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u8 port_num)
338 {
339 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
340 							  port_num);
341 	struct mlx5_ib_multiport_info *mpi;
342 	struct mlx5_ib_port *port;
343 
344 	if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
345 		return;
346 
347 	port = &ibdev->port[port_num - 1];
348 
349 	spin_lock(&port->mp.mpi_lock);
350 	mpi = ibdev->port[port_num - 1].mp.mpi;
351 	if (mpi->is_master)
352 		goto out;
353 
354 	mpi->mdev_refcnt--;
355 	if (mpi->unaffiliate)
356 		complete(&mpi->unref_comp);
357 out:
358 	spin_unlock(&port->mp.mpi_lock);
359 }
360 
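/*
 * Translate the operational Ethernet protocol (PTYS eth_proto_oper bit)
 * into the equivalent IB speed/width pair reported through ib_port_attr,
 * e.g. the 100GBASE-*4 protocols map to 4X/EDR.  The *_ext variant below
 * handles the extended (50G-per-lane) protocol encoding.
 */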
361 static int translate_eth_legacy_proto_oper(u32 eth_proto_oper, u8 *active_speed,
362 					   u8 *active_width)
363 {
364 	switch (eth_proto_oper) {
365 	case MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII):
366 	case MLX5E_PROT_MASK(MLX5E_1000BASE_KX):
367 	case MLX5E_PROT_MASK(MLX5E_100BASE_TX):
368 	case MLX5E_PROT_MASK(MLX5E_1000BASE_T):
369 		*active_width = IB_WIDTH_1X;
370 		*active_speed = IB_SPEED_SDR;
371 		break;
372 	case MLX5E_PROT_MASK(MLX5E_10GBASE_T):
373 	case MLX5E_PROT_MASK(MLX5E_10GBASE_CX4):
374 	case MLX5E_PROT_MASK(MLX5E_10GBASE_KX4):
375 	case MLX5E_PROT_MASK(MLX5E_10GBASE_KR):
376 	case MLX5E_PROT_MASK(MLX5E_10GBASE_CR):
377 	case MLX5E_PROT_MASK(MLX5E_10GBASE_SR):
378 	case MLX5E_PROT_MASK(MLX5E_10GBASE_ER):
379 		*active_width = IB_WIDTH_1X;
380 		*active_speed = IB_SPEED_QDR;
381 		break;
382 	case MLX5E_PROT_MASK(MLX5E_25GBASE_CR):
383 	case MLX5E_PROT_MASK(MLX5E_25GBASE_KR):
384 	case MLX5E_PROT_MASK(MLX5E_25GBASE_SR):
385 		*active_width = IB_WIDTH_1X;
386 		*active_speed = IB_SPEED_EDR;
387 		break;
388 	case MLX5E_PROT_MASK(MLX5E_40GBASE_CR4):
389 	case MLX5E_PROT_MASK(MLX5E_40GBASE_KR4):
390 	case MLX5E_PROT_MASK(MLX5E_40GBASE_SR4):
391 	case MLX5E_PROT_MASK(MLX5E_40GBASE_LR4):
392 		*active_width = IB_WIDTH_4X;
393 		*active_speed = IB_SPEED_QDR;
394 		break;
395 	case MLX5E_PROT_MASK(MLX5E_50GBASE_CR2):
396 	case MLX5E_PROT_MASK(MLX5E_50GBASE_KR2):
397 	case MLX5E_PROT_MASK(MLX5E_50GBASE_SR2):
398 		*active_width = IB_WIDTH_1X;
399 		*active_speed = IB_SPEED_HDR;
400 		break;
401 	case MLX5E_PROT_MASK(MLX5E_56GBASE_R4):
402 		*active_width = IB_WIDTH_4X;
403 		*active_speed = IB_SPEED_FDR;
404 		break;
405 	case MLX5E_PROT_MASK(MLX5E_100GBASE_CR4):
406 	case MLX5E_PROT_MASK(MLX5E_100GBASE_SR4):
407 	case MLX5E_PROT_MASK(MLX5E_100GBASE_KR4):
408 	case MLX5E_PROT_MASK(MLX5E_100GBASE_LR4):
409 		*active_width = IB_WIDTH_4X;
410 		*active_speed = IB_SPEED_EDR;
411 		break;
412 	default:
413 		return -EINVAL;
414 	}
415 
416 	return 0;
417 }
418 
419 static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u8 *active_speed,
420 					u8 *active_width)
421 {
422 	switch (eth_proto_oper) {
423 	case MLX5E_PROT_MASK(MLX5E_SGMII_100M):
424 	case MLX5E_PROT_MASK(MLX5E_1000BASE_X_SGMII):
425 		*active_width = IB_WIDTH_1X;
426 		*active_speed = IB_SPEED_SDR;
427 		break;
428 	case MLX5E_PROT_MASK(MLX5E_5GBASE_R):
429 		*active_width = IB_WIDTH_1X;
430 		*active_speed = IB_SPEED_DDR;
431 		break;
432 	case MLX5E_PROT_MASK(MLX5E_10GBASE_XFI_XAUI_1):
433 		*active_width = IB_WIDTH_1X;
434 		*active_speed = IB_SPEED_QDR;
435 		break;
436 	case MLX5E_PROT_MASK(MLX5E_40GBASE_XLAUI_4_XLPPI_4):
437 		*active_width = IB_WIDTH_4X;
438 		*active_speed = IB_SPEED_QDR;
439 		break;
440 	case MLX5E_PROT_MASK(MLX5E_25GAUI_1_25GBASE_CR_KR):
441 		*active_width = IB_WIDTH_1X;
442 		*active_speed = IB_SPEED_EDR;
443 		break;
444 	case MLX5E_PROT_MASK(MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2):
445 		*active_width = IB_WIDTH_2X;
446 		*active_speed = IB_SPEED_EDR;
447 		break;
448 	case MLX5E_PROT_MASK(MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR):
449 		*active_width = IB_WIDTH_1X;
450 		*active_speed = IB_SPEED_HDR;
451 		break;
452 	case MLX5E_PROT_MASK(MLX5E_CAUI_4_100GBASE_CR4_KR4):
453 		*active_width = IB_WIDTH_4X;
454 		*active_speed = IB_SPEED_EDR;
455 		break;
456 	case MLX5E_PROT_MASK(MLX5E_100GAUI_2_100GBASE_CR2_KR2):
457 		*active_width = IB_WIDTH_2X;
458 		*active_speed = IB_SPEED_HDR;
459 		break;
460 	case MLX5E_PROT_MASK(MLX5E_200GAUI_4_200GBASE_CR4_KR4):
461 		*active_width = IB_WIDTH_4X;
462 		*active_speed = IB_SPEED_HDR;
463 		break;
464 	default:
465 		return -EINVAL;
466 	}
467 
468 	return 0;
469 }
470 
471 static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
472 				    u8 *active_width, bool ext)
473 {
474 	return ext ?
475 		translate_eth_ext_proto_oper(eth_proto_oper, active_speed,
476 					     active_width) :
477 		translate_eth_legacy_proto_oper(eth_proto_oper, active_speed,
478 						active_width);
479 }
480 
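/*
 * query_port() implementation for Ethernet/RoCE ports.  Link speed and
 * width are derived from the PTYS register, while state and MTU come from
 * the associated netdev (or its bond master when LAG is active).
 * phys_state uses the IB PortPhysicalState encoding: 3 (Disabled) until
 * the carrier is up, 5 (LinkUp) otherwise.
 */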
481 static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
482 				struct ib_port_attr *props)
483 {
484 	struct mlx5_ib_dev *dev = to_mdev(device);
485 	u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
486 	struct mlx5_core_dev *mdev;
487 	struct net_device *ndev, *upper;
488 	enum ib_mtu ndev_ib_mtu;
489 	bool put_mdev = true;
490 	u16 qkey_viol_cntr;
491 	u32 eth_prot_oper;
492 	u8 mdev_port_num;
493 	bool ext;
494 	int err;
495 
496 	mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
497 	if (!mdev) {
498 		/* This means the port isn't affiliated yet. Get the
499 		 * info for the master port instead.
500 		 */
501 		put_mdev = false;
502 		mdev = dev->mdev;
503 		mdev_port_num = 1;
504 		port_num = 1;
505 	}
506 
507 	/* Possible bad flows are checked before filling out props, so in case
508 	 * of an error it will still be zeroed out.
509 	 * Use the native port in case of reps.
510 	 */
511 	if (dev->is_rep)
512 		err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
513 					   1);
514 	else
515 		err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
516 					   mdev_port_num);
517 	if (err)
518 		goto out;
519 	ext = MLX5_CAP_PCAM_FEATURE(dev->mdev, ptys_extended_ethernet);
520 	eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);
521 
522 	props->active_width     = IB_WIDTH_4X;
523 	props->active_speed     = IB_SPEED_QDR;
524 
525 	translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
526 				 &props->active_width, ext);
527 
528 	props->port_cap_flags |= IB_PORT_CM_SUP;
529 	props->ip_gids = true;
530 
531 	props->gid_tbl_len      = MLX5_CAP_ROCE(dev->mdev,
532 						roce_address_table_size);
533 	props->max_mtu          = IB_MTU_4096;
534 	props->max_msg_sz       = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
535 	props->pkey_tbl_len     = 1;
536 	props->state            = IB_PORT_DOWN;
537 	props->phys_state       = 3;
538 
539 	mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr);
540 	props->qkey_viol_cntr = qkey_viol_cntr;
541 
542 	/* If this is a stub query for an unaffiliated port stop here */
543 	if (!put_mdev)
544 		goto out;
545 
546 	ndev = mlx5_ib_get_netdev(device, port_num);
547 	if (!ndev)
548 		goto out;
549 
550 	if (dev->lag_active) {
551 		rcu_read_lock();
552 		upper = netdev_master_upper_dev_get_rcu(ndev);
553 		if (upper) {
554 			dev_put(ndev);
555 			ndev = upper;
556 			dev_hold(ndev);
557 		}
558 		rcu_read_unlock();
559 	}
560 
561 	if (netif_running(ndev) && netif_carrier_ok(ndev)) {
562 		props->state      = IB_PORT_ACTIVE;
563 		props->phys_state = 5;
564 	}
565 
566 	ndev_ib_mtu = iboe_get_mtu(ndev->mtu);
567 
568 	dev_put(ndev);
569 
570 	props->active_mtu	= min(props->max_mtu, ndev_ib_mtu);
571 out:
572 	if (put_mdev)
573 		mlx5_ib_put_native_port_mdev(dev, port_num);
574 	return err;
575 }
576 
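/*
 * Program one RoCE GID table entry in the HCA.  The GID type selects the
 * RoCE version (v1 for IB-type GIDs, v2 for UDP-encapsulated GIDs, with
 * the L3 type chosen from the GID's IPv4-mapped/IPv6 form); MAC and VLAN
 * are taken from the GID attribute.  The del_gid path below passes a NULL
 * @gid to clear the entry.
 */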
577 static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
578 			 unsigned int index, const union ib_gid *gid,
579 			 const struct ib_gid_attr *attr)
580 {
581 	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
582 	u16 vlan_id = 0xffff;
583 	u8 roce_version = 0;
584 	u8 roce_l3_type = 0;
585 	u8 mac[ETH_ALEN];
586 	int ret;
587 
588 	if (gid) {
589 		gid_type = attr->gid_type;
590 		ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
591 		if (ret)
592 			return ret;
593 	}
594 
595 	switch (gid_type) {
596 	case IB_GID_TYPE_IB:
597 		roce_version = MLX5_ROCE_VERSION_1;
598 		break;
599 	case IB_GID_TYPE_ROCE_UDP_ENCAP:
600 		roce_version = MLX5_ROCE_VERSION_2;
601 		if (ipv6_addr_v4mapped((void *)gid))
602 			roce_l3_type = MLX5_ROCE_L3_TYPE_IPV4;
603 		else
604 			roce_l3_type = MLX5_ROCE_L3_TYPE_IPV6;
605 		break;
606 
607 	default:
608 		mlx5_ib_warn(dev, "Unexpected GID type %u\n", gid_type);
609 	}
610 
611 	return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
612 				      roce_l3_type, gid->raw, mac,
613 				      vlan_id < VLAN_CFI_MASK, vlan_id,
614 				      port_num);
615 }
616 
617 static int mlx5_ib_add_gid(const struct ib_gid_attr *attr,
618 			   __always_unused void **context)
619 {
620 	return set_roce_addr(to_mdev(attr->device), attr->port_num,
621 			     attr->index, &attr->gid, attr);
622 }
623 
624 static int mlx5_ib_del_gid(const struct ib_gid_attr *attr,
625 			   __always_unused void **context)
626 {
627 	return set_roce_addr(to_mdev(attr->device), attr->port_num,
628 			     attr->index, NULL, NULL);
629 }
630 
631 __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev,
632 			       const struct ib_gid_attr *attr)
633 {
634 	if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
635 		return 0;
636 
637 	return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
638 }
639 
640 static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
641 {
642 	if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
643 		return !MLX5_CAP_GEN(dev->mdev, ib_virt);
644 	return 0;
645 }
646 
647 enum {
648 	MLX5_VPORT_ACCESS_METHOD_MAD,
649 	MLX5_VPORT_ACCESS_METHOD_HCA,
650 	MLX5_VPORT_ACCESS_METHOD_NIC,
651 };
652 
653 static int mlx5_get_vport_access_method(struct ib_device *ibdev)
654 {
655 	if (mlx5_use_mad_ifc(to_mdev(ibdev)))
656 		return MLX5_VPORT_ACCESS_METHOD_MAD;
657 
658 	if (mlx5_ib_port_link_layer(ibdev, 1) ==
659 	    IB_LINK_LAYER_ETHERNET)
660 		return MLX5_VPORT_ACCESS_METHOD_NIC;
661 
662 	return MLX5_VPORT_ACCESS_METHOD_HCA;
663 }
664 
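/*
 * Report IB_ATOMIC_HCA only when the device supports both 8-byte
 * compare-and-swap and fetch-and-add for the given QP/DC atomic size mask
 * and can respond in host endianness; otherwise report IB_ATOMIC_NONE.
 */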
665 static void get_atomic_caps(struct mlx5_ib_dev *dev,
666 			    u8 atomic_size_qp,
667 			    struct ib_device_attr *props)
668 {
669 	u8 tmp;
670 	u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
671 	u8 atomic_req_8B_endianness_mode =
672 		MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianness_mode);
673 
674 	/* Check if HW supports 8-byte standard atomic operations and is
675 	 * capable of responding in host endianness.
676 	 */
677 	tmp = MLX5_ATOMIC_OPS_CMP_SWAP | MLX5_ATOMIC_OPS_FETCH_ADD;
678 	if (((atomic_operations & tmp) == tmp) &&
679 	    (atomic_size_qp & MLX5_ATOMIC_SIZE_QP_8BYTES) &&
680 	    (atomic_req_8B_endianness_mode)) {
681 		props->atomic_cap = IB_ATOMIC_HCA;
682 	} else {
683 		props->atomic_cap = IB_ATOMIC_NONE;
684 	}
685 }
686 
687 static void get_atomic_caps_qp(struct mlx5_ib_dev *dev,
688 			       struct ib_device_attr *props)
689 {
690 	u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
691 
692 	get_atomic_caps(dev, atomic_size_qp, props);
693 }
694 
695 static void get_atomic_caps_dc(struct mlx5_ib_dev *dev,
696 			       struct ib_device_attr *props)
697 {
698 	u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc);
699 
700 	get_atomic_caps(dev, atomic_size_qp, props);
701 }
702 
703 bool mlx5_ib_dc_atomic_is_supported(struct mlx5_ib_dev *dev)
704 {
705 	struct ib_device_attr props = {};
706 
707 	get_atomic_caps_dc(dev, &props);
708 	return props.atomic_cap == IB_ATOMIC_HCA;
709 }

710 static int mlx5_query_system_image_guid(struct ib_device *ibdev,
711 					__be64 *sys_image_guid)
712 {
713 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
714 	struct mlx5_core_dev *mdev = dev->mdev;
715 	u64 tmp;
716 	int err;
717 
718 	switch (mlx5_get_vport_access_method(ibdev)) {
719 	case MLX5_VPORT_ACCESS_METHOD_MAD:
720 		return mlx5_query_mad_ifc_system_image_guid(ibdev,
721 							    sys_image_guid);
722 
723 	case MLX5_VPORT_ACCESS_METHOD_HCA:
724 		err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
725 		break;
726 
727 	case MLX5_VPORT_ACCESS_METHOD_NIC:
728 		err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
729 		break;
730 
731 	default:
732 		return -EINVAL;
733 	}
734 
735 	if (!err)
736 		*sys_image_guid = cpu_to_be64(tmp);
737 
738 	return err;
739 
740 }
741 
742 static int mlx5_query_max_pkeys(struct ib_device *ibdev,
743 				u16 *max_pkeys)
744 {
745 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
746 	struct mlx5_core_dev *mdev = dev->mdev;
747 
748 	switch (mlx5_get_vport_access_method(ibdev)) {
749 	case MLX5_VPORT_ACCESS_METHOD_MAD:
750 		return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);
751 
752 	case MLX5_VPORT_ACCESS_METHOD_HCA:
753 	case MLX5_VPORT_ACCESS_METHOD_NIC:
754 		*max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
755 						pkey_table_size));
756 		return 0;
757 
758 	default:
759 		return -EINVAL;
760 	}
761 }
762 
763 static int mlx5_query_vendor_id(struct ib_device *ibdev,
764 				u32 *vendor_id)
765 {
766 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
767 
768 	switch (mlx5_get_vport_access_method(ibdev)) {
769 	case MLX5_VPORT_ACCESS_METHOD_MAD:
770 		return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);
771 
772 	case MLX5_VPORT_ACCESS_METHOD_HCA:
773 	case MLX5_VPORT_ACCESS_METHOD_NIC:
774 		return mlx5_core_query_vendor_id(dev->mdev, vendor_id);
775 
776 	default:
777 		return -EINVAL;
778 	}
779 }
780 
781 static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
782 				__be64 *node_guid)
783 {
784 	u64 tmp;
785 	int err;
786 
787 	switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
788 	case MLX5_VPORT_ACCESS_METHOD_MAD:
789 		return mlx5_query_mad_ifc_node_guid(dev, node_guid);
790 
791 	case MLX5_VPORT_ACCESS_METHOD_HCA:
792 		err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
793 		break;
794 
795 	case MLX5_VPORT_ACCESS_METHOD_NIC:
796 		err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp);
797 		break;
798 
799 	default:
800 		return -EINVAL;
801 	}
802 
803 	if (!err)
804 		*node_guid = cpu_to_be64(tmp);
805 
806 	return err;
807 }
808 
809 struct mlx5_reg_node_desc {
810 	u8	desc[IB_DEVICE_NODE_DESC_MAX];
811 };
812 
813 static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
814 {
815 	struct mlx5_reg_node_desc in;
816 
817 	if (mlx5_use_mad_ifc(dev))
818 		return mlx5_query_mad_ifc_node_desc(dev, node_desc);
819 
820 	memset(&in, 0, sizeof(in));
821 
822 	return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
823 				    sizeof(struct mlx5_reg_node_desc),
824 				    MLX5_REG_NODE_DESC, 0, 0);
825 }
826 
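/*
 * query_device() implementation.  Fills struct ib_device_attr from the
 * firmware capability bits (MLX5_CAP_*) and, when the caller provided a
 * response buffer (uhw->outlen != 0), also fills the mlx5-specific
 * extended capabilities (TSO, RSS, CQE compression, packet pacing,
 * striding RQ, tunnel offloads, ...), growing resp.response_length for
 * each field that fits in the user buffer.
 */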
827 static int mlx5_ib_query_device(struct ib_device *ibdev,
828 				struct ib_device_attr *props,
829 				struct ib_udata *uhw)
830 {
831 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
832 	struct mlx5_core_dev *mdev = dev->mdev;
833 	int err = -ENOMEM;
834 	int max_sq_desc;
835 	int max_rq_sg;
836 	int max_sq_sg;
837 	u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
838 	bool raw_support = !mlx5_core_mp_enabled(mdev);
839 	struct mlx5_ib_query_device_resp resp = {};
840 	size_t resp_len;
841 	u64 max_tso;
842 
843 	resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
844 	if (uhw->outlen && uhw->outlen < resp_len)
845 		return -EINVAL;
846 	else
847 		resp.response_length = resp_len;
848 
849 	if (uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
850 		return -EINVAL;
851 
852 	memset(props, 0, sizeof(*props));
853 	err = mlx5_query_system_image_guid(ibdev,
854 					   &props->sys_image_guid);
855 	if (err)
856 		return err;
857 
858 	err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys);
859 	if (err)
860 		return err;
861 
862 	err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
863 	if (err)
864 		return err;
865 
866 	props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
867 		(fw_rev_min(dev->mdev) << 16) |
868 		fw_rev_sub(dev->mdev);
869 	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
870 		IB_DEVICE_PORT_ACTIVE_EVENT		|
871 		IB_DEVICE_SYS_IMAGE_GUID		|
872 		IB_DEVICE_RC_RNR_NAK_GEN;
873 
874 	if (MLX5_CAP_GEN(mdev, pkv))
875 		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
876 	if (MLX5_CAP_GEN(mdev, qkv))
877 		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
878 	if (MLX5_CAP_GEN(mdev, apm))
879 		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
880 	if (MLX5_CAP_GEN(mdev, xrc))
881 		props->device_cap_flags |= IB_DEVICE_XRC;
882 	if (MLX5_CAP_GEN(mdev, imaicl)) {
883 		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW |
884 					   IB_DEVICE_MEM_WINDOW_TYPE_2B;
885 		props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
886 		/* We support 'Gappy' memory registration too */
887 		props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG;
888 	}
889 	props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
890 	if (MLX5_CAP_GEN(mdev, sho)) {
891 		props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER;
892 		/* At this stage no support for signature handover */
893 		props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
894 				      IB_PROT_T10DIF_TYPE_2 |
895 				      IB_PROT_T10DIF_TYPE_3;
896 		props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
897 				       IB_GUARD_T10DIF_CSUM;
898 	}
899 	if (MLX5_CAP_GEN(mdev, block_lb_mc))
900 		props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
901 
902 	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && raw_support) {
903 		if (MLX5_CAP_ETH(mdev, csum_cap)) {
904 			/* Legacy bit to support old userspace libraries */
905 			props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
906 			props->raw_packet_caps |= IB_RAW_PACKET_CAP_IP_CSUM;
907 		}
908 
909 		if (MLX5_CAP_ETH(dev->mdev, vlan_cap))
910 			props->raw_packet_caps |=
911 				IB_RAW_PACKET_CAP_CVLAN_STRIPPING;
912 
913 		if (field_avail(typeof(resp), tso_caps, uhw->outlen)) {
914 			max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
915 			if (max_tso) {
916 				resp.tso_caps.max_tso = 1 << max_tso;
917 				resp.tso_caps.supported_qpts |=
918 					1 << IB_QPT_RAW_PACKET;
919 				resp.response_length += sizeof(resp.tso_caps);
920 			}
921 		}
922 
923 		if (field_avail(typeof(resp), rss_caps, uhw->outlen)) {
924 			resp.rss_caps.rx_hash_function =
925 						MLX5_RX_HASH_FUNC_TOEPLITZ;
926 			resp.rss_caps.rx_hash_fields_mask =
927 						MLX5_RX_HASH_SRC_IPV4 |
928 						MLX5_RX_HASH_DST_IPV4 |
929 						MLX5_RX_HASH_SRC_IPV6 |
930 						MLX5_RX_HASH_DST_IPV6 |
931 						MLX5_RX_HASH_SRC_PORT_TCP |
932 						MLX5_RX_HASH_DST_PORT_TCP |
933 						MLX5_RX_HASH_SRC_PORT_UDP |
934 						MLX5_RX_HASH_DST_PORT_UDP |
935 						MLX5_RX_HASH_INNER;
936 			if (mlx5_accel_ipsec_device_caps(dev->mdev) &
937 			    MLX5_ACCEL_IPSEC_CAP_DEVICE)
938 				resp.rss_caps.rx_hash_fields_mask |=
939 					MLX5_RX_HASH_IPSEC_SPI;
940 			resp.response_length += sizeof(resp.rss_caps);
941 		}
942 	} else {
943 		if (field_avail(typeof(resp), tso_caps, uhw->outlen))
944 			resp.response_length += sizeof(resp.tso_caps);
945 		if (field_avail(typeof(resp), rss_caps, uhw->outlen))
946 			resp.response_length += sizeof(resp.rss_caps);
947 	}
948 
949 	if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
950 		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
951 		props->device_cap_flags |= IB_DEVICE_UD_TSO;
952 	}
953 
954 	if (MLX5_CAP_GEN(dev->mdev, rq_delay_drop) &&
955 	    MLX5_CAP_GEN(dev->mdev, general_notification_event) &&
956 	    raw_support)
957 		props->raw_packet_caps |= IB_RAW_PACKET_CAP_DELAY_DROP;
958 
959 	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
960 	    MLX5_CAP_IPOIB_ENHANCED(mdev, csum_cap))
961 		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
962 
963 	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
964 	    MLX5_CAP_ETH(dev->mdev, scatter_fcs) &&
965 	    raw_support) {
966 		/* Legacy bit to support old userspace libraries */
967 		props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
968 		props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
969 	}
970 
971 	if (MLX5_CAP_DEV_MEM(mdev, memic)) {
972 		props->max_dm_size =
973 			MLX5_CAP_DEV_MEM(mdev, max_memic_size);
974 	}
975 
976 	if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
977 		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
978 
979 	if (MLX5_CAP_GEN(mdev, end_pad))
980 		props->device_cap_flags |= IB_DEVICE_PCI_WRITE_END_PADDING;
981 
982 	props->vendor_part_id	   = mdev->pdev->device;
983 	props->hw_ver		   = mdev->pdev->revision;
984 
985 	props->max_mr_size	   = ~0ull;
986 	props->page_size_cap	   = ~(min_page_size - 1);
987 	props->max_qp		   = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
988 	props->max_qp_wr	   = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
989 	max_rq_sg =  MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
990 		     sizeof(struct mlx5_wqe_data_seg);
991 	max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
992 	max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) -
993 		     sizeof(struct mlx5_wqe_raddr_seg)) /
994 		sizeof(struct mlx5_wqe_data_seg);
995 	props->max_send_sge = max_sq_sg;
996 	props->max_recv_sge = max_rq_sg;
997 	props->max_sge_rd	   = MLX5_MAX_SGE_RD;
998 	props->max_cq		   = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
999 	props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
1000 	props->max_mr		   = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
1001 	props->max_pd		   = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
1002 	props->max_qp_rd_atom	   = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
1003 	props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
1004 	props->max_srq		   = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
1005 	props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
1006 	props->local_ca_ack_delay  = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
1007 	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
1008 	props->max_srq_sge	   = max_rq_sg - 1;
1009 	props->max_fast_reg_page_list_len =
1010 		1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
1011 	get_atomic_caps_qp(dev, props);
1012 	props->masked_atomic_cap   = IB_ATOMIC_NONE;
1013 	props->max_mcast_grp	   = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
1014 	props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
1015 	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
1016 					   props->max_mcast_grp;
1017 	props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
1018 	props->max_ah = INT_MAX;
1019 	props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
1020 	props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;
1021 
1022 	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
1023 		if (MLX5_CAP_GEN(mdev, pg))
1024 			props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
1025 		props->odp_caps = dev->odp_caps;
1026 	}
1027 
1028 	if (MLX5_CAP_GEN(mdev, cd))
1029 		props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;
1030 
1031 	if (!mlx5_core_is_pf(mdev))
1032 		props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;
1033 
1034 	if (mlx5_ib_port_link_layer(ibdev, 1) ==
1035 	    IB_LINK_LAYER_ETHERNET && raw_support) {
1036 		props->rss_caps.max_rwq_indirection_tables =
1037 			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
1038 		props->rss_caps.max_rwq_indirection_table_size =
1039 			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size);
1040 		props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
1041 		props->max_wq_type_rq =
1042 			1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
1043 	}
1044 
1045 	if (MLX5_CAP_GEN(mdev, tag_matching)) {
1046 		props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
1047 		props->tm_caps.max_num_tags =
1048 			(1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1;
1049 		props->tm_caps.flags = IB_TM_CAP_RC;
1050 		props->tm_caps.max_ops =
1051 			1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
1052 		props->tm_caps.max_sge = MLX5_TM_MAX_SGE;
1053 	}
1054 
1055 	if (MLX5_CAP_GEN(dev->mdev, cq_moderation)) {
1056 		props->cq_caps.max_cq_moderation_count =
1057 						MLX5_MAX_CQ_COUNT;
1058 		props->cq_caps.max_cq_moderation_period =
1059 						MLX5_MAX_CQ_PERIOD;
1060 	}
1061 
1062 	if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
1063 		resp.response_length += sizeof(resp.cqe_comp_caps);
1064 
1065 		if (MLX5_CAP_GEN(dev->mdev, cqe_compression)) {
1066 			resp.cqe_comp_caps.max_num =
1067 				MLX5_CAP_GEN(dev->mdev,
1068 					     cqe_compression_max_num);
1069 
1070 			resp.cqe_comp_caps.supported_format =
1071 				MLX5_IB_CQE_RES_FORMAT_HASH |
1072 				MLX5_IB_CQE_RES_FORMAT_CSUM;
1073 
1074 			if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
1075 				resp.cqe_comp_caps.supported_format |=
1076 					MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX;
1077 		}
1078 	}
1079 
1080 	if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen) &&
1081 	    raw_support) {
1082 		if (MLX5_CAP_QOS(mdev, packet_pacing) &&
1083 		    MLX5_CAP_GEN(mdev, qos)) {
1084 			resp.packet_pacing_caps.qp_rate_limit_max =
1085 				MLX5_CAP_QOS(mdev, packet_pacing_max_rate);
1086 			resp.packet_pacing_caps.qp_rate_limit_min =
1087 				MLX5_CAP_QOS(mdev, packet_pacing_min_rate);
1088 			resp.packet_pacing_caps.supported_qpts |=
1089 				1 << IB_QPT_RAW_PACKET;
1090 			if (MLX5_CAP_QOS(mdev, packet_pacing_burst_bound) &&
1091 			    MLX5_CAP_QOS(mdev, packet_pacing_typical_size))
1092 				resp.packet_pacing_caps.cap_flags |=
1093 					MLX5_IB_PP_SUPPORT_BURST;
1094 		}
1095 		resp.response_length += sizeof(resp.packet_pacing_caps);
1096 	}
1097 
1098 	if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
1099 			uhw->outlen)) {
1100 		if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe))
1101 			resp.mlx5_ib_support_multi_pkt_send_wqes =
1102 				MLX5_IB_ALLOW_MPW;
1103 
1104 		if (MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe))
1105 			resp.mlx5_ib_support_multi_pkt_send_wqes |=
1106 				MLX5_IB_SUPPORT_EMPW;
1107 
1108 		resp.response_length +=
1109 			sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
1110 	}
1111 
1112 	if (field_avail(typeof(resp), flags, uhw->outlen)) {
1113 		resp.response_length += sizeof(resp.flags);
1114 
1115 		if (MLX5_CAP_GEN(mdev, cqe_compression_128))
1116 			resp.flags |=
1117 				MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP;
1118 
1119 		if (MLX5_CAP_GEN(mdev, cqe_128_always))
1120 			resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD;
1121 		if (MLX5_CAP_GEN(mdev, qp_packet_based))
1122 			resp.flags |=
1123 				MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE;
1124 
1125 		resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT;
1126 	}
1127 
1128 	if (field_avail(typeof(resp), sw_parsing_caps,
1129 			uhw->outlen)) {
1130 		resp.response_length += sizeof(resp.sw_parsing_caps);
1131 		if (MLX5_CAP_ETH(mdev, swp)) {
1132 			resp.sw_parsing_caps.sw_parsing_offloads |=
1133 				MLX5_IB_SW_PARSING;
1134 
1135 			if (MLX5_CAP_ETH(mdev, swp_csum))
1136 				resp.sw_parsing_caps.sw_parsing_offloads |=
1137 					MLX5_IB_SW_PARSING_CSUM;
1138 
1139 			if (MLX5_CAP_ETH(mdev, swp_lso))
1140 				resp.sw_parsing_caps.sw_parsing_offloads |=
1141 					MLX5_IB_SW_PARSING_LSO;
1142 
1143 			if (resp.sw_parsing_caps.sw_parsing_offloads)
1144 				resp.sw_parsing_caps.supported_qpts =
1145 					BIT(IB_QPT_RAW_PACKET);
1146 		}
1147 	}
1148 
1149 	if (field_avail(typeof(resp), striding_rq_caps, uhw->outlen) &&
1150 	    raw_support) {
1151 		resp.response_length += sizeof(resp.striding_rq_caps);
1152 		if (MLX5_CAP_GEN(mdev, striding_rq)) {
1153 			resp.striding_rq_caps.min_single_stride_log_num_of_bytes =
1154 				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
1155 			resp.striding_rq_caps.max_single_stride_log_num_of_bytes =
1156 				MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES;
1157 			resp.striding_rq_caps.min_single_wqe_log_num_of_strides =
1158 				MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
1159 			resp.striding_rq_caps.max_single_wqe_log_num_of_strides =
1160 				MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES;
1161 			resp.striding_rq_caps.supported_qpts =
1162 				BIT(IB_QPT_RAW_PACKET);
1163 		}
1164 	}
1165 
1166 	if (field_avail(typeof(resp), tunnel_offloads_caps,
1167 			uhw->outlen)) {
1168 		resp.response_length += sizeof(resp.tunnel_offloads_caps);
1169 		if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan))
1170 			resp.tunnel_offloads_caps |=
1171 				MLX5_IB_TUNNELED_OFFLOADS_VXLAN;
1172 		if (MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx))
1173 			resp.tunnel_offloads_caps |=
1174 				MLX5_IB_TUNNELED_OFFLOADS_GENEVE;
1175 		if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
1176 			resp.tunnel_offloads_caps |=
1177 				MLX5_IB_TUNNELED_OFFLOADS_GRE;
1178 		if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
1179 		    MLX5_FLEX_PROTO_CW_MPLS_GRE)
1180 			resp.tunnel_offloads_caps |=
1181 				MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE;
1182 		if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
1183 		    MLX5_FLEX_PROTO_CW_MPLS_UDP)
1184 			resp.tunnel_offloads_caps |=
1185 				MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
1186 	}
1187 
1188 	if (uhw->outlen) {
1189 		err = ib_copy_to_udata(uhw, &resp, resp.response_length);
1190 
1191 		if (err)
1192 			return err;
1193 	}
1194 
1195 	return 0;
1196 }
1197 
1198 enum mlx5_ib_width {
1199 	MLX5_IB_WIDTH_1X	= 1 << 0,
1200 	MLX5_IB_WIDTH_2X	= 1 << 1,
1201 	MLX5_IB_WIDTH_4X	= 1 << 2,
1202 	MLX5_IB_WIDTH_8X	= 1 << 3,
1203 	MLX5_IB_WIDTH_12X	= 1 << 4
1204 };
1205 
1206 static void translate_active_width(struct ib_device *ibdev, u8 active_width,
1207 				  u8 *ib_width)
1208 {
1209 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1210 
1211 	if (active_width & MLX5_IB_WIDTH_1X)
1212 		*ib_width = IB_WIDTH_1X;
1213 	else if (active_width & MLX5_IB_WIDTH_2X)
1214 		*ib_width = IB_WIDTH_2X;
1215 	else if (active_width & MLX5_IB_WIDTH_4X)
1216 		*ib_width = IB_WIDTH_4X;
1217 	else if (active_width & MLX5_IB_WIDTH_8X)
1218 		*ib_width = IB_WIDTH_8X;
1219 	else if (active_width & MLX5_IB_WIDTH_12X)
1220 		*ib_width = IB_WIDTH_12X;
1221 	else {
1222 		mlx5_ib_dbg(dev, "Invalid active_width %d, setting width to default value: 4x\n",
1223 			    (int)active_width);
1224 		*ib_width = IB_WIDTH_4X;
1225 	}
1226 
1227 	return;
1228 }
1229 
1230 static int mlx5_mtu_to_ib_mtu(int mtu)
1231 {
1232 	switch (mtu) {
1233 	case 256: return 1;
1234 	case 512: return 2;
1235 	case 1024: return 3;
1236 	case 2048: return 4;
1237 	case 4096: return 5;
1238 	default:
1239 		pr_warn("invalid mtu\n");
1240 		return -1;
1241 	}
1242 }
1243 
1244 enum ib_max_vl_num {
1245 	__IB_MAX_VL_0		= 1,
1246 	__IB_MAX_VL_0_1		= 2,
1247 	__IB_MAX_VL_0_3		= 3,
1248 	__IB_MAX_VL_0_7		= 4,
1249 	__IB_MAX_VL_0_14	= 5,
1250 };
1251 
1252 enum mlx5_vl_hw_cap {
1253 	MLX5_VL_HW_0	= 1,
1254 	MLX5_VL_HW_0_1	= 2,
1255 	MLX5_VL_HW_0_2	= 3,
1256 	MLX5_VL_HW_0_3	= 4,
1257 	MLX5_VL_HW_0_4	= 5,
1258 	MLX5_VL_HW_0_5	= 6,
1259 	MLX5_VL_HW_0_6	= 7,
1260 	MLX5_VL_HW_0_7	= 8,
1261 	MLX5_VL_HW_0_14	= 15
1262 };
1263 
1264 static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
1265 				u8 *max_vl_num)
1266 {
1267 	switch (vl_hw_cap) {
1268 	case MLX5_VL_HW_0:
1269 		*max_vl_num = __IB_MAX_VL_0;
1270 		break;
1271 	case MLX5_VL_HW_0_1:
1272 		*max_vl_num = __IB_MAX_VL_0_1;
1273 		break;
1274 	case MLX5_VL_HW_0_3:
1275 		*max_vl_num = __IB_MAX_VL_0_3;
1276 		break;
1277 	case MLX5_VL_HW_0_7:
1278 		*max_vl_num = __IB_MAX_VL_0_7;
1279 		break;
1280 	case MLX5_VL_HW_0_14:
1281 		*max_vl_num = __IB_MAX_VL_0_14;
1282 		break;
1283 
1284 	default:
1285 		return -EINVAL;
1286 	}
1287 
1288 	return 0;
1289 }
1290 
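/*
 * query_port() implementation for IB link-layer ports: read the HCA vport
 * context together with the operational link width/speed, MTU and VL
 * capability, and translate them into struct ib_port_attr.
 */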
1291 static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
1292 			       struct ib_port_attr *props)
1293 {
1294 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1295 	struct mlx5_core_dev *mdev = dev->mdev;
1296 	struct mlx5_hca_vport_context *rep;
1297 	u16 max_mtu;
1298 	u16 oper_mtu;
1299 	int err;
1300 	u8 ib_link_width_oper;
1301 	u8 vl_hw_cap;
1302 
1303 	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
1304 	if (!rep) {
1305 		err = -ENOMEM;
1306 		goto out;
1307 	}
1308 
1309 	/* props is zeroed by the caller; avoid zeroing it here */
1310 
1311 	err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
1312 	if (err)
1313 		goto out;
1314 
1315 	props->lid		= rep->lid;
1316 	props->lmc		= rep->lmc;
1317 	props->sm_lid		= rep->sm_lid;
1318 	props->sm_sl		= rep->sm_sl;
1319 	props->state		= rep->vport_state;
1320 	props->phys_state	= rep->port_physical_state;
1321 	props->port_cap_flags	= rep->cap_mask1;
1322 	props->gid_tbl_len	= mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
1323 	props->max_msg_sz	= 1 << MLX5_CAP_GEN(mdev, log_max_msg);
1324 	props->pkey_tbl_len	= mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
1325 	props->bad_pkey_cntr	= rep->pkey_violation_counter;
1326 	props->qkey_viol_cntr	= rep->qkey_violation_counter;
1327 	props->subnet_timeout	= rep->subnet_timeout;
1328 	props->init_type_reply	= rep->init_type_reply;
1329 
1330 	if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP)
1331 		props->port_cap_flags2 = rep->cap_mask2;
1332 
1333 	err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
1334 	if (err)
1335 		goto out;
1336 
1337 	translate_active_width(ibdev, ib_link_width_oper, &props->active_width);
1338 
1339 	err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port);
1340 	if (err)
1341 		goto out;
1342 
1343 	mlx5_query_port_max_mtu(mdev, &max_mtu, port);
1344 
1345 	props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);
1346 
1347 	mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);
1348 
1349 	props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu);
1350 
1351 	err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port);
1352 	if (err)
1353 		goto out;
1354 
1355 	err = translate_max_vl_num(ibdev, vl_hw_cap,
1356 				   &props->max_vl_num);
1357 out:
1358 	kfree(rep);
1359 	return err;
1360 }
1361 
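/*
 * Generic query_port() entry point: dispatch to the MAD, HCA-vport or
 * NIC-vport (RoCE) query path depending on the access method, then shrink
 * gid_tbl_len by the number of GIDs the core reserves for its own use.
 */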
1362 int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
1363 		       struct ib_port_attr *props)
1364 {
1365 	unsigned int count;
1366 	int ret;
1367 
1368 	switch (mlx5_get_vport_access_method(ibdev)) {
1369 	case MLX5_VPORT_ACCESS_METHOD_MAD:
1370 		ret = mlx5_query_mad_ifc_port(ibdev, port, props);
1371 		break;
1372 
1373 	case MLX5_VPORT_ACCESS_METHOD_HCA:
1374 		ret = mlx5_query_hca_port(ibdev, port, props);
1375 		break;
1376 
1377 	case MLX5_VPORT_ACCESS_METHOD_NIC:
1378 		ret = mlx5_query_port_roce(ibdev, port, props);
1379 		break;
1380 
1381 	default:
1382 		ret = -EINVAL;
1383 	}
1384 
1385 	if (!ret && props) {
1386 		struct mlx5_ib_dev *dev = to_mdev(ibdev);
1387 		struct mlx5_core_dev *mdev;
1388 		bool put_mdev = true;
1389 
1390 		mdev = mlx5_ib_get_native_port_mdev(dev, port, NULL);
1391 		if (!mdev) {
1392 			/* If the port isn't affiliated yet, query the master.
1393 			 * The master and slave will have the same values.
1394 			 */
1395 			mdev = dev->mdev;
1396 			port = 1;
1397 			put_mdev = false;
1398 		}
1399 		count = mlx5_core_reserved_gids_count(mdev);
1400 		if (put_mdev)
1401 			mlx5_ib_put_native_port_mdev(dev, port);
1402 		props->gid_tbl_len -= count;
1403 	}
1404 	return ret;
1405 }
1406 
1407 static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u8 port,
1408 				  struct ib_port_attr *props)
1409 {
1410 	int ret;
1411 
1412 	/* Only link layer == ethernet is valid for representors
1413 	 * and we always use port 1
1414 	 */
1415 	ret = mlx5_query_port_roce(ibdev, port, props);
1416 	if (ret || !props)
1417 		return ret;
1418 
1419 	/* We don't support GIDs */
1420 	props->gid_tbl_len = 0;
1421 
1422 	return ret;
1423 }
1424 
1425 static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
1426 			     union ib_gid *gid)
1427 {
1428 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1429 	struct mlx5_core_dev *mdev = dev->mdev;
1430 
1431 	switch (mlx5_get_vport_access_method(ibdev)) {
1432 	case MLX5_VPORT_ACCESS_METHOD_MAD:
1433 		return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);
1434 
1435 	case MLX5_VPORT_ACCESS_METHOD_HCA:
1436 		return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid);
1437 
1438 	default:
1439 		return -EINVAL;
1440 	}
1441 
1442 }
1443 
1444 static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u8 port,
1445 				   u16 index, u16 *pkey)
1446 {
1447 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1448 	struct mlx5_core_dev *mdev;
1449 	bool put_mdev = true;
1450 	u8 mdev_port_num;
1451 	int err;
1452 
1453 	mdev = mlx5_ib_get_native_port_mdev(dev, port, &mdev_port_num);
1454 	if (!mdev) {
1455 		/* The port isn't affiliated yet, get the PKey from the master
1456 		 * port. For RoCE the PKey tables will be the same.
1457 		 */
1458 		put_mdev = false;
1459 		mdev = dev->mdev;
1460 		mdev_port_num = 1;
1461 	}
1462 
1463 	err = mlx5_query_hca_vport_pkey(mdev, 0, mdev_port_num, 0,
1464 					index, pkey);
1465 	if (put_mdev)
1466 		mlx5_ib_put_native_port_mdev(dev, port);
1467 
1468 	return err;
1469 }
1470 
1471 static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
1472 			      u16 *pkey)
1473 {
1474 	switch (mlx5_get_vport_access_method(ibdev)) {
1475 	case MLX5_VPORT_ACCESS_METHOD_MAD:
1476 		return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);
1477 
1478 	case MLX5_VPORT_ACCESS_METHOD_HCA:
1479 	case MLX5_VPORT_ACCESS_METHOD_NIC:
1480 		return mlx5_query_hca_nic_pkey(ibdev, port, index, pkey);
1481 	default:
1482 		return -EINVAL;
1483 	}
1484 }
1485 
1486 static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
1487 				 struct ib_device_modify *props)
1488 {
1489 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1490 	struct mlx5_reg_node_desc in;
1491 	struct mlx5_reg_node_desc out;
1492 	int err;
1493 
1494 	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
1495 		return -EOPNOTSUPP;
1496 
1497 	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
1498 		return 0;
1499 
1500 	/*
1501 	 * If possible, pass the node desc to FW so it can generate
1502 	 * a Trap 144 event.  If the command fails, just ignore it.
1503 	 */
1504 	memcpy(&in, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
1505 	err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
1506 				   sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
1507 	if (err)
1508 		return err;
1509 
1510 	memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
1511 
1512 	return err;
1513 }
1514 
1515 static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u8 port_num, u32 mask,
1516 				u32 value)
1517 {
1518 	struct mlx5_hca_vport_context ctx = {};
1519 	struct mlx5_core_dev *mdev;
1520 	u8 mdev_port_num;
1521 	int err;
1522 
1523 	mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
1524 	if (!mdev)
1525 		return -ENODEV;
1526 
1527 	err = mlx5_query_hca_vport_context(mdev, 0, mdev_port_num, 0, &ctx);
1528 	if (err)
1529 		goto out;
1530 
1531 	if (~ctx.cap_mask1_perm & mask) {
1532 		mlx5_ib_warn(dev, "trying to change bitmask 0x%X but change supported 0x%X\n",
1533 			     mask, ctx.cap_mask1_perm);
1534 		err = -EINVAL;
1535 		goto out;
1536 	}
1537 
1538 	ctx.cap_mask1 = value;
1539 	ctx.cap_mask1_perm = mask;
1540 	err = mlx5_core_modify_hca_vport_context(mdev, 0, mdev_port_num,
1541 						 0, &ctx);
1542 
1543 out:
1544 	mlx5_ib_put_native_port_mdev(dev, port_num);
1545 
1546 	return err;
1547 }
1548 
1549 static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
1550 			       struct ib_port_modify *props)
1551 {
1552 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1553 	struct ib_port_attr attr;
1554 	u32 tmp;
1555 	int err;
1556 	u32 change_mask;
1557 	u32 value;
1558 	bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) ==
1559 		      IB_LINK_LAYER_INFINIBAND);
1560 
1561 	/* CM layer calls ib_modify_port() regardless of the link layer. For
1562 	 * Ethernet ports, qkey violation and Port capabilities are meaningless.
1563 	 */
1564 	if (!is_ib)
1565 		return 0;
1566 
1567 	if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) {
1568 		change_mask = props->clr_port_cap_mask | props->set_port_cap_mask;
1569 		value = ~props->clr_port_cap_mask | props->set_port_cap_mask;
1570 		return set_port_caps_atomic(dev, port, change_mask, value);
1571 	}
1572 
1573 	mutex_lock(&dev->cap_mask_mutex);
1574 
1575 	err = ib_query_port(ibdev, port, &attr);
1576 	if (err)
1577 		goto out;
1578 
1579 	tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
1580 		~props->clr_port_cap_mask;
1581 
1582 	err = mlx5_set_port_caps(dev->mdev, port, tmp);
1583 
1584 out:
1585 	mutex_unlock(&dev->cap_mask_mutex);
1586 	return err;
1587 }
1588 
1589 static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps)
1590 {
1591 	mlx5_ib_dbg(dev, "MLX5_LIB_CAP_4K_UAR = %s\n",
1592 		    caps & MLX5_LIB_CAP_4K_UAR ? "y" : "n");
1593 }
1594 
1595 static u16 calc_dynamic_bfregs(int uars_per_sys_page)
1596 {
1597 	/* A large page size with non-4K UAR support might limit the dynamic size */
1598 	if (uars_per_sys_page == 1  && PAGE_SIZE > 4096)
1599 		return MLX5_MIN_DYN_BFREGS;
1600 
1601 	return MLX5_MAX_DYN_BFREGS;
1602 }
1603 
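/*
 * Validate the number of blue-flame registers requested at ucontext
 * allocation and split them into statically allocated and dynamically
 * allocatable UAR system pages.  req->total_num_bfregs is rounded up to a
 * whole number of system pages; bfregi receives the resulting static page
 * count, dynamic bfreg count and totals.
 */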
1604 static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
1605 			     struct mlx5_ib_alloc_ucontext_req_v2 *req,
1606 			     struct mlx5_bfreg_info *bfregi)
1607 {
1608 	int uars_per_sys_page;
1609 	int bfregs_per_sys_page;
1610 	int ref_bfregs = req->total_num_bfregs;
1611 
1612 	if (req->total_num_bfregs == 0)
1613 		return -EINVAL;
1614 
1615 	BUILD_BUG_ON(MLX5_MAX_BFREGS % MLX5_NON_FP_BFREGS_IN_PAGE);
1616 	BUILD_BUG_ON(MLX5_MAX_BFREGS < MLX5_NON_FP_BFREGS_IN_PAGE);
1617 
1618 	if (req->total_num_bfregs > MLX5_MAX_BFREGS)
1619 		return -ENOMEM;
1620 
1621 	uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k);
1622 	bfregs_per_sys_page = uars_per_sys_page * MLX5_NON_FP_BFREGS_PER_UAR;
1623 	/* This holds the required static allocation requested by the user */
1624 	req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page);
1625 	if (req->num_low_latency_bfregs > req->total_num_bfregs - 1)
1626 		return -EINVAL;
1627 
1628 	bfregi->num_static_sys_pages = req->total_num_bfregs / bfregs_per_sys_page;
1629 	bfregi->num_dyn_bfregs = ALIGN(calc_dynamic_bfregs(uars_per_sys_page), bfregs_per_sys_page);
1630 	bfregi->total_num_bfregs = req->total_num_bfregs + bfregi->num_dyn_bfregs;
1631 	bfregi->num_sys_pages = bfregi->total_num_bfregs / bfregs_per_sys_page;
1632 
1633 	mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, total bfregs %d, using %d sys pages\n",
1634 		    MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
1635 		    lib_uar_4k ? "yes" : "no", ref_bfregs,
1636 		    req->total_num_bfregs, bfregi->total_num_bfregs,
1637 		    bfregi->num_sys_pages);
1638 
1639 	return 0;
1640 }
1641 
1642 static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
1643 {
1644 	struct mlx5_bfreg_info *bfregi;
1645 	int err;
1646 	int i;
1647 
1648 	bfregi = &context->bfregi;
1649 	for (i = 0; i < bfregi->num_static_sys_pages; i++) {
1650 		err = mlx5_cmd_alloc_uar(dev->mdev, &bfregi->sys_pages[i]);
1651 		if (err)
1652 			goto error;
1653 
1654 		mlx5_ib_dbg(dev, "allocated uar %d\n", bfregi->sys_pages[i]);
1655 	}
1656 
1657 	for (i = bfregi->num_static_sys_pages; i < bfregi->num_sys_pages; i++)
1658 		bfregi->sys_pages[i] = MLX5_IB_INVALID_UAR_INDEX;
1659 
1660 	return 0;
1661 
1662 error:
1663 	for (--i; i >= 0; i--)
1664 		if (mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]))
1665 			mlx5_ib_warn(dev, "failed to free uar %d\n", i);
1666 
1667 	return err;
1668 }
1669 
1670 static void deallocate_uars(struct mlx5_ib_dev *dev,
1671 			    struct mlx5_ib_ucontext *context)
1672 {
1673 	struct mlx5_bfreg_info *bfregi;
1674 	int i;
1675 
1676 	bfregi = &context->bfregi;
1677 	for (i = 0; i < bfregi->num_sys_pages; i++)
1678 		if (i < bfregi->num_static_sys_pages ||
1679 		    bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX)
1680 			mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
1681 }
1682 
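/*
 * Reference-counted enabling of NIC vport local loopback.  Loopback is
 * turned on once a second transport domain user or the first
 * loopback-requiring QP shows up, and mlx5_ib_disable_lb() turns it off
 * again once at most one transport domain user and no such QPs remain.
 */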
1683 int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
1684 {
1685 	int err = 0;
1686 
1687 	mutex_lock(&dev->lb.mutex);
1688 	if (td)
1689 		dev->lb.user_td++;
1690 	if (qp)
1691 		dev->lb.qps++;
1692 
1693 	if (dev->lb.user_td == 2 ||
1694 	    dev->lb.qps == 1) {
1695 		if (!dev->lb.enabled) {
1696 			err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
1697 			dev->lb.enabled = true;
1698 		}
1699 	}
1700 
1701 	mutex_unlock(&dev->lb.mutex);
1702 
1703 	return err;
1704 }
1705 
1706 void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
1707 {
1708 	mutex_lock(&dev->lb.mutex);
1709 	if (td)
1710 		dev->lb.user_td--;
1711 	if (qp)
1712 		dev->lb.qps--;
1713 
1714 	if (dev->lb.user_td == 1 &&
1715 	    dev->lb.qps == 0) {
1716 		if (dev->lb.enabled) {
1717 			mlx5_nic_vport_update_local_lb(dev->mdev, false);
1718 			dev->lb.enabled = false;
1719 		}
1720 	}
1721 
1722 	mutex_unlock(&dev->lb.mutex);
1723 }
1724 
1725 static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn,
1726 					  u16 uid)
1727 {
1728 	int err;
1729 
1730 	if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
1731 		return 0;
1732 
1733 	err = mlx5_cmd_alloc_transport_domain(dev->mdev, tdn, uid);
1734 	if (err)
1735 		return err;
1736 
1737 	if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
1738 	    (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
1739 	     !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
1740 		return err;
1741 
1742 	return mlx5_ib_enable_lb(dev, true, false);
1743 }
1744 
1745 static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn,
1746 					     u16 uid)
1747 {
1748 	if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
1749 		return;
1750 
1751 	mlx5_cmd_dealloc_transport_domain(dev->mdev, tdn, uid);
1752 
1753 	if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
1754 	    (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
1755 	     !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
1756 		return;
1757 
1758 	mlx5_ib_disable_lb(dev, true, false);
1759 }
1760 
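/*
 * alloc_ucontext() implementation: parse the (v0 or v2) user request,
 * size and allocate the per-context blue-flame registers and UARs,
 * optionally create a DEVX uid, allocate a transport domain and fill the
 * response with the negotiated capabilities (CQE version, UAR layout,
 * flow-action flags, etc.).
 */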
1761 static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
1762 				  struct ib_udata *udata)
1763 {
1764 	struct ib_device *ibdev = uctx->device;
1765 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1766 	struct mlx5_ib_alloc_ucontext_req_v2 req = {};
1767 	struct mlx5_ib_alloc_ucontext_resp resp = {};
1768 	struct mlx5_core_dev *mdev = dev->mdev;
1769 	struct mlx5_ib_ucontext *context = to_mucontext(uctx);
1770 	struct mlx5_bfreg_info *bfregi;
1771 	int ver;
1772 	int err;
1773 	size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
1774 				     max_cqe_version);
1775 	u32 dump_fill_mkey;
1776 	bool lib_uar_4k;
1777 
1778 	if (!dev->ib_active)
1779 		return -EAGAIN;
1780 
1781 	if (udata->inlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
1782 		ver = 0;
1783 	else if (udata->inlen >= min_req_v2)
1784 		ver = 2;
1785 	else
1786 		return -EINVAL;
1787 
1788 	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
1789 	if (err)
1790 		return err;
1791 
1792 	if (req.flags & ~MLX5_IB_ALLOC_UCTX_DEVX)
1793 		return -EOPNOTSUPP;
1794 
1795 	if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
1796 		return -EOPNOTSUPP;
1797 
1798 	req.total_num_bfregs = ALIGN(req.total_num_bfregs,
1799 				    MLX5_NON_FP_BFREGS_PER_UAR);
1800 	if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
1801 		return -EINVAL;
1802 
1803 	resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
1804 	if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
1805 		resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
1806 	resp.cache_line_size = cache_line_size();
1807 	resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
1808 	resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
1809 	resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
1810 	resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
1811 	resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
1812 	resp.cqe_version = min_t(__u8,
1813 				 (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
1814 				 req.max_cqe_version);
1815 	resp.log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
1816 				MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT;
1817 	resp.num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
1818 					MLX5_CAP_GEN(dev->mdev, num_of_uars_per_page) : 1;
1819 	resp.response_length = min(offsetof(typeof(resp), response_length) +
1820 				   sizeof(resp.response_length), udata->outlen);
1821 
1822 	if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) {
1823 		if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_EGRESS))
1824 			resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM;
1825 		if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA)
1826 			resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA;
1827 		if (MLX5_CAP_FLOWTABLE(dev->mdev, flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
1828 			resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING;
1829 		if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN)
1830 			resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN;
1831 		/* MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD is currently always 0 */
1832 	}
1833 
1834 	lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
1835 	bfregi = &context->bfregi;
1836 
1837 	/* updates req->total_num_bfregs */
1838 	err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi);
1839 	if (err)
1840 		goto out_ctx;
1841 
1842 	mutex_init(&bfregi->lock);
1843 	bfregi->lib_uar_4k = lib_uar_4k;
1844 	bfregi->count = kcalloc(bfregi->total_num_bfregs, sizeof(*bfregi->count),
1845 				GFP_KERNEL);
1846 	if (!bfregi->count) {
1847 		err = -ENOMEM;
1848 		goto out_ctx;
1849 	}
1850 
1851 	bfregi->sys_pages = kcalloc(bfregi->num_sys_pages,
1852 				    sizeof(*bfregi->sys_pages),
1853 				    GFP_KERNEL);
1854 	if (!bfregi->sys_pages) {
1855 		err = -ENOMEM;
1856 		goto out_count;
1857 	}
1858 
1859 	err = allocate_uars(dev, context);
1860 	if (err)
1861 		goto out_sys_pages;
1862 
1863 	if (ibdev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING)
1864 		context->ibucontext.invalidate_range =
1865 			&mlx5_ib_invalidate_range;
1866 
1867 	if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
1868 		err = mlx5_ib_devx_create(dev, true);
1869 		if (err < 0)
1870 			goto out_uars;
1871 		context->devx_uid = err;
1872 	}
1873 
1874 	err = mlx5_ib_alloc_transport_domain(dev, &context->tdn,
1875 					     context->devx_uid);
1876 	if (err)
1877 		goto out_devx;
1878 
1879 	if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
1880 		err = mlx5_cmd_dump_fill_mkey(dev->mdev, &dump_fill_mkey);
1881 		if (err)
1882 			goto out_mdev;
1883 	}
1884 
1885 	INIT_LIST_HEAD(&context->db_page_list);
1886 	mutex_init(&context->db_page_mutex);
1887 
1888 	resp.tot_bfregs = req.total_num_bfregs;
1889 	resp.num_ports = dev->num_ports;
1890 
1891 	if (field_avail(typeof(resp), cqe_version, udata->outlen))
1892 		resp.response_length += sizeof(resp.cqe_version);
1893 
1894 	if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) {
1895 		resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE |
1896 				      MLX5_USER_CMDS_SUPP_UHW_CREATE_AH;
1897 		resp.response_length += sizeof(resp.cmds_supp_uhw);
1898 	}
1899 
1900 	if (field_avail(typeof(resp), eth_min_inline, udata->outlen)) {
1901 		if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) {
1902 			mlx5_query_min_inline(dev->mdev, &resp.eth_min_inline);
1903 			resp.eth_min_inline++;
1904 		}
1905 		resp.response_length += sizeof(resp.eth_min_inline);
1906 	}
1907 
1908 	if (field_avail(typeof(resp), clock_info_versions, udata->outlen)) {
1909 		if (mdev->clock_info)
1910 			resp.clock_info_versions = BIT(MLX5_IB_CLOCK_INFO_V1);
1911 		resp.response_length += sizeof(resp.clock_info_versions);
1912 	}
1913 
1914 	/*
1915 	 * We don't want to expose information from the PCI BAR that is located
1916 	 * after 4096 bytes, so if the arch only supports larger pages, let's
1917 	 * pretend we don't support reading the HCA's core clock. This is also
1918 	 * enforced by the mmap function.
1919 	 */
1920 	if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
1921 		if (PAGE_SIZE <= 4096) {
1922 			resp.comp_mask |=
1923 				MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
1924 			resp.hca_core_clock_offset =
1925 				offsetof(struct mlx5_init_seg, internal_timer_h) % PAGE_SIZE;
1926 		}
1927 		resp.response_length += sizeof(resp.hca_core_clock_offset);
1928 	}
1929 
1930 	if (field_avail(typeof(resp), log_uar_size, udata->outlen))
1931 		resp.response_length += sizeof(resp.log_uar_size);
1932 
1933 	if (field_avail(typeof(resp), num_uars_per_page, udata->outlen))
1934 		resp.response_length += sizeof(resp.num_uars_per_page);
1935 
1936 	if (field_avail(typeof(resp), num_dyn_bfregs, udata->outlen)) {
1937 		resp.num_dyn_bfregs = bfregi->num_dyn_bfregs;
1938 		resp.response_length += sizeof(resp.num_dyn_bfregs);
1939 	}
1940 
1941 	if (field_avail(typeof(resp), dump_fill_mkey, udata->outlen)) {
1942 		if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
1943 			resp.dump_fill_mkey = dump_fill_mkey;
1944 			resp.comp_mask |=
1945 				MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY;
1946 		}
1947 		resp.response_length += sizeof(resp.dump_fill_mkey);
1948 	}
1949 
1950 	err = ib_copy_to_udata(udata, &resp, resp.response_length);
1951 	if (err)
1952 		goto out_mdev;
1953 
1954 	bfregi->ver = ver;
1955 	bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
1956 	context->cqe_version = resp.cqe_version;
1957 	context->lib_caps = req.lib_caps;
1958 	print_lib_caps(dev, context->lib_caps);
1959 
1960 	if (dev->lag_active) {
1961 		u8 port = mlx5_core_native_port_num(dev->mdev) - 1;
1962 
1963 		atomic_set(&context->tx_port_affinity,
1964 			   atomic_add_return(
1965 				   1, &dev->port[port].roce.tx_port_affinity));
1966 	}
1967 
1968 	return 0;
1969 
1970 out_mdev:
1971 	mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
1972 out_devx:
1973 	if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX)
1974 		mlx5_ib_devx_destroy(dev, context->devx_uid);
1975 
1976 out_uars:
1977 	deallocate_uars(dev, context);
1978 
1979 out_sys_pages:
1980 	kfree(bfregi->sys_pages);
1981 
1982 out_count:
1983 	kfree(bfregi->count);
1984 
1985 out_ctx:
1986 	return err;
1987 }
1988 
1989 static void mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
1990 {
1991 	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
1992 	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
1993 	struct mlx5_bfreg_info *bfregi;
1994 
1995 	/* All umems must be destroyed before destroying the ucontext. */
1996 	mutex_lock(&ibcontext->per_mm_list_lock);
1997 	WARN_ON(!list_empty(&ibcontext->per_mm_list));
1998 	mutex_unlock(&ibcontext->per_mm_list_lock);
1999 
2000 	bfregi = &context->bfregi;
2001 	mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
2002 
2003 	if (context->devx_uid)
2004 		mlx5_ib_devx_destroy(dev, context->devx_uid);
2005 
2006 	deallocate_uars(dev, context);
2007 	kfree(bfregi->sys_pages);
2008 	kfree(bfregi->count);
2009 }
2010 
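/*
 * Translate a UAR index into the PFN of the BAR page containing it. When the
 * device exposes 4KB UARs (uar_4k), MLX5_UARS_IN_PAGE UAR indexes share each
 * system page, so the index is scaled accordingly.
 */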
2011 static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
2012 				 int uar_idx)
2013 {
2014 	int fw_uars_per_page;
2015 
2016 	fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;
2017 
2018 	return (dev->mdev->bar_addr >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
2019 }
2020 
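/*
 * The mmap offset (vma->vm_pgoff) encodes a command in the bits at and above
 * MLX5_IB_MMAP_CMD_SHIFT and a command-specific argument (usually a page or
 * UAR index) in the bits below it, i.e. roughly:
 *
 *	pgoff = (cmd << MLX5_IB_MMAP_CMD_SHIFT) | index;
 */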
2021 static int get_command(unsigned long offset)
2022 {
2023 	return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
2024 }
2025 
2026 static int get_arg(unsigned long offset)
2027 {
2028 	return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
2029 }
2030 
2031 static int get_index(unsigned long offset)
2032 {
2033 	return get_arg(offset);
2034 }
2035 
2036 /* Index resides in an extra byte to enable index values larger than 255 */
2037 static int get_extended_index(unsigned long offset)
2038 {
2039 	return get_arg(offset) | ((offset >> 16) & 0xff) << 8;
2040 }
2041 
2042 
2043 static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
2044 {
2045 }
2046 
2047 static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
2048 {
2049 	switch (cmd) {
2050 	case MLX5_IB_MMAP_WC_PAGE:
2051 		return "WC";
2052 	case MLX5_IB_MMAP_REGULAR_PAGE:
2053 		return "best effort WC";
2054 	case MLX5_IB_MMAP_NC_PAGE:
2055 		return "NC";
2056 	case MLX5_IB_MMAP_DEVICE_MEM:
2057 		return "Device Memory";
2058 	default:
2059 		return NULL;
2060 	}
2061 }
2062 
2063 static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
2064 					struct vm_area_struct *vma,
2065 					struct mlx5_ib_ucontext *context)
2066 {
2067 	if ((vma->vm_end - vma->vm_start != PAGE_SIZE) ||
2068 	    !(vma->vm_flags & VM_SHARED))
2069 		return -EINVAL;
2070 
2071 	if (get_index(vma->vm_pgoff) != MLX5_IB_CLOCK_INFO_V1)
2072 		return -EOPNOTSUPP;
2073 
2074 	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
2075 		return -EPERM;
2076 	vma->vm_flags &= ~VM_MAYWRITE;
2077 
2078 	if (!dev->mdev->clock_info)
2079 		return -EOPNOTSUPP;
2080 
2081 	return vm_insert_page(vma, vma->vm_start,
2082 			      virt_to_page(dev->mdev->clock_info));
2083 }
2084 
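/*
 * Map one UAR page into user space. For MLX5_IB_MMAP_ALLOC_WC the UAR is
 * allocated dynamically here (and recorded in bfregi->sys_pages); for the
 * other commands the index refers to a UAR allocated at ucontext creation.
 */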
2085 static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
2086 		    struct vm_area_struct *vma,
2087 		    struct mlx5_ib_ucontext *context)
2088 {
2089 	struct mlx5_bfreg_info *bfregi = &context->bfregi;
2090 	int err;
2091 	unsigned long idx;
2092 	phys_addr_t pfn;
2093 	pgprot_t prot;
2094 	u32 bfreg_dyn_idx = 0;
2095 	u32 uar_index;
2096 	int dyn_uar = (cmd == MLX5_IB_MMAP_ALLOC_WC);
2097 	int max_valid_idx = dyn_uar ? bfregi->num_sys_pages :
2098 				bfregi->num_static_sys_pages;
2099 
2100 	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
2101 		return -EINVAL;
2102 
2103 	if (dyn_uar)
2104 		idx = get_extended_index(vma->vm_pgoff) + bfregi->num_static_sys_pages;
2105 	else
2106 		idx = get_index(vma->vm_pgoff);
2107 
2108 	if (idx >= max_valid_idx) {
2109 		mlx5_ib_warn(dev, "invalid uar index %lu, max=%d\n",
2110 			     idx, max_valid_idx);
2111 		return -EINVAL;
2112 	}
2113 
2114 	switch (cmd) {
2115 	case MLX5_IB_MMAP_WC_PAGE:
2116 	case MLX5_IB_MMAP_ALLOC_WC:
2117 /* Some architectures don't support WC memory */
2118 #if defined(CONFIG_X86)
2119 		if (!pat_enabled())
2120 			return -EPERM;
2121 #elif !(defined(CONFIG_PPC) || (defined(CONFIG_ARM) && defined(CONFIG_MMU)))
2122 			return -EPERM;
2123 #endif
2124 	/* fall through */
2125 	case MLX5_IB_MMAP_REGULAR_PAGE:
2126 		/* For MLX5_IB_MMAP_REGULAR_PAGE do the best effort to get WC */
2127 		prot = pgprot_writecombine(vma->vm_page_prot);
2128 		break;
2129 	case MLX5_IB_MMAP_NC_PAGE:
2130 		prot = pgprot_noncached(vma->vm_page_prot);
2131 		break;
2132 	default:
2133 		return -EINVAL;
2134 	}
2135 
2136 	if (dyn_uar) {
2137 		int uars_per_page;
2138 
2139 		uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k);
2140 		bfreg_dyn_idx = idx * (uars_per_page * MLX5_NON_FP_BFREGS_PER_UAR);
2141 		if (bfreg_dyn_idx >= bfregi->total_num_bfregs) {
2142 			mlx5_ib_warn(dev, "invalid bfreg_dyn_idx %u, max=%u\n",
2143 				     bfreg_dyn_idx, bfregi->total_num_bfregs);
2144 			return -EINVAL;
2145 		}
2146 
2147 		mutex_lock(&bfregi->lock);
2148 		/* Fail if the UAR is already allocated; the first bfreg index
2149 		 * of each page holds its count.
2150 		 */
2151 		if (bfregi->count[bfreg_dyn_idx]) {
2152 			mlx5_ib_warn(dev, "wrong offset, idx %lu is busy, bfregn=%u\n", idx, bfreg_dyn_idx);
2153 			mutex_unlock(&bfregi->lock);
2154 			return -EINVAL;
2155 		}
2156 
2157 		bfregi->count[bfreg_dyn_idx]++;
2158 		mutex_unlock(&bfregi->lock);
2159 
2160 		err = mlx5_cmd_alloc_uar(dev->mdev, &uar_index);
2161 		if (err) {
2162 			mlx5_ib_warn(dev, "UAR alloc failed\n");
2163 			goto free_bfreg;
2164 		}
2165 	} else {
2166 		uar_index = bfregi->sys_pages[idx];
2167 	}
2168 
2169 	pfn = uar_index2pfn(dev, uar_index);
2170 	mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
2171 
2172 	err = rdma_user_mmap_io(&context->ibucontext, vma, pfn, PAGE_SIZE,
2173 				prot);
2174 	if (err) {
2175 		mlx5_ib_err(dev,
2176 			    "rdma_user_mmap_io failed with error=%d, mmap_cmd=%s\n",
2177 			    err, mmap_cmd2str(cmd));
2178 		goto err;
2179 	}
2180 
2181 	if (dyn_uar)
2182 		bfregi->sys_pages[idx] = uar_index;
2183 	return 0;
2184 
2185 err:
2186 	if (!dyn_uar)
2187 		return err;
2188 
2189 	mlx5_cmd_free_uar(dev->mdev, uar_index);
2190 
2191 free_bfreg:
2192 	mlx5_ib_free_bfreg(dev, bfregi, bfreg_dyn_idx);
2193 
2194 	return err;
2195 }
2196 
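/*
 * Map device memory (MEMIC) into user space. The requested range must have
 * been allocated earlier through alloc_dm; this is checked against the
 * ucontext's dm_pages bitmap before the BAR pages are remapped
 * write-combined.
 */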
2197 static int dm_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
2198 {
2199 	struct mlx5_ib_ucontext *mctx = to_mucontext(context);
2200 	struct mlx5_ib_dev *dev = to_mdev(context->device);
2201 	u16 page_idx = get_extended_index(vma->vm_pgoff);
2202 	size_t map_size = vma->vm_end - vma->vm_start;
2203 	u32 npages = map_size >> PAGE_SHIFT;
2204 	phys_addr_t pfn;
2205 
2206 	if (find_next_zero_bit(mctx->dm_pages, page_idx + npages, page_idx) !=
2207 	    page_idx + npages)
2208 		return -EINVAL;
2209 
2210 	pfn = ((dev->mdev->bar_addr +
2211 	      MLX5_CAP64_DEV_MEM(dev->mdev, memic_bar_start_addr)) >>
2212 	      PAGE_SHIFT) +
2213 	      page_idx;
2214 	return rdma_user_mmap_io(context, vma, pfn, map_size,
2215 				 pgprot_writecombine(vma->vm_page_prot));
2216 }
2217 
2218 static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
2219 {
2220 	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
2221 	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
2222 	unsigned long command;
2223 	phys_addr_t pfn;
2224 
2225 	command = get_command(vma->vm_pgoff);
2226 	switch (command) {
2227 	case MLX5_IB_MMAP_WC_PAGE:
2228 	case MLX5_IB_MMAP_NC_PAGE:
2229 	case MLX5_IB_MMAP_REGULAR_PAGE:
2230 	case MLX5_IB_MMAP_ALLOC_WC:
2231 		return uar_mmap(dev, command, vma, context);
2232 
2233 	case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
2234 		return -ENOSYS;
2235 
2236 	case MLX5_IB_MMAP_CORE_CLOCK:
2237 		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
2238 			return -EINVAL;
2239 
2240 		if (vma->vm_flags & VM_WRITE)
2241 			return -EPERM;
2242 		vma->vm_flags &= ~VM_MAYWRITE;
2243 
2244 		/* Don't expose to user-space information it shouldn't have */
2245 		if (PAGE_SIZE > 4096)
2246 			return -EOPNOTSUPP;
2247 
2248 		pfn = (dev->mdev->iseg_base +
2249 		       offsetof(struct mlx5_init_seg, internal_timer_h)) >>
2250 			PAGE_SHIFT;
2251 		return rdma_user_mmap_io(&context->ibucontext, vma, pfn,
2252 					 PAGE_SIZE,
2253 					 pgprot_noncached(vma->vm_page_prot));
2254 	case MLX5_IB_MMAP_CLOCK_INFO:
2255 		return mlx5_ib_mmap_clock_info_page(dev, vma, context);
2256 
2257 	case MLX5_IB_MMAP_DEVICE_MEM:
2258 		return dm_mmap(ibcontext, vma);
2259 
2260 	default:
2261 		return -EINVAL;
2262 	}
2263 
2264 	return 0;
2265 }
2266 
2267 static inline int check_dm_type_support(struct mlx5_ib_dev *dev,
2268 					u32 type)
2269 {
2270 	switch (type) {
2271 	case MLX5_IB_UAPI_DM_TYPE_MEMIC:
2272 		if (!MLX5_CAP_DEV_MEM(dev->mdev, memic))
2273 			return -EOPNOTSUPP;
2274 		break;
2275 	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
2276 		if (!capable(CAP_SYS_RAWIO) ||
2277 		    !capable(CAP_NET_RAW))
2278 			return -EPERM;
2279 
2280 		if (!(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner) ||
2281 		      MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, sw_owner)))
2282 			return -EOPNOTSUPP;
2283 		break;
2284 	}
2285 
2286 	return 0;
2287 }
2288 
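/*
 * MEMIC allocation: round the length up to MLX5_MEMIC_BASE_SIZE, carve the
 * range out of the device memory BAR, report the page index and start offset
 * back to user space, and mark the pages in dm_pages so dm_mmap() will accept
 * them later.
 */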
2289 static int handle_alloc_dm_memic(struct ib_ucontext *ctx,
2290 				 struct mlx5_ib_dm *dm,
2291 				 struct ib_dm_alloc_attr *attr,
2292 				 struct uverbs_attr_bundle *attrs)
2293 {
2294 	struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm;
2295 	u64 start_offset;
2296 	u32 page_idx;
2297 	int err;
2298 
2299 	dm->size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE);
2300 
2301 	err = mlx5_cmd_alloc_memic(dm_db, &dm->dev_addr,
2302 				   dm->size, attr->alignment);
2303 	if (err)
2304 		return err;
2305 
2306 	page_idx = (dm->dev_addr - pci_resource_start(dm_db->dev->pdev, 0) -
2307 		    MLX5_CAP64_DEV_MEM(dm_db->dev, memic_bar_start_addr)) >>
2308 		    PAGE_SHIFT;
2309 
2310 	err = uverbs_copy_to(attrs,
2311 			     MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
2312 			     &page_idx, sizeof(page_idx));
2313 	if (err)
2314 		goto err_dealloc;
2315 
2316 	start_offset = dm->dev_addr & ~PAGE_MASK;
2317 	err = uverbs_copy_to(attrs,
2318 			     MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
2319 			     &start_offset, sizeof(start_offset));
2320 	if (err)
2321 		goto err_dealloc;
2322 
2323 	bitmap_set(to_mucontext(ctx)->dm_pages, page_idx,
2324 		   DIV_ROUND_UP(dm->size, PAGE_SIZE));
2325 
2326 	return 0;
2327 
2328 err_dealloc:
2329 	mlx5_cmd_dealloc_memic(dm_db, dm->dev_addr, dm->size);
2330 
2331 	return err;
2332 }
2333 
2334 static int handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
2335 				  struct mlx5_ib_dm *dm,
2336 				  struct ib_dm_alloc_attr *attr,
2337 				  struct uverbs_attr_bundle *attrs,
2338 				  int type)
2339 {
2340 	struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm;
2341 	u64 act_size;
2342 	int err;
2343 
2344 	/* Allocation size must be a multiple of the basic block size
2345 	 * and a power of 2.
2346 	 */
2347 	act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dm_db->dev));
2348 	act_size = roundup_pow_of_two(act_size);
2349 
2350 	dm->size = act_size;
2351 	err = mlx5_cmd_alloc_sw_icm(dm_db, type, act_size,
2352 				    to_mucontext(ctx)->devx_uid, &dm->dev_addr,
2353 				    &dm->icm_dm.obj_id);
2354 	if (err)
2355 		return err;
2356 
2357 	err = uverbs_copy_to(attrs,
2358 			     MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
2359 			     &dm->dev_addr, sizeof(dm->dev_addr));
2360 	if (err)
2361 		mlx5_cmd_dealloc_sw_icm(dm_db, type, dm->size,
2362 					to_mucontext(ctx)->devx_uid,
2363 					dm->dev_addr, dm->icm_dm.obj_id);
2364 
2365 	return err;
2366 }
2367 
2368 struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
2369 			       struct ib_ucontext *context,
2370 			       struct ib_dm_alloc_attr *attr,
2371 			       struct uverbs_attr_bundle *attrs)
2372 {
2373 	struct mlx5_ib_dm *dm;
2374 	enum mlx5_ib_uapi_dm_type type;
2375 	int err;
2376 
2377 	err = uverbs_get_const_default(&type, attrs,
2378 				       MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
2379 				       MLX5_IB_UAPI_DM_TYPE_MEMIC);
2380 	if (err)
2381 		return ERR_PTR(err);
2382 
2383 	mlx5_ib_dbg(to_mdev(ibdev), "alloc_dm req: dm_type=%d user_length=0x%llx log_alignment=%d\n",
2384 		    type, attr->length, attr->alignment);
2385 
2386 	err = check_dm_type_support(to_mdev(ibdev), type);
2387 	if (err)
2388 		return ERR_PTR(err);
2389 
2390 	dm = kzalloc(sizeof(*dm), GFP_KERNEL);
2391 	if (!dm)
2392 		return ERR_PTR(-ENOMEM);
2393 
2394 	dm->type = type;
2395 
2396 	switch (type) {
2397 	case MLX5_IB_UAPI_DM_TYPE_MEMIC:
2398 		err = handle_alloc_dm_memic(context, dm,
2399 					    attr,
2400 					    attrs);
2401 		break;
2402 	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
2403 	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
2404 		err = handle_alloc_dm_sw_icm(context, dm, attr, attrs, type);
2405 		break;
2406 	default:
2407 		err = -EOPNOTSUPP;
2408 	}
2409 
2410 	if (err)
2411 		goto err_free;
2412 
2413 	return &dm->ibdm;
2414 
2415 err_free:
2416 	kfree(dm);
2417 	return ERR_PTR(err);
2418 }
2419 
2420 int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
2421 {
2422 	struct mlx5_ib_ucontext *ctx = rdma_udata_to_drv_context(
2423 		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
2424 	struct mlx5_dm *dm_db = &to_mdev(ibdm->device)->dm;
2425 	struct mlx5_ib_dm *dm = to_mdm(ibdm);
2426 	u32 page_idx;
2427 	int ret;
2428 
2429 	switch (dm->type) {
2430 	case MLX5_IB_UAPI_DM_TYPE_MEMIC:
2431 		ret = mlx5_cmd_dealloc_memic(dm_db, dm->dev_addr, dm->size);
2432 		if (ret)
2433 			return ret;
2434 
2435 		page_idx = (dm->dev_addr -
2436 			    pci_resource_start(dm_db->dev->pdev, 0) -
2437 			    MLX5_CAP64_DEV_MEM(dm_db->dev,
2438 					       memic_bar_start_addr)) >>
2439 			   PAGE_SHIFT;
2440 		bitmap_clear(ctx->dm_pages, page_idx,
2441 			     DIV_ROUND_UP(dm->size, PAGE_SIZE));
2442 		break;
2443 	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
2444 	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
2445 		ret = mlx5_cmd_dealloc_sw_icm(dm_db, dm->type, dm->size,
2446 					      ctx->devx_uid, dm->dev_addr,
2447 					      dm->icm_dm.obj_id);
2448 		if (ret)
2449 			return ret;
2450 		break;
2451 	default:
2452 		return -EOPNOTSUPP;
2453 	}
2454 
2455 	kfree(dm);
2456 
2457 	return 0;
2458 }
2459 
2460 static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
2461 {
2462 	struct mlx5_ib_pd *pd = to_mpd(ibpd);
2463 	struct ib_device *ibdev = ibpd->device;
2464 	struct mlx5_ib_alloc_pd_resp resp;
2465 	int err;
2466 	u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
2467 	u32 in[MLX5_ST_SZ_DW(alloc_pd_in)]   = {};
2468 	u16 uid = 0;
2469 	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
2470 		udata, struct mlx5_ib_ucontext, ibucontext);
2471 
2472 	uid = context ? context->devx_uid : 0;
2473 	MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
2474 	MLX5_SET(alloc_pd_in, in, uid, uid);
2475 	err = mlx5_cmd_exec(to_mdev(ibdev)->mdev, in, sizeof(in),
2476 			    out, sizeof(out));
2477 	if (err)
2478 		return err;
2479 
2480 	pd->pdn = MLX5_GET(alloc_pd_out, out, pd);
2481 	pd->uid = uid;
2482 	if (udata) {
2483 		resp.pdn = pd->pdn;
2484 		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
2485 			mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid);
2486 			return -EFAULT;
2487 		}
2488 	}
2489 
2490 	return 0;
2491 }
2492 
2493 static void mlx5_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
2494 {
2495 	struct mlx5_ib_dev *mdev = to_mdev(pd->device);
2496 	struct mlx5_ib_pd *mpd = to_mpd(pd);
2497 
2498 	mlx5_cmd_dealloc_pd(mdev->mdev, mpd->pdn, mpd->uid);
2499 }
2500 
2501 enum {
2502 	MATCH_CRITERIA_ENABLE_OUTER_BIT,
2503 	MATCH_CRITERIA_ENABLE_MISC_BIT,
2504 	MATCH_CRITERIA_ENABLE_INNER_BIT,
2505 	MATCH_CRITERIA_ENABLE_MISC2_BIT
2506 };
2507 
2508 #define HEADER_IS_ZERO(match_criteria, headers)			           \
2509 	!(memchr_inv(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
2510 		    0, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))       \
2511 
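/*
 * Build the match_criteria_enable bitmask for a flow table entry: a header
 * section (outer, misc, inner, misc2) is enabled only if its match criteria
 * are not all-zero.
 */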
2512 static u8 get_match_criteria_enable(u32 *match_criteria)
2513 {
2514 	u8 match_criteria_enable;
2515 
2516 	match_criteria_enable =
2517 		(!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
2518 		MATCH_CRITERIA_ENABLE_OUTER_BIT;
2519 	match_criteria_enable |=
2520 		(!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
2521 		MATCH_CRITERIA_ENABLE_MISC_BIT;
2522 	match_criteria_enable |=
2523 		(!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
2524 		MATCH_CRITERIA_ENABLE_INNER_BIT;
2525 	match_criteria_enable |=
2526 		(!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
2527 		MATCH_CRITERIA_ENABLE_MISC2_BIT;
2528 
2529 	return match_criteria_enable;
2530 }
2531 
2532 static int set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
2533 {
2534 	u8 entry_mask;
2535 	u8 entry_val;
2536 	int err = 0;
2537 
2538 	if (!mask)
2539 		goto out;
2540 
2541 	entry_mask = MLX5_GET(fte_match_set_lyr_2_4, outer_c,
2542 			      ip_protocol);
2543 	entry_val = MLX5_GET(fte_match_set_lyr_2_4, outer_v,
2544 			     ip_protocol);
2545 	if (!entry_mask) {
2546 		MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask);
2547 		MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
2548 		goto out;
2549 	}
2550 	/* Don't override existing ip protocol */
2551 	if (mask != entry_mask || val != entry_val)
2552 		err = -EINVAL;
2553 out:
2554 	return err;
2555 }
2556 
2557 static void set_flow_label(void *misc_c, void *misc_v, u32 mask, u32 val,
2558 			   bool inner)
2559 {
2560 	if (inner) {
2561 		MLX5_SET(fte_match_set_misc,
2562 			 misc_c, inner_ipv6_flow_label, mask);
2563 		MLX5_SET(fte_match_set_misc,
2564 			 misc_v, inner_ipv6_flow_label, val);
2565 	} else {
2566 		MLX5_SET(fte_match_set_misc,
2567 			 misc_c, outer_ipv6_flow_label, mask);
2568 		MLX5_SET(fte_match_set_misc,
2569 			 misc_v, outer_ipv6_flow_label, val);
2570 	}
2571 }
2572 
2573 static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
2574 {
2575 	MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask);
2576 	MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_ecn, val);
2577 	MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_dscp, mask >> 2);
2578 	MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_dscp, val >> 2);
2579 }
2580 
2581 static int check_mpls_supp_fields(u32 field_support, const __be32 *set_mask)
2582 {
2583 	if (MLX5_GET(fte_match_mpls, set_mask, mpls_label) &&
2584 	    !(field_support & MLX5_FIELD_SUPPORT_MPLS_LABEL))
2585 		return -EOPNOTSUPP;
2586 
2587 	if (MLX5_GET(fte_match_mpls, set_mask, mpls_exp) &&
2588 	    !(field_support & MLX5_FIELD_SUPPORT_MPLS_EXP))
2589 		return -EOPNOTSUPP;
2590 
2591 	if (MLX5_GET(fte_match_mpls, set_mask, mpls_s_bos) &&
2592 	    !(field_support & MLX5_FIELD_SUPPORT_MPLS_S_BOS))
2593 		return -EOPNOTSUPP;
2594 
2595 	if (MLX5_GET(fte_match_mpls, set_mask, mpls_ttl) &&
2596 	    !(field_support & MLX5_FIELD_SUPPORT_MPLS_TTL))
2597 		return -EOPNOTSUPP;
2598 
2599 	return 0;
2600 }
2601 
2602 #define LAST_ETH_FIELD vlan_tag
2603 #define LAST_IB_FIELD sl
2604 #define LAST_IPV4_FIELD tos
2605 #define LAST_IPV6_FIELD traffic_class
2606 #define LAST_TCP_UDP_FIELD src_port
2607 #define LAST_TUNNEL_FIELD tunnel_id
2608 #define LAST_FLOW_TAG_FIELD tag_id
2609 #define LAST_DROP_FIELD size
2610 #define LAST_COUNTERS_FIELD counters
2611 
2612 /* Field is the last supported field */
2613 #define FIELDS_NOT_SUPPORTED(filter, field)\
2614 	memchr_inv((void *)&filter.field  +\
2615 		   sizeof(filter.field), 0,\
2616 		   sizeof(filter) -\
2617 		   offsetof(typeof(filter), field) -\
2618 		   sizeof(filter.field))
2619 
2620 int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
2621 			   bool is_egress,
2622 			   struct mlx5_flow_act *action)
2623 {
2624 
2625 	switch (maction->ib_action.type) {
2626 	case IB_FLOW_ACTION_ESP:
2627 		if (action->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
2628 				      MLX5_FLOW_CONTEXT_ACTION_DECRYPT))
2629 			return -EINVAL;
2630 		/* Currently only AES_GCM keymat is supported by the driver */
2631 		action->esp_id = (uintptr_t)maction->esp_aes_gcm.ctx;
2632 		action->action |= is_egress ?
2633 			MLX5_FLOW_CONTEXT_ACTION_ENCRYPT :
2634 			MLX5_FLOW_CONTEXT_ACTION_DECRYPT;
2635 		return 0;
2636 	case IB_FLOW_ACTION_UNSPECIFIED:
2637 		if (maction->flow_action_raw.sub_type ==
2638 		    MLX5_IB_FLOW_ACTION_MODIFY_HEADER) {
2639 			if (action->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
2640 				return -EINVAL;
2641 			action->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
2642 			action->modify_id = maction->flow_action_raw.action_id;
2643 			return 0;
2644 		}
2645 		if (maction->flow_action_raw.sub_type ==
2646 		    MLX5_IB_FLOW_ACTION_DECAP) {
2647 			if (action->action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
2648 				return -EINVAL;
2649 			action->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
2650 			return 0;
2651 		}
2652 		if (maction->flow_action_raw.sub_type ==
2653 		    MLX5_IB_FLOW_ACTION_PACKET_REFORMAT) {
2654 			if (action->action &
2655 			    MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
2656 				return -EINVAL;
2657 			action->action |=
2658 				MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
2659 			action->reformat_id =
2660 				maction->flow_action_raw.action_id;
2661 			return 0;
2662 		}
2663 		/* fall through */
2664 	default:
2665 		return -EOPNOTSUPP;
2666 	}
2667 }
2668 
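/*
 * Translate a single ib_flow_spec into the device's fte_match_param layout
 * (criteria and value) and/or into flow_act flags. prev_type is needed for
 * MPLS, whose match fields depend on the preceding protocol spec (UDP, GRE or
 * none).
 */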
2669 static int parse_flow_attr(struct mlx5_core_dev *mdev,
2670 			   struct mlx5_flow_spec *spec,
2671 			   const union ib_flow_spec *ib_spec,
2672 			   const struct ib_flow_attr *flow_attr,
2673 			   struct mlx5_flow_act *action, u32 prev_type)
2674 {
2675 	struct mlx5_flow_context *flow_context = &spec->flow_context;
2676 	u32 *match_c = spec->match_criteria;
2677 	u32 *match_v = spec->match_value;
2678 	void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
2679 					   misc_parameters);
2680 	void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v,
2681 					   misc_parameters);
2682 	void *misc_params2_c = MLX5_ADDR_OF(fte_match_param, match_c,
2683 					    misc_parameters_2);
2684 	void *misc_params2_v = MLX5_ADDR_OF(fte_match_param, match_v,
2685 					    misc_parameters_2);
2686 	void *headers_c;
2687 	void *headers_v;
2688 	int match_ipv;
2689 	int ret;
2690 
2691 	if (ib_spec->type & IB_FLOW_SPEC_INNER) {
2692 		headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
2693 					 inner_headers);
2694 		headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
2695 					 inner_headers);
2696 		match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2697 					ft_field_support.inner_ip_version);
2698 	} else {
2699 		headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
2700 					 outer_headers);
2701 		headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
2702 					 outer_headers);
2703 		match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2704 					ft_field_support.outer_ip_version);
2705 	}
2706 
2707 	switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
2708 	case IB_FLOW_SPEC_ETH:
2709 		if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
2710 			return -EOPNOTSUPP;
2711 
2712 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2713 					     dmac_47_16),
2714 				ib_spec->eth.mask.dst_mac);
2715 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2716 					     dmac_47_16),
2717 				ib_spec->eth.val.dst_mac);
2718 
2719 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2720 					     smac_47_16),
2721 				ib_spec->eth.mask.src_mac);
2722 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2723 					     smac_47_16),
2724 				ib_spec->eth.val.src_mac);
2725 
2726 		if (ib_spec->eth.mask.vlan_tag) {
2727 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2728 				 cvlan_tag, 1);
2729 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2730 				 cvlan_tag, 1);
2731 
2732 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2733 				 first_vid, ntohs(ib_spec->eth.mask.vlan_tag));
2734 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2735 				 first_vid, ntohs(ib_spec->eth.val.vlan_tag));
2736 
2737 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2738 				 first_cfi,
2739 				 ntohs(ib_spec->eth.mask.vlan_tag) >> 12);
2740 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2741 				 first_cfi,
2742 				 ntohs(ib_spec->eth.val.vlan_tag) >> 12);
2743 
2744 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2745 				 first_prio,
2746 				 ntohs(ib_spec->eth.mask.vlan_tag) >> 13);
2747 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2748 				 first_prio,
2749 				 ntohs(ib_spec->eth.val.vlan_tag) >> 13);
2750 		}
2751 		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2752 			 ethertype, ntohs(ib_spec->eth.mask.ether_type));
2753 		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2754 			 ethertype, ntohs(ib_spec->eth.val.ether_type));
2755 		break;
2756 	case IB_FLOW_SPEC_IPV4:
2757 		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
2758 			return -EOPNOTSUPP;
2759 
2760 		if (match_ipv) {
2761 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2762 				 ip_version, 0xf);
2763 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2764 				 ip_version, MLX5_FS_IPV4_VERSION);
2765 		} else {
2766 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2767 				 ethertype, 0xffff);
2768 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2769 				 ethertype, ETH_P_IP);
2770 		}
2771 
2772 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2773 				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
2774 		       &ib_spec->ipv4.mask.src_ip,
2775 		       sizeof(ib_spec->ipv4.mask.src_ip));
2776 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2777 				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
2778 		       &ib_spec->ipv4.val.src_ip,
2779 		       sizeof(ib_spec->ipv4.val.src_ip));
2780 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2781 				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2782 		       &ib_spec->ipv4.mask.dst_ip,
2783 		       sizeof(ib_spec->ipv4.mask.dst_ip));
2784 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2785 				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2786 		       &ib_spec->ipv4.val.dst_ip,
2787 		       sizeof(ib_spec->ipv4.val.dst_ip));
2788 
2789 		set_tos(headers_c, headers_v,
2790 			ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos);
2791 
2792 		if (set_proto(headers_c, headers_v,
2793 			      ib_spec->ipv4.mask.proto,
2794 			      ib_spec->ipv4.val.proto))
2795 			return -EINVAL;
2796 		break;
2797 	case IB_FLOW_SPEC_IPV6:
2798 		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD))
2799 			return -EOPNOTSUPP;
2800 
2801 		if (match_ipv) {
2802 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2803 				 ip_version, 0xf);
2804 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2805 				 ip_version, MLX5_FS_IPV6_VERSION);
2806 		} else {
2807 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2808 				 ethertype, 0xffff);
2809 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2810 				 ethertype, ETH_P_IPV6);
2811 		}
2812 
2813 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2814 				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
2815 		       &ib_spec->ipv6.mask.src_ip,
2816 		       sizeof(ib_spec->ipv6.mask.src_ip));
2817 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2818 				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
2819 		       &ib_spec->ipv6.val.src_ip,
2820 		       sizeof(ib_spec->ipv6.val.src_ip));
2821 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2822 				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2823 		       &ib_spec->ipv6.mask.dst_ip,
2824 		       sizeof(ib_spec->ipv6.mask.dst_ip));
2825 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2826 				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2827 		       &ib_spec->ipv6.val.dst_ip,
2828 		       sizeof(ib_spec->ipv6.val.dst_ip));
2829 
2830 		set_tos(headers_c, headers_v,
2831 			ib_spec->ipv6.mask.traffic_class,
2832 			ib_spec->ipv6.val.traffic_class);
2833 
2834 		if (set_proto(headers_c, headers_v,
2835 			      ib_spec->ipv6.mask.next_hdr,
2836 			      ib_spec->ipv6.val.next_hdr))
2837 			return -EINVAL;
2838 
2839 		set_flow_label(misc_params_c, misc_params_v,
2840 			       ntohl(ib_spec->ipv6.mask.flow_label),
2841 			       ntohl(ib_spec->ipv6.val.flow_label),
2842 			       ib_spec->type & IB_FLOW_SPEC_INNER);
2843 		break;
2844 	case IB_FLOW_SPEC_ESP:
2845 		if (ib_spec->esp.mask.seq)
2846 			return -EOPNOTSUPP;
2847 
2848 		MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi,
2849 			 ntohl(ib_spec->esp.mask.spi));
2850 		MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi,
2851 			 ntohl(ib_spec->esp.val.spi));
2852 		break;
2853 	case IB_FLOW_SPEC_TCP:
2854 		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
2855 					 LAST_TCP_UDP_FIELD))
2856 			return -EOPNOTSUPP;
2857 
2858 		if (set_proto(headers_c, headers_v, 0xff, IPPROTO_TCP))
2859 			return -EINVAL;
2860 
2861 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_sport,
2862 			 ntohs(ib_spec->tcp_udp.mask.src_port));
2863 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
2864 			 ntohs(ib_spec->tcp_udp.val.src_port));
2865 
2866 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_dport,
2867 			 ntohs(ib_spec->tcp_udp.mask.dst_port));
2868 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
2869 			 ntohs(ib_spec->tcp_udp.val.dst_port));
2870 		break;
2871 	case IB_FLOW_SPEC_UDP:
2872 		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
2873 					 LAST_TCP_UDP_FIELD))
2874 			return -EOPNOTSUPP;
2875 
2876 		if (set_proto(headers_c, headers_v, 0xff, IPPROTO_UDP))
2877 			return -EINVAL;
2878 
2879 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport,
2880 			 ntohs(ib_spec->tcp_udp.mask.src_port));
2881 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
2882 			 ntohs(ib_spec->tcp_udp.val.src_port));
2883 
2884 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport,
2885 			 ntohs(ib_spec->tcp_udp.mask.dst_port));
2886 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
2887 			 ntohs(ib_spec->tcp_udp.val.dst_port));
2888 		break;
2889 	case IB_FLOW_SPEC_GRE:
2890 		if (ib_spec->gre.mask.c_ks_res0_ver)
2891 			return -EOPNOTSUPP;
2892 
2893 		if (set_proto(headers_c, headers_v, 0xff, IPPROTO_GRE))
2894 			return -EINVAL;
2895 
2896 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
2897 			 0xff);
2898 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2899 			 IPPROTO_GRE);
2900 
2901 		MLX5_SET(fte_match_set_misc, misc_params_c, gre_protocol,
2902 			 ntohs(ib_spec->gre.mask.protocol));
2903 		MLX5_SET(fte_match_set_misc, misc_params_v, gre_protocol,
2904 			 ntohs(ib_spec->gre.val.protocol));
2905 
2906 		memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c,
2907 				    gre_key.nvgre.hi),
2908 		       &ib_spec->gre.mask.key,
2909 		       sizeof(ib_spec->gre.mask.key));
2910 		memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_v,
2911 				    gre_key.nvgre.hi),
2912 		       &ib_spec->gre.val.key,
2913 		       sizeof(ib_spec->gre.val.key));
2914 		break;
2915 	case IB_FLOW_SPEC_MPLS:
2916 		switch (prev_type) {
2917 		case IB_FLOW_SPEC_UDP:
2918 			if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2919 						   ft_field_support.outer_first_mpls_over_udp),
2920 						   &ib_spec->mpls.mask.tag))
2921 				return -EOPNOTSUPP;
2922 
2923 			memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
2924 					    outer_first_mpls_over_udp),
2925 			       &ib_spec->mpls.val.tag,
2926 			       sizeof(ib_spec->mpls.val.tag));
2927 			memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
2928 					    outer_first_mpls_over_udp),
2929 			       &ib_spec->mpls.mask.tag,
2930 			       sizeof(ib_spec->mpls.mask.tag));
2931 			break;
2932 		case IB_FLOW_SPEC_GRE:
2933 			if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2934 						   ft_field_support.outer_first_mpls_over_gre),
2935 						   &ib_spec->mpls.mask.tag))
2936 				return -EOPNOTSUPP;
2937 
2938 			memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
2939 					    outer_first_mpls_over_gre),
2940 			       &ib_spec->mpls.val.tag,
2941 			       sizeof(ib_spec->mpls.val.tag));
2942 			memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
2943 					    outer_first_mpls_over_gre),
2944 			       &ib_spec->mpls.mask.tag,
2945 			       sizeof(ib_spec->mpls.mask.tag));
2946 			break;
2947 		default:
2948 			if (ib_spec->type & IB_FLOW_SPEC_INNER) {
2949 				if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2950 							   ft_field_support.inner_first_mpls),
2951 							   &ib_spec->mpls.mask.tag))
2952 					return -EOPNOTSUPP;
2953 
2954 				memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
2955 						    inner_first_mpls),
2956 				       &ib_spec->mpls.val.tag,
2957 				       sizeof(ib_spec->mpls.val.tag));
2958 				memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
2959 						    inner_first_mpls),
2960 				       &ib_spec->mpls.mask.tag,
2961 				       sizeof(ib_spec->mpls.mask.tag));
2962 			} else {
2963 				if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2964 							   ft_field_support.outer_first_mpls),
2965 							   &ib_spec->mpls.mask.tag))
2966 					return -EOPNOTSUPP;
2967 
2968 				memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
2969 						    outer_first_mpls),
2970 				       &ib_spec->mpls.val.tag,
2971 				       sizeof(ib_spec->mpls.val.tag));
2972 				memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
2973 						    outer_first_mpls),
2974 				       &ib_spec->mpls.mask.tag,
2975 				       sizeof(ib_spec->mpls.mask.tag));
2976 			}
2977 		}
2978 		break;
2979 	case IB_FLOW_SPEC_VXLAN_TUNNEL:
2980 		if (FIELDS_NOT_SUPPORTED(ib_spec->tunnel.mask,
2981 					 LAST_TUNNEL_FIELD))
2982 			return -EOPNOTSUPP;
2983 
2984 		MLX5_SET(fte_match_set_misc, misc_params_c, vxlan_vni,
2985 			 ntohl(ib_spec->tunnel.mask.tunnel_id));
2986 		MLX5_SET(fte_match_set_misc, misc_params_v, vxlan_vni,
2987 			 ntohl(ib_spec->tunnel.val.tunnel_id));
2988 		break;
2989 	case IB_FLOW_SPEC_ACTION_TAG:
2990 		if (FIELDS_NOT_SUPPORTED(ib_spec->flow_tag,
2991 					 LAST_FLOW_TAG_FIELD))
2992 			return -EOPNOTSUPP;
2993 		if (ib_spec->flow_tag.tag_id >= BIT(24))
2994 			return -EINVAL;
2995 
2996 		flow_context->flow_tag = ib_spec->flow_tag.tag_id;
2997 		flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
2998 		break;
2999 	case IB_FLOW_SPEC_ACTION_DROP:
3000 		if (FIELDS_NOT_SUPPORTED(ib_spec->drop,
3001 					 LAST_DROP_FIELD))
3002 			return -EOPNOTSUPP;
3003 		action->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
3004 		break;
3005 	case IB_FLOW_SPEC_ACTION_HANDLE:
3006 		ret = parse_flow_flow_action(to_mflow_act(ib_spec->action.act),
3007 			flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS, action);
3008 		if (ret)
3009 			return ret;
3010 		break;
3011 	case IB_FLOW_SPEC_ACTION_COUNT:
3012 		if (FIELDS_NOT_SUPPORTED(ib_spec->flow_count,
3013 					 LAST_COUNTERS_FIELD))
3014 			return -EOPNOTSUPP;
3015 
3016 		/* for now support only one counters spec per flow */
3017 		if (action->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
3018 			return -EINVAL;
3019 
3020 		action->counters = ib_spec->flow_count.counters;
3021 		action->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
3022 		break;
3023 	default:
3024 		return -EINVAL;
3025 	}
3026 
3027 	return 0;
3028 }
3029 
3030 /* If a flow could catch both multicast and unicast packets,
3031  * it won't fall into the multicast flow steering table and this rule
3032  * could steal other multicast packets.
3033  */
3034 static bool flow_is_multicast_only(const struct ib_flow_attr *ib_attr)
3035 {
3036 	union ib_flow_spec *flow_spec;
3037 
3038 	if (ib_attr->type != IB_FLOW_ATTR_NORMAL ||
3039 	    ib_attr->num_of_specs < 1)
3040 		return false;
3041 
3042 	flow_spec = (union ib_flow_spec *)(ib_attr + 1);
3043 	if (flow_spec->type == IB_FLOW_SPEC_IPV4) {
3044 		struct ib_flow_spec_ipv4 *ipv4_spec;
3045 
3046 		ipv4_spec = (struct ib_flow_spec_ipv4 *)flow_spec;
3047 		if (ipv4_is_multicast(ipv4_spec->val.dst_ip))
3048 			return true;
3049 
3050 		return false;
3051 	}
3052 
3053 	if (flow_spec->type == IB_FLOW_SPEC_ETH) {
3054 		struct ib_flow_spec_eth *eth_spec;
3055 
3056 		eth_spec = (struct ib_flow_spec_eth *)flow_spec;
3057 		return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
3058 		       is_multicast_ether_addr(eth_spec->val.dst_mac);
3059 	}
3060 
3061 	return false;
3062 }
3063 
3064 enum valid_spec {
3065 	VALID_SPEC_INVALID,
3066 	VALID_SPEC_VALID,
3067 	VALID_SPEC_NA,
3068 };
3069 
3070 static enum valid_spec
3071 is_valid_esp_aes_gcm(struct mlx5_core_dev *mdev,
3072 		     const struct mlx5_flow_spec *spec,
3073 		     const struct mlx5_flow_act *flow_act,
3074 		     bool egress)
3075 {
3076 	const u32 *match_c = spec->match_criteria;
3077 	bool is_crypto =
3078 		(flow_act->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
3079 				     MLX5_FLOW_CONTEXT_ACTION_DECRYPT));
3080 	bool is_ipsec = mlx5_fs_is_ipsec_flow(match_c);
3081 	bool is_drop = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_DROP;
3082 
3083 	/*
3084 	 * Currently only crypto is supported in egress; once regular egress
3085 	 * rules are supported, non-crypto specs should still return VALID_SPEC_NA.
3086 	 */
3087 	if (!is_crypto)
3088 		return VALID_SPEC_NA;
3089 
3090 	return is_crypto && is_ipsec &&
3091 		(!egress || (!is_drop &&
3092 			     !(spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG))) ?
3093 		VALID_SPEC_VALID : VALID_SPEC_INVALID;
3094 }
3095 
3096 static bool is_valid_spec(struct mlx5_core_dev *mdev,
3097 			  const struct mlx5_flow_spec *spec,
3098 			  const struct mlx5_flow_act *flow_act,
3099 			  bool egress)
3100 {
3101 	/* We currently only support IPsec egress flows */
3102 	return is_valid_esp_aes_gcm(mdev, spec, flow_act, egress) != VALID_SPEC_INVALID;
3103 }
3104 
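/*
 * Cross-check an explicit L2 ethertype spec against the L3 spec in the same
 * flow: a masked ether_type must agree with the IPv4/IPv6 spec that follows
 * it; MPLS ethertypes are accepted when the device can match on ip_version.
 */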
3105 static bool is_valid_ethertype(struct mlx5_core_dev *mdev,
3106 			       const struct ib_flow_attr *flow_attr,
3107 			       bool check_inner)
3108 {
3109 	union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1);
3110 	int match_ipv = check_inner ?
3111 			MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
3112 					ft_field_support.inner_ip_version) :
3113 			MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
3114 					ft_field_support.outer_ip_version);
3115 	int inner_bit = check_inner ? IB_FLOW_SPEC_INNER : 0;
3116 	bool ipv4_spec_valid, ipv6_spec_valid;
3117 	unsigned int ip_spec_type = 0;
3118 	bool has_ethertype = false;
3119 	unsigned int spec_index;
3120 	bool mask_valid = true;
3121 	u16 eth_type = 0;
3122 	bool type_valid;
3123 
3124 	/* Validate that ethertype is correct */
3125 	for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
3126 		if ((ib_spec->type == (IB_FLOW_SPEC_ETH | inner_bit)) &&
3127 		    ib_spec->eth.mask.ether_type) {
3128 			mask_valid = (ib_spec->eth.mask.ether_type ==
3129 				      htons(0xffff));
3130 			has_ethertype = true;
3131 			eth_type = ntohs(ib_spec->eth.val.ether_type);
3132 		} else if ((ib_spec->type == (IB_FLOW_SPEC_IPV4 | inner_bit)) ||
3133 			   (ib_spec->type == (IB_FLOW_SPEC_IPV6 | inner_bit))) {
3134 			ip_spec_type = ib_spec->type;
3135 		}
3136 		ib_spec = (void *)ib_spec + ib_spec->size;
3137 	}
3138 
3139 	type_valid = (!has_ethertype) || (!ip_spec_type);
3140 	if (!type_valid && mask_valid) {
3141 		ipv4_spec_valid = (eth_type == ETH_P_IP) &&
3142 			(ip_spec_type == (IB_FLOW_SPEC_IPV4 | inner_bit));
3143 		ipv6_spec_valid = (eth_type == ETH_P_IPV6) &&
3144 			(ip_spec_type == (IB_FLOW_SPEC_IPV6 | inner_bit));
3145 
3146 		type_valid = (ipv4_spec_valid) || (ipv6_spec_valid) ||
3147 			     (((eth_type == ETH_P_MPLS_UC) ||
3148 			       (eth_type == ETH_P_MPLS_MC)) && match_ipv);
3149 	}
3150 
3151 	return type_valid;
3152 }
3153 
3154 static bool is_valid_attr(struct mlx5_core_dev *mdev,
3155 			  const struct ib_flow_attr *flow_attr)
3156 {
3157 	return is_valid_ethertype(mdev, flow_attr, false) &&
3158 	       is_valid_ethertype(mdev, flow_attr, true);
3159 }
3160 
3161 static void put_flow_table(struct mlx5_ib_dev *dev,
3162 			   struct mlx5_ib_flow_prio *prio, bool ft_added)
3163 {
3164 	prio->refcount -= !!ft_added;
3165 	if (!prio->refcount) {
3166 		mlx5_destroy_flow_table(prio->flow_table);
3167 		prio->flow_table = NULL;
3168 	}
3169 }
3170 
3171 static void counters_clear_description(struct ib_counters *counters)
3172 {
3173 	struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
3174 
3175 	mutex_lock(&mcounters->mcntrs_mutex);
3176 	kfree(mcounters->counters_data);
3177 	mcounters->counters_data = NULL;
3178 	mcounters->cntrs_max_index = 0;
3179 	mutex_unlock(&mcounters->mcntrs_mutex);
3180 }
3181 
3182 static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
3183 {
3184 	struct mlx5_ib_flow_handler *handler = container_of(flow_id,
3185 							  struct mlx5_ib_flow_handler,
3186 							  ibflow);
3187 	struct mlx5_ib_flow_handler *iter, *tmp;
3188 	struct mlx5_ib_dev *dev = handler->dev;
3189 
3190 	mutex_lock(&dev->flow_db->lock);
3191 
3192 	list_for_each_entry_safe(iter, tmp, &handler->list, list) {
3193 		mlx5_del_flow_rules(iter->rule);
3194 		put_flow_table(dev, iter->prio, true);
3195 		list_del(&iter->list);
3196 		kfree(iter);
3197 	}
3198 
3199 	mlx5_del_flow_rules(handler->rule);
3200 	put_flow_table(dev, handler->prio, true);
3201 	if (handler->ibcounters &&
3202 	    atomic_read(&handler->ibcounters->usecnt) == 1)
3203 		counters_clear_description(handler->ibcounters);
3204 
3205 	mutex_unlock(&dev->flow_db->lock);
3206 	if (handler->flow_matcher)
3207 		atomic_dec(&handler->flow_matcher->usecnt);
3208 	kfree(handler);
3209 
3210 	return 0;
3211 }
3212 
3213 static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap)
3214 {
3215 	priority *= 2;
3216 	if (!dont_trap)
3217 		priority++;
3218 	return priority;
3219 }
3220 
3221 enum flow_table_type {
3222 	MLX5_IB_FT_RX,
3223 	MLX5_IB_FT_TX
3224 };
3225 
3226 #define MLX5_FS_MAX_TYPES	 6
3227 #define MLX5_FS_MAX_ENTRIES	 BIT(16)
3228 
3229 static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_flow_namespace *ns,
3230 					   struct mlx5_ib_flow_prio *prio,
3231 					   int priority,
3232 					   int num_entries, int num_groups,
3233 					   u32 flags)
3234 {
3235 	struct mlx5_flow_table *ft;
3236 
3237 	ft = mlx5_create_auto_grouped_flow_table(ns, priority,
3238 						 num_entries,
3239 						 num_groups,
3240 						 0, flags);
3241 	if (IS_ERR(ft))
3242 		return ERR_CAST(ft);
3243 
3244 	prio->flow_table = ft;
3245 	prio->refcount = 0;
3246 	return prio;
3247 }
3248 
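/*
 * Pick (and lazily create) the flow table for this flow attribute: the
 * bypass/egress priorities for NORMAL rules, the leftovers table for the
 * default rules, or the sniffer tables, capping the number of entries by the
 * device's log_max_ft_size.
 */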
3249 static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
3250 						struct ib_flow_attr *flow_attr,
3251 						enum flow_table_type ft_type)
3252 {
3253 	bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP;
3254 	struct mlx5_flow_namespace *ns = NULL;
3255 	struct mlx5_ib_flow_prio *prio;
3256 	struct mlx5_flow_table *ft;
3257 	int max_table_size;
3258 	int num_entries;
3259 	int num_groups;
3260 	u32 flags = 0;
3261 	int priority;
3262 
3263 	max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3264 						       log_max_ft_size));
3265 	if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
3266 		enum mlx5_flow_namespace_type fn_type;
3267 
3268 		if (flow_is_multicast_only(flow_attr) &&
3269 		    !dont_trap)
3270 			priority = MLX5_IB_FLOW_MCAST_PRIO;
3271 		else
3272 			priority = ib_prio_to_core_prio(flow_attr->priority,
3273 							dont_trap);
3274 		if (ft_type == MLX5_IB_FT_RX) {
3275 			fn_type = MLX5_FLOW_NAMESPACE_BYPASS;
3276 			prio = &dev->flow_db->prios[priority];
3277 			if (!dev->is_rep &&
3278 			    MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap))
3279 				flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
3280 			if (!dev->is_rep &&
3281 			    MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3282 					reformat_l3_tunnel_to_l2))
3283 				flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
3284 		} else {
3285 			max_table_size =
3286 				BIT(MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev,
3287 							      log_max_ft_size));
3288 			fn_type = MLX5_FLOW_NAMESPACE_EGRESS;
3289 			prio = &dev->flow_db->egress_prios[priority];
3290 			if (!dev->is_rep &&
3291 			    MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat))
3292 				flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
3293 		}
3294 		ns = mlx5_get_flow_namespace(dev->mdev, fn_type);
3295 		num_entries = MLX5_FS_MAX_ENTRIES;
3296 		num_groups = MLX5_FS_MAX_TYPES;
3297 	} else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
3298 		   flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
3299 		ns = mlx5_get_flow_namespace(dev->mdev,
3300 					     MLX5_FLOW_NAMESPACE_LEFTOVERS);
3301 		build_leftovers_ft_param(&priority,
3302 					 &num_entries,
3303 					 &num_groups);
3304 		prio = &dev->flow_db->prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];
3305 	} else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
3306 		if (!MLX5_CAP_FLOWTABLE(dev->mdev,
3307 					allow_sniffer_and_nic_rx_shared_tir))
3308 			return ERR_PTR(-ENOTSUPP);
3309 
3310 		ns = mlx5_get_flow_namespace(dev->mdev, ft_type == MLX5_IB_FT_RX ?
3311 					     MLX5_FLOW_NAMESPACE_SNIFFER_RX :
3312 					     MLX5_FLOW_NAMESPACE_SNIFFER_TX);
3313 
3314 		prio = &dev->flow_db->sniffer[ft_type];
3315 		priority = 0;
3316 		num_entries = 1;
3317 		num_groups = 1;
3318 	}
3319 
3320 	if (!ns)
3321 		return ERR_PTR(-ENOTSUPP);
3322 
3323 	max_table_size = min_t(int, num_entries, max_table_size);
3324 
3325 	ft = prio->flow_table;
3326 	if (!ft)
3327 		return _get_prio(ns, prio, priority, max_table_size, num_groups,
3328 				 flags);
3329 
3330 	return prio;
3331 }
3332 
3333 static void set_underlay_qp(struct mlx5_ib_dev *dev,
3334 			    struct mlx5_flow_spec *spec,
3335 			    u32 underlay_qpn)
3336 {
3337 	void *misc_params_c = MLX5_ADDR_OF(fte_match_param,
3338 					   spec->match_criteria,
3339 					   misc_parameters);
3340 	void *misc_params_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
3341 					   misc_parameters);
3342 
3343 	if (underlay_qpn &&
3344 	    MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3345 				      ft_field_support.bth_dst_qp)) {
3346 		MLX5_SET(fte_match_set_misc,
3347 			 misc_params_v, bth_dst_qp, underlay_qpn);
3348 		MLX5_SET(fte_match_set_misc,
3349 			 misc_params_c, bth_dst_qp, 0xffffff);
3350 	}
3351 }
3352 
3353 static int read_flow_counters(struct ib_device *ibdev,
3354 			      struct mlx5_read_counters_attr *read_attr)
3355 {
3356 	struct mlx5_fc *fc = read_attr->hw_cntrs_hndl;
3357 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
3358 
3359 	return mlx5_fc_query(dev->mdev, fc,
3360 			     &read_attr->out[IB_COUNTER_PACKETS],
3361 			     &read_attr->out[IB_COUNTER_BYTES]);
3362 }
3363 
3364 /* flow counters currently expose two counters: packets and bytes */
3365 #define FLOW_COUNTERS_NUM 2
3366 static int counters_set_description(struct ib_counters *counters,
3367 				    enum mlx5_ib_counters_type counters_type,
3368 				    struct mlx5_ib_flow_counters_desc *desc_data,
3369 				    u32 ncounters)
3370 {
3371 	struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
3372 	u32 cntrs_max_index = 0;
3373 	int i;
3374 
3375 	if (counters_type != MLX5_IB_COUNTERS_FLOW)
3376 		return -EINVAL;
3377 
3378 	/* init the fields for the object */
3379 	mcounters->type = counters_type;
3380 	mcounters->read_counters = read_flow_counters;
3381 	mcounters->counters_num = FLOW_COUNTERS_NUM;
3382 	mcounters->ncounters = ncounters;
3383 	/* each counter entry has both a description and an index */
3384 	for (i = 0; i < ncounters; i++) {
3385 		if (desc_data[i].description > IB_COUNTER_BYTES)
3386 			return -EINVAL;
3387 
3388 		if (cntrs_max_index <= desc_data[i].index)
3389 			cntrs_max_index = desc_data[i].index + 1;
3390 	}
3391 
3392 	mutex_lock(&mcounters->mcntrs_mutex);
3393 	mcounters->counters_data = desc_data;
3394 	mcounters->cntrs_max_index = cntrs_max_index;
3395 	mutex_unlock(&mcounters->mcntrs_mutex);
3396 
3397 	return 0;
3398 }
3399 
3400 #define MAX_COUNTERS_NUM (USHRT_MAX / (sizeof(u32) * 2))
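/*
 * Create the hardware flow counter backing an ib_counters object if it does
 * not exist yet and, when user data is supplied, validate and record the
 * per-counter description/index pairs used when the counters are read.
 */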
3401 static int flow_counters_set_data(struct ib_counters *ibcounters,
3402 				  struct mlx5_ib_create_flow *ucmd)
3403 {
3404 	struct mlx5_ib_mcounters *mcounters = to_mcounters(ibcounters);
3405 	struct mlx5_ib_flow_counters_data *cntrs_data = NULL;
3406 	struct mlx5_ib_flow_counters_desc *desc_data = NULL;
3407 	bool hw_hndl = false;
3408 	int ret = 0;
3409 
3410 	if (ucmd && ucmd->ncounters_data != 0) {
3411 		cntrs_data = ucmd->data;
3412 		if (cntrs_data->ncounters > MAX_COUNTERS_NUM)
3413 			return -EINVAL;
3414 
3415 		desc_data = kcalloc(cntrs_data->ncounters,
3416 				    sizeof(*desc_data),
3417 				    GFP_KERNEL);
3418 		if (!desc_data)
3419 			return  -ENOMEM;
3420 
3421 		if (copy_from_user(desc_data,
3422 				   u64_to_user_ptr(cntrs_data->counters_data),
3423 				   sizeof(*desc_data) * cntrs_data->ncounters)) {
3424 			ret = -EFAULT;
3425 			goto free;
3426 		}
3427 	}
3428 
3429 	if (!mcounters->hw_cntrs_hndl) {
3430 		mcounters->hw_cntrs_hndl = mlx5_fc_create(
3431 			to_mdev(ibcounters->device)->mdev, false);
3432 		if (IS_ERR(mcounters->hw_cntrs_hndl)) {
3433 			ret = PTR_ERR(mcounters->hw_cntrs_hndl);
3434 			goto free;
3435 		}
3436 		hw_hndl = true;
3437 	}
3438 
3439 	if (desc_data) {
3440 		/* counters already bound to at least one flow */
3441 		if (mcounters->cntrs_max_index) {
3442 			ret = -EINVAL;
3443 			goto free_hndl;
3444 		}
3445 
3446 		ret = counters_set_description(ibcounters,
3447 					       MLX5_IB_COUNTERS_FLOW,
3448 					       desc_data,
3449 					       cntrs_data->ncounters);
3450 		if (ret)
3451 			goto free_hndl;
3452 
3453 	} else if (!mcounters->cntrs_max_index) {
3454 		/* counters not bound yet, must have udata passed */
3455 		ret = -EINVAL;
3456 		goto free_hndl;
3457 	}
3458 
3459 	return 0;
3460 
3461 free_hndl:
3462 	if (hw_hndl) {
3463 		mlx5_fc_destroy(to_mdev(ibcounters->device)->mdev,
3464 				mcounters->hw_cntrs_hndl);
3465 		mcounters->hw_cntrs_hndl = NULL;
3466 	}
3467 free:
3468 	kfree(desc_data);
3469 	return ret;
3470 }
3471 
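/*
 * For switchdev representors, restrict the rule to traffic originating from
 * the representor's vport: match on metadata_reg_c_0 when vport match
 * metadata is enabled in the eswitch, otherwise match on the misc
 * source_port field.
 */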
3472 static void mlx5_ib_set_rule_source_port(struct mlx5_ib_dev *dev,
3473 					 struct mlx5_flow_spec *spec,
3474 					 struct mlx5_eswitch_rep *rep)
3475 {
3476 	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
3477 	void *misc;
3478 
3479 	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
3480 		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
3481 				    misc_parameters_2);
3482 
3483 		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
3484 			 mlx5_eswitch_get_vport_metadata_for_match(esw,
3485 								   rep->vport));
3486 		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
3487 				    misc_parameters_2);
3488 
3489 		MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0);
3490 	} else {
3491 		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
3492 				    misc_parameters);
3493 
3494 		MLX5_SET(fte_match_set_misc, misc, source_port, rep->vport);
3495 
3496 		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
3497 				    misc_parameters);
3498 
3499 		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
3500 	}
3501 }
3502 
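/*
 * Translate a verbs flow attribute (plus optional user command data) into an
 * mlx5 flow rule in the given flow table priority: parse each flow spec,
 * add underlay-QP and representor source-port matching where needed, attach
 * flow counters, and install the rule with the resulting destinations.
 */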
3503 static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
3504 						      struct mlx5_ib_flow_prio *ft_prio,
3505 						      const struct ib_flow_attr *flow_attr,
3506 						      struct mlx5_flow_destination *dst,
3507 						      u32 underlay_qpn,
3508 						      struct mlx5_ib_create_flow *ucmd)
3509 {
3510 	struct mlx5_flow_table	*ft = ft_prio->flow_table;
3511 	struct mlx5_ib_flow_handler *handler;
3512 	struct mlx5_flow_act flow_act = {};
3513 	struct mlx5_flow_spec *spec;
3514 	struct mlx5_flow_destination dest_arr[2] = {};
3515 	struct mlx5_flow_destination *rule_dst = dest_arr;
3516 	const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
3517 	unsigned int spec_index;
3518 	u32 prev_type = 0;
3519 	int err = 0;
3520 	int dest_num = 0;
3521 	bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;
3522 
3523 	if (!is_valid_attr(dev->mdev, flow_attr))
3524 		return ERR_PTR(-EINVAL);
3525 
3526 	if (dev->is_rep && is_egress)
3527 		return ERR_PTR(-EINVAL);
3528 
3529 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
3530 	handler = kzalloc(sizeof(*handler), GFP_KERNEL);
3531 	if (!handler || !spec) {
3532 		err = -ENOMEM;
3533 		goto free;
3534 	}
3535 
3536 	INIT_LIST_HEAD(&handler->list);
3537 	if (dst) {
3538 		memcpy(&dest_arr[0], dst, sizeof(*dst));
3539 		dest_num++;
3540 	}
3541 
3542 	for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
3543 		err = parse_flow_attr(dev->mdev, spec,
3544 				      ib_flow, flow_attr, &flow_act,
3545 				      prev_type);
3546 		if (err < 0)
3547 			goto free;
3548 
3549 		prev_type = ((union ib_flow_spec *)ib_flow)->type;
3550 		ib_flow += ((union ib_flow_spec *)ib_flow)->size;
3551 	}
3552 
3553 	if (!flow_is_multicast_only(flow_attr))
3554 		set_underlay_qp(dev, spec, underlay_qpn);
3555 
3556 	if (dev->is_rep) {
3557 		struct mlx5_eswitch_rep *rep;
3558 
3559 		rep = dev->port[flow_attr->port - 1].rep;
3560 		if (!rep) {
3561 			err = -EINVAL;
3562 			goto free;
3563 		}
3564 
3565 		mlx5_ib_set_rule_source_port(dev, spec, rep);
3566 	}
3567 
3568 	spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
3569 
3570 	if (is_egress &&
3571 	    !is_valid_spec(dev->mdev, spec, &flow_act, is_egress)) {
3572 		err = -EINVAL;
3573 		goto free;
3574 	}
3575 
3576 	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
3577 		struct mlx5_ib_mcounters *mcounters;
3578 
3579 		err = flow_counters_set_data(flow_act.counters, ucmd);
3580 		if (err)
3581 			goto free;
3582 
3583 		mcounters = to_mcounters(flow_act.counters);
3584 		handler->ibcounters = flow_act.counters;
3585 		dest_arr[dest_num].type =
3586 			MLX5_FLOW_DESTINATION_TYPE_COUNTER;
3587 		dest_arr[dest_num].counter_id =
3588 			mlx5_fc_id(mcounters->hw_cntrs_hndl);
3589 		dest_num++;
3590 	}
3591 
3592 	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
3593 		if (!(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT)) {
3594 			rule_dst = NULL;
3595 			dest_num = 0;
3596 		}
3597 	} else {
3598 		if (is_egress)
3599 			flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
3600 		else
3601 			flow_act.action |=
3602 				dest_num ?  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
3603 					MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
3604 	}
3605 
3606 	if ((spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG)  &&
3607 	    (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
3608 	     flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
3609 		mlx5_ib_warn(dev, "Flow tag %u and attribute type %x isn't allowed in leftovers\n",
3610 			     spec->flow_context.flow_tag, flow_attr->type);
3611 		err = -EINVAL;
3612 		goto free;
3613 	}
3614 	handler->rule = mlx5_add_flow_rules(ft, spec,
3615 					    &flow_act,
3616 					    rule_dst, dest_num);
3617 
3618 	if (IS_ERR(handler->rule)) {
3619 		err = PTR_ERR(handler->rule);
3620 		goto free;
3621 	}
3622 
3623 	ft_prio->refcount++;
3624 	handler->prio = ft_prio;
3625 	handler->dev = dev;
3626 
3627 	ft_prio->flow_table = ft;
3628 free:
3629 	if (err && handler) {
3630 		if (handler->ibcounters &&
3631 		    atomic_read(&handler->ibcounters->usecnt) == 1)
3632 			counters_clear_description(handler->ibcounters);
3633 		kfree(handler);
3634 	}
3635 	kvfree(spec);
3636 	return err ? ERR_PTR(err) : handler;
3637 }
3638 
3639 static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
3640 						     struct mlx5_ib_flow_prio *ft_prio,
3641 						     const struct ib_flow_attr *flow_attr,
3642 						     struct mlx5_flow_destination *dst)
3643 {
3644 	return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0, NULL);
3645 }
3646 
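/*
 * A DONT_TRAP flow is built from two rules sharing one handler list: the
 * first matches without a destination (packets continue to the next
 * priority) and the second forwards the same match to @dst.
 */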
3647 static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev,
3648 							  struct mlx5_ib_flow_prio *ft_prio,
3649 							  struct ib_flow_attr *flow_attr,
3650 							  struct mlx5_flow_destination *dst)
3651 {
3652 	struct mlx5_ib_flow_handler *handler_dst = NULL;
3653 	struct mlx5_ib_flow_handler *handler = NULL;
3654 
3655 	handler = create_flow_rule(dev, ft_prio, flow_attr, NULL);
3656 	if (!IS_ERR(handler)) {
3657 		handler_dst = create_flow_rule(dev, ft_prio,
3658 					       flow_attr, dst);
3659 		if (IS_ERR(handler_dst)) {
3660 			mlx5_del_flow_rules(handler->rule);
3661 			ft_prio->refcount--;
3662 			kfree(handler);
3663 			handler = handler_dst;
3664 		} else {
3665 			list_add(&handler_dst->list, &handler->list);
3666 		}
3667 	}
3668 
3669 	return handler;
3670 }
3671 enum {
3672 	LEFTOVERS_MC,
3673 	LEFTOVERS_UC,
3674 };
3675 
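/*
 * Install catch-all rules for "leftovers" traffic that matched no other
 * flow: one rule keyed on the multicast bit of the destination MAC, and for
 * IB_FLOW_ATTR_ALL_DEFAULT a second rule covering unicast destinations,
 * chained on the same handler list.
 */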
3676 static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev,
3677 							  struct mlx5_ib_flow_prio *ft_prio,
3678 							  struct ib_flow_attr *flow_attr,
3679 							  struct mlx5_flow_destination *dst)
3680 {
3681 	struct mlx5_ib_flow_handler *handler_ucast = NULL;
3682 	struct mlx5_ib_flow_handler *handler = NULL;
3683 
3684 	static struct {
3685 		struct ib_flow_attr	flow_attr;
3686 		struct ib_flow_spec_eth eth_flow;
3687 	} leftovers_specs[] = {
3688 		[LEFTOVERS_MC] = {
3689 			.flow_attr = {
3690 				.num_of_specs = 1,
3691 				.size = sizeof(leftovers_specs[0])
3692 			},
3693 			.eth_flow = {
3694 				.type = IB_FLOW_SPEC_ETH,
3695 				.size = sizeof(struct ib_flow_spec_eth),
3696 				.mask = {.dst_mac = {0x1} },
3697 				.val =  {.dst_mac = {0x1} }
3698 			}
3699 		},
3700 		[LEFTOVERS_UC] = {
3701 			.flow_attr = {
3702 				.num_of_specs = 1,
3703 				.size = sizeof(leftovers_specs[0])
3704 			},
3705 			.eth_flow = {
3706 				.type = IB_FLOW_SPEC_ETH,
3707 				.size = sizeof(struct ib_flow_spec_eth),
3708 				.mask = {.dst_mac = {0x1} },
3709 				.val = {.dst_mac = {} }
3710 			}
3711 		}
3712 	};
3713 
3714 	handler = create_flow_rule(dev, ft_prio,
3715 				   &leftovers_specs[LEFTOVERS_MC].flow_attr,
3716 				   dst);
3717 	if (!IS_ERR(handler) &&
3718 	    flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) {
3719 		handler_ucast = create_flow_rule(dev, ft_prio,
3720 						 &leftovers_specs[LEFTOVERS_UC].flow_attr,
3721 						 dst);
3722 		if (IS_ERR(handler_ucast)) {
3723 			mlx5_del_flow_rules(handler->rule);
3724 			ft_prio->refcount--;
3725 			kfree(handler);
3726 			handler = handler_ucast;
3727 		} else {
3728 			list_add(&handler_ucast->list, &handler->list);
3729 		}
3730 	}
3731 
3732 	return handler;
3733 }
3734 
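/*
 * Sniffer flows install one match-all rule in the RX sniffer table and one
 * in the TX sniffer table, both steering to the same destination and linked
 * on a single handler list.
 */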
3735 static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev,
3736 							struct mlx5_ib_flow_prio *ft_rx,
3737 							struct mlx5_ib_flow_prio *ft_tx,
3738 							struct mlx5_flow_destination *dst)
3739 {
3740 	struct mlx5_ib_flow_handler *handler_rx;
3741 	struct mlx5_ib_flow_handler *handler_tx;
3742 	int err;
3743 	static const struct ib_flow_attr flow_attr  = {
3744 		.num_of_specs = 0,
3745 		.size = sizeof(flow_attr)
3746 	};
3747 
3748 	handler_rx = create_flow_rule(dev, ft_rx, &flow_attr, dst);
3749 	if (IS_ERR(handler_rx)) {
3750 		err = PTR_ERR(handler_rx);
3751 		goto err;
3752 	}
3753 
3754 	handler_tx = create_flow_rule(dev, ft_tx, &flow_attr, dst);
3755 	if (IS_ERR(handler_tx)) {
3756 		err = PTR_ERR(handler_tx);
3757 		goto err_tx;
3758 	}
3759 
3760 	list_add(&handler_tx->list, &handler_rx->list);
3761 
3762 	return handler_rx;
3763 
3764 err_tx:
3765 	mlx5_del_flow_rules(handler_rx->rule);
3766 	ft_rx->refcount--;
3767 	kfree(handler_rx);
3768 err:
3769 	return ERR_PTR(err);
3770 }
3771 
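/*
 * ib_create_flow entry point: validate the flow attribute and the optional
 * user command (counters data), pick the RX/TX flow table priority, build
 * the destination (TIR for ingress, port for egress) and dispatch to the
 * NORMAL / leftovers / sniffer rule creation helpers.
 */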
3772 static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
3773 					   struct ib_flow_attr *flow_attr,
3774 					   int domain,
3775 					   struct ib_udata *udata)
3776 {
3777 	struct mlx5_ib_dev *dev = to_mdev(qp->device);
3778 	struct mlx5_ib_qp *mqp = to_mqp(qp);
3779 	struct mlx5_ib_flow_handler *handler = NULL;
3780 	struct mlx5_flow_destination *dst = NULL;
3781 	struct mlx5_ib_flow_prio *ft_prio_tx = NULL;
3782 	struct mlx5_ib_flow_prio *ft_prio;
3783 	bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;
3784 	struct mlx5_ib_create_flow *ucmd = NULL, ucmd_hdr;
3785 	size_t min_ucmd_sz, required_ucmd_sz;
3786 	int err;
3787 	int underlay_qpn;
3788 
3789 	if (udata && udata->inlen) {
3790 		min_ucmd_sz = offsetof(typeof(ucmd_hdr), reserved) +
3791 				sizeof(ucmd_hdr.reserved);
3792 		if (udata->inlen < min_ucmd_sz)
3793 			return ERR_PTR(-EOPNOTSUPP);
3794 
3795 		err = ib_copy_from_udata(&ucmd_hdr, udata, min_ucmd_sz);
3796 		if (err)
3797 			return ERR_PTR(err);
3798 
3799 		/* currently only one counters data element is supported */
3800 		if (ucmd_hdr.ncounters_data > 1)
3801 			return ERR_PTR(-EINVAL);
3802 
3803 		required_ucmd_sz = min_ucmd_sz +
3804 			sizeof(struct mlx5_ib_flow_counters_data) *
3805 			ucmd_hdr.ncounters_data;
3806 		if (udata->inlen > required_ucmd_sz &&
3807 		    !ib_is_udata_cleared(udata, required_ucmd_sz,
3808 					 udata->inlen - required_ucmd_sz))
3809 			return ERR_PTR(-EOPNOTSUPP);
3810 
3811 		ucmd = kzalloc(required_ucmd_sz, GFP_KERNEL);
3812 		if (!ucmd)
3813 			return ERR_PTR(-ENOMEM);
3814 
3815 		err = ib_copy_from_udata(ucmd, udata, required_ucmd_sz);
3816 		if (err)
3817 			goto free_ucmd;
3818 	}
3819 
3820 	if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) {
3821 		err = -ENOMEM;
3822 		goto free_ucmd;
3823 	}
3824 
3825 	if (domain != IB_FLOW_DOMAIN_USER ||
3826 	    flow_attr->port > dev->num_ports ||
3827 	    (flow_attr->flags & ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP |
3828 				  IB_FLOW_ATTR_FLAGS_EGRESS))) {
3829 		err = -EINVAL;
3830 		goto free_ucmd;
3831 	}
3832 
3833 	if (is_egress &&
3834 	    (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
3835 	     flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
3836 		err = -EINVAL;
3837 		goto free_ucmd;
3838 	}
3839 
3840 	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
3841 	if (!dst) {
3842 		err = -ENOMEM;
3843 		goto free_ucmd;
3844 	}
3845 
3846 	mutex_lock(&dev->flow_db->lock);
3847 
3848 	ft_prio = get_flow_table(dev, flow_attr,
3849 				 is_egress ? MLX5_IB_FT_TX : MLX5_IB_FT_RX);
3850 	if (IS_ERR(ft_prio)) {
3851 		err = PTR_ERR(ft_prio);
3852 		goto unlock;
3853 	}
3854 	if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
3855 		ft_prio_tx = get_flow_table(dev, flow_attr, MLX5_IB_FT_TX);
3856 		if (IS_ERR(ft_prio_tx)) {
3857 			err = PTR_ERR(ft_prio_tx);
3858 			ft_prio_tx = NULL;
3859 			goto destroy_ft;
3860 		}
3861 	}
3862 
3863 	if (is_egress) {
3864 		dst->type = MLX5_FLOW_DESTINATION_TYPE_PORT;
3865 	} else {
3866 		dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
3867 		if (mqp->flags & MLX5_IB_QP_RSS)
3868 			dst->tir_num = mqp->rss_qp.tirn;
3869 		else
3870 			dst->tir_num = mqp->raw_packet_qp.rq.tirn;
3871 	}
3872 
3873 	if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
3874 		if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)  {
3875 			handler = create_dont_trap_rule(dev, ft_prio,
3876 							flow_attr, dst);
3877 		} else {
3878 			underlay_qpn = (mqp->flags & MLX5_IB_QP_UNDERLAY) ?
3879 					mqp->underlay_qpn : 0;
3880 			handler = _create_flow_rule(dev, ft_prio, flow_attr,
3881 						    dst, underlay_qpn, ucmd);
3882 		}
3883 	} else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
3884 		   flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
3885 		handler = create_leftovers_rule(dev, ft_prio, flow_attr,
3886 						dst);
3887 	} else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
3888 		handler = create_sniffer_rule(dev, ft_prio, ft_prio_tx, dst);
3889 	} else {
3890 		err = -EINVAL;
3891 		goto destroy_ft;
3892 	}
3893 
3894 	if (IS_ERR(handler)) {
3895 		err = PTR_ERR(handler);
3896 		handler = NULL;
3897 		goto destroy_ft;
3898 	}
3899 
3900 	mutex_unlock(&dev->flow_db->lock);
3901 	kfree(dst);
3902 	kfree(ucmd);
3903 
3904 	return &handler->ibflow;
3905 
3906 destroy_ft:
3907 	put_flow_table(dev, ft_prio, false);
3908 	if (ft_prio_tx)
3909 		put_flow_table(dev, ft_prio_tx, false);
3910 unlock:
3911 	mutex_unlock(&dev->flow_db->lock);
3912 	kfree(dst);
3913 free_ucmd:
3914 	kfree(ucmd);
3915 	return ERR_PTR(err);
3916 }
3917 
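/*
 * Pick the flow priority/table for a matcher based rule according to the
 * matcher namespace (NIC RX bypass, NIC TX egress or FDB), sizing the table
 * by the corresponding log_max_ft_size capability and enabling decap /
 * reformat when the capabilities allow it.
 */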
3918 static struct mlx5_ib_flow_prio *
3919 _get_flow_table(struct mlx5_ib_dev *dev,
3920 		struct mlx5_ib_flow_matcher *fs_matcher,
3921 		bool mcast)
3922 {
3923 	struct mlx5_flow_namespace *ns = NULL;
3924 	struct mlx5_ib_flow_prio *prio = NULL;
3925 	int max_table_size = 0;
3926 	u32 flags = 0;
3927 	int priority;
3928 
3929 	if (mcast)
3930 		priority = MLX5_IB_FLOW_MCAST_PRIO;
3931 	else
3932 		priority = ib_prio_to_core_prio(fs_matcher->priority, false);
3933 
3934 	if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) {
3935 		max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3936 					log_max_ft_size));
3937 		if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap))
3938 			flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
3939 		if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3940 					      reformat_l3_tunnel_to_l2))
3941 			flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
3942 	} else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS) {
3943 		max_table_size = BIT(
3944 			MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, log_max_ft_size));
3945 		if (MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat))
3946 			flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
3947 	} else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB) {
3948 		max_table_size = BIT(
3949 			MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, log_max_ft_size));
3950 		priority = FDB_BYPASS_PATH;
3951 	}
3952 
3953 	max_table_size = min_t(int, max_table_size, MLX5_FS_MAX_ENTRIES);
3954 
3955 	ns = mlx5_get_flow_namespace(dev->mdev, fs_matcher->ns_type);
3956 	if (!ns)
3957 		return ERR_PTR(-ENOTSUPP);
3958 
3959 	if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS)
3960 		prio = &dev->flow_db->prios[priority];
3961 	else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS)
3962 		prio = &dev->flow_db->egress_prios[priority];
3963 	else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB)
3964 		prio = &dev->flow_db->fdb;
3965 
3966 	if (!prio)
3967 		return ERR_PTR(-EINVAL);
3968 
3969 	if (prio->flow_table)
3970 		return prio;
3971 
3972 	return _get_prio(ns, prio, priority, max_table_size,
3973 			 MLX5_FS_MAX_TYPES, flags);
3974 }
3975 
3976 static struct mlx5_ib_flow_handler *
3977 _create_raw_flow_rule(struct mlx5_ib_dev *dev,
3978 		      struct mlx5_ib_flow_prio *ft_prio,
3979 		      struct mlx5_flow_destination *dst,
3980 		      struct mlx5_ib_flow_matcher  *fs_matcher,
3981 		      struct mlx5_flow_context *flow_context,
3982 		      struct mlx5_flow_act *flow_act,
3983 		      void *cmd_in, int inlen,
3984 		      int dst_num)
3985 {
3986 	struct mlx5_ib_flow_handler *handler;
3987 	struct mlx5_flow_spec *spec;
3988 	struct mlx5_flow_table *ft = ft_prio->flow_table;
3989 	int err = 0;
3990 
3991 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
3992 	handler = kzalloc(sizeof(*handler), GFP_KERNEL);
3993 	if (!handler || !spec) {
3994 		err = -ENOMEM;
3995 		goto free;
3996 	}
3997 
3998 	INIT_LIST_HEAD(&handler->list);
3999 
4000 	memcpy(spec->match_value, cmd_in, inlen);
4001 	memcpy(spec->match_criteria, fs_matcher->matcher_mask.match_params,
4002 	       fs_matcher->mask_len);
4003 	spec->match_criteria_enable = fs_matcher->match_criteria_enable;
4004 	spec->flow_context = *flow_context;
4005 
4006 	handler->rule = mlx5_add_flow_rules(ft, spec,
4007 					    flow_act, dst, dst_num);
4008 
4009 	if (IS_ERR(handler->rule)) {
4010 		err = PTR_ERR(handler->rule);
4011 		goto free;
4012 	}
4013 
4014 	ft_prio->refcount++;
4015 	handler->prio = ft_prio;
4016 	handler->dev = dev;
4017 	ft_prio->flow_table = ft;
4018 
4019 free:
4020 	if (err)
4021 		kfree(handler);
4022 	kvfree(spec);
4023 	return err ? ERR_PTR(err) : handler;
4024 }
4025 
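/*
 * A raw flow is treated as multicast when the matcher enables outer header
 * matching and both the match value and mask describe a multicast
 * destination MAC or a multicast destination IPv4 address.
 */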
4026 static bool raw_fs_is_multicast(struct mlx5_ib_flow_matcher *fs_matcher,
4027 				void *match_v)
4028 {
4029 	void *match_c;
4030 	void *match_v_set_lyr_2_4, *match_c_set_lyr_2_4;
4031 	void *dmac, *dmac_mask;
4032 	void *ipv4, *ipv4_mask;
4033 
4034 	if (!(fs_matcher->match_criteria_enable &
4035 	      (1 << MATCH_CRITERIA_ENABLE_OUTER_BIT)))
4036 		return false;
4037 
4038 	match_c = fs_matcher->matcher_mask.match_params;
4039 	match_v_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_v,
4040 					   outer_headers);
4041 	match_c_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_c,
4042 					   outer_headers);
4043 
4044 	dmac = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4,
4045 			    dmac_47_16);
4046 	dmac_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4,
4047 				 dmac_47_16);
4048 
4049 	if (is_multicast_ether_addr(dmac) &&
4050 	    is_multicast_ether_addr(dmac_mask))
4051 		return true;
4052 
4053 	ipv4 = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4,
4054 			    dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
4055 
4056 	ipv4_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4,
4057 				 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
4058 
4059 	if (ipv4_is_multicast(*(__be32 *)(ipv4)) &&
4060 	    ipv4_is_multicast(*(__be32 *)(ipv4_mask)))
4061 		return true;
4062 
4063 	return false;
4064 }
4065 
4066 struct mlx5_ib_flow_handler *
4067 mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev,
4068 			struct mlx5_ib_flow_matcher *fs_matcher,
4069 			struct mlx5_flow_context *flow_context,
4070 			struct mlx5_flow_act *flow_act,
4071 			u32 counter_id,
4072 			void *cmd_in, int inlen, int dest_id,
4073 			int dest_type)
4074 {
4075 	struct mlx5_flow_destination *dst;
4076 	struct mlx5_ib_flow_prio *ft_prio;
4077 	struct mlx5_ib_flow_handler *handler;
4078 	int dst_num = 0;
4079 	bool mcast;
4080 	int err;
4081 
4082 	if (fs_matcher->flow_type != MLX5_IB_FLOW_TYPE_NORMAL)
4083 		return ERR_PTR(-EOPNOTSUPP);
4084 
4085 	if (fs_matcher->priority > MLX5_IB_FLOW_LAST_PRIO)
4086 		return ERR_PTR(-ENOMEM);
4087 
4088 	dst = kcalloc(2, sizeof(*dst), GFP_KERNEL);
4089 	if (!dst)
4090 		return ERR_PTR(-ENOMEM);
4091 
4092 	mcast = raw_fs_is_multicast(fs_matcher, cmd_in);
4093 	mutex_lock(&dev->flow_db->lock);
4094 
4095 	ft_prio = _get_flow_table(dev, fs_matcher, mcast);
4096 	if (IS_ERR(ft_prio)) {
4097 		err = PTR_ERR(ft_prio);
4098 		goto unlock;
4099 	}
4100 
4101 	if (dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR) {
4102 		dst[dst_num].type = dest_type;
4103 		dst[dst_num].tir_num = dest_id;
4104 		flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
4105 	} else if (dest_type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) {
4106 		dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM;
4107 		dst[dst_num].ft_num = dest_id;
4108 		flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
4109 	} else {
4110 		dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_PORT;
4111 		flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
4112 	}
4113 
4114 	dst_num++;
4115 
4116 	if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
4117 		dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
4118 		dst[dst_num].counter_id = counter_id;
4119 		dst_num++;
4120 	}
4121 
4122 	handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher,
4123 					flow_context, flow_act,
4124 					cmd_in, inlen, dst_num);
4125 
4126 	if (IS_ERR(handler)) {
4127 		err = PTR_ERR(handler);
4128 		goto destroy_ft;
4129 	}
4130 
4131 	mutex_unlock(&dev->flow_db->lock);
4132 	atomic_inc(&fs_matcher->usecnt);
4133 	handler->flow_matcher = fs_matcher;
4134 
4135 	kfree(dst);
4136 
4137 	return handler;
4138 
4139 destroy_ft:
4140 	put_flow_table(dev, ft_prio, false);
4141 unlock:
4142 	mutex_unlock(&dev->flow_db->lock);
4143 	kfree(dst);
4144 
4145 	return ERR_PTR(err);
4146 }
4147 
4148 static u32 mlx5_ib_flow_action_flags_to_accel_xfrm_flags(u32 mlx5_flags)
4149 {
4150 	u32 flags = 0;
4151 
4152 	if (mlx5_flags & MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA)
4153 		flags |= MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA;
4154 
4155 	return flags;
4156 }
4157 
4158 #define MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED	MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA
4159 static struct ib_flow_action *
4160 mlx5_ib_create_flow_action_esp(struct ib_device *device,
4161 			       const struct ib_flow_action_attrs_esp *attr,
4162 			       struct uverbs_attr_bundle *attrs)
4163 {
4164 	struct mlx5_ib_dev *mdev = to_mdev(device);
4165 	struct ib_uverbs_flow_action_esp_keymat_aes_gcm *aes_gcm;
4166 	struct mlx5_accel_esp_xfrm_attrs accel_attrs = {};
4167 	struct mlx5_ib_flow_action *action;
4168 	u64 action_flags;
4169 	u64 flags;
4170 	int err = 0;
4171 
4172 	err = uverbs_get_flags64(
4173 		&action_flags, attrs, MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
4174 		((MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED << 1) - 1));
4175 	if (err)
4176 		return ERR_PTR(err);
4177 
4178 	flags = mlx5_ib_flow_action_flags_to_accel_xfrm_flags(action_flags);
4179 
4180 	/* We currently only support a subset of the standard features. Only a
4181 	 * keymat of type AES_GCM, with icv_len == 16, iv_algo == SEQ and ESN
4182 	 * (with overlap). Full offload mode isn't supported.
4183 	 */
4184 	if (!attr->keymat || attr->replay || attr->encap ||
4185 	    attr->spi || attr->seq || attr->tfc_pad ||
4186 	    attr->hard_limit_pkts ||
4187 	    (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
4188 			     IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT)))
4189 		return ERR_PTR(-EOPNOTSUPP);
4190 
4191 	if (attr->keymat->protocol !=
4192 	    IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM)
4193 		return ERR_PTR(-EOPNOTSUPP);
4194 
4195 	aes_gcm = &attr->keymat->keymat.aes_gcm;
4196 
4197 	if (aes_gcm->icv_len != 16 ||
4198 	    aes_gcm->iv_algo != IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ)
4199 		return ERR_PTR(-EOPNOTSUPP);
4200 
4201 	action = kmalloc(sizeof(*action), GFP_KERNEL);
4202 	if (!action)
4203 		return ERR_PTR(-ENOMEM);
4204 
4205 	action->esp_aes_gcm.ib_flags = attr->flags;
4206 	memcpy(&accel_attrs.keymat.aes_gcm.aes_key, &aes_gcm->aes_key,
4207 	       sizeof(accel_attrs.keymat.aes_gcm.aes_key));
4208 	accel_attrs.keymat.aes_gcm.key_len = aes_gcm->key_len * 8;
4209 	memcpy(&accel_attrs.keymat.aes_gcm.salt, &aes_gcm->salt,
4210 	       sizeof(accel_attrs.keymat.aes_gcm.salt));
4211 	memcpy(&accel_attrs.keymat.aes_gcm.seq_iv, &aes_gcm->iv,
4212 	       sizeof(accel_attrs.keymat.aes_gcm.seq_iv));
4213 	accel_attrs.keymat.aes_gcm.icv_len = aes_gcm->icv_len * 8;
4214 	accel_attrs.keymat.aes_gcm.iv_algo = MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ;
4215 	accel_attrs.keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM;
4216 
4217 	accel_attrs.esn = attr->esn;
4218 	if (attr->flags & IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED)
4219 		accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED;
4220 	if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)
4221 		accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
4222 
4223 	if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT)
4224 		accel_attrs.action |= MLX5_ACCEL_ESP_ACTION_ENCRYPT;
4225 
4226 	action->esp_aes_gcm.ctx =
4227 		mlx5_accel_esp_create_xfrm(mdev->mdev, &accel_attrs, flags);
4228 	if (IS_ERR(action->esp_aes_gcm.ctx)) {
4229 		err = PTR_ERR(action->esp_aes_gcm.ctx);
4230 		goto err_parse;
4231 	}
4232 
4233 	action->esp_aes_gcm.ib_flags = attr->flags;
4234 
4235 	return &action->ib_action;
4236 
4237 err_parse:
4238 	kfree(action);
4239 	return ERR_PTR(err);
4240 }
4241 
4242 static int
4243 mlx5_ib_modify_flow_action_esp(struct ib_flow_action *action,
4244 			       const struct ib_flow_action_attrs_esp *attr,
4245 			       struct uverbs_attr_bundle *attrs)
4246 {
4247 	struct mlx5_ib_flow_action *maction = to_mflow_act(action);
4248 	struct mlx5_accel_esp_xfrm_attrs accel_attrs;
4249 	int err = 0;
4250 
4251 	if (attr->keymat || attr->replay || attr->encap ||
4252 	    attr->spi || attr->seq || attr->tfc_pad ||
4253 	    attr->hard_limit_pkts ||
4254 	    (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
4255 			     IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS |
4256 			     IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)))
4257 		return -EOPNOTSUPP;
4258 
4259 	/* Only the ESN value or the MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP can
4260 	 * be modified.
4261 	 */
4262 	if (!(maction->esp_aes_gcm.ib_flags &
4263 	      IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED) &&
4264 	    attr->flags & (IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
4265 			   IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW))
4266 		return -EINVAL;
4267 
4268 	memcpy(&accel_attrs, &maction->esp_aes_gcm.ctx->attrs,
4269 	       sizeof(accel_attrs));
4270 
4271 	accel_attrs.esn = attr->esn;
4272 	if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)
4273 		accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
4274 	else
4275 		accel_attrs.flags &= ~MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
4276 
4277 	err = mlx5_accel_esp_modify_xfrm(maction->esp_aes_gcm.ctx,
4278 					 &accel_attrs);
4279 	if (err)
4280 		return err;
4281 
4282 	maction->esp_aes_gcm.ib_flags &=
4283 		~IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW;
4284 	maction->esp_aes_gcm.ib_flags |=
4285 		attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW;
4286 
4287 	return 0;
4288 }
4289 
4290 static int mlx5_ib_destroy_flow_action(struct ib_flow_action *action)
4291 {
4292 	struct mlx5_ib_flow_action *maction = to_mflow_act(action);
4293 
4294 	switch (action->type) {
4295 	case IB_FLOW_ACTION_ESP:
4296 		/*
4297 		 * We only support aes_gcm for now, so we implicitly know this is
4298 		 * the underlying crypto.
4299 		 */
4300 		mlx5_accel_esp_destroy_xfrm(maction->esp_aes_gcm.ctx);
4301 		break;
4302 	case IB_FLOW_ACTION_UNSPECIFIED:
4303 		mlx5_ib_destroy_flow_action_raw(maction);
4304 		break;
4305 	default:
4306 		WARN_ON(true);
4307 		break;
4308 	}
4309 
4310 	kfree(maction);
4311 	return 0;
4312 }
4313 
4314 static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
4315 {
4316 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
4317 	struct mlx5_ib_qp *mqp = to_mqp(ibqp);
4318 	int err;
4319 	u16 uid;
4320 
4321 	uid = ibqp->pd ?
4322 		to_mpd(ibqp->pd)->uid : 0;
4323 
4324 	if (mqp->flags & MLX5_IB_QP_UNDERLAY) {
4325 		mlx5_ib_dbg(dev, "Attaching a multicast group to an underlay QP is not supported\n");
4326 		return -EOPNOTSUPP;
4327 	}
4328 
4329 	err = mlx5_cmd_attach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
4330 	if (err)
4331 		mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
4332 			     ibqp->qp_num, gid->raw);
4333 
4334 	return err;
4335 }
4336 
4337 static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
4338 {
4339 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
4340 	int err;
4341 	u16 uid;
4342 
4343 	uid = ibqp->pd ?
4344 		to_mpd(ibqp->pd)->uid : 0;
4345 	err = mlx5_cmd_detach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
4346 	if (err)
4347 		mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
4348 			     ibqp->qp_num, gid->raw);
4349 
4350 	return err;
4351 }
4352 
4353 static int init_node_data(struct mlx5_ib_dev *dev)
4354 {
4355 	int err;
4356 
4357 	err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc);
4358 	if (err)
4359 		return err;
4360 
4361 	dev->mdev->rev_id = dev->mdev->pdev->revision;
4362 
4363 	return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
4364 }
4365 
4366 static ssize_t fw_pages_show(struct device *device,
4367 			     struct device_attribute *attr, char *buf)
4368 {
4369 	struct mlx5_ib_dev *dev =
4370 		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
4371 
4372 	return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages);
4373 }
4374 static DEVICE_ATTR_RO(fw_pages);
4375 
4376 static ssize_t reg_pages_show(struct device *device,
4377 			      struct device_attribute *attr, char *buf)
4378 {
4379 	struct mlx5_ib_dev *dev =
4380 		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
4381 
4382 	return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
4383 }
4384 static DEVICE_ATTR_RO(reg_pages);
4385 
4386 static ssize_t hca_type_show(struct device *device,
4387 			     struct device_attribute *attr, char *buf)
4388 {
4389 	struct mlx5_ib_dev *dev =
4390 		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
4391 
4392 	return sprintf(buf, "MT%d\n", dev->mdev->pdev->device);
4393 }
4394 static DEVICE_ATTR_RO(hca_type);
4395 
4396 static ssize_t hw_rev_show(struct device *device,
4397 			   struct device_attribute *attr, char *buf)
4398 {
4399 	struct mlx5_ib_dev *dev =
4400 		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
4401 
4402 	return sprintf(buf, "%x\n", dev->mdev->rev_id);
4403 }
4404 static DEVICE_ATTR_RO(hw_rev);
4405 
4406 static ssize_t board_id_show(struct device *device,
4407 			     struct device_attribute *attr, char *buf)
4408 {
4409 	struct mlx5_ib_dev *dev =
4410 		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
4411 
4412 	return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
4413 		       dev->mdev->board_id);
4414 }
4415 static DEVICE_ATTR_RO(board_id);
4416 
4417 static struct attribute *mlx5_class_attributes[] = {
4418 	&dev_attr_hw_rev.attr,
4419 	&dev_attr_hca_type.attr,
4420 	&dev_attr_board_id.attr,
4421 	&dev_attr_fw_pages.attr,
4422 	&dev_attr_reg_pages.attr,
4423 	NULL,
4424 };
4425 
4426 static const struct attribute_group mlx5_attr_group = {
4427 	.attrs = mlx5_class_attributes,
4428 };
4429 
4430 static void pkey_change_handler(struct work_struct *work)
4431 {
4432 	struct mlx5_ib_port_resources *ports =
4433 		container_of(work, struct mlx5_ib_port_resources,
4434 			     pkey_change_work);
4435 
4436 	mutex_lock(&ports->devr->mutex);
4437 	mlx5_ib_gsi_pkey_change(ports->gsi);
4438 	mutex_unlock(&ports->devr->mutex);
4439 }
4440 
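/*
 * On a fatal device error, walk every QP on this ibdev and invoke the
 * completion handler of each send/receive CQ that still has outstanding
 * work, so that pending completions are reaped instead of being lost.
 */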
4441 static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
4442 {
4443 	struct mlx5_ib_qp *mqp;
4444 	struct mlx5_ib_cq *send_mcq, *recv_mcq;
4445 	struct mlx5_core_cq *mcq;
4446 	struct list_head cq_armed_list;
4447 	unsigned long flags_qp;
4448 	unsigned long flags_cq;
4449 	unsigned long flags;
4450 
4451 	INIT_LIST_HEAD(&cq_armed_list);
4452 
4453 	/* Go over the QP list residing on this ibdev, synced with QP create/destroy. */
4454 	spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
4455 	list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
4456 		spin_lock_irqsave(&mqp->sq.lock, flags_qp);
4457 		if (mqp->sq.tail != mqp->sq.head) {
4458 			send_mcq = to_mcq(mqp->ibqp.send_cq);
4459 			spin_lock_irqsave(&send_mcq->lock, flags_cq);
4460 			if (send_mcq->mcq.comp &&
4461 			    mqp->ibqp.send_cq->comp_handler) {
4462 				if (!send_mcq->mcq.reset_notify_added) {
4463 					send_mcq->mcq.reset_notify_added = 1;
4464 					list_add_tail(&send_mcq->mcq.reset_notify,
4465 						      &cq_armed_list);
4466 				}
4467 			}
4468 			spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
4469 		}
4470 		spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
4471 		spin_lock_irqsave(&mqp->rq.lock, flags_qp);
4472 		/* no handling is needed for SRQ */
4473 		if (!mqp->ibqp.srq) {
4474 			if (mqp->rq.tail != mqp->rq.head) {
4475 				recv_mcq = to_mcq(mqp->ibqp.recv_cq);
4476 				spin_lock_irqsave(&recv_mcq->lock, flags_cq);
4477 				if (recv_mcq->mcq.comp &&
4478 				    mqp->ibqp.recv_cq->comp_handler) {
4479 					if (!recv_mcq->mcq.reset_notify_added) {
4480 						recv_mcq->mcq.reset_notify_added = 1;
4481 						list_add_tail(&recv_mcq->mcq.reset_notify,
4482 							      &cq_armed_list);
4483 					}
4484 				}
4485 				spin_unlock_irqrestore(&recv_mcq->lock,
4486 						       flags_cq);
4487 			}
4488 		}
4489 		spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
4490 	}
4491 	/* At this point all in-flight post sends have been put to execution,
4492 	 * as we locked/unlocked the above locks. Now arm all the involved CQs.
4493 	 */
4494 	list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
4495 		mcq->comp(mcq, NULL);
4496 	}
4497 	spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
4498 }
4499 
4500 static void delay_drop_handler(struct work_struct *work)
4501 {
4502 	int err;
4503 	struct mlx5_ib_delay_drop *delay_drop =
4504 		container_of(work, struct mlx5_ib_delay_drop,
4505 			     delay_drop_work);
4506 
4507 	atomic_inc(&delay_drop->events_cnt);
4508 
4509 	mutex_lock(&delay_drop->lock);
4510 	err = mlx5_core_set_delay_drop(delay_drop->dev->mdev,
4511 				       delay_drop->timeout);
4512 	if (err) {
4513 		mlx5_ib_warn(delay_drop->dev, "Failed to set delay drop, timeout=%u\n",
4514 			     delay_drop->timeout);
4515 		delay_drop->activate = false;
4516 	}
4517 	mutex_unlock(&delay_drop->lock);
4518 }
4519 
4520 static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
4521 				 struct ib_event *ibev)
4522 {
4523 	u8 port = (eqe->data.port.port >> 4) & 0xf;
4524 
4525 	switch (eqe->sub_type) {
4526 	case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
4527 		if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
4528 					    IB_LINK_LAYER_ETHERNET)
4529 			schedule_work(&ibdev->delay_drop.delay_drop_work);
4530 		break;
4531 	default: /* do nothing */
4532 		return;
4533 	}
4534 }
4535 
4536 static int handle_port_change(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
4537 			      struct ib_event *ibev)
4538 {
4539 	u8 port = (eqe->data.port.port >> 4) & 0xf;
4540 
4541 	ibev->element.port_num = port;
4542 
4543 	switch (eqe->sub_type) {
4544 	case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
4545 	case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
4546 	case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
4547 		/* In RoCE, port up/down events are handled in
4548 		 * mlx5_netdev_event().
4549 		 */
4550 		if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
4551 					    IB_LINK_LAYER_ETHERNET)
4552 			return -EINVAL;
4553 
4554 		ibev->event = (eqe->sub_type == MLX5_PORT_CHANGE_SUBTYPE_ACTIVE) ?
4555 				IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
4556 		break;
4557 
4558 	case MLX5_PORT_CHANGE_SUBTYPE_LID:
4559 		ibev->event = IB_EVENT_LID_CHANGE;
4560 		break;
4561 
4562 	case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
4563 		ibev->event = IB_EVENT_PKEY_CHANGE;
4564 		schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
4565 		break;
4566 
4567 	case MLX5_PORT_CHANGE_SUBTYPE_GUID:
4568 		ibev->event = IB_EVENT_GID_CHANGE;
4569 		break;
4570 
4571 	case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
4572 		ibev->event = IB_EVENT_CLIENT_REREGISTER;
4573 		break;
4574 	default:
4575 		return -EINVAL;
4576 	}
4577 
4578 	return 0;
4579 }
4580 
4581 static void mlx5_ib_handle_event(struct work_struct *_work)
4582 {
4583 	struct mlx5_ib_event_work *work =
4584 		container_of(_work, struct mlx5_ib_event_work, work);
4585 	struct mlx5_ib_dev *ibdev;
4586 	struct ib_event ibev;
4587 	bool fatal = false;
4588 
4589 	if (work->is_slave) {
4590 		ibdev = mlx5_ib_get_ibdev_from_mpi(work->mpi);
4591 		if (!ibdev)
4592 			goto out;
4593 	} else {
4594 		ibdev = work->dev;
4595 	}
4596 
4597 	switch (work->event) {
4598 	case MLX5_DEV_EVENT_SYS_ERROR:
4599 		ibev.event = IB_EVENT_DEVICE_FATAL;
4600 		mlx5_ib_handle_internal_error(ibdev);
4601 		ibev.element.port_num  = (u8)(unsigned long)work->param;
4602 		fatal = true;
4603 		break;
4604 	case MLX5_EVENT_TYPE_PORT_CHANGE:
4605 		if (handle_port_change(ibdev, work->param, &ibev))
4606 			goto out;
4607 		break;
4608 	case MLX5_EVENT_TYPE_GENERAL_EVENT:
4609 		handle_general_event(ibdev, work->param, &ibev);
4610 		/* fall through */
4611 	default:
4612 		goto out;
4613 	}
4614 
4615 	ibev.device = &ibdev->ib_dev;
4616 
4617 	if (!rdma_is_port_valid(&ibdev->ib_dev, ibev.element.port_num)) {
4618 		mlx5_ib_warn(ibdev, "warning: event on port %d\n",  ibev.element.port_num);
4619 		goto out;
4620 	}
4621 
4622 	if (ibdev->ib_active)
4623 		ib_dispatch_event(&ibev);
4624 
4625 	if (fatal)
4626 		ibdev->ib_active = false;
4627 out:
4628 	kfree(work);
4629 }
4630 
4631 static int mlx5_ib_event(struct notifier_block *nb,
4632 			 unsigned long event, void *param)
4633 {
4634 	struct mlx5_ib_event_work *work;
4635 
4636 	work = kmalloc(sizeof(*work), GFP_ATOMIC);
4637 	if (!work)
4638 		return NOTIFY_DONE;
4639 
4640 	INIT_WORK(&work->work, mlx5_ib_handle_event);
4641 	work->dev = container_of(nb, struct mlx5_ib_dev, mdev_events);
4642 	work->is_slave = false;
4643 	work->param = param;
4644 	work->event = event;
4645 
4646 	queue_work(mlx5_ib_event_wq, &work->work);
4647 
4648 	return NOTIFY_OK;
4649 }
4650 
4651 static int mlx5_ib_event_slave_port(struct notifier_block *nb,
4652 				    unsigned long event, void *param)
4653 {
4654 	struct mlx5_ib_event_work *work;
4655 
4656 	work = kmalloc(sizeof(*work), GFP_ATOMIC);
4657 	if (!work)
4658 		return NOTIFY_DONE;
4659 
4660 	INIT_WORK(&work->work, mlx5_ib_handle_event);
4661 	work->mpi = container_of(nb, struct mlx5_ib_multiport_info, mdev_events);
4662 	work->is_slave = true;
4663 	work->param = param;
4664 	work->event = event;
4665 	queue_work(mlx5_ib_event_wq, &work->work);
4666 
4667 	return NOTIFY_OK;
4668 }
4669 
4670 static int set_has_smi_cap(struct mlx5_ib_dev *dev)
4671 {
4672 	struct mlx5_hca_vport_context vport_ctx;
4673 	int err;
4674 	int port;
4675 
4676 	for (port = 1; port <= ARRAY_SIZE(dev->mdev->port_caps); port++) {
4677 		dev->mdev->port_caps[port - 1].has_smi = false;
4678 		if (MLX5_CAP_GEN(dev->mdev, port_type) ==
4679 		    MLX5_CAP_PORT_TYPE_IB) {
4680 			if (MLX5_CAP_GEN(dev->mdev, ib_virt)) {
4681 				err = mlx5_query_hca_vport_context(dev->mdev, 0,
4682 								   port, 0,
4683 								   &vport_ctx);
4684 				if (err) {
4685 					mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
4686 						    port, err);
4687 					return err;
4688 				}
4689 				dev->mdev->port_caps[port - 1].has_smi =
4690 					vport_ctx.has_smi;
4691 			} else {
4692 				dev->mdev->port_caps[port - 1].has_smi = true;
4693 			}
4694 		}
4695 	}
4696 	return 0;
4697 }
4698 
4699 static void get_ext_port_caps(struct mlx5_ib_dev *dev)
4700 {
4701 	int port;
4702 
4703 	for (port = 1; port <= dev->num_ports; port++)
4704 		mlx5_query_ext_port_caps(dev, port);
4705 }
4706 
4707 static int __get_port_caps(struct mlx5_ib_dev *dev, u8 port)
4708 {
4709 	struct ib_device_attr *dprops = NULL;
4710 	struct ib_port_attr *pprops = NULL;
4711 	int err = -ENOMEM;
4712 	struct ib_udata uhw = {.inlen = 0, .outlen = 0};
4713 
4714 	pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
4715 	if (!pprops)
4716 		goto out;
4717 
4718 	dprops = kmalloc(sizeof(*dprops), GFP_KERNEL);
4719 	if (!dprops)
4720 		goto out;
4721 
4722 	err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw);
4723 	if (err) {
4724 		mlx5_ib_warn(dev, "query_device failed %d\n", err);
4725 		goto out;
4726 	}
4727 
4728 	memset(pprops, 0, sizeof(*pprops));
4729 	err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
4730 	if (err) {
4731 		mlx5_ib_warn(dev, "query_port %d failed %d\n",
4732 			     port, err);
4733 		goto out;
4734 	}
4735 
4736 	dev->mdev->port_caps[port - 1].pkey_table_len =
4737 					dprops->max_pkeys;
4738 	dev->mdev->port_caps[port - 1].gid_table_len =
4739 					pprops->gid_tbl_len;
4740 	mlx5_ib_dbg(dev, "port %d: pkey_table_len %d, gid_table_len %d\n",
4741 		    port, dprops->max_pkeys, pprops->gid_tbl_len);
4742 
4743 out:
4744 	kfree(pprops);
4745 	kfree(dprops);
4746 
4747 	return err;
4748 }
4749 
4750 static int get_port_caps(struct mlx5_ib_dev *dev, u8 port)
4751 {
4752 	/* For representors use port 1, as this is the only native
4753 	 * port
4754 	 */
4755 	if (dev->is_rep)
4756 		return __get_port_caps(dev, 1);
4757 	return __get_port_caps(dev, port);
4758 }
4759 
4760 static void destroy_umrc_res(struct mlx5_ib_dev *dev)
4761 {
4762 	int err;
4763 
4764 	err = mlx5_mr_cache_cleanup(dev);
4765 	if (err)
4766 		mlx5_ib_warn(dev, "mr cache cleanup failed\n");
4767 
4768 	if (dev->umrc.qp)
4769 		mlx5_ib_destroy_qp(dev->umrc.qp, NULL);
4770 	if (dev->umrc.cq)
4771 		ib_free_cq(dev->umrc.cq);
4772 	if (dev->umrc.pd)
4773 		ib_dealloc_pd(dev->umrc.pd);
4774 }
4775 
4776 enum {
4777 	MAX_UMR_WR = 128,
4778 };
4779 
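/*
 * Create the resources used to post UMR work requests: a dedicated PD, CQ
 * and a MLX5_IB_QPT_REG_UMR QP that is moved through INIT/RTR/RTS, and
 * finally initialize the MR cache.
 */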
4780 static int create_umr_res(struct mlx5_ib_dev *dev)
4781 {
4782 	struct ib_qp_init_attr *init_attr = NULL;
4783 	struct ib_qp_attr *attr = NULL;
4784 	struct ib_pd *pd;
4785 	struct ib_cq *cq;
4786 	struct ib_qp *qp;
4787 	int ret;
4788 
4789 	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
4790 	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
4791 	if (!attr || !init_attr) {
4792 		ret = -ENOMEM;
4793 		goto error_0;
4794 	}
4795 
4796 	pd = ib_alloc_pd(&dev->ib_dev, 0);
4797 	if (IS_ERR(pd)) {
4798 		mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
4799 		ret = PTR_ERR(pd);
4800 		goto error_0;
4801 	}
4802 
4803 	cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
4804 	if (IS_ERR(cq)) {
4805 		mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
4806 		ret = PTR_ERR(cq);
4807 		goto error_2;
4808 	}
4809 
4810 	init_attr->send_cq = cq;
4811 	init_attr->recv_cq = cq;
4812 	init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
4813 	init_attr->cap.max_send_wr = MAX_UMR_WR;
4814 	init_attr->cap.max_send_sge = 1;
4815 	init_attr->qp_type = MLX5_IB_QPT_REG_UMR;
4816 	init_attr->port_num = 1;
4817 	qp = mlx5_ib_create_qp(pd, init_attr, NULL);
4818 	if (IS_ERR(qp)) {
4819 		mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
4820 		ret = PTR_ERR(qp);
4821 		goto error_3;
4822 	}
4823 	qp->device     = &dev->ib_dev;
4824 	qp->real_qp    = qp;
4825 	qp->uobject    = NULL;
4826 	qp->qp_type    = MLX5_IB_QPT_REG_UMR;
4827 	qp->send_cq    = init_attr->send_cq;
4828 	qp->recv_cq    = init_attr->recv_cq;
4829 
4830 	attr->qp_state = IB_QPS_INIT;
4831 	attr->port_num = 1;
4832 	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
4833 				IB_QP_PORT, NULL);
4834 	if (ret) {
4835 		mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
4836 		goto error_4;
4837 	}
4838 
4839 	memset(attr, 0, sizeof(*attr));
4840 	attr->qp_state = IB_QPS_RTR;
4841 	attr->path_mtu = IB_MTU_256;
4842 
4843 	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
4844 	if (ret) {
4845 		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n");
4846 		goto error_4;
4847 	}
4848 
4849 	memset(attr, 0, sizeof(*attr));
4850 	attr->qp_state = IB_QPS_RTS;
4851 	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
4852 	if (ret) {
4853 		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n");
4854 		goto error_4;
4855 	}
4856 
4857 	dev->umrc.qp = qp;
4858 	dev->umrc.cq = cq;
4859 	dev->umrc.pd = pd;
4860 
4861 	sema_init(&dev->umrc.sem, MAX_UMR_WR);
4862 	ret = mlx5_mr_cache_init(dev);
4863 	if (ret) {
4864 		mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
4865 		goto error_4;
4866 	}
4867 
4868 	kfree(attr);
4869 	kfree(init_attr);
4870 
4871 	return 0;
4872 
4873 error_4:
4874 	mlx5_ib_destroy_qp(qp, NULL);
4875 	dev->umrc.qp = NULL;
4876 
4877 error_3:
4878 	ib_free_cq(cq);
4879 	dev->umrc.cq = NULL;
4880 
4881 error_2:
4882 	ib_dealloc_pd(pd);
4883 	dev->umrc.pd = NULL;
4884 
4885 error_0:
4886 	kfree(attr);
4887 	kfree(init_attr);
4888 	return ret;
4889 }
4890 
4891 static u8 mlx5_get_umr_fence(u8 umr_fence_cap)
4892 {
4893 	switch (umr_fence_cap) {
4894 	case MLX5_CAP_UMR_FENCE_NONE:
4895 		return MLX5_FENCE_MODE_NONE;
4896 	case MLX5_CAP_UMR_FENCE_SMALL:
4897 		return MLX5_FENCE_MODE_INITIATOR_SMALL;
4898 	default:
4899 		return MLX5_FENCE_MODE_STRONG_ORDERING;
4900 	}
4901 }
4902 
4903 static int create_dev_resources(struct mlx5_ib_resources *devr)
4904 {
4905 	struct ib_srq_init_attr attr;
4906 	struct mlx5_ib_dev *dev;
4907 	struct ib_device *ibdev;
4908 	struct ib_cq_init_attr cq_attr = {.cqe = 1};
4909 	int port;
4910 	int ret = 0;
4911 
4912 	dev = container_of(devr, struct mlx5_ib_dev, devr);
4913 	ibdev = &dev->ib_dev;
4914 
4915 	mutex_init(&devr->mutex);
4916 
4917 	devr->p0 = rdma_zalloc_drv_obj(ibdev, ib_pd);
4918 	if (!devr->p0)
4919 		return -ENOMEM;
4920 
4921 	devr->p0->device  = ibdev;
4922 	devr->p0->uobject = NULL;
4923 	atomic_set(&devr->p0->usecnt, 0);
4924 
4925 	ret = mlx5_ib_alloc_pd(devr->p0, NULL);
4926 	if (ret)
4927 		goto error0;
4928 
4929 	devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL);
4930 	if (IS_ERR(devr->c0)) {
4931 		ret = PTR_ERR(devr->c0);
4932 		goto error1;
4933 	}
4934 	devr->c0->device        = &dev->ib_dev;
4935 	devr->c0->uobject       = NULL;
4936 	devr->c0->comp_handler  = NULL;
4937 	devr->c0->event_handler = NULL;
4938 	devr->c0->cq_context    = NULL;
4939 	atomic_set(&devr->c0->usecnt, 0);
4940 
4941 	devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL);
4942 	if (IS_ERR(devr->x0)) {
4943 		ret = PTR_ERR(devr->x0);
4944 		goto error2;
4945 	}
4946 	devr->x0->device = &dev->ib_dev;
4947 	devr->x0->inode = NULL;
4948 	atomic_set(&devr->x0->usecnt, 0);
4949 	mutex_init(&devr->x0->tgt_qp_mutex);
4950 	INIT_LIST_HEAD(&devr->x0->tgt_qp_list);
4951 
4952 	devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL);
4953 	if (IS_ERR(devr->x1)) {
4954 		ret = PTR_ERR(devr->x1);
4955 		goto error3;
4956 	}
4957 	devr->x1->device = &dev->ib_dev;
4958 	devr->x1->inode = NULL;
4959 	atomic_set(&devr->x1->usecnt, 0);
4960 	mutex_init(&devr->x1->tgt_qp_mutex);
4961 	INIT_LIST_HEAD(&devr->x1->tgt_qp_list);
4962 
4963 	memset(&attr, 0, sizeof(attr));
4964 	attr.attr.max_sge = 1;
4965 	attr.attr.max_wr = 1;
4966 	attr.srq_type = IB_SRQT_XRC;
4967 	attr.ext.cq = devr->c0;
4968 	attr.ext.xrc.xrcd = devr->x0;
4969 
4970 	devr->s0 = rdma_zalloc_drv_obj(ibdev, ib_srq);
4971 	if (!devr->s0) {
4972 		ret = -ENOMEM;
4973 		goto error4;
4974 	}
4975 
4976 	devr->s0->device	= &dev->ib_dev;
4977 	devr->s0->pd		= devr->p0;
4978 	devr->s0->srq_type      = IB_SRQT_XRC;
4979 	devr->s0->ext.xrc.xrcd	= devr->x0;
4980 	devr->s0->ext.cq	= devr->c0;
4981 	ret = mlx5_ib_create_srq(devr->s0, &attr, NULL);
4982 	if (ret)
4983 		goto err_create;
4984 
4985 	atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt);
4986 	atomic_inc(&devr->s0->ext.cq->usecnt);
4987 	atomic_inc(&devr->p0->usecnt);
4988 	atomic_set(&devr->s0->usecnt, 0);
4989 
4990 	memset(&attr, 0, sizeof(attr));
4991 	attr.attr.max_sge = 1;
4992 	attr.attr.max_wr = 1;
4993 	attr.srq_type = IB_SRQT_BASIC;
4994 	devr->s1 = rdma_zalloc_drv_obj(ibdev, ib_srq);
4995 	if (!devr->s1) {
4996 		ret = -ENOMEM;
4997 		goto error5;
4998 	}
4999 
5000 	devr->s1->device	= &dev->ib_dev;
5001 	devr->s1->pd		= devr->p0;
5002 	devr->s1->srq_type      = IB_SRQT_BASIC;
5003 	devr->s1->ext.cq	= devr->c0;
5004 
5005 	ret = mlx5_ib_create_srq(devr->s1, &attr, NULL);
5006 	if (ret)
5007 		goto error6;
5008 
5009 	atomic_inc(&devr->p0->usecnt);
5010 	atomic_set(&devr->s1->usecnt, 0);
5011 
5012 	for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) {
5013 		INIT_WORK(&devr->ports[port].pkey_change_work,
5014 			  pkey_change_handler);
5015 		devr->ports[port].devr = devr;
5016 	}
5017 
5018 	return 0;
5019 
5020 error6:
5021 	kfree(devr->s1);
5022 error5:
5023 	mlx5_ib_destroy_srq(devr->s0, NULL);
5024 err_create:
5025 	kfree(devr->s0);
5026 error4:
5027 	mlx5_ib_dealloc_xrcd(devr->x1, NULL);
5028 error3:
5029 	mlx5_ib_dealloc_xrcd(devr->x0, NULL);
5030 error2:
5031 	mlx5_ib_destroy_cq(devr->c0, NULL);
5032 error1:
5033 	mlx5_ib_dealloc_pd(devr->p0, NULL);
5034 error0:
5035 	kfree(devr->p0);
5036 	return ret;
5037 }
5038 
5039 static void destroy_dev_resources(struct mlx5_ib_resources *devr)
5040 {
5041 	int port;
5042 
5043 	mlx5_ib_destroy_srq(devr->s1, NULL);
5044 	kfree(devr->s1);
5045 	mlx5_ib_destroy_srq(devr->s0, NULL);
5046 	kfree(devr->s0);
5047 	mlx5_ib_dealloc_xrcd(devr->x0, NULL);
5048 	mlx5_ib_dealloc_xrcd(devr->x1, NULL);
5049 	mlx5_ib_destroy_cq(devr->c0, NULL);
5050 	mlx5_ib_dealloc_pd(devr->p0, NULL);
5051 	kfree(devr->p0);
5052 
5053 	/* Make sure no P_Key change work items are still executing */
5054 	for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
5055 		cancel_work_sync(&devr->ports[port].pkey_change_work);
5056 }
5057 
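/*
 * Derive the rdma core port capability flags from the link layer and the
 * RoCE version and L3 type capabilities, and from whether multi-port is
 * enabled (raw packet support is not advertised in that case).
 */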
5058 static u32 get_core_cap_flags(struct ib_device *ibdev,
5059 			      struct mlx5_hca_vport_context *rep)
5060 {
5061 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
5062 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1);
5063 	u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type);
5064 	u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version);
5065 	bool raw_support = !mlx5_core_mp_enabled(dev->mdev);
5066 	u32 ret = 0;
5067 
5068 	if (rep->grh_required)
5069 		ret |= RDMA_CORE_CAP_IB_GRH_REQUIRED;
5070 
5071 	if (ll == IB_LINK_LAYER_INFINIBAND)
5072 		return ret | RDMA_CORE_PORT_IBA_IB;
5073 
5074 	if (raw_support)
5075 		ret |= RDMA_CORE_PORT_RAW_PACKET;
5076 
5077 	if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP))
5078 		return ret;
5079 
5080 	if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV6_CAP))
5081 		return ret;
5082 
5083 	if (roce_version_cap & MLX5_ROCE_VERSION_1_CAP)
5084 		ret |= RDMA_CORE_PORT_IBA_ROCE;
5085 
5086 	if (roce_version_cap & MLX5_ROCE_VERSION_2_CAP)
5087 		ret |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
5088 
5089 	return ret;
5090 }
5091 
5092 static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
5093 			       struct ib_port_immutable *immutable)
5094 {
5095 	struct ib_port_attr attr;
5096 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
5097 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num);
5098 	struct mlx5_hca_vport_context rep = {0};
5099 	int err;
5100 
5101 	err = ib_query_port(ibdev, port_num, &attr);
5102 	if (err)
5103 		return err;
5104 
5105 	if (ll == IB_LINK_LAYER_INFINIBAND) {
5106 		err = mlx5_query_hca_vport_context(dev->mdev, 0, port_num, 0,
5107 						   &rep);
5108 		if (err)
5109 			return err;
5110 	}
5111 
5112 	immutable->pkey_tbl_len = attr.pkey_tbl_len;
5113 	immutable->gid_tbl_len = attr.gid_tbl_len;
5114 	immutable->core_cap_flags = get_core_cap_flags(ibdev, &rep);
5115 	if ((ll == IB_LINK_LAYER_INFINIBAND) || MLX5_CAP_GEN(dev->mdev, roce))
5116 		immutable->max_mad_size = IB_MGMT_MAD_SIZE;
5117 
5118 	return 0;
5119 }
5120 
5121 static int mlx5_port_rep_immutable(struct ib_device *ibdev, u8 port_num,
5122 				   struct ib_port_immutable *immutable)
5123 {
5124 	struct ib_port_attr attr;
5125 	int err;
5126 
5127 	immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
5128 
5129 	err = ib_query_port(ibdev, port_num, &attr);
5130 	if (err)
5131 		return err;
5132 
5133 	immutable->pkey_tbl_len = attr.pkey_tbl_len;
5134 	immutable->gid_tbl_len = attr.gid_tbl_len;
5135 	immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
5136 
5137 	return 0;
5138 }
5139 
5140 static void get_dev_fw_str(struct ib_device *ibdev, char *str)
5141 {
5142 	struct mlx5_ib_dev *dev =
5143 		container_of(ibdev, struct mlx5_ib_dev, ib_dev);
5144 	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%04d",
5145 		 fw_rev_maj(dev->mdev), fw_rev_min(dev->mdev),
5146 		 fw_rev_sub(dev->mdev));
5147 }
5148 
5149 static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
5150 {
5151 	struct mlx5_core_dev *mdev = dev->mdev;
5152 	struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev,
5153 								 MLX5_FLOW_NAMESPACE_LAG);
5154 	struct mlx5_flow_table *ft;
5155 	int err;
5156 
5157 	if (!ns || !mlx5_lag_is_roce(mdev))
5158 		return 0;
5159 
5160 	err = mlx5_cmd_create_vport_lag(mdev);
5161 	if (err)
5162 		return err;
5163 
5164 	ft = mlx5_create_lag_demux_flow_table(ns, 0, 0);
5165 	if (IS_ERR(ft)) {
5166 		err = PTR_ERR(ft);
5167 		goto err_destroy_vport_lag;
5168 	}
5169 
5170 	dev->flow_db->lag_demux_ft = ft;
5171 	dev->lag_active = true;
5172 	return 0;
5173 
5174 err_destroy_vport_lag:
5175 	mlx5_cmd_destroy_vport_lag(mdev);
5176 	return err;
5177 }
5178 
5179 static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
5180 {
5181 	struct mlx5_core_dev *mdev = dev->mdev;
5182 
5183 	if (dev->lag_active) {
5184 		dev->lag_active = false;
5185 
5186 		mlx5_destroy_flow_table(dev->flow_db->lag_demux_ft);
5187 		dev->flow_db->lag_demux_ft = NULL;
5188 
5189 		mlx5_cmd_destroy_vport_lag(mdev);
5190 	}
5191 }
5192 
5193 static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
5194 {
5195 	int err;
5196 
5197 	dev->port[port_num].roce.nb.notifier_call = mlx5_netdev_event;
5198 	err = register_netdevice_notifier(&dev->port[port_num].roce.nb);
5199 	if (err) {
5200 		dev->port[port_num].roce.nb.notifier_call = NULL;
5201 		return err;
5202 	}
5203 
5204 	return 0;
5205 }
5206 
5207 static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
5208 {
5209 	if (dev->port[port_num].roce.nb.notifier_call) {
5210 		unregister_netdevice_notifier(&dev->port[port_num].roce.nb);
5211 		dev->port[port_num].roce.nb.notifier_call = NULL;
5212 	}
5213 }
5214 
5215 static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
5216 {
5217 	int err;
5218 
5219 	if (MLX5_CAP_GEN(dev->mdev, roce)) {
5220 		err = mlx5_nic_vport_enable_roce(dev->mdev);
5221 		if (err)
5222 			return err;
5223 	}
5224 
5225 	err = mlx5_eth_lag_init(dev);
5226 	if (err)
5227 		goto err_disable_roce;
5228 
5229 	return 0;
5230 
5231 err_disable_roce:
5232 	if (MLX5_CAP_GEN(dev->mdev, roce))
5233 		mlx5_nic_vport_disable_roce(dev->mdev);
5234 
5235 	return err;
5236 }
5237 
5238 static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
5239 {
5240 	mlx5_eth_lag_cleanup(dev);
5241 	if (MLX5_CAP_GEN(dev->mdev, roce))
5242 		mlx5_nic_vport_disable_roce(dev->mdev);
5243 }
5244 
5245 struct mlx5_ib_counter {
5246 	const char *name;
5247 	size_t offset;
5248 };
5249 
5250 #define INIT_Q_COUNTER(_name)		\
5251 	{ .name = #_name, .offset = MLX5_BYTE_OFF(query_q_counter_out, _name)}
5252 
5253 static const struct mlx5_ib_counter basic_q_cnts[] = {
5254 	INIT_Q_COUNTER(rx_write_requests),
5255 	INIT_Q_COUNTER(rx_read_requests),
5256 	INIT_Q_COUNTER(rx_atomic_requests),
5257 	INIT_Q_COUNTER(out_of_buffer),
5258 };
5259 
5260 static const struct mlx5_ib_counter out_of_seq_q_cnts[] = {
5261 	INIT_Q_COUNTER(out_of_sequence),
5262 };
5263 
5264 static const struct mlx5_ib_counter retrans_q_cnts[] = {
5265 	INIT_Q_COUNTER(duplicate_request),
5266 	INIT_Q_COUNTER(rnr_nak_retry_err),
5267 	INIT_Q_COUNTER(packet_seq_err),
5268 	INIT_Q_COUNTER(implied_nak_seq_err),
5269 	INIT_Q_COUNTER(local_ack_timeout_err),
5270 };
5271 
5272 #define INIT_CONG_COUNTER(_name)		\
5273 	{ .name = #_name, .offset =	\
5274 		MLX5_BYTE_OFF(query_cong_statistics_out, _name ## _high)}
5275 
5276 static const struct mlx5_ib_counter cong_cnts[] = {
5277 	INIT_CONG_COUNTER(rp_cnp_ignored),
5278 	INIT_CONG_COUNTER(rp_cnp_handled),
5279 	INIT_CONG_COUNTER(np_ecn_marked_roce_packets),
5280 	INIT_CONG_COUNTER(np_cnp_sent),
5281 };
5282 
5283 static const struct mlx5_ib_counter extended_err_cnts[] = {
5284 	INIT_Q_COUNTER(resp_local_length_error),
5285 	INIT_Q_COUNTER(resp_cqe_error),
5286 	INIT_Q_COUNTER(req_cqe_error),
5287 	INIT_Q_COUNTER(req_remote_invalid_request),
5288 	INIT_Q_COUNTER(req_remote_access_errors),
5289 	INIT_Q_COUNTER(resp_remote_access_errors),
5290 	INIT_Q_COUNTER(resp_cqe_flush_error),
5291 	INIT_Q_COUNTER(req_cqe_flush_error),
5292 };
5293 
5294 #define INIT_EXT_PPCNT_COUNTER(_name)		\
5295 	{ .name = #_name, .offset =	\
5296 	MLX5_BYTE_OFF(ppcnt_reg, \
5297 		      counter_set.eth_extended_cntrs_grp_data_layout._name##_high)}
5298 
5299 static const struct mlx5_ib_counter ext_ppcnt_cnts[] = {
5300 	INIT_EXT_PPCNT_COUNTER(rx_icrc_encapsulated),
5301 };
5302 
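/* Release the firmware Q counter sets and the name/offset arrays that were
 * allocated per port by mlx5_ib_alloc_counters().
 */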
5303 static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
5304 {
5305 	int i;
5306 
5307 	for (i = 0; i < dev->num_ports; i++) {
5308 		if (dev->port[i].cnts.set_id_valid)
5309 			mlx5_core_dealloc_q_counter(dev->mdev,
5310 						    dev->port[i].cnts.set_id);
5311 		kfree(dev->port[i].cnts.names);
5312 		kfree(dev->port[i].cnts.offsets);
5313 	}
5314 }
5315 
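/* Allocate the name/offset arrays for one port, sized for the basic Q
 * counters plus whichever optional groups the device capabilities enable.
 */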
5316 static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
5317 				    struct mlx5_ib_counters *cnts)
5318 {
5319 	u32 num_counters;
5320 
5321 	num_counters = ARRAY_SIZE(basic_q_cnts);
5322 
5323 	if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt))
5324 		num_counters += ARRAY_SIZE(out_of_seq_q_cnts);
5325 
5326 	if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters))
5327 		num_counters += ARRAY_SIZE(retrans_q_cnts);
5328 
5329 	if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters))
5330 		num_counters += ARRAY_SIZE(extended_err_cnts);
5331 
5332 	cnts->num_q_counters = num_counters;
5333 
5334 	if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
5335 		cnts->num_cong_counters = ARRAY_SIZE(cong_cnts);
5336 		num_counters += ARRAY_SIZE(cong_cnts);
5337 	}
5338 	if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
5339 		cnts->num_ext_ppcnt_counters = ARRAY_SIZE(ext_ppcnt_cnts);
5340 		num_counters += ARRAY_SIZE(ext_ppcnt_cnts);
5341 	}
5342 	cnts->names = kcalloc(num_counters, sizeof(*cnts->names), GFP_KERNEL);
5343 	if (!cnts->names)
5344 		return -ENOMEM;
5345 
5346 	cnts->offsets = kcalloc(num_counters,
5347 				sizeof(*cnts->offsets), GFP_KERNEL);
5348 	if (!cnts->offsets)
5349 		goto err_names;
5350 
5351 	return 0;
5352 
5353 err_names:
5354 	kfree(cnts->names);
5355 	cnts->names = NULL;
5356 	return -ENOMEM;
5357 }
5358 
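/* Fill the name/offset arrays in the same capability-dependent order used
 * by __mlx5_ib_alloc_counters() so indexes line up at query time.
 */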
5359 static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
5360 				  const char **names,
5361 				  size_t *offsets)
5362 {
5363 	int i;
5364 	int j = 0;
5365 
5366 	for (i = 0; i < ARRAY_SIZE(basic_q_cnts); i++, j++) {
5367 		names[j] = basic_q_cnts[i].name;
5368 		offsets[j] = basic_q_cnts[i].offset;
5369 	}
5370 
5371 	if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) {
5372 		for (i = 0; i < ARRAY_SIZE(out_of_seq_q_cnts); i++, j++) {
5373 			names[j] = out_of_seq_q_cnts[i].name;
5374 			offsets[j] = out_of_seq_q_cnts[i].offset;
5375 		}
5376 	}
5377 
5378 	if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) {
5379 		for (i = 0; i < ARRAY_SIZE(retrans_q_cnts); i++, j++) {
5380 			names[j] = retrans_q_cnts[i].name;
5381 			offsets[j] = retrans_q_cnts[i].offset;
5382 		}
5383 	}
5384 
5385 	if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) {
5386 		for (i = 0; i < ARRAY_SIZE(extended_err_cnts); i++, j++) {
5387 			names[j] = extended_err_cnts[i].name;
5388 			offsets[j] = extended_err_cnts[i].offset;
5389 		}
5390 	}
5391 
5392 	if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
5393 		for (i = 0; i < ARRAY_SIZE(cong_cnts); i++, j++) {
5394 			names[j] = cong_cnts[i].name;
5395 			offsets[j] = cong_cnts[i].offset;
5396 		}
5397 	}
5398 
5399 	if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
5400 		for (i = 0; i < ARRAY_SIZE(ext_ppcnt_cnts); i++, j++) {
5401 			names[j] = ext_ppcnt_cnts[i].name;
5402 			offsets[j] = ext_ppcnt_cnts[i].offset;
5403 		}
5404 	}
5405 }
5406 
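/* For every port: allocate and fill the counter description arrays and
 * allocate a Q counter set in firmware.
 */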
5407 static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
5408 {
5409 	int err = 0;
5410 	int i;
5411 	bool is_shared;
5412 
5413 	is_shared = MLX5_CAP_GEN(dev->mdev, log_max_uctx) != 0;
5414 
5415 	for (i = 0; i < dev->num_ports; i++) {
5416 		err = __mlx5_ib_alloc_counters(dev, &dev->port[i].cnts);
5417 		if (err)
5418 			goto err_alloc;
5419 
5420 		mlx5_ib_fill_counters(dev, dev->port[i].cnts.names,
5421 				      dev->port[i].cnts.offsets);
5422 
5423 		err = mlx5_cmd_alloc_q_counter(dev->mdev,
5424 					       &dev->port[i].cnts.set_id,
5425 					       is_shared ?
5426 					       MLX5_SHARED_RESOURCE_UID : 0);
5427 		if (err) {
5428 			mlx5_ib_warn(dev,
5429 				     "couldn't allocate queue counter for port %d, err %d\n",
5430 				     i + 1, err);
5431 			goto err_alloc;
5432 		}
5433 		dev->port[i].cnts.set_id_valid = true;
5434 	}
5435 
5436 	return 0;
5437 
5438 err_alloc:
5439 	mlx5_ib_dealloc_counters(dev);
5440 	return err;
5441 }
5442 
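/* Allocate an rdma_hw_stats structure covering all counter groups exposed
 * for a single port.
 */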
5443 static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
5444 						    u8 port_num)
5445 {
5446 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
5447 	struct mlx5_ib_port *port = &dev->port[port_num - 1];
5448 
5449 	/* We support only per port stats */
5450 	/* We support only per-port stats; port_num 0 requests device-wide stats */
5451 		return NULL;
5452 
5453 	return rdma_alloc_hw_stats_struct(port->cnts.names,
5454 					  port->cnts.num_q_counters +
5455 					  port->cnts.num_cong_counters +
5456 					  port->cnts.num_ext_ppcnt_counters,
5457 					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
5458 }
5459 
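/* Query the port's Q counter set and copy the 32-bit values into the
 * corresponding rdma_hw_stats slots.
 */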
5460 static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev,
5461 				    struct mlx5_ib_port *port,
5462 				    struct rdma_hw_stats *stats)
5463 {
5464 	int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
5465 	void *out;
5466 	__be32 val;
5467 	int ret, i;
5468 
5469 	out = kvzalloc(outlen, GFP_KERNEL);
5470 	if (!out)
5471 		return -ENOMEM;
5472 
5473 	ret = mlx5_core_query_q_counter(mdev,
5474 					port->cnts.set_id, 0,
5475 					out, outlen);
5476 	if (ret)
5477 		goto free;
5478 
5479 	for (i = 0; i < port->cnts.num_q_counters; i++) {
5480 		val = *(__be32 *)(out + port->cnts.offsets[i]);
5481 		stats->value[i] = (u64)be32_to_cpu(val);
5482 	}
5483 
5484 free:
5485 	kvfree(out);
5486 	return ret;
5487 }
5488 
5489 static int mlx5_ib_query_ext_ppcnt_counters(struct mlx5_ib_dev *dev,
5490 					  struct mlx5_ib_port *port,
5491 					  struct rdma_hw_stats *stats)
5492 {
5493 	int offset = port->cnts.num_q_counters + port->cnts.num_cong_counters;
5494 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
5495 	int ret, i;
5496 	void *out;
5497 
5498 	out = kvzalloc(sz, GFP_KERNEL);
5499 	if (!out)
5500 		return -ENOMEM;
5501 
5502 	ret = mlx5_cmd_query_ext_ppcnt_counters(dev->mdev, out);
5503 	if (ret)
5504 		goto free;
5505 
5506 	for (i = 0; i < port->cnts.num_ext_ppcnt_counters; i++) {
5507 		stats->value[i + offset] =
5508 			be64_to_cpup((__be64 *)(out +
5509 				    port->cnts.offsets[i + offset]));
5510 	}
5511 
5512 free:
5513 	kvfree(out);
5514 	return ret;
5515 }
5516 
5517 static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
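/* Fill @stats for a port: Q counters come from the master mdev, extended
 * PPCNT counters are read when supported, and congestion counters are
 * queried from the native port mdev once it is affiliated.
 */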
5518 				struct rdma_hw_stats *stats,
5519 				u8 port_num, int index)
5520 {
5521 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
5522 	struct mlx5_ib_port *port = &dev->port[port_num - 1];
5523 	struct mlx5_core_dev *mdev;
5524 	int ret, num_counters;
5525 	u8 mdev_port_num;
5526 
5527 	if (!stats)
5528 		return -EINVAL;
5529 
5530 	num_counters = port->cnts.num_q_counters +
5531 		       port->cnts.num_cong_counters +
5532 		       port->cnts.num_ext_ppcnt_counters;
5533 
5534 	/* q_counters are per IB device, query the master mdev */
5535 	ret = mlx5_ib_query_q_counters(dev->mdev, port, stats);
5536 	if (ret)
5537 		return ret;
5538 
5539 	if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
5540 		ret = mlx5_ib_query_ext_ppcnt_counters(dev, port, stats);
5541 		if (ret)
5542 			return ret;
5543 	}
5544 
5545 	if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
5546 		mdev = mlx5_ib_get_native_port_mdev(dev, port_num,
5547 						    &mdev_port_num);
5548 		if (!mdev) {
5549 			/* If the port is not affiliated yet, it is in the down
5550 			 * state and has no counters, so the values would read
5551 			 * as zero. No need to query the HCA.
5552 			 */
5553 			goto done;
5554 		}
5555 		ret = mlx5_lag_query_cong_counters(dev->mdev,
5556 						   stats->value +
5557 						   port->cnts.num_q_counters,
5558 						   port->cnts.num_cong_counters,
5559 						   port->cnts.offsets +
5560 						   port->cnts.num_q_counters);
5561 
5562 		mlx5_ib_put_native_port_mdev(dev, port_num);
5563 		if (ret)
5564 			return ret;
5565 	}
5566 
5567 done:
5568 	return num_counters;
5569 }
5570 
5571 static int mlx5_ib_rn_get_params(struct ib_device *device, u8 port_num,
5572 				 enum rdma_netdev_t type,
5573 				 struct rdma_netdev_alloc_params *params)
5574 {
5575 	if (type != RDMA_NETDEV_IPOIB)
5576 		return -EOPNOTSUPP;
5577 
5578 	return mlx5_rdma_rn_get_params(to_mdev(device)->mdev, device, params);
5579 }
5580 
5581 static void delay_drop_debugfs_cleanup(struct mlx5_ib_dev *dev)
5582 {
5583 	if (!dev->delay_drop.dbg)
5584 		return;
5585 	debugfs_remove_recursive(dev->delay_drop.dbg->dir_debugfs);
5586 	kfree(dev->delay_drop.dbg);
5587 	dev->delay_drop.dbg = NULL;
5588 }
5589 
5590 static void cancel_delay_drop(struct mlx5_ib_dev *dev)
5591 {
5592 	if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
5593 		return;
5594 
5595 	cancel_work_sync(&dev->delay_drop.delay_drop_work);
5596 	delay_drop_debugfs_cleanup(dev);
5597 }
5598 
5599 static ssize_t delay_drop_timeout_read(struct file *filp, char __user *buf,
5600 				       size_t count, loff_t *pos)
5601 {
5602 	struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
5603 	char lbuf[20];
5604 	int len;
5605 
5606 	len = snprintf(lbuf, sizeof(lbuf), "%u\n", delay_drop->timeout);
5607 	return simple_read_from_buffer(buf, count, pos, lbuf, len);
5608 }
5609 
5610 static ssize_t delay_drop_timeout_write(struct file *filp, const char __user *buf,
5611 					size_t count, loff_t *pos)
5612 {
5613 	struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
5614 	u32 timeout;
5615 	u32 var;
5616 
5617 	if (kstrtouint_from_user(buf, count, 0, &var))
5618 		return -EFAULT;
5619 
5620 	timeout = min_t(u32, roundup(var, 100), MLX5_MAX_DELAY_DROP_TIMEOUT_MS *
5621 			1000);
5622 	if (timeout != var)
5623 		mlx5_ib_dbg(delay_drop->dev, "Round delay drop timeout to %u usec\n",
5624 			    timeout);
5625 
5626 	delay_drop->timeout = timeout;
5627 
5628 	return count;
5629 }
5630 
5631 static const struct file_operations fops_delay_drop_timeout = {
5632 	.owner	= THIS_MODULE,
5633 	.open	= simple_open,
5634 	.write	= delay_drop_timeout_write,
5635 	.read	= delay_drop_timeout_read,
5636 };
5637 
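/* Expose the delay-drop timeout and the event/RQ counters under debugfs.
 * This is best effort; the caller only logs a warning on failure.
 */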
5638 static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev)
5639 {
5640 	struct mlx5_ib_dbg_delay_drop *dbg;
5641 
5642 	if (!mlx5_debugfs_root)
5643 		return 0;
5644 
5645 	dbg = kzalloc(sizeof(*dbg), GFP_KERNEL);
5646 	if (!dbg)
5647 		return -ENOMEM;
5648 
5649 	dev->delay_drop.dbg = dbg;
5650 
5651 	dbg->dir_debugfs =
5652 		debugfs_create_dir("delay_drop",
5653 				   dev->mdev->priv.dbg_root);
5654 	if (!dbg->dir_debugfs)
5655 		goto out_debugfs;
5656 
5657 	dbg->events_cnt_debugfs =
5658 		debugfs_create_atomic_t("num_timeout_events", 0400,
5659 					dbg->dir_debugfs,
5660 					&dev->delay_drop.events_cnt);
5661 	if (!dbg->events_cnt_debugfs)
5662 		goto out_debugfs;
5663 
5664 	dbg->rqs_cnt_debugfs =
5665 		debugfs_create_atomic_t("num_rqs", 0400,
5666 					dbg->dir_debugfs,
5667 					&dev->delay_drop.rqs_cnt);
5668 	if (!dbg->rqs_cnt_debugfs)
5669 		goto out_debugfs;
5670 
5671 	dbg->timeout_debugfs =
5672 		debugfs_create_file("timeout", 0600,
5673 				    dbg->dir_debugfs,
5674 				    &dev->delay_drop,
5675 				    &fops_delay_drop_timeout);
5676 	if (!dbg->timeout_debugfs)
5677 		goto out_debugfs;
5678 
5679 	return 0;
5680 
5681 out_debugfs:
5682 	delay_drop_debugfs_cleanup(dev);
5683 	return -ENOMEM;
5684 }
5685 
5686 static void init_delay_drop(struct mlx5_ib_dev *dev)
5687 {
5688 	if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
5689 		return;
5690 
5691 	mutex_init(&dev->delay_drop.lock);
5692 	dev->delay_drop.dev = dev;
5693 	dev->delay_drop.activate = false;
5694 	dev->delay_drop.timeout = MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000;
5695 	INIT_WORK(&dev->delay_drop.delay_drop_work, delay_drop_handler);
5696 	atomic_set(&dev->delay_drop.rqs_cnt, 0);
5697 	atomic_set(&dev->delay_drop.events_cnt, 0);
5698 
5699 	if (delay_drop_debugfs_init(dev))
5700 		mlx5_ib_warn(dev, "Failed to init delay drop debugfs\n");
5701 }
5702 
5703 /* The mlx5_ib_multiport_mutex should be held when calling this function */
5704 static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
5705 				      struct mlx5_ib_multiport_info *mpi)
5706 {
5707 	u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
5708 	struct mlx5_ib_port *port = &ibdev->port[port_num];
5709 	int comps;
5710 	int err;
5711 	int i;
5712 
5713 	mlx5_ib_cleanup_cong_debugfs(ibdev, port_num);
5714 
5715 	spin_lock(&port->mp.mpi_lock);
5716 	if (!mpi->ibdev) {
5717 		spin_unlock(&port->mp.mpi_lock);
5718 		return;
5719 	}
5720 
5721 	if (mpi->mdev_events.notifier_call)
5722 		mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events);
5723 	mpi->mdev_events.notifier_call = NULL;
5724 
5725 	mpi->ibdev = NULL;
5726 
5727 	spin_unlock(&port->mp.mpi_lock);
5728 	mlx5_remove_netdev_notifier(ibdev, port_num);
5729 	spin_lock(&port->mp.mpi_lock);
5730 
5731 	comps = mpi->mdev_refcnt;
5732 	if (comps) {
5733 		mpi->unaffiliate = true;
5734 		init_completion(&mpi->unref_comp);
5735 		spin_unlock(&port->mp.mpi_lock);
5736 
5737 		for (i = 0; i < comps; i++)
5738 			wait_for_completion(&mpi->unref_comp);
5739 
5740 		spin_lock(&port->mp.mpi_lock);
5741 		mpi->unaffiliate = false;
5742 	}
5743 
5744 	port->mp.mpi = NULL;
5745 
5746 	list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
5747 
5748 	spin_unlock(&port->mp.mpi_lock);
5749 
5750 	err = mlx5_nic_vport_unaffiliate_multiport(mpi->mdev);
5751 
5752 	mlx5_ib_dbg(ibdev, "unaffiliated port %d\n", port_num + 1);
5753 	/* On error just log it; the pointers still need to be cleaned up
5754 	 * and the mpi added back to the unaffiliated list.
5755 	 */
5756 	if (err)
5757 		mlx5_ib_err(ibdev, "Failed to unaffiliate port %u\n",
5758 			    port_num + 1);
5759 
5760 	ibdev->port[port_num].roce.last_port_state = IB_PORT_DOWN;
5761 }
5762 
5763 /* The mlx5_ib_multiport_mutex should be held when calling this function */
5764 static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
5765 				    struct mlx5_ib_multiport_info *mpi)
5766 {
5767 	u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
5768 	int err;
5769 
5770 	spin_lock(&ibdev->port[port_num].mp.mpi_lock);
5771 	if (ibdev->port[port_num].mp.mpi) {
5772 		mlx5_ib_dbg(ibdev, "port %d already affiliated.\n",
5773 			    port_num + 1);
5774 		spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
5775 		return false;
5776 	}
5777 
5778 	ibdev->port[port_num].mp.mpi = mpi;
5779 	mpi->ibdev = ibdev;
5780 	mpi->mdev_events.notifier_call = NULL;
5781 	spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
5782 
5783 	err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev);
5784 	if (err)
5785 		goto unbind;
5786 
5787 	err = get_port_caps(ibdev, mlx5_core_native_port_num(mpi->mdev));
5788 	if (err)
5789 		goto unbind;
5790 
5791 	err = mlx5_add_netdev_notifier(ibdev, port_num);
5792 	if (err) {
5793 		mlx5_ib_err(ibdev, "failed adding netdev notifier for port %u\n",
5794 			    port_num + 1);
5795 		goto unbind;
5796 	}
5797 
5798 	mpi->mdev_events.notifier_call = mlx5_ib_event_slave_port;
5799 	mlx5_notifier_register(mpi->mdev, &mpi->mdev_events);
5800 
5801 	mlx5_ib_init_cong_debugfs(ibdev, port_num);
5802 
5803 	return true;
5804 
5805 unbind:
5806 	mlx5_ib_unbind_slave_port(ibdev, mpi);
5807 	return false;
5808 }
5809 
5810 static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
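/* On a multi-port capable master: enable RoCE, create a stub mpi entry for
 * the native port and try to bind any unaffiliated slave ports that share
 * this device's system image GUID.
 */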
5811 {
5812 	int port_num = mlx5_core_native_port_num(dev->mdev) - 1;
5813 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
5814 							  port_num + 1);
5815 	struct mlx5_ib_multiport_info *mpi;
5816 	int err;
5817 	int i;
5818 
5819 	if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
5820 		return 0;
5821 
5822 	err = mlx5_query_nic_vport_system_image_guid(dev->mdev,
5823 						     &dev->sys_image_guid);
5824 	if (err)
5825 		return err;
5826 
5827 	err = mlx5_nic_vport_enable_roce(dev->mdev);
5828 	if (err)
5829 		return err;
5830 
5831 	mutex_lock(&mlx5_ib_multiport_mutex);
5832 	for (i = 0; i < dev->num_ports; i++) {
5833 		bool bound = false;
5834 
5835 		/* build a stub multiport info struct for the native port. */
5836 		if (i == port_num) {
5837 			mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
5838 			if (!mpi) {
5839 				mutex_unlock(&mlx5_ib_multiport_mutex);
5840 				mlx5_nic_vport_disable_roce(dev->mdev);
5841 				return -ENOMEM;
5842 			}
5843 
5844 			mpi->is_master = true;
5845 			mpi->mdev = dev->mdev;
5846 			mpi->sys_image_guid = dev->sys_image_guid;
5847 			dev->port[i].mp.mpi = mpi;
5848 			mpi->ibdev = dev;
5849 			mpi = NULL;
5850 			continue;
5851 		}
5852 
5853 		list_for_each_entry(mpi, &mlx5_ib_unaffiliated_port_list,
5854 				    list) {
5855 			if (dev->sys_image_guid == mpi->sys_image_guid &&
5856 			    (mlx5_core_native_port_num(mpi->mdev) - 1) == i) {
5857 				bound = mlx5_ib_bind_slave_port(dev, mpi);
5858 			}
5859 
5860 			if (bound) {
5861 				dev_dbg(mpi->mdev->device,
5862 					"removing port from unaffiliated list.\n");
5863 				mlx5_ib_dbg(dev, "port %d bound\n", i + 1);
5864 				list_del(&mpi->list);
5865 				break;
5866 			}
5867 		}
5868 		if (!bound) {
5869 			get_port_caps(dev, i + 1);
5870 			mlx5_ib_dbg(dev, "no free port found for port %d\n",
5871 				    i + 1);
5872 		}
5873 	}
5874 
5875 	list_add_tail(&dev->ib_dev_list, &mlx5_ib_dev_list);
5876 	mutex_unlock(&mlx5_ib_multiport_mutex);
5877 	return err;
5878 }
5879 
5880 static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
5881 {
5882 	int port_num = mlx5_core_native_port_num(dev->mdev) - 1;
5883 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
5884 							  port_num + 1);
5885 	int i;
5886 
5887 	if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
5888 		return;
5889 
5890 	mutex_lock(&mlx5_ib_multiport_mutex);
5891 	for (i = 0; i < dev->num_ports; i++) {
5892 		if (dev->port[i].mp.mpi) {
5893 			/* Destroy the native port stub */
5894 			if (i == port_num) {
5895 				kfree(dev->port[i].mp.mpi);
5896 				dev->port[i].mp.mpi = NULL;
5897 			} else {
5898 				mlx5_ib_dbg(dev, "unbinding port_num: %d\n", i + 1);
5899 				mlx5_ib_unbind_slave_port(dev, dev->port[i].mp.mpi);
5900 			}
5901 		}
5902 	}
5903 
5904 	mlx5_ib_dbg(dev, "removing from devlist\n");
5905 	list_del(&dev->ib_dev_list);
5906 	mutex_unlock(&mlx5_ib_multiport_mutex);
5907 
5908 	mlx5_nic_vport_disable_roce(dev->mdev);
5909 }
5910 
5911 ADD_UVERBS_ATTRIBUTES_SIMPLE(
5912 	mlx5_ib_dm,
5913 	UVERBS_OBJECT_DM,
5914 	UVERBS_METHOD_DM_ALLOC,
5915 	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
5916 			    UVERBS_ATTR_TYPE(u64),
5917 			    UA_MANDATORY),
5918 	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
5919 			    UVERBS_ATTR_TYPE(u16),
5920 			    UA_OPTIONAL),
5921 	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
5922 			     enum mlx5_ib_uapi_dm_type,
5923 			     UA_OPTIONAL));
5924 
5925 ADD_UVERBS_ATTRIBUTES_SIMPLE(
5926 	mlx5_ib_flow_action,
5927 	UVERBS_OBJECT_FLOW_ACTION,
5928 	UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
5929 	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
5930 			     enum mlx5_ib_uapi_flow_action_flags));
5931 
5932 static const struct uapi_definition mlx5_ib_defs[] = {
5933 #if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
5934 	UAPI_DEF_CHAIN(mlx5_ib_devx_defs),
5935 	UAPI_DEF_CHAIN(mlx5_ib_flow_defs),
5936 #endif
5937 
5938 	UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
5939 				&mlx5_ib_flow_action),
5940 	UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DM, &mlx5_ib_dm),
5941 	{}
5942 };
5943 
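/* Read the hardware flow counters bound to @counters and scatter them into
 * the user buffer according to the description/index pairs recorded when
 * the counters were created.
 */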
5944 static int mlx5_ib_read_counters(struct ib_counters *counters,
5945 				 struct ib_counters_read_attr *read_attr,
5946 				 struct uverbs_attr_bundle *attrs)
5947 {
5948 	struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
5949 	struct mlx5_read_counters_attr mread_attr = {};
5950 	struct mlx5_ib_flow_counters_desc *desc;
5951 	int ret, i;
5952 
5953 	mutex_lock(&mcounters->mcntrs_mutex);
5954 	if (mcounters->cntrs_max_index > read_attr->ncounters) {
5955 		ret = -EINVAL;
5956 		goto err_bound;
5957 	}
5958 
5959 	mread_attr.out = kcalloc(mcounters->counters_num, sizeof(u64),
5960 				 GFP_KERNEL);
5961 	if (!mread_attr.out) {
5962 		ret = -ENOMEM;
5963 		goto err_bound;
5964 	}
5965 
5966 	mread_attr.hw_cntrs_hndl = mcounters->hw_cntrs_hndl;
5967 	mread_attr.flags = read_attr->flags;
5968 	ret = mcounters->read_counters(counters->device, &mread_attr);
5969 	if (ret)
5970 		goto err_read;
5971 
5972 	/* Walk the counters data array and accumulate each hardware counter
5973 	 * into the user buffer slot given by its description/index pair.
5974 	 */
5975 	desc = mcounters->counters_data;
5976 	for (i = 0; i < mcounters->ncounters; i++)
5977 		read_attr->counters_buff[desc[i].index] += mread_attr.out[desc[i].description];
5978 
5979 err_read:
5980 	kfree(mread_attr.out);
5981 err_bound:
5982 	mutex_unlock(&mcounters->mcntrs_mutex);
5983 	return ret;
5984 }
5985 
5986 static int mlx5_ib_destroy_counters(struct ib_counters *counters)
5987 {
5988 	struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
5989 
5990 	counters_clear_description(counters);
5991 	if (mcounters->hw_cntrs_hndl)
5992 		mlx5_fc_destroy(to_mdev(counters->device)->mdev,
5993 				mcounters->hw_cntrs_hndl);
5994 
5995 	kfree(mcounters);
5996 
5997 	return 0;
5998 }
5999 
6000 static struct ib_counters *mlx5_ib_create_counters(struct ib_device *device,
6001 						   struct uverbs_attr_bundle *attrs)
6002 {
6003 	struct mlx5_ib_mcounters *mcounters;
6004 
6005 	mcounters = kzalloc(sizeof(*mcounters), GFP_KERNEL);
6006 	if (!mcounters)
6007 		return ERR_PTR(-ENOMEM);
6008 
6009 	mutex_init(&mcounters->mcntrs_mutex);
6010 
6011 	return &mcounters->ibcntrs;
6012 }
6013 
6014 static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
6015 {
6016 	struct mlx5_core_dev *mdev = dev->mdev;
6017 
6018 	mlx5_ib_cleanup_multiport_master(dev);
6019 	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
6020 		srcu_barrier(&dev->mr_srcu);
6021 		cleanup_srcu_struct(&dev->mr_srcu);
6022 	}
6023 
6024 	WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES));
6025 
6026 	WARN_ON(dev->dm.steering_sw_icm_alloc_blocks &&
6027 		!bitmap_empty(
6028 			dev->dm.steering_sw_icm_alloc_blocks,
6029 			BIT(MLX5_CAP_DEV_MEM(mdev, log_steering_sw_icm_size) -
6030 			    MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev))));
6031 
6032 	kfree(dev->dm.steering_sw_icm_alloc_blocks);
6033 
6034 	WARN_ON(dev->dm.header_modify_sw_icm_alloc_blocks &&
6035 		!bitmap_empty(dev->dm.header_modify_sw_icm_alloc_blocks,
6036 			      BIT(MLX5_CAP_DEV_MEM(
6037 					  mdev, log_header_modify_sw_icm_size) -
6038 				  MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev))));
6039 
6040 	kfree(dev->dm.header_modify_sw_icm_alloc_blocks);
6041 }
6042 
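/* First profile stage: per-port state, multiport affiliation, port caps,
 * basic ib_device fields and the SW ICM device-memory allocation bitmaps
 * (plus the ODP SRCU).
 */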
6043 static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
6044 {
6045 	struct mlx5_core_dev *mdev = dev->mdev;
6046 	u64 header_modify_icm_blocks = 0;
6047 	u64 steering_icm_blocks = 0;
6048 	int err;
6049 	int i;
6050 
6051 	for (i = 0; i < dev->num_ports; i++) {
6052 		spin_lock_init(&dev->port[i].mp.mpi_lock);
6053 		rwlock_init(&dev->port[i].roce.netdev_lock);
6054 		dev->port[i].roce.dev = dev;
6055 		dev->port[i].roce.native_port_num = i + 1;
6056 		dev->port[i].roce.last_port_state = IB_PORT_DOWN;
6057 	}
6058 
6059 	err = mlx5_ib_init_multiport_master(dev);
6060 	if (err)
6061 		return err;
6062 
6063 	err = set_has_smi_cap(dev);
6064 	if (err)
6065 		return err;
6066 
6067 	if (!mlx5_core_mp_enabled(mdev)) {
6068 		for (i = 1; i <= dev->num_ports; i++) {
6069 			err = get_port_caps(dev, i);
6070 			if (err)
6071 				break;
6072 		}
6073 	} else {
6074 		err = get_port_caps(dev, mlx5_core_native_port_num(mdev));
6075 	}
6076 	if (err)
6077 		goto err_mp;
6078 
6079 	if (mlx5_use_mad_ifc(dev))
6080 		get_ext_port_caps(dev);
6081 
6082 	dev->ib_dev.owner		= THIS_MODULE;
6083 	dev->ib_dev.node_type		= RDMA_NODE_IB_CA;
6084 	dev->ib_dev.local_dma_lkey	= 0 /* not supported for now */;
6085 	dev->ib_dev.phys_port_cnt	= dev->num_ports;
6086 	dev->ib_dev.num_comp_vectors    = mlx5_comp_vectors_count(mdev);
6087 	dev->ib_dev.dev.parent		= mdev->device;
6088 
6089 	mutex_init(&dev->cap_mask_mutex);
6090 	INIT_LIST_HEAD(&dev->qp_list);
6091 	spin_lock_init(&dev->reset_flow_resource_lock);
6092 
6093 	if (MLX5_CAP_GEN_64(mdev, general_obj_types) &
6094 	    MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM) {
6095 		if (MLX5_CAP64_DEV_MEM(mdev, steering_sw_icm_start_address)) {
6096 			steering_icm_blocks =
6097 				BIT(MLX5_CAP_DEV_MEM(mdev,
6098 						     log_steering_sw_icm_size) -
6099 				    MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev));
6100 
6101 			dev->dm.steering_sw_icm_alloc_blocks =
6102 				kcalloc(BITS_TO_LONGS(steering_icm_blocks),
6103 					sizeof(unsigned long), GFP_KERNEL);
6104 			if (!dev->dm.steering_sw_icm_alloc_blocks) {
6105 				err = -ENOMEM;
6106 				goto err_mp;
6107 			}
6106 		}
6107 
6108 		if (MLX5_CAP64_DEV_MEM(mdev,
6109 				       header_modify_sw_icm_start_address)) {
6110 			header_modify_icm_blocks = BIT(
6111 				MLX5_CAP_DEV_MEM(
6112 					mdev, log_header_modify_sw_icm_size) -
6113 				MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev));
6114 
6115 			dev->dm.header_modify_sw_icm_alloc_blocks =
6116 				kcalloc(BITS_TO_LONGS(header_modify_icm_blocks),
6117 					sizeof(unsigned long), GFP_KERNEL);
6118 			if (!dev->dm.header_modify_sw_icm_alloc_blocks) {
6119 				err = -ENOMEM;
6120 				goto err_dm;
6121 			}
6120 		}
6121 	}
6122 
6123 	spin_lock_init(&dev->dm.lock);
6124 	dev->dm.dev = mdev;
6125 
6126 	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
6127 		err = init_srcu_struct(&dev->mr_srcu);
6128 		if (err)
6129 			goto err_dm;
6130 	}
6131 
6132 	return 0;
6133 
6134 err_dm:
6135 	kfree(dev->dm.steering_sw_icm_alloc_blocks);
6136 	kfree(dev->dm.header_modify_sw_icm_alloc_blocks);
6137 
6138 err_mp:
6139 	mlx5_ib_cleanup_multiport_master(dev);
6140 
6141 	return err;
6142 }
6143 
6144 static int mlx5_ib_stage_flow_db_init(struct mlx5_ib_dev *dev)
6145 {
6146 	dev->flow_db = kzalloc(sizeof(*dev->flow_db), GFP_KERNEL);
6147 
6148 	if (!dev->flow_db)
6149 		return -ENOMEM;
6150 
6151 	mutex_init(&dev->flow_db->lock);
6152 
6153 	return 0;
6154 }
6155 
6156 static void mlx5_ib_stage_flow_db_cleanup(struct mlx5_ib_dev *dev)
6157 {
6158 	kfree(dev->flow_db);
6159 }
6160 
6161 static const struct ib_device_ops mlx5_ib_dev_ops = {
6162 	.add_gid = mlx5_ib_add_gid,
6163 	.alloc_mr = mlx5_ib_alloc_mr,
6164 	.alloc_pd = mlx5_ib_alloc_pd,
6165 	.alloc_ucontext = mlx5_ib_alloc_ucontext,
6166 	.attach_mcast = mlx5_ib_mcg_attach,
6167 	.check_mr_status = mlx5_ib_check_mr_status,
6168 	.create_ah = mlx5_ib_create_ah,
6169 	.create_counters = mlx5_ib_create_counters,
6170 	.create_cq = mlx5_ib_create_cq,
6171 	.create_flow = mlx5_ib_create_flow,
6172 	.create_qp = mlx5_ib_create_qp,
6173 	.create_srq = mlx5_ib_create_srq,
6174 	.dealloc_pd = mlx5_ib_dealloc_pd,
6175 	.dealloc_ucontext = mlx5_ib_dealloc_ucontext,
6176 	.del_gid = mlx5_ib_del_gid,
6177 	.dereg_mr = mlx5_ib_dereg_mr,
6178 	.destroy_ah = mlx5_ib_destroy_ah,
6179 	.destroy_counters = mlx5_ib_destroy_counters,
6180 	.destroy_cq = mlx5_ib_destroy_cq,
6181 	.destroy_flow = mlx5_ib_destroy_flow,
6182 	.destroy_flow_action = mlx5_ib_destroy_flow_action,
6183 	.destroy_qp = mlx5_ib_destroy_qp,
6184 	.destroy_srq = mlx5_ib_destroy_srq,
6185 	.detach_mcast = mlx5_ib_mcg_detach,
6186 	.disassociate_ucontext = mlx5_ib_disassociate_ucontext,
6187 	.drain_rq = mlx5_ib_drain_rq,
6188 	.drain_sq = mlx5_ib_drain_sq,
6189 	.get_dev_fw_str = get_dev_fw_str,
6190 	.get_dma_mr = mlx5_ib_get_dma_mr,
6191 	.get_link_layer = mlx5_ib_port_link_layer,
6192 	.map_mr_sg = mlx5_ib_map_mr_sg,
6193 	.mmap = mlx5_ib_mmap,
6194 	.modify_cq = mlx5_ib_modify_cq,
6195 	.modify_device = mlx5_ib_modify_device,
6196 	.modify_port = mlx5_ib_modify_port,
6197 	.modify_qp = mlx5_ib_modify_qp,
6198 	.modify_srq = mlx5_ib_modify_srq,
6199 	.poll_cq = mlx5_ib_poll_cq,
6200 	.post_recv = mlx5_ib_post_recv,
6201 	.post_send = mlx5_ib_post_send,
6202 	.post_srq_recv = mlx5_ib_post_srq_recv,
6203 	.process_mad = mlx5_ib_process_mad,
6204 	.query_ah = mlx5_ib_query_ah,
6205 	.query_device = mlx5_ib_query_device,
6206 	.query_gid = mlx5_ib_query_gid,
6207 	.query_pkey = mlx5_ib_query_pkey,
6208 	.query_qp = mlx5_ib_query_qp,
6209 	.query_srq = mlx5_ib_query_srq,
6210 	.read_counters = mlx5_ib_read_counters,
6211 	.reg_user_mr = mlx5_ib_reg_user_mr,
6212 	.req_notify_cq = mlx5_ib_arm_cq,
6213 	.rereg_user_mr = mlx5_ib_rereg_user_mr,
6214 	.resize_cq = mlx5_ib_resize_cq,
6215 
6216 	INIT_RDMA_OBJ_SIZE(ib_ah, mlx5_ib_ah, ibah),
6217 	INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd),
6218 	INIT_RDMA_OBJ_SIZE(ib_srq, mlx5_ib_srq, ibsrq),
6219 	INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx5_ib_ucontext, ibucontext),
6220 };
6221 
6222 static const struct ib_device_ops mlx5_ib_dev_flow_ipsec_ops = {
6223 	.create_flow_action_esp = mlx5_ib_create_flow_action_esp,
6224 	.modify_flow_action_esp = mlx5_ib_modify_flow_action_esp,
6225 };
6226 
6227 static const struct ib_device_ops mlx5_ib_dev_ipoib_enhanced_ops = {
6228 	.rdma_netdev_get_params = mlx5_ib_rn_get_params,
6229 };
6230 
6231 static const struct ib_device_ops mlx5_ib_dev_sriov_ops = {
6232 	.get_vf_config = mlx5_ib_get_vf_config,
6233 	.get_vf_stats = mlx5_ib_get_vf_stats,
6234 	.set_vf_guid = mlx5_ib_set_vf_guid,
6235 	.set_vf_link_state = mlx5_ib_set_vf_link_state,
6236 };
6237 
6238 static const struct ib_device_ops mlx5_ib_dev_mw_ops = {
6239 	.alloc_mw = mlx5_ib_alloc_mw,
6240 	.dealloc_mw = mlx5_ib_dealloc_mw,
6241 };
6242 
6243 static const struct ib_device_ops mlx5_ib_dev_xrc_ops = {
6244 	.alloc_xrcd = mlx5_ib_alloc_xrcd,
6245 	.dealloc_xrcd = mlx5_ib_dealloc_xrcd,
6246 };
6247 
6248 static const struct ib_device_ops mlx5_ib_dev_dm_ops = {
6249 	.alloc_dm = mlx5_ib_alloc_dm,
6250 	.dealloc_dm = mlx5_ib_dealloc_dm,
6251 	.reg_dm_mr = mlx5_ib_reg_dm_mr,
6252 };
6253 
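/* Populate the uverbs command masks and the ib_device_ops groups according
 * to the capabilities reported by firmware.
 */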
6254 static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
6255 {
6256 	struct mlx5_core_dev *mdev = dev->mdev;
6257 	int err;
6258 
6259 	dev->ib_dev.uverbs_abi_ver	= MLX5_IB_UVERBS_ABI_VERSION;
6260 	dev->ib_dev.uverbs_cmd_mask	=
6261 		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
6262 		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
6263 		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
6264 		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
6265 		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
6266 		(1ull << IB_USER_VERBS_CMD_CREATE_AH)		|
6267 		(1ull << IB_USER_VERBS_CMD_DESTROY_AH)		|
6268 		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
6269 		(1ull << IB_USER_VERBS_CMD_REREG_MR)		|
6270 		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
6271 		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
6272 		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
6273 		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
6274 		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
6275 		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
6276 		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
6277 		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
6278 		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
6279 		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)	|
6280 		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST)	|
6281 		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
6282 		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
6283 		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
6284 		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)		|
6285 		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)		|
6286 		(1ull << IB_USER_VERBS_CMD_OPEN_QP);
6287 	dev->ib_dev.uverbs_ex_cmd_mask =
6288 		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE)	|
6289 		(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ)	|
6290 		(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP)	|
6291 		(1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP)	|
6292 		(1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ)	|
6293 		(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW)	|
6294 		(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
6295 
6296 	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
6297 	    IS_ENABLED(CONFIG_MLX5_CORE_IPOIB))
6298 		ib_set_device_ops(&dev->ib_dev,
6299 				  &mlx5_ib_dev_ipoib_enhanced_ops);
6300 
6301 	if (mlx5_core_is_pf(mdev))
6302 		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_sriov_ops);
6303 
6304 	dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));
6305 
6306 	if (MLX5_CAP_GEN(mdev, imaicl)) {
6307 		dev->ib_dev.uverbs_cmd_mask |=
6308 			(1ull << IB_USER_VERBS_CMD_ALLOC_MW)	|
6309 			(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
6310 		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_mw_ops);
6311 	}
6312 
6313 	if (MLX5_CAP_GEN(mdev, xrc)) {
6314 		dev->ib_dev.uverbs_cmd_mask |=
6315 			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
6316 			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
6317 		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_xrc_ops);
6318 	}
6319 
6320 	if (MLX5_CAP_DEV_MEM(mdev, memic) ||
6321 	    MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
6322 	    MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM)
6323 		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_dm_ops);
6324 
6325 	if (mlx5_accel_ipsec_device_caps(dev->mdev) &
6326 	    MLX5_ACCEL_IPSEC_CAP_DEVICE)
6327 		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_flow_ipsec_ops);
6328 	dev->ib_dev.driver_id = RDMA_DRIVER_MLX5;
6329 	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_ops);
6330 
6331 	if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS))
6332 		dev->ib_dev.driver_def = mlx5_ib_defs;
6333 
6334 	err = init_node_data(dev);
6335 	if (err)
6336 		return err;
6337 
6338 	if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
6339 	    (MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) ||
6340 	     MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
6341 		mutex_init(&dev->lb.mutex);
6342 
6343 	return 0;
6344 }
6345 
6346 static const struct ib_device_ops mlx5_ib_dev_port_ops = {
6347 	.get_port_immutable = mlx5_port_immutable,
6348 	.query_port = mlx5_ib_query_port,
6349 };
6350 
6351 static int mlx5_ib_stage_non_default_cb(struct mlx5_ib_dev *dev)
6352 {
6353 	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_ops);
6354 	return 0;
6355 }
6356 
6357 static const struct ib_device_ops mlx5_ib_dev_port_rep_ops = {
6358 	.get_port_immutable = mlx5_port_rep_immutable,
6359 	.query_port = mlx5_ib_rep_query_port,
6360 };
6361 
6362 static int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev)
6363 {
6364 	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_rep_ops);
6365 	return 0;
6366 }
6367 
6368 static const struct ib_device_ops mlx5_ib_dev_common_roce_ops = {
6369 	.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table,
6370 	.create_wq = mlx5_ib_create_wq,
6371 	.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table,
6372 	.destroy_wq = mlx5_ib_destroy_wq,
6373 	.get_netdev = mlx5_ib_get_netdev,
6374 	.modify_wq = mlx5_ib_modify_wq,
6375 };
6376 
6377 static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev)
6378 {
6379 	u8 port_num;
6380 
6381 	dev->ib_dev.uverbs_ex_cmd_mask |=
6382 			(1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
6383 			(1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
6384 			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
6385 			(1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
6386 			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
6387 	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_common_roce_ops);
6388 
6389 	port_num = mlx5_core_native_port_num(dev->mdev) - 1;
6390 
6391 	/* Register only for native ports */
6392 	return mlx5_add_netdev_notifier(dev, port_num);
6393 }
6394 
6395 static void mlx5_ib_stage_common_roce_cleanup(struct mlx5_ib_dev *dev)
6396 {
6397 	u8 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
6398 
6399 	mlx5_remove_netdev_notifier(dev, port_num);
6400 }
6401 
6402 static int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev)
6403 {
6404 	struct mlx5_core_dev *mdev = dev->mdev;
6405 	enum rdma_link_layer ll;
6406 	int port_type_cap;
6407 	int err = 0;
6408 
6409 	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
6410 	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
6411 
6412 	if (ll == IB_LINK_LAYER_ETHERNET)
6413 		err = mlx5_ib_stage_common_roce_init(dev);
6414 
6415 	return err;
6416 }
6417 
6418 static void mlx5_ib_stage_rep_roce_cleanup(struct mlx5_ib_dev *dev)
6419 {
6420 	mlx5_ib_stage_common_roce_cleanup(dev);
6421 }
6422 
6423 static int mlx5_ib_stage_roce_init(struct mlx5_ib_dev *dev)
6424 {
6425 	struct mlx5_core_dev *mdev = dev->mdev;
6426 	enum rdma_link_layer ll;
6427 	int port_type_cap;
6428 	int err;
6429 
6430 	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
6431 	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
6432 
6433 	if (ll == IB_LINK_LAYER_ETHERNET) {
6434 		err = mlx5_ib_stage_common_roce_init(dev);
6435 		if (err)
6436 			return err;
6437 
6438 		err = mlx5_enable_eth(dev);
6439 		if (err)
6440 			goto cleanup;
6441 	}
6442 
6443 	return 0;
6444 cleanup:
6445 	mlx5_ib_stage_common_roce_cleanup(dev);
6446 
6447 	return err;
6448 }
6449 
6450 static void mlx5_ib_stage_roce_cleanup(struct mlx5_ib_dev *dev)
6451 {
6452 	struct mlx5_core_dev *mdev = dev->mdev;
6453 	enum rdma_link_layer ll;
6454 	int port_type_cap;
6455 
6456 	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
6457 	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
6458 
6459 	if (ll == IB_LINK_LAYER_ETHERNET) {
6460 		mlx5_disable_eth(dev);
6461 		mlx5_ib_stage_common_roce_cleanup(dev);
6462 	}
6463 }
6464 
6465 static int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev)
6466 {
6467 	return create_dev_resources(&dev->devr);
6468 }
6469 
6470 static void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)
6471 {
6472 	destroy_dev_resources(&dev->devr);
6473 }
6474 
6475 static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
6476 {
6477 	mlx5_ib_internal_fill_odp_caps(dev);
6478 
6479 	return mlx5_ib_odp_init_one(dev);
6480 }
6481 
6482 static void mlx5_ib_stage_odp_cleanup(struct mlx5_ib_dev *dev)
6483 {
6484 	mlx5_ib_odp_cleanup_one(dev);
6485 }
6486 
6487 static const struct ib_device_ops mlx5_ib_dev_hw_stats_ops = {
6488 	.alloc_hw_stats = mlx5_ib_alloc_hw_stats,
6489 	.get_hw_stats = mlx5_ib_get_hw_stats,
6490 };
6491 
6492 static int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev)
6493 {
6494 	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
6495 		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_hw_stats_ops);
6496 
6497 		return mlx5_ib_alloc_counters(dev);
6498 	}
6499 
6500 	return 0;
6501 }
6502 
6503 static void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev)
6504 {
6505 	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
6506 		mlx5_ib_dealloc_counters(dev);
6507 }
6508 
6509 static int mlx5_ib_stage_cong_debugfs_init(struct mlx5_ib_dev *dev)
6510 {
6511 	mlx5_ib_init_cong_debugfs(dev,
6512 				  mlx5_core_native_port_num(dev->mdev) - 1);
6513 	return 0;
6514 }
6515 
6516 static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev)
6517 {
6518 	mlx5_ib_cleanup_cong_debugfs(dev,
6519 				     mlx5_core_native_port_num(dev->mdev) - 1);
6520 }
6521 
6522 static int mlx5_ib_stage_uar_init(struct mlx5_ib_dev *dev)
6523 {
6524 	dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
6525 	return PTR_ERR_OR_ZERO(dev->mdev->priv.uar);
6526 }
6527 
6528 static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev)
6529 {
6530 	mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
6531 }
6532 
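/* Allocate the regular and fast-path blue flame registers (bfregs). */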
6533 static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
6534 {
6535 	int err;
6536 
6537 	err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
6538 	if (err)
6539 		return err;
6540 
6541 	err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
6542 	if (err)
6543 		mlx5_free_bfreg(dev->mdev, &dev->bfreg);
6544 
6545 	return err;
6546 }
6547 
6548 static void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev)
6549 {
6550 	mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
6551 	mlx5_free_bfreg(dev->mdev, &dev->bfreg);
6552 }
6553 
6554 static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
6555 {
6556 	const char *name;
6557 
6558 	rdma_set_device_sysfs_group(&dev->ib_dev, &mlx5_attr_group);
6559 	if (!mlx5_lag_is_roce(dev->mdev))
6560 		name = "mlx5_%d";
6561 	else
6562 		name = "mlx5_bond_%d";
6563 	return ib_register_device(&dev->ib_dev, name);
6564 }
6565 
6566 static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
6567 {
6568 	destroy_umrc_res(dev);
6569 }
6570 
6571 static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
6572 {
6573 	ib_unregister_device(&dev->ib_dev);
6574 }
6575 
6576 static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
6577 {
6578 	return create_umr_res(dev);
6579 }
6580 
6581 static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
6582 {
6583 	init_delay_drop(dev);
6584 
6585 	return 0;
6586 }
6587 
6588 static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev)
6589 {
6590 	cancel_delay_drop(dev);
6591 }
6592 
6593 static int mlx5_ib_stage_dev_notifier_init(struct mlx5_ib_dev *dev)
6594 {
6595 	dev->mdev_events.notifier_call = mlx5_ib_event;
6596 	mlx5_notifier_register(dev->mdev, &dev->mdev_events);
6597 	return 0;
6598 }
6599 
6600 static void mlx5_ib_stage_dev_notifier_cleanup(struct mlx5_ib_dev *dev)
6601 {
6602 	mlx5_notifier_unregister(dev->mdev, &dev->mdev_events);
6603 }
6604 
6605 static int mlx5_ib_stage_devx_init(struct mlx5_ib_dev *dev)
6606 {
6607 	int uid;
6608 
6609 	uid = mlx5_ib_devx_create(dev, false);
6610 	if (uid > 0)
6611 		dev->devx_whitelist_uid = uid;
6612 
6613 	return 0;
6614 }
6615 }

6616 static void mlx5_ib_stage_devx_cleanup(struct mlx5_ib_dev *dev)
6617 	if (dev->devx_whitelist_uid)
6618 		mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid);
6619 }
6620 
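/* Run the cleanup callbacks of the first @stage profile stages in reverse
 * order, then free the port array and the ib_device itself.
 */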
6621 void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
6622 		      const struct mlx5_ib_profile *profile,
6623 		      int stage)
6624 {
6625 	/* stage is the number of stages to clean up */
6626 	while (stage) {
6627 		stage--;
6628 		if (profile->stage[stage].cleanup)
6629 			profile->stage[stage].cleanup(dev);
6630 	}
6631 
6632 	kfree(dev->port);
6633 	ib_dealloc_device(&dev->ib_dev);
6634 }
6635 
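/* Run every init stage of @profile in order; on failure the stages that
 * already completed are unwound via __mlx5_ib_remove().
 */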
6636 void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
6637 		    const struct mlx5_ib_profile *profile)
6638 {
6639 	int err;
6640 	int i;
6641 
6642 	for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
6643 		if (profile->stage[i].init) {
6644 			err = profile->stage[i].init(dev);
6645 			if (err)
6646 				goto err_out;
6647 		}
6648 	}
6649 
6650 	dev->profile = profile;
6651 	dev->ib_active = true;
6652 
6653 	return dev;
6654 
6655 err_out:
6656 	__mlx5_ib_remove(dev, profile, i);
6657 
6658 	return NULL;
6659 }
6660 
6661 static const struct mlx5_ib_profile pf_profile = {
6662 	STAGE_CREATE(MLX5_IB_STAGE_INIT,
6663 		     mlx5_ib_stage_init_init,
6664 		     mlx5_ib_stage_init_cleanup),
6665 	STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
6666 		     mlx5_ib_stage_flow_db_init,
6667 		     mlx5_ib_stage_flow_db_cleanup),
6668 	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
6669 		     mlx5_ib_stage_caps_init,
6670 		     NULL),
6671 	STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
6672 		     mlx5_ib_stage_non_default_cb,
6673 		     NULL),
6674 	STAGE_CREATE(MLX5_IB_STAGE_ROCE,
6675 		     mlx5_ib_stage_roce_init,
6676 		     mlx5_ib_stage_roce_cleanup),
6677 	STAGE_CREATE(MLX5_IB_STAGE_SRQ,
6678 		     mlx5_init_srq_table,
6679 		     mlx5_cleanup_srq_table),
6680 	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
6681 		     mlx5_ib_stage_dev_res_init,
6682 		     mlx5_ib_stage_dev_res_cleanup),
6683 	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
6684 		     mlx5_ib_stage_dev_notifier_init,
6685 		     mlx5_ib_stage_dev_notifier_cleanup),
6686 	STAGE_CREATE(MLX5_IB_STAGE_ODP,
6687 		     mlx5_ib_stage_odp_init,
6688 		     mlx5_ib_stage_odp_cleanup),
6689 	STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
6690 		     mlx5_ib_stage_counters_init,
6691 		     mlx5_ib_stage_counters_cleanup),
6692 	STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
6693 		     mlx5_ib_stage_cong_debugfs_init,
6694 		     mlx5_ib_stage_cong_debugfs_cleanup),
6695 	STAGE_CREATE(MLX5_IB_STAGE_UAR,
6696 		     mlx5_ib_stage_uar_init,
6697 		     mlx5_ib_stage_uar_cleanup),
6698 	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
6699 		     mlx5_ib_stage_bfrag_init,
6700 		     mlx5_ib_stage_bfrag_cleanup),
6701 	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
6702 		     NULL,
6703 		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
6704 	STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
6705 		     mlx5_ib_stage_devx_init,
6706 		     mlx5_ib_stage_devx_cleanup),
6707 	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
6708 		     mlx5_ib_stage_ib_reg_init,
6709 		     mlx5_ib_stage_ib_reg_cleanup),
6710 	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
6711 		     mlx5_ib_stage_post_ib_reg_umr_init,
6712 		     NULL),
6713 	STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
6714 		     mlx5_ib_stage_delay_drop_init,
6715 		     mlx5_ib_stage_delay_drop_cleanup),
6716 };
6717 
6718 const struct mlx5_ib_profile uplink_rep_profile = {
6719 	STAGE_CREATE(MLX5_IB_STAGE_INIT,
6720 		     mlx5_ib_stage_init_init,
6721 		     mlx5_ib_stage_init_cleanup),
6722 	STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
6723 		     mlx5_ib_stage_flow_db_init,
6724 		     mlx5_ib_stage_flow_db_cleanup),
6725 	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
6726 		     mlx5_ib_stage_caps_init,
6727 		     NULL),
6728 	STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
6729 		     mlx5_ib_stage_rep_non_default_cb,
6730 		     NULL),
6731 	STAGE_CREATE(MLX5_IB_STAGE_ROCE,
6732 		     mlx5_ib_stage_rep_roce_init,
6733 		     mlx5_ib_stage_rep_roce_cleanup),
6734 	STAGE_CREATE(MLX5_IB_STAGE_SRQ,
6735 		     mlx5_init_srq_table,
6736 		     mlx5_cleanup_srq_table),
6737 	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
6738 		     mlx5_ib_stage_dev_res_init,
6739 		     mlx5_ib_stage_dev_res_cleanup),
6740 	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
6741 		     mlx5_ib_stage_dev_notifier_init,
6742 		     mlx5_ib_stage_dev_notifier_cleanup),
6743 	STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
6744 		     mlx5_ib_stage_counters_init,
6745 		     mlx5_ib_stage_counters_cleanup),
6746 	STAGE_CREATE(MLX5_IB_STAGE_UAR,
6747 		     mlx5_ib_stage_uar_init,
6748 		     mlx5_ib_stage_uar_cleanup),
6749 	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
6750 		     mlx5_ib_stage_bfrag_init,
6751 		     mlx5_ib_stage_bfrag_cleanup),
6752 	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
6753 		     NULL,
6754 		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
6755 	STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
6756 		     mlx5_ib_stage_devx_init,
6757 		     mlx5_ib_stage_devx_cleanup),
6758 	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
6759 		     mlx5_ib_stage_ib_reg_init,
6760 		     mlx5_ib_stage_ib_reg_cleanup),
6761 	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
6762 		     mlx5_ib_stage_post_ib_reg_umr_init,
6763 		     NULL),
6764 };
6765 
6766 static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev)
6767 {
6768 	struct mlx5_ib_multiport_info *mpi;
6769 	struct mlx5_ib_dev *dev;
6770 	bool bound = false;
6771 	int err;
6772 
6773 	mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
6774 	if (!mpi)
6775 		return NULL;
6776 
6777 	mpi->mdev = mdev;
6778 
6779 	err = mlx5_query_nic_vport_system_image_guid(mdev,
6780 						     &mpi->sys_image_guid);
6781 	if (err) {
6782 		kfree(mpi);
6783 		return NULL;
6784 	}
6785 
6786 	mutex_lock(&mlx5_ib_multiport_mutex);
6787 	list_for_each_entry(dev, &mlx5_ib_dev_list, ib_dev_list) {
6788 		if (dev->sys_image_guid == mpi->sys_image_guid)
6789 			bound = mlx5_ib_bind_slave_port(dev, mpi);
6790 
6791 		if (bound) {
6792 			rdma_roce_rescan_device(&dev->ib_dev);
6793 			break;
6794 		}
6795 	}
6796 
6797 	if (!bound) {
6798 		list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
6799 		dev_dbg(mdev->device,
6800 			"no suitable IB device found to bind to, added to unaffiliated list.\n");
6801 	}
6802 	mutex_unlock(&mlx5_ib_multiport_mutex);
6803 
6804 	return mpi;
6805 }
6806 
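/* Core interface add() callback: switchdev-mode eswitch managers register
 * vport representors, multi-port Ethernet slaves get affiliated to an
 * existing master, and everything else is brought up as a full IB device
 * using pf_profile.
 */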
6807 static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
6808 {
6809 	enum rdma_link_layer ll;
6810 	struct mlx5_ib_dev *dev;
6811 	int port_type_cap;
6812 	int num_ports;
6813 
6814 	printk_once(KERN_INFO "%s", mlx5_version);
6815 
6816 	if (MLX5_ESWITCH_MANAGER(mdev) &&
6817 	    mlx5_ib_eswitch_mode(mdev->priv.eswitch) == MLX5_ESWITCH_OFFLOADS) {
6818 		if (!mlx5_core_mp_enabled(mdev))
6819 			mlx5_ib_register_vport_reps(mdev);
6820 		return mdev;
6821 	}
6822 
6823 	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
6824 	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
6825 
6826 	if (mlx5_core_is_mp_slave(mdev) && ll == IB_LINK_LAYER_ETHERNET)
6827 		return mlx5_ib_add_slave_port(mdev);
6828 
6829 	num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
6830 			MLX5_CAP_GEN(mdev, num_vhca_ports));
6831 	dev = ib_alloc_device(mlx5_ib_dev, ib_dev);
6832 	if (!dev)
6833 		return NULL;
6834 	dev->port = kcalloc(num_ports, sizeof(*dev->port),
6835 			     GFP_KERNEL);
6836 	if (!dev->port) {
6837 		ib_dealloc_device((struct ib_device *)dev);
6838 		return NULL;
6839 	}
6840 
6841 	dev->mdev = mdev;
6842 	dev->num_ports = num_ports;
6843 
6844 	return __mlx5_ib_add(dev, &pf_profile);
6845 }
6846 
6847 static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
6848 {
6849 	struct mlx5_ib_multiport_info *mpi;
6850 	struct mlx5_ib_dev *dev;
6851 
6852 	if (MLX5_ESWITCH_MANAGER(mdev) && context == mdev) {
6853 		mlx5_ib_unregister_vport_reps(mdev);
6854 		return;
6855 	}
6856 
6857 	if (mlx5_core_is_mp_slave(mdev)) {
6858 		mpi = context;
6859 		mutex_lock(&mlx5_ib_multiport_mutex);
6860 		if (mpi->ibdev)
6861 			mlx5_ib_unbind_slave_port(mpi->ibdev, mpi);
6862 		list_del(&mpi->list);
6863 		mutex_unlock(&mlx5_ib_multiport_mutex);
6864 		return;
6865 	}
6866 
6867 	dev = context;
6868 	__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
6869 }
6870 
6871 static struct mlx5_interface mlx5_ib_interface = {
6872 	.add            = mlx5_ib_add,
6873 	.remove         = mlx5_ib_remove,
6874 	.protocol	= MLX5_INTERFACE_PROTOCOL_IB,
6875 };
6876 
6877 unsigned long mlx5_ib_get_xlt_emergency_page(void)
6878 {
6879 	mutex_lock(&xlt_emergency_page_mutex);
6880 	return xlt_emergency_page;
6881 }
6882 
6883 void mlx5_ib_put_xlt_emergency_page(void)
6884 {
6885 	mutex_unlock(&xlt_emergency_page_mutex);
6886 }
6887 
6888 static int __init mlx5_ib_init(void)
6889 {
6890 	int err;
6891 
6892 	xlt_emergency_page = __get_free_page(GFP_KERNEL);
6893 	if (!xlt_emergency_page)
6894 		return -ENOMEM;
6895 
6896 	mutex_init(&xlt_emergency_page_mutex);
6897 
6898 	mlx5_ib_event_wq = alloc_ordered_workqueue("mlx5_ib_event_wq", 0);
6899 	if (!mlx5_ib_event_wq) {
6900 		free_page(xlt_emergency_page);
6901 		return -ENOMEM;
6902 	}
6903 
6904 	mlx5_ib_odp_init();
6905 
6906 	err = mlx5_register_interface(&mlx5_ib_interface);
6907 
6908 	return err;
6909 }
6910 
6911 static void __exit mlx5_ib_cleanup(void)
6912 {
6913 	mlx5_unregister_interface(&mlx5_ib_interface);
6914 	destroy_workqueue(mlx5_ib_event_wq);
6915 	mutex_destroy(&xlt_emergency_page_mutex);
6916 	free_page(xlt_emergency_page);
6917 }
6918 
6919 module_init(mlx5_ib_init);
6920 module_exit(mlx5_ib_cleanup);
6921