xref: /openbmc/linux/drivers/infiniband/hw/mlx5/main.c (revision b08918fb)
1 /*
2  * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/debugfs.h>
34 #include <linux/highmem.h>
35 #include <linux/module.h>
36 #include <linux/init.h>
37 #include <linux/errno.h>
38 #include <linux/pci.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/bitmap.h>
42 #if defined(CONFIG_X86)
43 #include <asm/pat.h>
44 #endif
45 #include <linux/sched.h>
46 #include <linux/sched/mm.h>
47 #include <linux/sched/task.h>
48 #include <linux/delay.h>
49 #include <rdma/ib_user_verbs.h>
50 #include <rdma/ib_addr.h>
51 #include <rdma/ib_cache.h>
52 #include <linux/mlx5/port.h>
53 #include <linux/mlx5/vport.h>
54 #include <linux/mlx5/fs.h>
55 #include <linux/mlx5/eswitch.h>
56 #include <linux/list.h>
57 #include <rdma/ib_smi.h>
58 #include <rdma/ib_umem.h>
59 #include <linux/in.h>
60 #include <linux/etherdevice.h>
61 #include "mlx5_ib.h"
62 #include "ib_rep.h"
63 #include "cmd.h"
64 #include "srq.h"
65 #include <linux/mlx5/fs_helpers.h>
66 #include <linux/mlx5/accel.h>
67 #include <rdma/uverbs_std_types.h>
68 #include <rdma/mlx5_user_ioctl_verbs.h>
69 #include <rdma/mlx5_user_ioctl_cmds.h>
70 
71 #define UVERBS_MODULE_NAME mlx5_ib
72 #include <rdma/uverbs_named_ioctl.h>
73 
74 #define DRIVER_NAME "mlx5_ib"
75 #define DRIVER_VERSION "5.0-0"
76 
77 MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
78 MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
79 MODULE_LICENSE("Dual BSD/GPL");
80 
81 static char mlx5_version[] =
82 	DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
83 	DRIVER_VERSION "\n";
84 
85 struct mlx5_ib_event_work {
86 	struct work_struct	work;
87 	union {
88 		struct mlx5_ib_dev	      *dev;
89 		struct mlx5_ib_multiport_info *mpi;
90 	};
91 	bool			is_slave;
92 	unsigned int		event;
93 	void			*param;
94 };
95 
96 enum {
97 	MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
98 };
99 
100 static struct workqueue_struct *mlx5_ib_event_wq;
101 static LIST_HEAD(mlx5_ib_unaffiliated_port_list);
102 static LIST_HEAD(mlx5_ib_dev_list);
103 /*
104  * This mutex should be held when accessing either of the above lists
105  */
106 static DEFINE_MUTEX(mlx5_ib_multiport_mutex);
107 
108 /* We can't use an array for xlt_emergency_page because dma_map_single
109  * doesn't work on kernel module memory.
110  */
111 static unsigned long xlt_emergency_page;
112 static struct mutex xlt_emergency_page_mutex;
113 
114 struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi)
115 {
116 	struct mlx5_ib_dev *dev;
117 
118 	mutex_lock(&mlx5_ib_multiport_mutex);
119 	dev = mpi->ibdev;
120 	mutex_unlock(&mlx5_ib_multiport_mutex);
121 	return dev;
122 }
123 
124 static enum rdma_link_layer
125 mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
126 {
127 	switch (port_type_cap) {
128 	case MLX5_CAP_PORT_TYPE_IB:
129 		return IB_LINK_LAYER_INFINIBAND;
130 	case MLX5_CAP_PORT_TYPE_ETH:
131 		return IB_LINK_LAYER_ETHERNET;
132 	default:
133 		return IB_LINK_LAYER_UNSPECIFIED;
134 	}
135 }
136 
137 static enum rdma_link_layer
138 mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num)
139 {
140 	struct mlx5_ib_dev *dev = to_mdev(device);
141 	int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);
142 
143 	return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
144 }
145 
146 static int get_port_state(struct ib_device *ibdev,
147 			  u8 port_num,
148 			  enum ib_port_state *state)
149 {
150 	struct ib_port_attr attr;
151 	int ret;
152 
153 	memset(&attr, 0, sizeof(attr));
154 	ret = ibdev->ops.query_port(ibdev, port_num, &attr);
155 	if (!ret)
156 		*state = attr.state;
157 	return ret;
158 }
159 
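/*
 * Walk the switchdev representor ports and return the mlx5_roce instance
 * that owns @ndev, filling in its 1-based IB port number. Returns NULL
 * when no representor port matches.
 */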
160 static struct mlx5_roce *mlx5_get_rep_roce(struct mlx5_ib_dev *dev,
161 					   struct net_device *ndev,
162 					   u8 *port_num)
163 {
164 	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
165 	struct net_device *rep_ndev;
166 	struct mlx5_ib_port *port;
167 	int i;
168 
169 	for (i = 0; i < dev->num_ports; i++) {
170 		port  = &dev->port[i];
171 		if (!port->rep)
172 			continue;
173 
174 		read_lock(&port->roce.netdev_lock);
175 		rep_ndev = mlx5_ib_get_rep_netdev(esw,
176 						  port->rep->vport);
177 		if (rep_ndev == ndev) {
178 			read_unlock(&port->roce.netdev_lock);
179 			*port_num = i + 1;
180 			return &port->roce;
181 		}
182 		read_unlock(&port->roce.netdev_lock);
183 	}
184 
185 	return NULL;
186 }
187 
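/*
 * Netdev notifier callback: tracks which netdev backs each RoCE port on
 * REGISTER/UNREGISTER and translates carrier/link changes into
 * IB_EVENT_PORT_ACTIVE / IB_EVENT_PORT_ERR events on the ib_device.
 */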
188 static int mlx5_netdev_event(struct notifier_block *this,
189 			     unsigned long event, void *ptr)
190 {
191 	struct mlx5_roce *roce = container_of(this, struct mlx5_roce, nb);
192 	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
193 	u8 port_num = roce->native_port_num;
194 	struct mlx5_core_dev *mdev;
195 	struct mlx5_ib_dev *ibdev;
196 
197 	ibdev = roce->dev;
198 	mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
199 	if (!mdev)
200 		return NOTIFY_DONE;
201 
202 	switch (event) {
203 	case NETDEV_REGISTER:
204 		/* Should already be registered during the load */
205 		if (ibdev->is_rep)
206 			break;
207 		write_lock(&roce->netdev_lock);
208 		if (ndev->dev.parent == mdev->device)
209 			roce->netdev = ndev;
210 		write_unlock(&roce->netdev_lock);
211 		break;
212 
213 	case NETDEV_UNREGISTER:
214 		/* In case of reps, ib device goes away before the netdevs */
215 		write_lock(&roce->netdev_lock);
216 		if (roce->netdev == ndev)
217 			roce->netdev = NULL;
218 		write_unlock(&roce->netdev_lock);
219 		break;
220 
221 	case NETDEV_CHANGE:
222 	case NETDEV_UP:
223 	case NETDEV_DOWN: {
224 		struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(mdev);
225 		struct net_device *upper = NULL;
226 
227 		if (lag_ndev) {
228 			upper = netdev_master_upper_dev_get(lag_ndev);
229 			dev_put(lag_ndev);
230 		}
231 
232 		if (ibdev->is_rep)
233 			roce = mlx5_get_rep_roce(ibdev, ndev, &port_num);
234 		if (!roce)
235 			return NOTIFY_DONE;
236 		if ((upper == ndev || (!upper && ndev == roce->netdev))
237 		    && ibdev->ib_active) {
238 			struct ib_event ibev = { };
239 			enum ib_port_state port_state;
240 
241 			if (get_port_state(&ibdev->ib_dev, port_num,
242 					   &port_state))
243 				goto done;
244 
245 			if (roce->last_port_state == port_state)
246 				goto done;
247 
248 			roce->last_port_state = port_state;
249 			ibev.device = &ibdev->ib_dev;
250 			if (port_state == IB_PORT_DOWN)
251 				ibev.event = IB_EVENT_PORT_ERR;
252 			else if (port_state == IB_PORT_ACTIVE)
253 				ibev.event = IB_EVENT_PORT_ACTIVE;
254 			else
255 				goto done;
256 
257 			ibev.element.port_num = port_num;
258 			ib_dispatch_event(&ibev);
259 		}
260 		break;
261 	}
262 
263 	default:
264 		break;
265 	}
266 done:
267 	mlx5_ib_put_native_port_mdev(ibdev, port_num);
268 	return NOTIFY_DONE;
269 }
270 
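/*
 * Return the netdev backing @port_num with a reference held (the caller
 * must dev_put() it), preferring the LAG netdev when RoCE LAG is active.
 */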
271 static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
272 					     u8 port_num)
273 {
274 	struct mlx5_ib_dev *ibdev = to_mdev(device);
275 	struct net_device *ndev;
276 	struct mlx5_core_dev *mdev;
277 
278 	mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
279 	if (!mdev)
280 		return NULL;
281 
282 	ndev = mlx5_lag_get_roce_netdev(mdev);
283 	if (ndev)
284 		goto out;
285 
286 	/* Ensure ndev does not disappear before we invoke dev_hold()
287 	 */
288 	read_lock(&ibdev->port[port_num - 1].roce.netdev_lock);
289 	ndev = ibdev->port[port_num - 1].roce.netdev;
290 	if (ndev)
291 		dev_hold(ndev);
292 	read_unlock(&ibdev->port[port_num - 1].roce.netdev_lock);
293 
294 out:
295 	mlx5_ib_put_native_port_mdev(ibdev, port_num);
296 	return ndev;
297 }
298 
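/*
 * Map an IB port number to the mlx5_core_dev that natively owns it. In
 * multiport (dual-port RoCE) configurations the slave mdev is refcounted,
 * so a non-NULL result must be released with
 * mlx5_ib_put_native_port_mdev(). Returns NULL if the port is not
 * currently affiliated.
 */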
299 struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
300 						   u8 ib_port_num,
301 						   u8 *native_port_num)
302 {
303 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
304 							  ib_port_num);
305 	struct mlx5_core_dev *mdev = NULL;
306 	struct mlx5_ib_multiport_info *mpi;
307 	struct mlx5_ib_port *port;
308 
309 	if (!mlx5_core_mp_enabled(ibdev->mdev) ||
310 	    ll != IB_LINK_LAYER_ETHERNET) {
311 		if (native_port_num)
312 			*native_port_num = ib_port_num;
313 		return ibdev->mdev;
314 	}
315 
316 	if (native_port_num)
317 		*native_port_num = 1;
318 
319 	port = &ibdev->port[ib_port_num - 1];
320 	if (!port)
321 		return NULL;
322 
323 	spin_lock(&port->mp.mpi_lock);
324 	mpi = ibdev->port[ib_port_num - 1].mp.mpi;
325 	if (mpi && !mpi->unaffiliate) {
326 		mdev = mpi->mdev;
327 		/* If it's the master there is no need to refcount; it'll
328 		 * exist as long as the ib_dev exists.
329 		 */
330 		if (!mpi->is_master)
331 			mpi->mdev_refcnt++;
332 	}
333 	spin_unlock(&port->mp.mpi_lock);
334 
335 	return mdev;
336 }
337 
338 void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u8 port_num)
339 {
340 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
341 							  port_num);
342 	struct mlx5_ib_multiport_info *mpi;
343 	struct mlx5_ib_port *port;
344 
345 	if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
346 		return;
347 
348 	port = &ibdev->port[port_num - 1];
349 
350 	spin_lock(&port->mp.mpi_lock);
351 	mpi = ibdev->port[port_num - 1].mp.mpi;
352 	if (mpi->is_master)
353 		goto out;
354 
355 	mpi->mdev_refcnt--;
356 	if (mpi->unaffiliate)
357 		complete(&mpi->unref_comp);
358 out:
359 	spin_unlock(&port->mp.mpi_lock);
360 }
361 
362 static int translate_eth_legacy_proto_oper(u32 eth_proto_oper, u8 *active_speed,
363 					   u8 *active_width)
364 {
365 	switch (eth_proto_oper) {
366 	case MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII):
367 	case MLX5E_PROT_MASK(MLX5E_1000BASE_KX):
368 	case MLX5E_PROT_MASK(MLX5E_100BASE_TX):
369 	case MLX5E_PROT_MASK(MLX5E_1000BASE_T):
370 		*active_width = IB_WIDTH_1X;
371 		*active_speed = IB_SPEED_SDR;
372 		break;
373 	case MLX5E_PROT_MASK(MLX5E_10GBASE_T):
374 	case MLX5E_PROT_MASK(MLX5E_10GBASE_CX4):
375 	case MLX5E_PROT_MASK(MLX5E_10GBASE_KX4):
376 	case MLX5E_PROT_MASK(MLX5E_10GBASE_KR):
377 	case MLX5E_PROT_MASK(MLX5E_10GBASE_CR):
378 	case MLX5E_PROT_MASK(MLX5E_10GBASE_SR):
379 	case MLX5E_PROT_MASK(MLX5E_10GBASE_ER):
380 		*active_width = IB_WIDTH_1X;
381 		*active_speed = IB_SPEED_QDR;
382 		break;
383 	case MLX5E_PROT_MASK(MLX5E_25GBASE_CR):
384 	case MLX5E_PROT_MASK(MLX5E_25GBASE_KR):
385 	case MLX5E_PROT_MASK(MLX5E_25GBASE_SR):
386 		*active_width = IB_WIDTH_1X;
387 		*active_speed = IB_SPEED_EDR;
388 		break;
389 	case MLX5E_PROT_MASK(MLX5E_40GBASE_CR4):
390 	case MLX5E_PROT_MASK(MLX5E_40GBASE_KR4):
391 	case MLX5E_PROT_MASK(MLX5E_40GBASE_SR4):
392 	case MLX5E_PROT_MASK(MLX5E_40GBASE_LR4):
393 		*active_width = IB_WIDTH_4X;
394 		*active_speed = IB_SPEED_QDR;
395 		break;
396 	case MLX5E_PROT_MASK(MLX5E_50GBASE_CR2):
397 	case MLX5E_PROT_MASK(MLX5E_50GBASE_KR2):
398 	case MLX5E_PROT_MASK(MLX5E_50GBASE_SR2):
399 		*active_width = IB_WIDTH_1X;
400 		*active_speed = IB_SPEED_HDR;
401 		break;
402 	case MLX5E_PROT_MASK(MLX5E_56GBASE_R4):
403 		*active_width = IB_WIDTH_4X;
404 		*active_speed = IB_SPEED_FDR;
405 		break;
406 	case MLX5E_PROT_MASK(MLX5E_100GBASE_CR4):
407 	case MLX5E_PROT_MASK(MLX5E_100GBASE_SR4):
408 	case MLX5E_PROT_MASK(MLX5E_100GBASE_KR4):
409 	case MLX5E_PROT_MASK(MLX5E_100GBASE_LR4):
410 		*active_width = IB_WIDTH_4X;
411 		*active_speed = IB_SPEED_EDR;
412 		break;
413 	default:
414 		return -EINVAL;
415 	}
416 
417 	return 0;
418 }
419 
420 static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u8 *active_speed,
421 					u8 *active_width)
422 {
423 	switch (eth_proto_oper) {
424 	case MLX5E_PROT_MASK(MLX5E_SGMII_100M):
425 	case MLX5E_PROT_MASK(MLX5E_1000BASE_X_SGMII):
426 		*active_width = IB_WIDTH_1X;
427 		*active_speed = IB_SPEED_SDR;
428 		break;
429 	case MLX5E_PROT_MASK(MLX5E_5GBASE_R):
430 		*active_width = IB_WIDTH_1X;
431 		*active_speed = IB_SPEED_DDR;
432 		break;
433 	case MLX5E_PROT_MASK(MLX5E_10GBASE_XFI_XAUI_1):
434 		*active_width = IB_WIDTH_1X;
435 		*active_speed = IB_SPEED_QDR;
436 		break;
437 	case MLX5E_PROT_MASK(MLX5E_40GBASE_XLAUI_4_XLPPI_4):
438 		*active_width = IB_WIDTH_4X;
439 		*active_speed = IB_SPEED_QDR;
440 		break;
441 	case MLX5E_PROT_MASK(MLX5E_25GAUI_1_25GBASE_CR_KR):
442 		*active_width = IB_WIDTH_1X;
443 		*active_speed = IB_SPEED_EDR;
444 		break;
445 	case MLX5E_PROT_MASK(MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2):
446 		*active_width = IB_WIDTH_2X;
447 		*active_speed = IB_SPEED_EDR;
448 		break;
449 	case MLX5E_PROT_MASK(MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR):
450 		*active_width = IB_WIDTH_1X;
451 		*active_speed = IB_SPEED_HDR;
452 		break;
453 	case MLX5E_PROT_MASK(MLX5E_CAUI_4_100GBASE_CR4_KR4):
454 		*active_width = IB_WIDTH_4X;
455 		*active_speed = IB_SPEED_EDR;
456 		break;
457 	case MLX5E_PROT_MASK(MLX5E_100GAUI_2_100GBASE_CR2_KR2):
458 		*active_width = IB_WIDTH_2X;
459 		*active_speed = IB_SPEED_HDR;
460 		break;
461 	case MLX5E_PROT_MASK(MLX5E_200GAUI_4_200GBASE_CR4_KR4):
462 		*active_width = IB_WIDTH_4X;
463 		*active_speed = IB_SPEED_HDR;
464 		break;
465 	default:
466 		return -EINVAL;
467 	}
468 
469 	return 0;
470 }
471 
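/*
 * Translate the PTYS eth_proto_oper field into IB speed/width, using the
 * extended protocol table when @ext is set (ptys_extended_ethernet) and
 * the legacy table otherwise.
 */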
472 static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
473 				    u8 *active_width, bool ext)
474 {
475 	return ext ?
476 		translate_eth_ext_proto_oper(eth_proto_oper, active_speed,
477 					     active_width) :
478 		translate_eth_legacy_proto_oper(eth_proto_oper, active_speed,
479 						active_width);
480 }
481 
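/*
 * Fill ib_port_attr for an Ethernet (RoCE) port: speed/width come from
 * PTYS, state and MTU from the associated netdev, plus the RoCE GID table
 * size and the qkey violation counter.
 */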
482 static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
483 				struct ib_port_attr *props)
484 {
485 	struct mlx5_ib_dev *dev = to_mdev(device);
486 	u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
487 	struct mlx5_core_dev *mdev;
488 	struct net_device *ndev, *upper;
489 	enum ib_mtu ndev_ib_mtu;
490 	bool put_mdev = true;
491 	u16 qkey_viol_cntr;
492 	u32 eth_prot_oper;
493 	u8 mdev_port_num;
494 	bool ext;
495 	int err;
496 
497 	mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
498 	if (!mdev) {
499 		/* This means the port isn't affiliated yet. Get the
500 		 * info for the master port instead.
501 		 */
502 		put_mdev = false;
503 		mdev = dev->mdev;
504 		mdev_port_num = 1;
505 		port_num = 1;
506 	}
507 
508 	/* Possible bad flows are checked before filling out props, so in
509 	 * case of an error it will still be zeroed out.
510 	 * Use the native port in the case of reps.
511 	 */
512 	if (dev->is_rep)
513 		err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
514 					   1);
515 	else
516 		err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
517 					   mdev_port_num);
518 	if (err)
519 		goto out;
520 	ext = MLX5_CAP_PCAM_FEATURE(dev->mdev, ptys_extended_ethernet);
521 	eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);
522 
523 	props->active_width     = IB_WIDTH_4X;
524 	props->active_speed     = IB_SPEED_QDR;
525 
526 	translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
527 				 &props->active_width, ext);
528 
529 	props->port_cap_flags |= IB_PORT_CM_SUP;
530 	props->ip_gids = true;
531 
532 	props->gid_tbl_len      = MLX5_CAP_ROCE(dev->mdev,
533 						roce_address_table_size);
534 	props->max_mtu          = IB_MTU_4096;
535 	props->max_msg_sz       = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
536 	props->pkey_tbl_len     = 1;
537 	props->state            = IB_PORT_DOWN;
538 	props->phys_state       = 3;
539 
540 	mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr);
541 	props->qkey_viol_cntr = qkey_viol_cntr;
542 
543 	/* If this is a stub query for an unaffiliated port stop here */
544 	if (!put_mdev)
545 		goto out;
546 
547 	ndev = mlx5_ib_get_netdev(device, port_num);
548 	if (!ndev)
549 		goto out;
550 
551 	if (dev->lag_active) {
552 		rcu_read_lock();
553 		upper = netdev_master_upper_dev_get_rcu(ndev);
554 		if (upper) {
555 			dev_put(ndev);
556 			ndev = upper;
557 			dev_hold(ndev);
558 		}
559 		rcu_read_unlock();
560 	}
561 
562 	if (netif_running(ndev) && netif_carrier_ok(ndev)) {
563 		props->state      = IB_PORT_ACTIVE;
564 		props->phys_state = 5;
565 	}
566 
567 	ndev_ib_mtu = iboe_get_mtu(ndev->mtu);
568 
569 	dev_put(ndev);
570 
571 	props->active_mtu	= min(props->max_mtu, ndev_ib_mtu);
572 out:
573 	if (put_mdev)
574 		mlx5_ib_put_native_port_mdev(dev, port_num);
575 	return err;
576 }
577 
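/*
 * Program a RoCE GID table entry in HW; the RoCE version and L3 type are
 * derived from the GID type. Called with a NULL @gid to clear an entry.
 */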
578 static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
579 			 unsigned int index, const union ib_gid *gid,
580 			 const struct ib_gid_attr *attr)
581 {
582 	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
583 	u16 vlan_id = 0xffff;
584 	u8 roce_version = 0;
585 	u8 roce_l3_type = 0;
586 	u8 mac[ETH_ALEN];
587 	int ret;
588 
589 	if (gid) {
590 		gid_type = attr->gid_type;
591 		ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
592 		if (ret)
593 			return ret;
594 	}
595 
596 	switch (gid_type) {
597 	case IB_GID_TYPE_IB:
598 		roce_version = MLX5_ROCE_VERSION_1;
599 		break;
600 	case IB_GID_TYPE_ROCE_UDP_ENCAP:
601 		roce_version = MLX5_ROCE_VERSION_2;
602 		if (ipv6_addr_v4mapped((void *)gid))
603 			roce_l3_type = MLX5_ROCE_L3_TYPE_IPV4;
604 		else
605 			roce_l3_type = MLX5_ROCE_L3_TYPE_IPV6;
606 		break;
607 
608 	default:
609 		mlx5_ib_warn(dev, "Unexpected GID type %u\n", gid_type);
610 	}
611 
612 	return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
613 				      roce_l3_type, gid->raw, mac,
614 				      vlan_id < VLAN_CFI_MASK, vlan_id,
615 				      port_num);
616 }
617 
618 static int mlx5_ib_add_gid(const struct ib_gid_attr *attr,
619 			   __always_unused void **context)
620 {
621 	return set_roce_addr(to_mdev(attr->device), attr->port_num,
622 			     attr->index, &attr->gid, attr);
623 }
624 
625 static int mlx5_ib_del_gid(const struct ib_gid_attr *attr,
626 			   __always_unused void **context)
627 {
628 	return set_roce_addr(to_mdev(attr->device), attr->port_num,
629 			     attr->index, NULL, NULL);
630 }
631 
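/*
 * For RoCE v2 GIDs return the device's minimum source UDP port; other GID
 * types don't use a UDP source port, so return 0.
 */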
632 __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev,
633 			       const struct ib_gid_attr *attr)
634 {
635 	if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
636 		return 0;
637 
638 	return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
639 }
640 
641 static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
642 {
643 	if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
644 		return !MLX5_CAP_GEN(dev->mdev, ib_virt);
645 	return 0;
646 }
647 
648 enum {
649 	MLX5_VPORT_ACCESS_METHOD_MAD,
650 	MLX5_VPORT_ACCESS_METHOD_HCA,
651 	MLX5_VPORT_ACCESS_METHOD_NIC,
652 };
653 
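/*
 * Pick how port/vport attributes are queried: the MAD interface for IB
 * ports without ib_virt support, NIC vport commands for an Ethernet link
 * layer, and HCA vport commands otherwise.
 */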
654 static int mlx5_get_vport_access_method(struct ib_device *ibdev)
655 {
656 	if (mlx5_use_mad_ifc(to_mdev(ibdev)))
657 		return MLX5_VPORT_ACCESS_METHOD_MAD;
658 
659 	if (mlx5_ib_port_link_layer(ibdev, 1) ==
660 	    IB_LINK_LAYER_ETHERNET)
661 		return MLX5_VPORT_ACCESS_METHOD_NIC;
662 
663 	return MLX5_VPORT_ACCESS_METHOD_HCA;
664 }
665 
666 static void get_atomic_caps(struct mlx5_ib_dev *dev,
667 			    u8 atomic_size_qp,
668 			    struct ib_device_attr *props)
669 {
670 	u8 tmp;
671 	u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
672 	u8 atomic_req_8B_endianness_mode =
673 		MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianness_mode);
674 
675 	/* Check if HW supports 8-byte standard atomic operations and is
676 	 * capable of responding in host endianness.
677 	 */
678 	tmp = MLX5_ATOMIC_OPS_CMP_SWAP | MLX5_ATOMIC_OPS_FETCH_ADD;
679 	if (((atomic_operations & tmp) == tmp) &&
680 	    (atomic_size_qp & MLX5_ATOMIC_SIZE_QP_8BYTES) &&
681 	    (atomic_req_8B_endianness_mode)) {
682 		props->atomic_cap = IB_ATOMIC_HCA;
683 	} else {
684 		props->atomic_cap = IB_ATOMIC_NONE;
685 	}
686 }
687 
688 static void get_atomic_caps_qp(struct mlx5_ib_dev *dev,
689 			       struct ib_device_attr *props)
690 {
691 	u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
692 
693 	get_atomic_caps(dev, atomic_size_qp, props);
694 }
695 
696 static void get_atomic_caps_dc(struct mlx5_ib_dev *dev,
697 			       struct ib_device_attr *props)
698 {
699 	u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc);
700 
701 	get_atomic_caps(dev, atomic_size_qp, props);
702 }
703 
704 bool mlx5_ib_dc_atomic_is_supported(struct mlx5_ib_dev *dev)
705 {
706 	struct ib_device_attr props = {};
707 
708 	get_atomic_caps_dc(dev, &props);
709 	return props.atomic_cap == IB_ATOMIC_HCA;
710 }
711 static int mlx5_query_system_image_guid(struct ib_device *ibdev,
712 					__be64 *sys_image_guid)
713 {
714 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
715 	struct mlx5_core_dev *mdev = dev->mdev;
716 	u64 tmp;
717 	int err;
718 
719 	switch (mlx5_get_vport_access_method(ibdev)) {
720 	case MLX5_VPORT_ACCESS_METHOD_MAD:
721 		return mlx5_query_mad_ifc_system_image_guid(ibdev,
722 							    sys_image_guid);
723 
724 	case MLX5_VPORT_ACCESS_METHOD_HCA:
725 		err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
726 		break;
727 
728 	case MLX5_VPORT_ACCESS_METHOD_NIC:
729 		err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
730 		break;
731 
732 	default:
733 		return -EINVAL;
734 	}
735 
736 	if (!err)
737 		*sys_image_guid = cpu_to_be64(tmp);
738 
739 	return err;
740 
741 }
742 
743 static int mlx5_query_max_pkeys(struct ib_device *ibdev,
744 				u16 *max_pkeys)
745 {
746 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
747 	struct mlx5_core_dev *mdev = dev->mdev;
748 
749 	switch (mlx5_get_vport_access_method(ibdev)) {
750 	case MLX5_VPORT_ACCESS_METHOD_MAD:
751 		return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);
752 
753 	case MLX5_VPORT_ACCESS_METHOD_HCA:
754 	case MLX5_VPORT_ACCESS_METHOD_NIC:
755 		*max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
756 						pkey_table_size));
757 		return 0;
758 
759 	default:
760 		return -EINVAL;
761 	}
762 }
763 
764 static int mlx5_query_vendor_id(struct ib_device *ibdev,
765 				u32 *vendor_id)
766 {
767 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
768 
769 	switch (mlx5_get_vport_access_method(ibdev)) {
770 	case MLX5_VPORT_ACCESS_METHOD_MAD:
771 		return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);
772 
773 	case MLX5_VPORT_ACCESS_METHOD_HCA:
774 	case MLX5_VPORT_ACCESS_METHOD_NIC:
775 		return mlx5_core_query_vendor_id(dev->mdev, vendor_id);
776 
777 	default:
778 		return -EINVAL;
779 	}
780 }
781 
782 static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
783 				__be64 *node_guid)
784 {
785 	u64 tmp;
786 	int err;
787 
788 	switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
789 	case MLX5_VPORT_ACCESS_METHOD_MAD:
790 		return mlx5_query_mad_ifc_node_guid(dev, node_guid);
791 
792 	case MLX5_VPORT_ACCESS_METHOD_HCA:
793 		err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
794 		break;
795 
796 	case MLX5_VPORT_ACCESS_METHOD_NIC:
797 		err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp);
798 		break;
799 
800 	default:
801 		return -EINVAL;
802 	}
803 
804 	if (!err)
805 		*node_guid = cpu_to_be64(tmp);
806 
807 	return err;
808 }
809 
810 struct mlx5_reg_node_desc {
811 	u8	desc[IB_DEVICE_NODE_DESC_MAX];
812 };
813 
814 static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
815 {
816 	struct mlx5_reg_node_desc in;
817 
818 	if (mlx5_use_mad_ifc(dev))
819 		return mlx5_query_mad_ifc_node_desc(dev, node_desc);
820 
821 	memset(&in, 0, sizeof(in));
822 
823 	return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
824 				    sizeof(struct mlx5_reg_node_desc),
825 				    MLX5_REG_NODE_DESC, 0, 0);
826 }
827 
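/*
 * ib_device_attr query. Fills the generic attributes from HCA capability
 * bits and, when userspace provided room in @uhw, appends the mlx5
 * specific response fields (TSO/RSS, CQE compression, packet pacing,
 * striding RQ, tunnel offload caps, ...), growing resp.response_length
 * for each field that is reported.
 */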
828 static int mlx5_ib_query_device(struct ib_device *ibdev,
829 				struct ib_device_attr *props,
830 				struct ib_udata *uhw)
831 {
832 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
833 	struct mlx5_core_dev *mdev = dev->mdev;
834 	int err = -ENOMEM;
835 	int max_sq_desc;
836 	int max_rq_sg;
837 	int max_sq_sg;
838 	u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
839 	bool raw_support = !mlx5_core_mp_enabled(mdev);
840 	struct mlx5_ib_query_device_resp resp = {};
841 	size_t resp_len;
842 	u64 max_tso;
843 
844 	resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
845 	if (uhw->outlen && uhw->outlen < resp_len)
846 		return -EINVAL;
847 	else
848 		resp.response_length = resp_len;
849 
850 	if (uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
851 		return -EINVAL;
852 
853 	memset(props, 0, sizeof(*props));
854 	err = mlx5_query_system_image_guid(ibdev,
855 					   &props->sys_image_guid);
856 	if (err)
857 		return err;
858 
859 	err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys);
860 	if (err)
861 		return err;
862 
863 	err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
864 	if (err)
865 		return err;
866 
867 	props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
868 		(fw_rev_min(dev->mdev) << 16) |
869 		fw_rev_sub(dev->mdev);
870 	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
871 		IB_DEVICE_PORT_ACTIVE_EVENT		|
872 		IB_DEVICE_SYS_IMAGE_GUID		|
873 		IB_DEVICE_RC_RNR_NAK_GEN;
874 
875 	if (MLX5_CAP_GEN(mdev, pkv))
876 		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
877 	if (MLX5_CAP_GEN(mdev, qkv))
878 		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
879 	if (MLX5_CAP_GEN(mdev, apm))
880 		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
881 	if (MLX5_CAP_GEN(mdev, xrc))
882 		props->device_cap_flags |= IB_DEVICE_XRC;
883 	if (MLX5_CAP_GEN(mdev, imaicl)) {
884 		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW |
885 					   IB_DEVICE_MEM_WINDOW_TYPE_2B;
886 		props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
887 		/* We support 'Gappy' memory registration too */
888 		props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG;
889 	}
890 	props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
891 	if (MLX5_CAP_GEN(mdev, sho)) {
892 		props->device_cap_flags |= IB_DEVICE_INTEGRITY_HANDOVER;
893 		/* At this stage no support for signature handover */
894 		props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
895 				      IB_PROT_T10DIF_TYPE_2 |
896 				      IB_PROT_T10DIF_TYPE_3;
897 		props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
898 				       IB_GUARD_T10DIF_CSUM;
899 	}
900 	if (MLX5_CAP_GEN(mdev, block_lb_mc))
901 		props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
902 
903 	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && raw_support) {
904 		if (MLX5_CAP_ETH(mdev, csum_cap)) {
905 			/* Legacy bit to support old userspace libraries */
906 			props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
907 			props->raw_packet_caps |= IB_RAW_PACKET_CAP_IP_CSUM;
908 		}
909 
910 		if (MLX5_CAP_ETH(dev->mdev, vlan_cap))
911 			props->raw_packet_caps |=
912 				IB_RAW_PACKET_CAP_CVLAN_STRIPPING;
913 
914 		if (field_avail(typeof(resp), tso_caps, uhw->outlen)) {
915 			max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
916 			if (max_tso) {
917 				resp.tso_caps.max_tso = 1 << max_tso;
918 				resp.tso_caps.supported_qpts |=
919 					1 << IB_QPT_RAW_PACKET;
920 				resp.response_length += sizeof(resp.tso_caps);
921 			}
922 		}
923 
924 		if (field_avail(typeof(resp), rss_caps, uhw->outlen)) {
925 			resp.rss_caps.rx_hash_function =
926 						MLX5_RX_HASH_FUNC_TOEPLITZ;
927 			resp.rss_caps.rx_hash_fields_mask =
928 						MLX5_RX_HASH_SRC_IPV4 |
929 						MLX5_RX_HASH_DST_IPV4 |
930 						MLX5_RX_HASH_SRC_IPV6 |
931 						MLX5_RX_HASH_DST_IPV6 |
932 						MLX5_RX_HASH_SRC_PORT_TCP |
933 						MLX5_RX_HASH_DST_PORT_TCP |
934 						MLX5_RX_HASH_SRC_PORT_UDP |
935 						MLX5_RX_HASH_DST_PORT_UDP |
936 						MLX5_RX_HASH_INNER;
937 			if (mlx5_accel_ipsec_device_caps(dev->mdev) &
938 			    MLX5_ACCEL_IPSEC_CAP_DEVICE)
939 				resp.rss_caps.rx_hash_fields_mask |=
940 					MLX5_RX_HASH_IPSEC_SPI;
941 			resp.response_length += sizeof(resp.rss_caps);
942 		}
943 	} else {
944 		if (field_avail(typeof(resp), tso_caps, uhw->outlen))
945 			resp.response_length += sizeof(resp.tso_caps);
946 		if (field_avail(typeof(resp), rss_caps, uhw->outlen))
947 			resp.response_length += sizeof(resp.rss_caps);
948 	}
949 
950 	if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
951 		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
952 		props->device_cap_flags |= IB_DEVICE_UD_TSO;
953 	}
954 
955 	if (MLX5_CAP_GEN(dev->mdev, rq_delay_drop) &&
956 	    MLX5_CAP_GEN(dev->mdev, general_notification_event) &&
957 	    raw_support)
958 		props->raw_packet_caps |= IB_RAW_PACKET_CAP_DELAY_DROP;
959 
960 	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
961 	    MLX5_CAP_IPOIB_ENHANCED(mdev, csum_cap))
962 		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
963 
964 	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
965 	    MLX5_CAP_ETH(dev->mdev, scatter_fcs) &&
966 	    raw_support) {
967 		/* Legacy bit to support old userspace libraries */
968 		props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
969 		props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
970 	}
971 
972 	if (MLX5_CAP_DEV_MEM(mdev, memic)) {
973 		props->max_dm_size =
974 			MLX5_CAP_DEV_MEM(mdev, max_memic_size);
975 	}
976 
977 	if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
978 		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
979 
980 	if (MLX5_CAP_GEN(mdev, end_pad))
981 		props->device_cap_flags |= IB_DEVICE_PCI_WRITE_END_PADDING;
982 
983 	props->vendor_part_id	   = mdev->pdev->device;
984 	props->hw_ver		   = mdev->pdev->revision;
985 
986 	props->max_mr_size	   = ~0ull;
987 	props->page_size_cap	   = ~(min_page_size - 1);
988 	props->max_qp		   = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
989 	props->max_qp_wr	   = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
990 	max_rq_sg =  MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
991 		     sizeof(struct mlx5_wqe_data_seg);
992 	max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
993 	max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) -
994 		     sizeof(struct mlx5_wqe_raddr_seg)) /
995 		sizeof(struct mlx5_wqe_data_seg);
996 	props->max_send_sge = max_sq_sg;
997 	props->max_recv_sge = max_rq_sg;
998 	props->max_sge_rd	   = MLX5_MAX_SGE_RD;
999 	props->max_cq		   = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
1000 	props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
1001 	props->max_mr		   = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
1002 	props->max_pd		   = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
1003 	props->max_qp_rd_atom	   = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
1004 	props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
1005 	props->max_srq		   = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
1006 	props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
1007 	props->local_ca_ack_delay  = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
1008 	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
1009 	props->max_srq_sge	   = max_rq_sg - 1;
1010 	props->max_fast_reg_page_list_len =
1011 		1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
1012 	props->max_pi_fast_reg_page_list_len =
1013 		props->max_fast_reg_page_list_len / 2;
1014 	get_atomic_caps_qp(dev, props);
1015 	props->masked_atomic_cap   = IB_ATOMIC_NONE;
1016 	props->max_mcast_grp	   = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
1017 	props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
1018 	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
1019 					   props->max_mcast_grp;
1020 	props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
1021 	props->max_ah = INT_MAX;
1022 	props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
1023 	props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;
1024 
1025 	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
1026 		if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
1027 			props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
1028 		props->odp_caps = dev->odp_caps;
1029 	}
1030 
1031 	if (MLX5_CAP_GEN(mdev, cd))
1032 		props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;
1033 
1034 	if (!mlx5_core_is_pf(mdev))
1035 		props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;
1036 
1037 	if (mlx5_ib_port_link_layer(ibdev, 1) ==
1038 	    IB_LINK_LAYER_ETHERNET && raw_support) {
1039 		props->rss_caps.max_rwq_indirection_tables =
1040 			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
1041 		props->rss_caps.max_rwq_indirection_table_size =
1042 			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size);
1043 		props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
1044 		props->max_wq_type_rq =
1045 			1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
1046 	}
1047 
1048 	if (MLX5_CAP_GEN(mdev, tag_matching)) {
1049 		props->tm_caps.max_num_tags =
1050 			(1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1;
1051 		props->tm_caps.max_ops =
1052 			1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
1053 		props->tm_caps.max_sge = MLX5_TM_MAX_SGE;
1054 	}
1055 
1056 	if (MLX5_CAP_GEN(mdev, tag_matching) &&
1057 	    MLX5_CAP_GEN(mdev, rndv_offload_rc)) {
1058 		props->tm_caps.flags = IB_TM_CAP_RNDV_RC;
1059 		props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
1060 	}
1061 
1062 	if (MLX5_CAP_GEN(dev->mdev, cq_moderation)) {
1063 		props->cq_caps.max_cq_moderation_count =
1064 						MLX5_MAX_CQ_COUNT;
1065 		props->cq_caps.max_cq_moderation_period =
1066 						MLX5_MAX_CQ_PERIOD;
1067 	}
1068 
1069 	if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
1070 		resp.response_length += sizeof(resp.cqe_comp_caps);
1071 
1072 		if (MLX5_CAP_GEN(dev->mdev, cqe_compression)) {
1073 			resp.cqe_comp_caps.max_num =
1074 				MLX5_CAP_GEN(dev->mdev,
1075 					     cqe_compression_max_num);
1076 
1077 			resp.cqe_comp_caps.supported_format =
1078 				MLX5_IB_CQE_RES_FORMAT_HASH |
1079 				MLX5_IB_CQE_RES_FORMAT_CSUM;
1080 
1081 			if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
1082 				resp.cqe_comp_caps.supported_format |=
1083 					MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX;
1084 		}
1085 	}
1086 
1087 	if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen) &&
1088 	    raw_support) {
1089 		if (MLX5_CAP_QOS(mdev, packet_pacing) &&
1090 		    MLX5_CAP_GEN(mdev, qos)) {
1091 			resp.packet_pacing_caps.qp_rate_limit_max =
1092 				MLX5_CAP_QOS(mdev, packet_pacing_max_rate);
1093 			resp.packet_pacing_caps.qp_rate_limit_min =
1094 				MLX5_CAP_QOS(mdev, packet_pacing_min_rate);
1095 			resp.packet_pacing_caps.supported_qpts |=
1096 				1 << IB_QPT_RAW_PACKET;
1097 			if (MLX5_CAP_QOS(mdev, packet_pacing_burst_bound) &&
1098 			    MLX5_CAP_QOS(mdev, packet_pacing_typical_size))
1099 				resp.packet_pacing_caps.cap_flags |=
1100 					MLX5_IB_PP_SUPPORT_BURST;
1101 		}
1102 		resp.response_length += sizeof(resp.packet_pacing_caps);
1103 	}
1104 
1105 	if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
1106 			uhw->outlen)) {
1107 		if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe))
1108 			resp.mlx5_ib_support_multi_pkt_send_wqes =
1109 				MLX5_IB_ALLOW_MPW;
1110 
1111 		if (MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe))
1112 			resp.mlx5_ib_support_multi_pkt_send_wqes |=
1113 				MLX5_IB_SUPPORT_EMPW;
1114 
1115 		resp.response_length +=
1116 			sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
1117 	}
1118 
1119 	if (field_avail(typeof(resp), flags, uhw->outlen)) {
1120 		resp.response_length += sizeof(resp.flags);
1121 
1122 		if (MLX5_CAP_GEN(mdev, cqe_compression_128))
1123 			resp.flags |=
1124 				MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP;
1125 
1126 		if (MLX5_CAP_GEN(mdev, cqe_128_always))
1127 			resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD;
1128 		if (MLX5_CAP_GEN(mdev, qp_packet_based))
1129 			resp.flags |=
1130 				MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE;
1131 
1132 		resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT;
1133 	}
1134 
1135 	if (field_avail(typeof(resp), sw_parsing_caps,
1136 			uhw->outlen)) {
1137 		resp.response_length += sizeof(resp.sw_parsing_caps);
1138 		if (MLX5_CAP_ETH(mdev, swp)) {
1139 			resp.sw_parsing_caps.sw_parsing_offloads |=
1140 				MLX5_IB_SW_PARSING;
1141 
1142 			if (MLX5_CAP_ETH(mdev, swp_csum))
1143 				resp.sw_parsing_caps.sw_parsing_offloads |=
1144 					MLX5_IB_SW_PARSING_CSUM;
1145 
1146 			if (MLX5_CAP_ETH(mdev, swp_lso))
1147 				resp.sw_parsing_caps.sw_parsing_offloads |=
1148 					MLX5_IB_SW_PARSING_LSO;
1149 
1150 			if (resp.sw_parsing_caps.sw_parsing_offloads)
1151 				resp.sw_parsing_caps.supported_qpts =
1152 					BIT(IB_QPT_RAW_PACKET);
1153 		}
1154 	}
1155 
1156 	if (field_avail(typeof(resp), striding_rq_caps, uhw->outlen) &&
1157 	    raw_support) {
1158 		resp.response_length += sizeof(resp.striding_rq_caps);
1159 		if (MLX5_CAP_GEN(mdev, striding_rq)) {
1160 			resp.striding_rq_caps.min_single_stride_log_num_of_bytes =
1161 				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
1162 			resp.striding_rq_caps.max_single_stride_log_num_of_bytes =
1163 				MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES;
1164 			resp.striding_rq_caps.min_single_wqe_log_num_of_strides =
1165 				MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
1166 			resp.striding_rq_caps.max_single_wqe_log_num_of_strides =
1167 				MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES;
1168 			resp.striding_rq_caps.supported_qpts =
1169 				BIT(IB_QPT_RAW_PACKET);
1170 		}
1171 	}
1172 
1173 	if (field_avail(typeof(resp), tunnel_offloads_caps,
1174 			uhw->outlen)) {
1175 		resp.response_length += sizeof(resp.tunnel_offloads_caps);
1176 		if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan))
1177 			resp.tunnel_offloads_caps |=
1178 				MLX5_IB_TUNNELED_OFFLOADS_VXLAN;
1179 		if (MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx))
1180 			resp.tunnel_offloads_caps |=
1181 				MLX5_IB_TUNNELED_OFFLOADS_GENEVE;
1182 		if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
1183 			resp.tunnel_offloads_caps |=
1184 				MLX5_IB_TUNNELED_OFFLOADS_GRE;
1185 		if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
1186 		    MLX5_FLEX_PROTO_CW_MPLS_GRE)
1187 			resp.tunnel_offloads_caps |=
1188 				MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE;
1189 		if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
1190 		    MLX5_FLEX_PROTO_CW_MPLS_UDP)
1191 			resp.tunnel_offloads_caps |=
1192 				MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
1193 	}
1194 
1195 	if (uhw->outlen) {
1196 		err = ib_copy_to_udata(uhw, &resp, resp.response_length);
1197 
1198 		if (err)
1199 			return err;
1200 	}
1201 
1202 	return 0;
1203 }
1204 
1205 enum mlx5_ib_width {
1206 	MLX5_IB_WIDTH_1X	= 1 << 0,
1207 	MLX5_IB_WIDTH_2X	= 1 << 1,
1208 	MLX5_IB_WIDTH_4X	= 1 << 2,
1209 	MLX5_IB_WIDTH_8X	= 1 << 3,
1210 	MLX5_IB_WIDTH_12X	= 1 << 4
1211 };
1212 
1213 static void translate_active_width(struct ib_device *ibdev, u8 active_width,
1214 				  u8 *ib_width)
1215 {
1216 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1217 
1218 	if (active_width & MLX5_IB_WIDTH_1X)
1219 		*ib_width = IB_WIDTH_1X;
1220 	else if (active_width & MLX5_IB_WIDTH_2X)
1221 		*ib_width = IB_WIDTH_2X;
1222 	else if (active_width & MLX5_IB_WIDTH_4X)
1223 		*ib_width = IB_WIDTH_4X;
1224 	else if (active_width & MLX5_IB_WIDTH_8X)
1225 		*ib_width = IB_WIDTH_8X;
1226 	else if (active_width & MLX5_IB_WIDTH_12X)
1227 		*ib_width = IB_WIDTH_12X;
1228 	else {
1229 		mlx5_ib_dbg(dev, "Invalid active_width %d, setting width to default value: 4x\n",
1230 			    (int)active_width);
1231 		*ib_width = IB_WIDTH_4X;
1232 	}
1233 
1234 	return;
1235 }
1236 
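/* Map a byte MTU to the IB MTU enum (256 -> 1 ... 4096 -> 5), -1 otherwise. */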
1237 static int mlx5_mtu_to_ib_mtu(int mtu)
1238 {
1239 	switch (mtu) {
1240 	case 256: return 1;
1241 	case 512: return 2;
1242 	case 1024: return 3;
1243 	case 2048: return 4;
1244 	case 4096: return 5;
1245 	default:
1246 		pr_warn("invalid mtu\n");
1247 		return -1;
1248 	}
1249 }
1250 
1251 enum ib_max_vl_num {
1252 	__IB_MAX_VL_0		= 1,
1253 	__IB_MAX_VL_0_1		= 2,
1254 	__IB_MAX_VL_0_3		= 3,
1255 	__IB_MAX_VL_0_7		= 4,
1256 	__IB_MAX_VL_0_14	= 5,
1257 };
1258 
1259 enum mlx5_vl_hw_cap {
1260 	MLX5_VL_HW_0	= 1,
1261 	MLX5_VL_HW_0_1	= 2,
1262 	MLX5_VL_HW_0_2	= 3,
1263 	MLX5_VL_HW_0_3	= 4,
1264 	MLX5_VL_HW_0_4	= 5,
1265 	MLX5_VL_HW_0_5	= 6,
1266 	MLX5_VL_HW_0_6	= 7,
1267 	MLX5_VL_HW_0_7	= 8,
1268 	MLX5_VL_HW_0_14	= 15
1269 };
1270 
1271 static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
1272 				u8 *max_vl_num)
1273 {
1274 	switch (vl_hw_cap) {
1275 	case MLX5_VL_HW_0:
1276 		*max_vl_num = __IB_MAX_VL_0;
1277 		break;
1278 	case MLX5_VL_HW_0_1:
1279 		*max_vl_num = __IB_MAX_VL_0_1;
1280 		break;
1281 	case MLX5_VL_HW_0_3:
1282 		*max_vl_num = __IB_MAX_VL_0_3;
1283 		break;
1284 	case MLX5_VL_HW_0_7:
1285 		*max_vl_num = __IB_MAX_VL_0_7;
1286 		break;
1287 	case MLX5_VL_HW_0_14:
1288 		*max_vl_num = __IB_MAX_VL_0_14;
1289 		break;
1290 
1291 	default:
1292 		return -EINVAL;
1293 	}
1294 
1295 	return 0;
1296 }
1297 
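/*
 * Fill ib_port_attr for an IB link-layer port from the HCA vport context
 * plus the PTYS link width/speed and the MTU and VL capability registers.
 */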
1298 static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
1299 			       struct ib_port_attr *props)
1300 {
1301 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1302 	struct mlx5_core_dev *mdev = dev->mdev;
1303 	struct mlx5_hca_vport_context *rep;
1304 	u16 max_mtu;
1305 	u16 oper_mtu;
1306 	int err;
1307 	u8 ib_link_width_oper;
1308 	u8 vl_hw_cap;
1309 
1310 	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
1311 	if (!rep) {
1312 		err = -ENOMEM;
1313 		goto out;
1314 	}
1315 
1316 	/* props being zeroed by the caller, avoid zeroing it here */
1317 
1318 	err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
1319 	if (err)
1320 		goto out;
1321 
1322 	props->lid		= rep->lid;
1323 	props->lmc		= rep->lmc;
1324 	props->sm_lid		= rep->sm_lid;
1325 	props->sm_sl		= rep->sm_sl;
1326 	props->state		= rep->vport_state;
1327 	props->phys_state	= rep->port_physical_state;
1328 	props->port_cap_flags	= rep->cap_mask1;
1329 	props->gid_tbl_len	= mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
1330 	props->max_msg_sz	= 1 << MLX5_CAP_GEN(mdev, log_max_msg);
1331 	props->pkey_tbl_len	= mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
1332 	props->bad_pkey_cntr	= rep->pkey_violation_counter;
1333 	props->qkey_viol_cntr	= rep->qkey_violation_counter;
1334 	props->subnet_timeout	= rep->subnet_timeout;
1335 	props->init_type_reply	= rep->init_type_reply;
1336 
1337 	if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP)
1338 		props->port_cap_flags2 = rep->cap_mask2;
1339 
1340 	err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
1341 	if (err)
1342 		goto out;
1343 
1344 	translate_active_width(ibdev, ib_link_width_oper, &props->active_width);
1345 
1346 	err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port);
1347 	if (err)
1348 		goto out;
1349 
1350 	mlx5_query_port_max_mtu(mdev, &max_mtu, port);
1351 
1352 	props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);
1353 
1354 	mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);
1355 
1356 	props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu);
1357 
1358 	err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port);
1359 	if (err)
1360 		goto out;
1361 
1362 	err = translate_max_vl_num(ibdev, vl_hw_cap,
1363 				   &props->max_vl_num);
1364 out:
1365 	kfree(rep);
1366 	return err;
1367 }
1368 
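/*
 * Generic port query: dispatch to the MAD, HCA vport or RoCE flavour and
 * then shrink gid_tbl_len by the number of GIDs the core has reserved on
 * the native port.
 */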
1369 int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
1370 		       struct ib_port_attr *props)
1371 {
1372 	unsigned int count;
1373 	int ret;
1374 
1375 	switch (mlx5_get_vport_access_method(ibdev)) {
1376 	case MLX5_VPORT_ACCESS_METHOD_MAD:
1377 		ret = mlx5_query_mad_ifc_port(ibdev, port, props);
1378 		break;
1379 
1380 	case MLX5_VPORT_ACCESS_METHOD_HCA:
1381 		ret = mlx5_query_hca_port(ibdev, port, props);
1382 		break;
1383 
1384 	case MLX5_VPORT_ACCESS_METHOD_NIC:
1385 		ret = mlx5_query_port_roce(ibdev, port, props);
1386 		break;
1387 
1388 	default:
1389 		ret = -EINVAL;
1390 	}
1391 
1392 	if (!ret && props) {
1393 		struct mlx5_ib_dev *dev = to_mdev(ibdev);
1394 		struct mlx5_core_dev *mdev;
1395 		bool put_mdev = true;
1396 
1397 		mdev = mlx5_ib_get_native_port_mdev(dev, port, NULL);
1398 		if (!mdev) {
1399 			/* If the port isn't affiliated yet query the master.
1400 			 * The master and slave will have the same values.
1401 			 */
1402 			mdev = dev->mdev;
1403 			port = 1;
1404 			put_mdev = false;
1405 		}
1406 		count = mlx5_core_reserved_gids_count(mdev);
1407 		if (put_mdev)
1408 			mlx5_ib_put_native_port_mdev(dev, port);
1409 		props->gid_tbl_len -= count;
1410 	}
1411 	return ret;
1412 }
1413 
1414 static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u8 port,
1415 				  struct ib_port_attr *props)
1416 {
1417 	int ret;
1418 
1419 	/* Only link layer == Ethernet is valid for representors,
1420 	 * and we always use port 1.
1421 	 */
1422 	ret = mlx5_query_port_roce(ibdev, port, props);
1423 	if (ret || !props)
1424 		return ret;
1425 
1426 	/* We don't support GIDs */
1427 	props->gid_tbl_len = 0;
1428 
1429 	return ret;
1430 }
1431 
1432 static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
1433 			     union ib_gid *gid)
1434 {
1435 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1436 	struct mlx5_core_dev *mdev = dev->mdev;
1437 
1438 	switch (mlx5_get_vport_access_method(ibdev)) {
1439 	case MLX5_VPORT_ACCESS_METHOD_MAD:
1440 		return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);
1441 
1442 	case MLX5_VPORT_ACCESS_METHOD_HCA:
1443 		return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid);
1444 
1445 	default:
1446 		return -EINVAL;
1447 	}
1448 
1449 }
1450 
1451 static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u8 port,
1452 				   u16 index, u16 *pkey)
1453 {
1454 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1455 	struct mlx5_core_dev *mdev;
1456 	bool put_mdev = true;
1457 	u8 mdev_port_num;
1458 	int err;
1459 
1460 	mdev = mlx5_ib_get_native_port_mdev(dev, port, &mdev_port_num);
1461 	if (!mdev) {
1462 		/* The port isn't affiliated yet, get the PKey from the master
1463 		 * port. For RoCE the PKey tables will be the same.
1464 		 */
1465 		put_mdev = false;
1466 		mdev = dev->mdev;
1467 		mdev_port_num = 1;
1468 	}
1469 
1470 	err = mlx5_query_hca_vport_pkey(mdev, 0, mdev_port_num, 0,
1471 					index, pkey);
1472 	if (put_mdev)
1473 		mlx5_ib_put_native_port_mdev(dev, port);
1474 
1475 	return err;
1476 }
1477 
1478 static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
1479 			      u16 *pkey)
1480 {
1481 	switch (mlx5_get_vport_access_method(ibdev)) {
1482 	case MLX5_VPORT_ACCESS_METHOD_MAD:
1483 		return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);
1484 
1485 	case MLX5_VPORT_ACCESS_METHOD_HCA:
1486 	case MLX5_VPORT_ACCESS_METHOD_NIC:
1487 		return mlx5_query_hca_nic_pkey(ibdev, port, index, pkey);
1488 	default:
1489 		return -EINVAL;
1490 	}
1491 }
1492 
1493 static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
1494 				 struct ib_device_modify *props)
1495 {
1496 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1497 	struct mlx5_reg_node_desc in;
1498 	struct mlx5_reg_node_desc out;
1499 	int err;
1500 
1501 	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
1502 		return -EOPNOTSUPP;
1503 
1504 	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
1505 		return 0;
1506 
1507 	/*
1508 	 * If possible, pass node desc to FW, so it can generate
1509 	 * a 144 trap.  If cmd fails, just ignore.
1510 	 */
1511 	memcpy(&in, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
1512 	err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
1513 				   sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
1514 	if (err)
1515 		return err;
1516 
1517 	memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
1518 
1519 	return err;
1520 }
1521 
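/*
 * Change port capability bits through the HCA vport context: only bits
 * that are writable according to cap_mask1_perm may be modified, and the
 * mask/value pair lets firmware apply the update atomically.
 */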
1522 static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u8 port_num, u32 mask,
1523 				u32 value)
1524 {
1525 	struct mlx5_hca_vport_context ctx = {};
1526 	struct mlx5_core_dev *mdev;
1527 	u8 mdev_port_num;
1528 	int err;
1529 
1530 	mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
1531 	if (!mdev)
1532 		return -ENODEV;
1533 
1534 	err = mlx5_query_hca_vport_context(mdev, 0, mdev_port_num, 0, &ctx);
1535 	if (err)
1536 		goto out;
1537 
1538 	if (~ctx.cap_mask1_perm & mask) {
1539 		mlx5_ib_warn(dev, "trying to change bitmask 0x%X but change supported 0x%X\n",
1540 			     mask, ctx.cap_mask1_perm);
1541 		err = -EINVAL;
1542 		goto out;
1543 	}
1544 
1545 	ctx.cap_mask1 = value;
1546 	ctx.cap_mask1_perm = mask;
1547 	err = mlx5_core_modify_hca_vport_context(mdev, 0, mdev_port_num,
1548 						 0, &ctx);
1549 
1550 out:
1551 	mlx5_ib_put_native_port_mdev(dev, port_num);
1552 
1553 	return err;
1554 }
1555 
1556 static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
1557 			       struct ib_port_modify *props)
1558 {
1559 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1560 	struct ib_port_attr attr;
1561 	u32 tmp;
1562 	int err;
1563 	u32 change_mask;
1564 	u32 value;
1565 	bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) ==
1566 		      IB_LINK_LAYER_INFINIBAND);
1567 
1568 	/* CM layer calls ib_modify_port() regardless of the link layer. For
1569 	 * Ethernet ports, qkey violation and Port capabilities are meaningless.
1570 	 */
1571 	if (!is_ib)
1572 		return 0;
1573 
1574 	if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) {
1575 		change_mask = props->clr_port_cap_mask | props->set_port_cap_mask;
1576 		value = ~props->clr_port_cap_mask | props->set_port_cap_mask;
1577 		return set_port_caps_atomic(dev, port, change_mask, value);
1578 	}
1579 
1580 	mutex_lock(&dev->cap_mask_mutex);
1581 
1582 	err = ib_query_port(ibdev, port, &attr);
1583 	if (err)
1584 		goto out;
1585 
1586 	tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
1587 		~props->clr_port_cap_mask;
1588 
1589 	err = mlx5_set_port_caps(dev->mdev, port, tmp);
1590 
1591 out:
1592 	mutex_unlock(&dev->cap_mask_mutex);
1593 	return err;
1594 }
1595 
1596 static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps)
1597 {
1598 	mlx5_ib_dbg(dev, "MLX5_LIB_CAP_4K_UAR = %s\n",
1599 		    caps & MLX5_LIB_CAP_4K_UAR ? "y" : "n");
1600 }
1601 
1602 static u16 calc_dynamic_bfregs(int uars_per_sys_page)
1603 {
1604 	/* A large page with non-4K UAR support might limit the dynamic size */
1605 	if (uars_per_sys_page == 1  && PAGE_SIZE > 4096)
1606 		return MLX5_MIN_DYN_BFREGS;
1607 
1608 	return MLX5_MAX_DYN_BFREGS;
1609 }
1610 
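/*
 * Split the user-requested bfregs into statically allocated UAR system
 * pages plus a dynamic region: the static request is rounded up to a
 * whole number of system pages (bfregs_per_sys_page each) and
 * num_dyn_bfregs more are reserved on top of it, with num_sys_pages
 * covering both. As an illustration only, if bfregs_per_sys_page were 4,
 * a request for 10 bfregs would be rounded up to 12 static bfregs over 3
 * static system pages.
 */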
1611 static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
1612 			     struct mlx5_ib_alloc_ucontext_req_v2 *req,
1613 			     struct mlx5_bfreg_info *bfregi)
1614 {
1615 	int uars_per_sys_page;
1616 	int bfregs_per_sys_page;
1617 	int ref_bfregs = req->total_num_bfregs;
1618 
1619 	if (req->total_num_bfregs == 0)
1620 		return -EINVAL;
1621 
1622 	BUILD_BUG_ON(MLX5_MAX_BFREGS % MLX5_NON_FP_BFREGS_IN_PAGE);
1623 	BUILD_BUG_ON(MLX5_MAX_BFREGS < MLX5_NON_FP_BFREGS_IN_PAGE);
1624 
1625 	if (req->total_num_bfregs > MLX5_MAX_BFREGS)
1626 		return -ENOMEM;
1627 
1628 	uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k);
1629 	bfregs_per_sys_page = uars_per_sys_page * MLX5_NON_FP_BFREGS_PER_UAR;
1630 	/* This holds the required static allocation asked by the user */
1631 	req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page);
1632 	if (req->num_low_latency_bfregs > req->total_num_bfregs - 1)
1633 		return -EINVAL;
1634 
1635 	bfregi->num_static_sys_pages = req->total_num_bfregs / bfregs_per_sys_page;
1636 	bfregi->num_dyn_bfregs = ALIGN(calc_dynamic_bfregs(uars_per_sys_page), bfregs_per_sys_page);
1637 	bfregi->total_num_bfregs = req->total_num_bfregs + bfregi->num_dyn_bfregs;
1638 	bfregi->num_sys_pages = bfregi->total_num_bfregs / bfregs_per_sys_page;
1639 
1640 	mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, total bfregs %d, using %d sys pages\n",
1641 		    MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
1642 		    lib_uar_4k ? "yes" : "no", ref_bfregs,
1643 		    req->total_num_bfregs, bfregi->total_num_bfregs,
1644 		    bfregi->num_sys_pages);
1645 
1646 	return 0;
1647 }
1648 
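/*
 * Allocate a UAR for each static system page; dynamic pages are only
 * marked MLX5_IB_INVALID_UAR_INDEX here and get their UARs on demand. On
 * failure, free whatever was already allocated.
 */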
1649 static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
1650 {
1651 	struct mlx5_bfreg_info *bfregi;
1652 	int err;
1653 	int i;
1654 
1655 	bfregi = &context->bfregi;
1656 	for (i = 0; i < bfregi->num_static_sys_pages; i++) {
1657 		err = mlx5_cmd_alloc_uar(dev->mdev, &bfregi->sys_pages[i]);
1658 		if (err)
1659 			goto error;
1660 
1661 		mlx5_ib_dbg(dev, "allocated uar %d\n", bfregi->sys_pages[i]);
1662 	}
1663 
1664 	for (i = bfregi->num_static_sys_pages; i < bfregi->num_sys_pages; i++)
1665 		bfregi->sys_pages[i] = MLX5_IB_INVALID_UAR_INDEX;
1666 
1667 	return 0;
1668 
1669 error:
1670 	for (--i; i >= 0; i--)
1671 		if (mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]))
1672 			mlx5_ib_warn(dev, "failed to free uar %d\n", i);
1673 
1674 	return err;
1675 }
1676 
1677 static void deallocate_uars(struct mlx5_ib_dev *dev,
1678 			    struct mlx5_ib_ucontext *context)
1679 {
1680 	struct mlx5_bfreg_info *bfregi;
1681 	int i;
1682 
1683 	bfregi = &context->bfregi;
1684 	for (i = 0; i < bfregi->num_sys_pages; i++)
1685 		if (i < bfregi->num_static_sys_pages ||
1686 		    bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX)
1687 			mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
1688 }
1689 
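/*
 * Reference-count local loopback users: loopback is enabled in HW once a
 * second user transport domain or the first QP that needs it appears, and
 * mlx5_ib_disable_lb() turns it back off when the counts drop below that.
 */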
1690 int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
1691 {
1692 	int err = 0;
1693 
1694 	mutex_lock(&dev->lb.mutex);
1695 	if (td)
1696 		dev->lb.user_td++;
1697 	if (qp)
1698 		dev->lb.qps++;
1699 
1700 	if (dev->lb.user_td == 2 ||
1701 	    dev->lb.qps == 1) {
1702 		if (!dev->lb.enabled) {
1703 			err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
1704 			dev->lb.enabled = true;
1705 		}
1706 	}
1707 
1708 	mutex_unlock(&dev->lb.mutex);
1709 
1710 	return err;
1711 }
1712 
1713 void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
1714 {
1715 	mutex_lock(&dev->lb.mutex);
1716 	if (td)
1717 		dev->lb.user_td--;
1718 	if (qp)
1719 		dev->lb.qps--;
1720 
1721 	if (dev->lb.user_td == 1 &&
1722 	    dev->lb.qps == 0) {
1723 		if (dev->lb.enabled) {
1724 			mlx5_nic_vport_update_local_lb(dev->mdev, false);
1725 			dev->lb.enabled = false;
1726 		}
1727 	}
1728 
1729 	mutex_unlock(&dev->lb.mutex);
1730 }
1731 
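/*
 * Allocate a transport domain for a user context. On Ethernet devices
 * that can disable local loopback, the TD is also counted through
 * mlx5_ib_enable_lb() so loopback is only enabled while it is needed.
 */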
1732 static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn,
1733 					  u16 uid)
1734 {
1735 	int err;
1736 
1737 	if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
1738 		return 0;
1739 
1740 	err = mlx5_cmd_alloc_transport_domain(dev->mdev, tdn, uid);
1741 	if (err)
1742 		return err;
1743 
1744 	if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
1745 	    (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
1746 	     !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
1747 		return err;
1748 
1749 	return mlx5_ib_enable_lb(dev, true, false);
1750 }
1751 
1752 static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn,
1753 					     u16 uid)
1754 {
1755 	if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
1756 		return;
1757 
1758 	mlx5_cmd_dealloc_transport_domain(dev->mdev, tdn, uid);
1759 
1760 	if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
1761 	    (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
1762 	     !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
1763 		return;
1764 
1765 	mlx5_ib_disable_lb(dev, true, false);
1766 }
1767 
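/*
 * Create a user context: negotiate the uverbs request/response layout,
 * size and allocate the bfreg/UAR resources the library asked for, and
 * report back the device limits it needs (bf_reg_size, cqe_version, UAR
 * sizes, ...).
 */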
1768 static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
1769 				  struct ib_udata *udata)
1770 {
1771 	struct ib_device *ibdev = uctx->device;
1772 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1773 	struct mlx5_ib_alloc_ucontext_req_v2 req = {};
1774 	struct mlx5_ib_alloc_ucontext_resp resp = {};
1775 	struct mlx5_core_dev *mdev = dev->mdev;
1776 	struct mlx5_ib_ucontext *context = to_mucontext(uctx);
1777 	struct mlx5_bfreg_info *bfregi;
1778 	int ver;
1779 	int err;
1780 	size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
1781 				     max_cqe_version);
1782 	u32 dump_fill_mkey;
1783 	bool lib_uar_4k;
1784 
1785 	if (!dev->ib_active)
1786 		return -EAGAIN;
1787 
1788 	if (udata->inlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
1789 		ver = 0;
1790 	else if (udata->inlen >= min_req_v2)
1791 		ver = 2;
1792 	else
1793 		return -EINVAL;
1794 
1795 	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
1796 	if (err)
1797 		return err;
1798 
1799 	if (req.flags & ~MLX5_IB_ALLOC_UCTX_DEVX)
1800 		return -EOPNOTSUPP;
1801 
1802 	if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
1803 		return -EOPNOTSUPP;
1804 
1805 	req.total_num_bfregs = ALIGN(req.total_num_bfregs,
1806 				    MLX5_NON_FP_BFREGS_PER_UAR);
1807 	if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
1808 		return -EINVAL;
1809 
1810 	resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
1811 	if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
1812 		resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
1813 	resp.cache_line_size = cache_line_size();
1814 	resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
1815 	resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
1816 	resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
1817 	resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
1818 	resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
1819 	resp.cqe_version = min_t(__u8,
1820 				 (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
1821 				 req.max_cqe_version);
1822 	resp.log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
1823 				MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT;
1824 	resp.num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
1825 					MLX5_CAP_GEN(dev->mdev, num_of_uars_per_page) : 1;
1826 	resp.response_length = min(offsetof(typeof(resp), response_length) +
1827 				   sizeof(resp.response_length), udata->outlen);
1828 
1829 	if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) {
1830 		if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_EGRESS))
1831 			resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM;
1832 		if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA)
1833 			resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA;
1834 		if (MLX5_CAP_FLOWTABLE(dev->mdev, flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
1835 			resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING;
1836 		if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN)
1837 			resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN;
1838 		/* MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD is currently always 0 */
1839 	}
1840 
1841 	lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
1842 	bfregi = &context->bfregi;
1843 
1844 	/* updates req->total_num_bfregs */
1845 	err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi);
1846 	if (err)
1847 		goto out_ctx;
1848 
1849 	mutex_init(&bfregi->lock);
1850 	bfregi->lib_uar_4k = lib_uar_4k;
1851 	bfregi->count = kcalloc(bfregi->total_num_bfregs, sizeof(*bfregi->count),
1852 				GFP_KERNEL);
1853 	if (!bfregi->count) {
1854 		err = -ENOMEM;
1855 		goto out_ctx;
1856 	}
1857 
1858 	bfregi->sys_pages = kcalloc(bfregi->num_sys_pages,
1859 				    sizeof(*bfregi->sys_pages),
1860 				    GFP_KERNEL);
1861 	if (!bfregi->sys_pages) {
1862 		err = -ENOMEM;
1863 		goto out_count;
1864 	}
1865 
1866 	err = allocate_uars(dev, context);
1867 	if (err)
1868 		goto out_sys_pages;
1869 
1870 	if (ibdev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING)
1871 		context->ibucontext.invalidate_range =
1872 			&mlx5_ib_invalidate_range;
1873 
1874 	if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
1875 		err = mlx5_ib_devx_create(dev, true);
1876 		if (err < 0)
1877 			goto out_uars;
1878 		context->devx_uid = err;
1879 	}
1880 
1881 	err = mlx5_ib_alloc_transport_domain(dev, &context->tdn,
1882 					     context->devx_uid);
1883 	if (err)
1884 		goto out_devx;
1885 
1886 	if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
1887 		err = mlx5_cmd_dump_fill_mkey(dev->mdev, &dump_fill_mkey);
1888 		if (err)
1889 			goto out_mdev;
1890 	}
1891 
1892 	INIT_LIST_HEAD(&context->db_page_list);
1893 	mutex_init(&context->db_page_mutex);
1894 
1895 	resp.tot_bfregs = req.total_num_bfregs;
1896 	resp.num_ports = dev->num_ports;
1897 
1898 	if (field_avail(typeof(resp), cqe_version, udata->outlen))
1899 		resp.response_length += sizeof(resp.cqe_version);
1900 
1901 	if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) {
1902 		resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE |
1903 				      MLX5_USER_CMDS_SUPP_UHW_CREATE_AH;
1904 		resp.response_length += sizeof(resp.cmds_supp_uhw);
1905 	}
1906 
1907 	if (field_avail(typeof(resp), eth_min_inline, udata->outlen)) {
1908 		if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) {
1909 			mlx5_query_min_inline(dev->mdev, &resp.eth_min_inline);
1910 			resp.eth_min_inline++;
1911 		}
1912 		resp.response_length += sizeof(resp.eth_min_inline);
1913 	}
1914 
1915 	if (field_avail(typeof(resp), clock_info_versions, udata->outlen)) {
1916 		if (mdev->clock_info)
1917 			resp.clock_info_versions = BIT(MLX5_IB_CLOCK_INFO_V1);
1918 		resp.response_length += sizeof(resp.clock_info_versions);
1919 	}
1920 
1921 	/*
1922 	 * We don't want to expose information from the PCI BAR that is located
1923 	 * beyond the first 4096 bytes, so if the arch only supports larger
1924 	 * pages, pretend we don't support reading the HCA's core clock. This is
1925 	 * also enforced by the mmap function.
1926 	 */
1927 	if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
1928 		if (PAGE_SIZE <= 4096) {
1929 			resp.comp_mask |=
1930 				MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
1931 			resp.hca_core_clock_offset =
1932 				offsetof(struct mlx5_init_seg, internal_timer_h) % PAGE_SIZE;
1933 		}
1934 		resp.response_length += sizeof(resp.hca_core_clock_offset);
1935 	}
1936 
1937 	if (field_avail(typeof(resp), log_uar_size, udata->outlen))
1938 		resp.response_length += sizeof(resp.log_uar_size);
1939 
1940 	if (field_avail(typeof(resp), num_uars_per_page, udata->outlen))
1941 		resp.response_length += sizeof(resp.num_uars_per_page);
1942 
1943 	if (field_avail(typeof(resp), num_dyn_bfregs, udata->outlen)) {
1944 		resp.num_dyn_bfregs = bfregi->num_dyn_bfregs;
1945 		resp.response_length += sizeof(resp.num_dyn_bfregs);
1946 	}
1947 
1948 	if (field_avail(typeof(resp), dump_fill_mkey, udata->outlen)) {
1949 		if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
1950 			resp.dump_fill_mkey = dump_fill_mkey;
1951 			resp.comp_mask |=
1952 				MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY;
1953 		}
1954 		resp.response_length += sizeof(resp.dump_fill_mkey);
1955 	}
1956 
1957 	err = ib_copy_to_udata(udata, &resp, resp.response_length);
1958 	if (err)
1959 		goto out_mdev;
1960 
1961 	bfregi->ver = ver;
1962 	bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
1963 	context->cqe_version = resp.cqe_version;
1964 	context->lib_caps = req.lib_caps;
1965 	print_lib_caps(dev, context->lib_caps);
1966 
1967 	if (dev->lag_active) {
1968 		u8 port = mlx5_core_native_port_num(dev->mdev) - 1;
1969 
1970 		atomic_set(&context->tx_port_affinity,
1971 			   atomic_add_return(
1972 				   1, &dev->port[port].roce.tx_port_affinity));
1973 	}
1974 
1975 	return 0;
1976 
1977 out_mdev:
1978 	mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
1979 out_devx:
1980 	if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX)
1981 		mlx5_ib_devx_destroy(dev, context->devx_uid);
1982 
1983 out_uars:
1984 	deallocate_uars(dev, context);
1985 
1986 out_sys_pages:
1987 	kfree(bfregi->sys_pages);
1988 
1989 out_count:
1990 	kfree(bfregi->count);
1991 
1992 out_ctx:
1993 	return err;
1994 }
1995 
1996 static void mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
1997 {
1998 	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
1999 	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
2000 	struct mlx5_bfreg_info *bfregi;
2001 
2002 	/* All umems must be destroyed before destroying the ucontext. */
2003 	mutex_lock(&ibcontext->per_mm_list_lock);
2004 	WARN_ON(!list_empty(&ibcontext->per_mm_list));
2005 	mutex_unlock(&ibcontext->per_mm_list_lock);
2006 
2007 	bfregi = &context->bfregi;
2008 	mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
2009 
2010 	if (context->devx_uid)
2011 		mlx5_ib_devx_destroy(dev, context->devx_uid);
2012 
2013 	deallocate_uars(dev, context);
2014 	kfree(bfregi->sys_pages);
2015 	kfree(bfregi->count);
2016 }
2017 
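/*
 * Translate a UAR index into the physical page frame number inside the
 * device BAR.  When the device packs 4K UARs into a system page (uar_4k),
 * several UAR indexes share the same PFN.
 */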
2018 static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
2019 				 int uar_idx)
2020 {
2021 	int fw_uars_per_page;
2022 
2023 	fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;
2024 
2025 	return (dev->mdev->bar_addr >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
2026 }
2027 
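/*
 * The mmap offset (vm_pgoff) encodes a command in the bits above
 * MLX5_IB_MMAP_CMD_SHIFT and an argument (typically a UAR or page index) in
 * the bits below it; the helpers below decode the two parts.
 */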
2028 static int get_command(unsigned long offset)
2029 {
2030 	return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
2031 }
2032 
2033 static int get_arg(unsigned long offset)
2034 {
2035 	return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
2036 }
2037 
2038 static int get_index(unsigned long offset)
2039 {
2040 	return get_arg(offset);
2041 }
2042 
2043 /* Index resides in an extra byte to allow index values larger than 255 */
2044 static int get_extended_index(unsigned long offset)
2045 {
2046 	return get_arg(offset) | ((offset >> 16) & 0xff) << 8;
2047 }
2048 
2049 
2050 static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
2051 {
2052 }
2053 
2054 static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
2055 {
2056 	switch (cmd) {
2057 	case MLX5_IB_MMAP_WC_PAGE:
2058 		return "WC";
2059 	case MLX5_IB_MMAP_REGULAR_PAGE:
2060 		return "best effort WC";
2061 	case MLX5_IB_MMAP_NC_PAGE:
2062 		return "NC";
2063 	case MLX5_IB_MMAP_DEVICE_MEM:
2064 		return "Device Memory";
2065 	default:
2066 		return NULL;
2067 	}
2068 }
2069 
2070 static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
2071 					struct vm_area_struct *vma,
2072 					struct mlx5_ib_ucontext *context)
2073 {
2074 	if ((vma->vm_end - vma->vm_start != PAGE_SIZE) ||
2075 	    !(vma->vm_flags & VM_SHARED))
2076 		return -EINVAL;
2077 
2078 	if (get_index(vma->vm_pgoff) != MLX5_IB_CLOCK_INFO_V1)
2079 		return -EOPNOTSUPP;
2080 
2081 	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
2082 		return -EPERM;
2083 	vma->vm_flags &= ~VM_MAYWRITE;
2084 
2085 	if (!dev->mdev->clock_info)
2086 		return -EOPNOTSUPP;
2087 
2088 	return vm_insert_page(vma, vma->vm_start,
2089 			      virt_to_page(dev->mdev->clock_info));
2090 }
2091 
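/*
 * Map a UAR page into user space with the caching mode implied by the mmap
 * command (WC, best-effort WC or NC).  MLX5_IB_MMAP_ALLOC_WC allocates a new
 * dynamic UAR on demand; the other commands map one of the UARs that were
 * allocated statically at ucontext creation time.
 */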
2092 static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
2093 		    struct vm_area_struct *vma,
2094 		    struct mlx5_ib_ucontext *context)
2095 {
2096 	struct mlx5_bfreg_info *bfregi = &context->bfregi;
2097 	int err;
2098 	unsigned long idx;
2099 	phys_addr_t pfn;
2100 	pgprot_t prot;
2101 	u32 bfreg_dyn_idx = 0;
2102 	u32 uar_index;
2103 	int dyn_uar = (cmd == MLX5_IB_MMAP_ALLOC_WC);
2104 	int max_valid_idx = dyn_uar ? bfregi->num_sys_pages :
2105 				bfregi->num_static_sys_pages;
2106 
2107 	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
2108 		return -EINVAL;
2109 
2110 	if (dyn_uar)
2111 		idx = get_extended_index(vma->vm_pgoff) + bfregi->num_static_sys_pages;
2112 	else
2113 		idx = get_index(vma->vm_pgoff);
2114 
2115 	if (idx >= max_valid_idx) {
2116 		mlx5_ib_warn(dev, "invalid uar index %lu, max=%d\n",
2117 			     idx, max_valid_idx);
2118 		return -EINVAL;
2119 	}
2120 
2121 	switch (cmd) {
2122 	case MLX5_IB_MMAP_WC_PAGE:
2123 	case MLX5_IB_MMAP_ALLOC_WC:
2124 /* Some architectures don't support WC memory */
2125 #if defined(CONFIG_X86)
2126 		if (!pat_enabled())
2127 			return -EPERM;
2128 #elif !(defined(CONFIG_PPC) || (defined(CONFIG_ARM) && defined(CONFIG_MMU)))
2129 			return -EPERM;
2130 #endif
2131 	/* fall through */
2132 	case MLX5_IB_MMAP_REGULAR_PAGE:
2133 		/* For MLX5_IB_MMAP_REGULAR_PAGE, make a best effort to get WC */
2134 		prot = pgprot_writecombine(vma->vm_page_prot);
2135 		break;
2136 	case MLX5_IB_MMAP_NC_PAGE:
2137 		prot = pgprot_noncached(vma->vm_page_prot);
2138 		break;
2139 	default:
2140 		return -EINVAL;
2141 	}
2142 
2143 	if (dyn_uar) {
2144 		int uars_per_page;
2145 
2146 		uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k);
2147 		bfreg_dyn_idx = idx * (uars_per_page * MLX5_NON_FP_BFREGS_PER_UAR);
2148 		if (bfreg_dyn_idx >= bfregi->total_num_bfregs) {
2149 			mlx5_ib_warn(dev, "invalid bfreg_dyn_idx %u, max=%u\n",
2150 				     bfreg_dyn_idx, bfregi->total_num_bfregs);
2151 			return -EINVAL;
2152 		}
2153 
2154 		mutex_lock(&bfregi->lock);
2155 		/* Fail if this UAR is already allocated; the first bfreg index
2156 		 * of each page holds its count.
2157 		 */
2158 		if (bfregi->count[bfreg_dyn_idx]) {
2159 			mlx5_ib_warn(dev, "wrong offset, idx %lu is busy, bfregn=%u\n", idx, bfreg_dyn_idx);
2160 			mutex_unlock(&bfregi->lock);
2161 			return -EINVAL;
2162 		}
2163 
2164 		bfregi->count[bfreg_dyn_idx]++;
2165 		mutex_unlock(&bfregi->lock);
2166 
2167 		err = mlx5_cmd_alloc_uar(dev->mdev, &uar_index);
2168 		if (err) {
2169 			mlx5_ib_warn(dev, "UAR alloc failed\n");
2170 			goto free_bfreg;
2171 		}
2172 	} else {
2173 		uar_index = bfregi->sys_pages[idx];
2174 	}
2175 
2176 	pfn = uar_index2pfn(dev, uar_index);
2177 	mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
2178 
2179 	err = rdma_user_mmap_io(&context->ibucontext, vma, pfn, PAGE_SIZE,
2180 				prot);
2181 	if (err) {
2182 		mlx5_ib_err(dev,
2183 			    "rdma_user_mmap_io failed with error=%d, mmap_cmd=%s\n",
2184 			    err, mmap_cmd2str(cmd));
2185 		goto err;
2186 	}
2187 
2188 	if (dyn_uar)
2189 		bfregi->sys_pages[idx] = uar_index;
2190 	return 0;
2191 
2192 err:
2193 	if (!dyn_uar)
2194 		return err;
2195 
2196 	mlx5_cmd_free_uar(dev->mdev, uar_index);
2197 
2198 free_bfreg:
2199 	mlx5_ib_free_bfreg(dev, bfregi, bfreg_dyn_idx);
2200 
2201 	return err;
2202 }
2203 
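/*
 * Map device memory (MEMIC) pages that were previously handed out by
 * alloc_dm.  The requested range must be fully covered by pages marked in
 * the context's dm_pages bitmap.
 */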
2204 static int dm_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
2205 {
2206 	struct mlx5_ib_ucontext *mctx = to_mucontext(context);
2207 	struct mlx5_ib_dev *dev = to_mdev(context->device);
2208 	u16 page_idx = get_extended_index(vma->vm_pgoff);
2209 	size_t map_size = vma->vm_end - vma->vm_start;
2210 	u32 npages = map_size >> PAGE_SHIFT;
2211 	phys_addr_t pfn;
2212 
2213 	if (find_next_zero_bit(mctx->dm_pages, page_idx + npages, page_idx) !=
2214 	    page_idx + npages)
2215 		return -EINVAL;
2216 
2217 	pfn = ((dev->mdev->bar_addr +
2218 	      MLX5_CAP64_DEV_MEM(dev->mdev, memic_bar_start_addr)) >>
2219 	      PAGE_SHIFT) +
2220 	      page_idx;
2221 	return rdma_user_mmap_io(context, vma, pfn, map_size,
2222 				 pgprot_writecombine(vma->vm_page_prot));
2223 }
2224 
2225 static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
2226 {
2227 	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
2228 	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
2229 	unsigned long command;
2230 	phys_addr_t pfn;
2231 
2232 	command = get_command(vma->vm_pgoff);
2233 	switch (command) {
2234 	case MLX5_IB_MMAP_WC_PAGE:
2235 	case MLX5_IB_MMAP_NC_PAGE:
2236 	case MLX5_IB_MMAP_REGULAR_PAGE:
2237 	case MLX5_IB_MMAP_ALLOC_WC:
2238 		return uar_mmap(dev, command, vma, context);
2239 
2240 	case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
2241 		return -ENOSYS;
2242 
2243 	case MLX5_IB_MMAP_CORE_CLOCK:
2244 		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
2245 			return -EINVAL;
2246 
2247 		if (vma->vm_flags & VM_WRITE)
2248 			return -EPERM;
2249 		vma->vm_flags &= ~VM_MAYWRITE;
2250 
2251 		/* Don't expose to user-space information it shouldn't have */
2252 		if (PAGE_SIZE > 4096)
2253 			return -EOPNOTSUPP;
2254 
2255 		pfn = (dev->mdev->iseg_base +
2256 		       offsetof(struct mlx5_init_seg, internal_timer_h)) >>
2257 			PAGE_SHIFT;
2258 		return rdma_user_mmap_io(&context->ibucontext, vma, pfn,
2259 					 PAGE_SIZE,
2260 					 pgprot_noncached(vma->vm_page_prot));
2261 	case MLX5_IB_MMAP_CLOCK_INFO:
2262 		return mlx5_ib_mmap_clock_info_page(dev, vma, context);
2263 
2264 	case MLX5_IB_MMAP_DEVICE_MEM:
2265 		return dm_mmap(ibcontext, vma);
2266 
2267 	default:
2268 		return -EINVAL;
2269 	}
2270 
2271 	return 0;
2272 }
2273 
2274 static inline int check_dm_type_support(struct mlx5_ib_dev *dev,
2275 					u32 type)
2276 {
2277 	switch (type) {
2278 	case MLX5_IB_UAPI_DM_TYPE_MEMIC:
2279 		if (!MLX5_CAP_DEV_MEM(dev->mdev, memic))
2280 			return -EOPNOTSUPP;
2281 		break;
2282 	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
2283 	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
2284 		if (!capable(CAP_SYS_RAWIO) ||
2285 		    !capable(CAP_NET_RAW))
2286 			return -EPERM;
2287 
2288 		if (!(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner) ||
2289 		      MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, sw_owner)))
2290 			return -EOPNOTSUPP;
2291 		break;
2292 	}
2293 
2294 	return 0;
2295 }
2296 
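/*
 * MEMIC allocation: round the requested length up to the MEMIC base size,
 * allocate from the device memory BAR, report the page index and start
 * offset back to user space, and mark the pages in dm_pages so a later
 * mmap(MLX5_IB_MMAP_DEVICE_MEM) can validate them.
 */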
2297 static int handle_alloc_dm_memic(struct ib_ucontext *ctx,
2298 				 struct mlx5_ib_dm *dm,
2299 				 struct ib_dm_alloc_attr *attr,
2300 				 struct uverbs_attr_bundle *attrs)
2301 {
2302 	struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm;
2303 	u64 start_offset;
2304 	u32 page_idx;
2305 	int err;
2306 
2307 	dm->size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE);
2308 
2309 	err = mlx5_cmd_alloc_memic(dm_db, &dm->dev_addr,
2310 				   dm->size, attr->alignment);
2311 	if (err)
2312 		return err;
2313 
2314 	page_idx = (dm->dev_addr - pci_resource_start(dm_db->dev->pdev, 0) -
2315 		    MLX5_CAP64_DEV_MEM(dm_db->dev, memic_bar_start_addr)) >>
2316 		    PAGE_SHIFT;
2317 
2318 	err = uverbs_copy_to(attrs,
2319 			     MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
2320 			     &page_idx, sizeof(page_idx));
2321 	if (err)
2322 		goto err_dealloc;
2323 
2324 	start_offset = dm->dev_addr & ~PAGE_MASK;
2325 	err = uverbs_copy_to(attrs,
2326 			     MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
2327 			     &start_offset, sizeof(start_offset));
2328 	if (err)
2329 		goto err_dealloc;
2330 
2331 	bitmap_set(to_mucontext(ctx)->dm_pages, page_idx,
2332 		   DIV_ROUND_UP(dm->size, PAGE_SIZE));
2333 
2334 	return 0;
2335 
2336 err_dealloc:
2337 	mlx5_cmd_dealloc_memic(dm_db, dm->dev_addr, dm->size);
2338 
2339 	return err;
2340 }
2341 
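/*
 * SW ICM allocation: the size is rounded up to the ICM block size and then
 * to a power of two; the resulting device address is returned through the
 * START_OFFSET attribute and released again if the copy to user space fails.
 */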
2342 static int handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
2343 				  struct mlx5_ib_dm *dm,
2344 				  struct ib_dm_alloc_attr *attr,
2345 				  struct uverbs_attr_bundle *attrs,
2346 				  int type)
2347 {
2348 	struct mlx5_core_dev *dev = to_mdev(ctx->device)->mdev;
2349 	u64 act_size;
2350 	int err;
2351 
2352 	/* Allocation size must be a multiple of the basic block size
2353 	 * and a power of 2.
2354 	 */
2355 	act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dev));
2356 	act_size = roundup_pow_of_two(act_size);
2357 
2358 	dm->size = act_size;
2359 	err = mlx5_dm_sw_icm_alloc(dev, type, act_size,
2360 				   to_mucontext(ctx)->devx_uid, &dm->dev_addr,
2361 				   &dm->icm_dm.obj_id);
2362 	if (err)
2363 		return err;
2364 
2365 	err = uverbs_copy_to(attrs,
2366 			     MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
2367 			     &dm->dev_addr, sizeof(dm->dev_addr));
2368 	if (err)
2369 		mlx5_dm_sw_icm_dealloc(dev, type, dm->size,
2370 				       to_mucontext(ctx)->devx_uid, dm->dev_addr,
2371 				       dm->icm_dm.obj_id);
2372 
2373 	return err;
2374 }
2375 
2376 struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
2377 			       struct ib_ucontext *context,
2378 			       struct ib_dm_alloc_attr *attr,
2379 			       struct uverbs_attr_bundle *attrs)
2380 {
2381 	struct mlx5_ib_dm *dm;
2382 	enum mlx5_ib_uapi_dm_type type;
2383 	int err;
2384 
2385 	err = uverbs_get_const_default(&type, attrs,
2386 				       MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
2387 				       MLX5_IB_UAPI_DM_TYPE_MEMIC);
2388 	if (err)
2389 		return ERR_PTR(err);
2390 
2391 	mlx5_ib_dbg(to_mdev(ibdev), "alloc_dm req: dm_type=%d user_length=0x%llx log_alignment=%d\n",
2392 		    type, attr->length, attr->alignment);
2393 
2394 	err = check_dm_type_support(to_mdev(ibdev), type);
2395 	if (err)
2396 		return ERR_PTR(err);
2397 
2398 	dm = kzalloc(sizeof(*dm), GFP_KERNEL);
2399 	if (!dm)
2400 		return ERR_PTR(-ENOMEM);
2401 
2402 	dm->type = type;
2403 
2404 	switch (type) {
2405 	case MLX5_IB_UAPI_DM_TYPE_MEMIC:
2406 		err = handle_alloc_dm_memic(context, dm,
2407 					    attr,
2408 					    attrs);
2409 		break;
2410 	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
2411 		err = handle_alloc_dm_sw_icm(context, dm,
2412 					     attr, attrs,
2413 					     MLX5_SW_ICM_TYPE_STEERING);
2414 		break;
2415 	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
2416 		err = handle_alloc_dm_sw_icm(context, dm,
2417 					     attr, attrs,
2418 					     MLX5_SW_ICM_TYPE_HEADER_MODIFY);
2419 		break;
2420 	default:
2421 		err = -EOPNOTSUPP;
2422 	}
2423 
2424 	if (err)
2425 		goto err_free;
2426 
2427 	return &dm->ibdm;
2428 
2429 err_free:
2430 	kfree(dm);
2431 	return ERR_PTR(err);
2432 }
2433 
2434 int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
2435 {
2436 	struct mlx5_ib_ucontext *ctx = rdma_udata_to_drv_context(
2437 		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
2438 	struct mlx5_core_dev *dev = to_mdev(ibdm->device)->mdev;
2439 	struct mlx5_dm *dm_db = &to_mdev(ibdm->device)->dm;
2440 	struct mlx5_ib_dm *dm = to_mdm(ibdm);
2441 	u32 page_idx;
2442 	int ret;
2443 
2444 	switch (dm->type) {
2445 	case MLX5_IB_UAPI_DM_TYPE_MEMIC:
2446 		ret = mlx5_cmd_dealloc_memic(dm_db, dm->dev_addr, dm->size);
2447 		if (ret)
2448 			return ret;
2449 
2450 		page_idx = (dm->dev_addr - pci_resource_start(dev->pdev, 0) -
2451 			    MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr)) >>
2452 			    PAGE_SHIFT;
2453 		bitmap_clear(ctx->dm_pages, page_idx,
2454 			     DIV_ROUND_UP(dm->size, PAGE_SIZE));
2455 		break;
2456 	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
2457 		ret = mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_STEERING,
2458 					     dm->size, ctx->devx_uid, dm->dev_addr,
2459 					     dm->icm_dm.obj_id);
2460 		if (ret)
2461 			return ret;
2462 		break;
2463 	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
2464 		ret = mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_HEADER_MODIFY,
2465 					     dm->size, ctx->devx_uid, dm->dev_addr,
2466 					     dm->icm_dm.obj_id);
2467 		if (ret)
2468 			return ret;
2469 		break;
2470 	default:
2471 		return -EOPNOTSUPP;
2472 	}
2473 
2474 	kfree(dm);
2475 
2476 	return 0;
2477 }
2478 
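/*
 * Allocate a protection domain through the ALLOC_PD firmware command,
 * tagging it with the DEVX uid of the caller's ucontext (0 for kernel PDs).
 * For user PDs the new pdn is copied back through udata.
 */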
2479 static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
2480 {
2481 	struct mlx5_ib_pd *pd = to_mpd(ibpd);
2482 	struct ib_device *ibdev = ibpd->device;
2483 	struct mlx5_ib_alloc_pd_resp resp;
2484 	int err;
2485 	u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
2486 	u32 in[MLX5_ST_SZ_DW(alloc_pd_in)]   = {};
2487 	u16 uid = 0;
2488 	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
2489 		udata, struct mlx5_ib_ucontext, ibucontext);
2490 
2491 	uid = context ? context->devx_uid : 0;
2492 	MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
2493 	MLX5_SET(alloc_pd_in, in, uid, uid);
2494 	err = mlx5_cmd_exec(to_mdev(ibdev)->mdev, in, sizeof(in),
2495 			    out, sizeof(out));
2496 	if (err)
2497 		return err;
2498 
2499 	pd->pdn = MLX5_GET(alloc_pd_out, out, pd);
2500 	pd->uid = uid;
2501 	if (udata) {
2502 		resp.pdn = pd->pdn;
2503 		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
2504 			mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid);
2505 			return -EFAULT;
2506 		}
2507 	}
2508 
2509 	return 0;
2510 }
2511 
2512 static void mlx5_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
2513 {
2514 	struct mlx5_ib_dev *mdev = to_mdev(pd->device);
2515 	struct mlx5_ib_pd *mpd = to_mpd(pd);
2516 
2517 	mlx5_cmd_dealloc_pd(mdev->mdev, mpd->pdn, mpd->uid);
2518 }
2519 
2520 enum {
2521 	MATCH_CRITERIA_ENABLE_OUTER_BIT,
2522 	MATCH_CRITERIA_ENABLE_MISC_BIT,
2523 	MATCH_CRITERIA_ENABLE_INNER_BIT,
2524 	MATCH_CRITERIA_ENABLE_MISC2_BIT
2525 };
2526 
2527 #define HEADER_IS_ZERO(match_criteria, headers)			           \
2528 	!(memchr_inv(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
2529 		    0, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))       \
2530 
2531 static u8 get_match_criteria_enable(u32 *match_criteria)
2532 {
2533 	u8 match_criteria_enable;
2534 
2535 	match_criteria_enable =
2536 		(!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
2537 		MATCH_CRITERIA_ENABLE_OUTER_BIT;
2538 	match_criteria_enable |=
2539 		(!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
2540 		MATCH_CRITERIA_ENABLE_MISC_BIT;
2541 	match_criteria_enable |=
2542 		(!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
2543 		MATCH_CRITERIA_ENABLE_INNER_BIT;
2544 	match_criteria_enable |=
2545 		(!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
2546 		MATCH_CRITERIA_ENABLE_MISC2_BIT;
2547 
2548 	return match_criteria_enable;
2549 }
2550 
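/*
 * Set the ip_protocol match field.  If a conflicting protocol was already
 * set (e.g. by an earlier IPv4/IPv6 spec), fail instead of overriding it.
 */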
2551 static int set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
2552 {
2553 	u8 entry_mask;
2554 	u8 entry_val;
2555 	int err = 0;
2556 
2557 	if (!mask)
2558 		goto out;
2559 
2560 	entry_mask = MLX5_GET(fte_match_set_lyr_2_4, outer_c,
2561 			      ip_protocol);
2562 	entry_val = MLX5_GET(fte_match_set_lyr_2_4, outer_v,
2563 			     ip_protocol);
2564 	if (!entry_mask) {
2565 		MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask);
2566 		MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
2567 		goto out;
2568 	}
2569 	/* Don't override existing ip protocol */
2570 	if (mask != entry_mask || val != entry_val)
2571 		err = -EINVAL;
2572 out:
2573 	return err;
2574 }
2575 
2576 static void set_flow_label(void *misc_c, void *misc_v, u32 mask, u32 val,
2577 			   bool inner)
2578 {
2579 	if (inner) {
2580 		MLX5_SET(fte_match_set_misc,
2581 			 misc_c, inner_ipv6_flow_label, mask);
2582 		MLX5_SET(fte_match_set_misc,
2583 			 misc_v, inner_ipv6_flow_label, val);
2584 	} else {
2585 		MLX5_SET(fte_match_set_misc,
2586 			 misc_c, outer_ipv6_flow_label, mask);
2587 		MLX5_SET(fte_match_set_misc,
2588 			 misc_v, outer_ipv6_flow_label, val);
2589 	}
2590 }
2591 
2592 static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
2593 {
2594 	MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask);
2595 	MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_ecn, val);
2596 	MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_dscp, mask >> 2);
2597 	MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_dscp, val >> 2);
2598 }
2599 
2600 static int check_mpls_supp_fields(u32 field_support, const __be32 *set_mask)
2601 {
2602 	if (MLX5_GET(fte_match_mpls, set_mask, mpls_label) &&
2603 	    !(field_support & MLX5_FIELD_SUPPORT_MPLS_LABEL))
2604 		return -EOPNOTSUPP;
2605 
2606 	if (MLX5_GET(fte_match_mpls, set_mask, mpls_exp) &&
2607 	    !(field_support & MLX5_FIELD_SUPPORT_MPLS_EXP))
2608 		return -EOPNOTSUPP;
2609 
2610 	if (MLX5_GET(fte_match_mpls, set_mask, mpls_s_bos) &&
2611 	    !(field_support & MLX5_FIELD_SUPPORT_MPLS_S_BOS))
2612 		return -EOPNOTSUPP;
2613 
2614 	if (MLX5_GET(fte_match_mpls, set_mask, mpls_ttl) &&
2615 	    !(field_support & MLX5_FIELD_SUPPORT_MPLS_TTL))
2616 		return -EOPNOTSUPP;
2617 
2618 	return 0;
2619 }
2620 
2621 #define LAST_ETH_FIELD vlan_tag
2622 #define LAST_IB_FIELD sl
2623 #define LAST_IPV4_FIELD tos
2624 #define LAST_IPV6_FIELD traffic_class
2625 #define LAST_TCP_UDP_FIELD src_port
2626 #define LAST_TUNNEL_FIELD tunnel_id
2627 #define LAST_FLOW_TAG_FIELD tag_id
2628 #define LAST_DROP_FIELD size
2629 #define LAST_COUNTERS_FIELD counters
2630 
2631 /* Field is the last supported field */
2632 #define FIELDS_NOT_SUPPORTED(filter, field)\
2633 	memchr_inv((void *)&filter.field  +\
2634 		   sizeof(filter.field), 0,\
2635 		   sizeof(filter) -\
2636 		   offsetof(typeof(filter), field) -\
2637 		   sizeof(filter.field))
2638 
2639 int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
2640 			   bool is_egress,
2641 			   struct mlx5_flow_act *action)
2642 {
2643 
2644 	switch (maction->ib_action.type) {
2645 	case IB_FLOW_ACTION_ESP:
2646 		if (action->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
2647 				      MLX5_FLOW_CONTEXT_ACTION_DECRYPT))
2648 			return -EINVAL;
2649 		/* Currently only AES_GCM keymat is supported by the driver */
2650 		action->esp_id = (uintptr_t)maction->esp_aes_gcm.ctx;
2651 		action->action |= is_egress ?
2652 			MLX5_FLOW_CONTEXT_ACTION_ENCRYPT :
2653 			MLX5_FLOW_CONTEXT_ACTION_DECRYPT;
2654 		return 0;
2655 	case IB_FLOW_ACTION_UNSPECIFIED:
2656 		if (maction->flow_action_raw.sub_type ==
2657 		    MLX5_IB_FLOW_ACTION_MODIFY_HEADER) {
2658 			if (action->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
2659 				return -EINVAL;
2660 			action->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
2661 			action->modify_hdr =
2662 				maction->flow_action_raw.modify_hdr;
2663 			return 0;
2664 		}
2665 		if (maction->flow_action_raw.sub_type ==
2666 		    MLX5_IB_FLOW_ACTION_DECAP) {
2667 			if (action->action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
2668 				return -EINVAL;
2669 			action->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
2670 			return 0;
2671 		}
2672 		if (maction->flow_action_raw.sub_type ==
2673 		    MLX5_IB_FLOW_ACTION_PACKET_REFORMAT) {
2674 			if (action->action &
2675 			    MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
2676 				return -EINVAL;
2677 			action->action |=
2678 				MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
2679 			action->pkt_reformat =
2680 				maction->flow_action_raw.pkt_reformat;
2681 			return 0;
2682 		}
2683 		/* fall through */
2684 	default:
2685 		return -EOPNOTSUPP;
2686 	}
2687 }
2688 
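/*
 * Translate one ib_flow_spec into the mlx5 match criteria/values and flow
 * actions.  prev_type is the previously parsed spec type and is used to
 * place MPLS labels correctly (over UDP, over GRE, inner or outer).
 */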
2689 static int parse_flow_attr(struct mlx5_core_dev *mdev,
2690 			   struct mlx5_flow_spec *spec,
2691 			   const union ib_flow_spec *ib_spec,
2692 			   const struct ib_flow_attr *flow_attr,
2693 			   struct mlx5_flow_act *action, u32 prev_type)
2694 {
2695 	struct mlx5_flow_context *flow_context = &spec->flow_context;
2696 	u32 *match_c = spec->match_criteria;
2697 	u32 *match_v = spec->match_value;
2698 	void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
2699 					   misc_parameters);
2700 	void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v,
2701 					   misc_parameters);
2702 	void *misc_params2_c = MLX5_ADDR_OF(fte_match_param, match_c,
2703 					    misc_parameters_2);
2704 	void *misc_params2_v = MLX5_ADDR_OF(fte_match_param, match_v,
2705 					    misc_parameters_2);
2706 	void *headers_c;
2707 	void *headers_v;
2708 	int match_ipv;
2709 	int ret;
2710 
2711 	if (ib_spec->type & IB_FLOW_SPEC_INNER) {
2712 		headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
2713 					 inner_headers);
2714 		headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
2715 					 inner_headers);
2716 		match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2717 					ft_field_support.inner_ip_version);
2718 	} else {
2719 		headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
2720 					 outer_headers);
2721 		headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
2722 					 outer_headers);
2723 		match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2724 					ft_field_support.outer_ip_version);
2725 	}
2726 
2727 	switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
2728 	case IB_FLOW_SPEC_ETH:
2729 		if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
2730 			return -EOPNOTSUPP;
2731 
2732 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2733 					     dmac_47_16),
2734 				ib_spec->eth.mask.dst_mac);
2735 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2736 					     dmac_47_16),
2737 				ib_spec->eth.val.dst_mac);
2738 
2739 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2740 					     smac_47_16),
2741 				ib_spec->eth.mask.src_mac);
2742 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2743 					     smac_47_16),
2744 				ib_spec->eth.val.src_mac);
2745 
2746 		if (ib_spec->eth.mask.vlan_tag) {
2747 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2748 				 cvlan_tag, 1);
2749 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2750 				 cvlan_tag, 1);
2751 
2752 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2753 				 first_vid, ntohs(ib_spec->eth.mask.vlan_tag));
2754 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2755 				 first_vid, ntohs(ib_spec->eth.val.vlan_tag));
2756 
2757 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2758 				 first_cfi,
2759 				 ntohs(ib_spec->eth.mask.vlan_tag) >> 12);
2760 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2761 				 first_cfi,
2762 				 ntohs(ib_spec->eth.val.vlan_tag) >> 12);
2763 
2764 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2765 				 first_prio,
2766 				 ntohs(ib_spec->eth.mask.vlan_tag) >> 13);
2767 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2768 				 first_prio,
2769 				 ntohs(ib_spec->eth.val.vlan_tag) >> 13);
2770 		}
2771 		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2772 			 ethertype, ntohs(ib_spec->eth.mask.ether_type));
2773 		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2774 			 ethertype, ntohs(ib_spec->eth.val.ether_type));
2775 		break;
2776 	case IB_FLOW_SPEC_IPV4:
2777 		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
2778 			return -EOPNOTSUPP;
2779 
2780 		if (match_ipv) {
2781 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2782 				 ip_version, 0xf);
2783 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2784 				 ip_version, MLX5_FS_IPV4_VERSION);
2785 		} else {
2786 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2787 				 ethertype, 0xffff);
2788 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2789 				 ethertype, ETH_P_IP);
2790 		}
2791 
2792 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2793 				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
2794 		       &ib_spec->ipv4.mask.src_ip,
2795 		       sizeof(ib_spec->ipv4.mask.src_ip));
2796 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2797 				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
2798 		       &ib_spec->ipv4.val.src_ip,
2799 		       sizeof(ib_spec->ipv4.val.src_ip));
2800 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2801 				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2802 		       &ib_spec->ipv4.mask.dst_ip,
2803 		       sizeof(ib_spec->ipv4.mask.dst_ip));
2804 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2805 				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2806 		       &ib_spec->ipv4.val.dst_ip,
2807 		       sizeof(ib_spec->ipv4.val.dst_ip));
2808 
2809 		set_tos(headers_c, headers_v,
2810 			ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos);
2811 
2812 		if (set_proto(headers_c, headers_v,
2813 			      ib_spec->ipv4.mask.proto,
2814 			      ib_spec->ipv4.val.proto))
2815 			return -EINVAL;
2816 		break;
2817 	case IB_FLOW_SPEC_IPV6:
2818 		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD))
2819 			return -EOPNOTSUPP;
2820 
2821 		if (match_ipv) {
2822 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2823 				 ip_version, 0xf);
2824 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2825 				 ip_version, MLX5_FS_IPV6_VERSION);
2826 		} else {
2827 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2828 				 ethertype, 0xffff);
2829 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2830 				 ethertype, ETH_P_IPV6);
2831 		}
2832 
2833 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2834 				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
2835 		       &ib_spec->ipv6.mask.src_ip,
2836 		       sizeof(ib_spec->ipv6.mask.src_ip));
2837 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2838 				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
2839 		       &ib_spec->ipv6.val.src_ip,
2840 		       sizeof(ib_spec->ipv6.val.src_ip));
2841 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2842 				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2843 		       &ib_spec->ipv6.mask.dst_ip,
2844 		       sizeof(ib_spec->ipv6.mask.dst_ip));
2845 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2846 				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2847 		       &ib_spec->ipv6.val.dst_ip,
2848 		       sizeof(ib_spec->ipv6.val.dst_ip));
2849 
2850 		set_tos(headers_c, headers_v,
2851 			ib_spec->ipv6.mask.traffic_class,
2852 			ib_spec->ipv6.val.traffic_class);
2853 
2854 		if (set_proto(headers_c, headers_v,
2855 			      ib_spec->ipv6.mask.next_hdr,
2856 			      ib_spec->ipv6.val.next_hdr))
2857 			return -EINVAL;
2858 
2859 		set_flow_label(misc_params_c, misc_params_v,
2860 			       ntohl(ib_spec->ipv6.mask.flow_label),
2861 			       ntohl(ib_spec->ipv6.val.flow_label),
2862 			       ib_spec->type & IB_FLOW_SPEC_INNER);
2863 		break;
2864 	case IB_FLOW_SPEC_ESP:
2865 		if (ib_spec->esp.mask.seq)
2866 			return -EOPNOTSUPP;
2867 
2868 		MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi,
2869 			 ntohl(ib_spec->esp.mask.spi));
2870 		MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi,
2871 			 ntohl(ib_spec->esp.val.spi));
2872 		break;
2873 	case IB_FLOW_SPEC_TCP:
2874 		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
2875 					 LAST_TCP_UDP_FIELD))
2876 			return -EOPNOTSUPP;
2877 
2878 		if (set_proto(headers_c, headers_v, 0xff, IPPROTO_TCP))
2879 			return -EINVAL;
2880 
2881 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_sport,
2882 			 ntohs(ib_spec->tcp_udp.mask.src_port));
2883 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
2884 			 ntohs(ib_spec->tcp_udp.val.src_port));
2885 
2886 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_dport,
2887 			 ntohs(ib_spec->tcp_udp.mask.dst_port));
2888 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
2889 			 ntohs(ib_spec->tcp_udp.val.dst_port));
2890 		break;
2891 	case IB_FLOW_SPEC_UDP:
2892 		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
2893 					 LAST_TCP_UDP_FIELD))
2894 			return -EOPNOTSUPP;
2895 
2896 		if (set_proto(headers_c, headers_v, 0xff, IPPROTO_UDP))
2897 			return -EINVAL;
2898 
2899 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport,
2900 			 ntohs(ib_spec->tcp_udp.mask.src_port));
2901 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
2902 			 ntohs(ib_spec->tcp_udp.val.src_port));
2903 
2904 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport,
2905 			 ntohs(ib_spec->tcp_udp.mask.dst_port));
2906 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
2907 			 ntohs(ib_spec->tcp_udp.val.dst_port));
2908 		break;
2909 	case IB_FLOW_SPEC_GRE:
2910 		if (ib_spec->gre.mask.c_ks_res0_ver)
2911 			return -EOPNOTSUPP;
2912 
2913 		if (set_proto(headers_c, headers_v, 0xff, IPPROTO_GRE))
2914 			return -EINVAL;
2915 
2916 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
2917 			 0xff);
2918 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2919 			 IPPROTO_GRE);
2920 
2921 		MLX5_SET(fte_match_set_misc, misc_params_c, gre_protocol,
2922 			 ntohs(ib_spec->gre.mask.protocol));
2923 		MLX5_SET(fte_match_set_misc, misc_params_v, gre_protocol,
2924 			 ntohs(ib_spec->gre.val.protocol));
2925 
2926 		memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c,
2927 				    gre_key.nvgre.hi),
2928 		       &ib_spec->gre.mask.key,
2929 		       sizeof(ib_spec->gre.mask.key));
2930 		memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_v,
2931 				    gre_key.nvgre.hi),
2932 		       &ib_spec->gre.val.key,
2933 		       sizeof(ib_spec->gre.val.key));
2934 		break;
2935 	case IB_FLOW_SPEC_MPLS:
2936 		switch (prev_type) {
2937 		case IB_FLOW_SPEC_UDP:
2938 			if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2939 						   ft_field_support.outer_first_mpls_over_udp),
2940 						   &ib_spec->mpls.mask.tag))
2941 				return -EOPNOTSUPP;
2942 
2943 			memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
2944 					    outer_first_mpls_over_udp),
2945 			       &ib_spec->mpls.val.tag,
2946 			       sizeof(ib_spec->mpls.val.tag));
2947 			memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
2948 					    outer_first_mpls_over_udp),
2949 			       &ib_spec->mpls.mask.tag,
2950 			       sizeof(ib_spec->mpls.mask.tag));
2951 			break;
2952 		case IB_FLOW_SPEC_GRE:
2953 			if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2954 						   ft_field_support.outer_first_mpls_over_gre),
2955 						   &ib_spec->mpls.mask.tag))
2956 				return -EOPNOTSUPP;
2957 
2958 			memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
2959 					    outer_first_mpls_over_gre),
2960 			       &ib_spec->mpls.val.tag,
2961 			       sizeof(ib_spec->mpls.val.tag));
2962 			memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
2963 					    outer_first_mpls_over_gre),
2964 			       &ib_spec->mpls.mask.tag,
2965 			       sizeof(ib_spec->mpls.mask.tag));
2966 			break;
2967 		default:
2968 			if (ib_spec->type & IB_FLOW_SPEC_INNER) {
2969 				if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2970 							   ft_field_support.inner_first_mpls),
2971 							   &ib_spec->mpls.mask.tag))
2972 					return -EOPNOTSUPP;
2973 
2974 				memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
2975 						    inner_first_mpls),
2976 				       &ib_spec->mpls.val.tag,
2977 				       sizeof(ib_spec->mpls.val.tag));
2978 				memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
2979 						    inner_first_mpls),
2980 				       &ib_spec->mpls.mask.tag,
2981 				       sizeof(ib_spec->mpls.mask.tag));
2982 			} else {
2983 				if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2984 							   ft_field_support.outer_first_mpls),
2985 							   &ib_spec->mpls.mask.tag))
2986 					return -EOPNOTSUPP;
2987 
2988 				memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
2989 						    outer_first_mpls),
2990 				       &ib_spec->mpls.val.tag,
2991 				       sizeof(ib_spec->mpls.val.tag));
2992 				memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
2993 						    outer_first_mpls),
2994 				       &ib_spec->mpls.mask.tag,
2995 				       sizeof(ib_spec->mpls.mask.tag));
2996 			}
2997 		}
2998 		break;
2999 	case IB_FLOW_SPEC_VXLAN_TUNNEL:
3000 		if (FIELDS_NOT_SUPPORTED(ib_spec->tunnel.mask,
3001 					 LAST_TUNNEL_FIELD))
3002 			return -EOPNOTSUPP;
3003 
3004 		MLX5_SET(fte_match_set_misc, misc_params_c, vxlan_vni,
3005 			 ntohl(ib_spec->tunnel.mask.tunnel_id));
3006 		MLX5_SET(fte_match_set_misc, misc_params_v, vxlan_vni,
3007 			 ntohl(ib_spec->tunnel.val.tunnel_id));
3008 		break;
3009 	case IB_FLOW_SPEC_ACTION_TAG:
3010 		if (FIELDS_NOT_SUPPORTED(ib_spec->flow_tag,
3011 					 LAST_FLOW_TAG_FIELD))
3012 			return -EOPNOTSUPP;
3013 		if (ib_spec->flow_tag.tag_id >= BIT(24))
3014 			return -EINVAL;
3015 
3016 		flow_context->flow_tag = ib_spec->flow_tag.tag_id;
3017 		flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
3018 		break;
3019 	case IB_FLOW_SPEC_ACTION_DROP:
3020 		if (FIELDS_NOT_SUPPORTED(ib_spec->drop,
3021 					 LAST_DROP_FIELD))
3022 			return -EOPNOTSUPP;
3023 		action->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
3024 		break;
3025 	case IB_FLOW_SPEC_ACTION_HANDLE:
3026 		ret = parse_flow_flow_action(to_mflow_act(ib_spec->action.act),
3027 			flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS, action);
3028 		if (ret)
3029 			return ret;
3030 		break;
3031 	case IB_FLOW_SPEC_ACTION_COUNT:
3032 		if (FIELDS_NOT_SUPPORTED(ib_spec->flow_count,
3033 					 LAST_COUNTERS_FIELD))
3034 			return -EOPNOTSUPP;
3035 
3036 		/* for now support only one counters spec per flow */
3037 		if (action->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
3038 			return -EINVAL;
3039 
3040 		action->counters = ib_spec->flow_count.counters;
3041 		action->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
3042 		break;
3043 	default:
3044 		return -EINVAL;
3045 	}
3046 
3047 	return 0;
3048 }
3049 
3050 /* A flow that could match both multicast and unicast packets must not be
3051  * placed in the multicast flow steering table, since such a rule could
3052  * steal multicast packets from other rules.
3053  */
3054 static bool flow_is_multicast_only(const struct ib_flow_attr *ib_attr)
3055 {
3056 	union ib_flow_spec *flow_spec;
3057 
3058 	if (ib_attr->type != IB_FLOW_ATTR_NORMAL ||
3059 	    ib_attr->num_of_specs < 1)
3060 		return false;
3061 
3062 	flow_spec = (union ib_flow_spec *)(ib_attr + 1);
3063 	if (flow_spec->type == IB_FLOW_SPEC_IPV4) {
3064 		struct ib_flow_spec_ipv4 *ipv4_spec;
3065 
3066 		ipv4_spec = (struct ib_flow_spec_ipv4 *)flow_spec;
3067 		if (ipv4_is_multicast(ipv4_spec->val.dst_ip))
3068 			return true;
3069 
3070 		return false;
3071 	}
3072 
3073 	if (flow_spec->type == IB_FLOW_SPEC_ETH) {
3074 		struct ib_flow_spec_eth *eth_spec;
3075 
3076 		eth_spec = (struct ib_flow_spec_eth *)flow_spec;
3077 		return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
3078 		       is_multicast_ether_addr(eth_spec->val.dst_mac);
3079 	}
3080 
3081 	return false;
3082 }
3083 
3084 enum valid_spec {
3085 	VALID_SPEC_INVALID,
3086 	VALID_SPEC_VALID,
3087 	VALID_SPEC_NA,
3088 };
3089 
3090 static enum valid_spec
3091 is_valid_esp_aes_gcm(struct mlx5_core_dev *mdev,
3092 		     const struct mlx5_flow_spec *spec,
3093 		     const struct mlx5_flow_act *flow_act,
3094 		     bool egress)
3095 {
3096 	const u32 *match_c = spec->match_criteria;
3097 	bool is_crypto =
3098 		(flow_act->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
3099 				     MLX5_FLOW_CONTEXT_ACTION_DECRYPT));
3100 	bool is_ipsec = mlx5_fs_is_ipsec_flow(match_c);
3101 	bool is_drop = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_DROP;
3102 
3103 	/*
3104 	 * Currently only crypto is supported in egress; once regular egress
3105 	 * rules are supported, always return VALID_SPEC_NA.
3106 	 */
3107 	if (!is_crypto)
3108 		return VALID_SPEC_NA;
3109 
3110 	return is_crypto && is_ipsec &&
3111 		(!egress || (!is_drop &&
3112 			     !(spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG))) ?
3113 		VALID_SPEC_VALID : VALID_SPEC_INVALID;
3114 }
3115 
3116 static bool is_valid_spec(struct mlx5_core_dev *mdev,
3117 			  const struct mlx5_flow_spec *spec,
3118 			  const struct mlx5_flow_act *flow_act,
3119 			  bool egress)
3120 {
3121 	/* We currently only support IPsec egress flows */
3122 	return is_valid_esp_aes_gcm(mdev, spec, flow_act, egress) != VALID_SPEC_INVALID;
3123 }
3124 
3125 static bool is_valid_ethertype(struct mlx5_core_dev *mdev,
3126 			       const struct ib_flow_attr *flow_attr,
3127 			       bool check_inner)
3128 {
3129 	union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1);
3130 	int match_ipv = check_inner ?
3131 			MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
3132 					ft_field_support.inner_ip_version) :
3133 			MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
3134 					ft_field_support.outer_ip_version);
3135 	int inner_bit = check_inner ? IB_FLOW_SPEC_INNER : 0;
3136 	bool ipv4_spec_valid, ipv6_spec_valid;
3137 	unsigned int ip_spec_type = 0;
3138 	bool has_ethertype = false;
3139 	unsigned int spec_index;
3140 	bool mask_valid = true;
3141 	u16 eth_type = 0;
3142 	bool type_valid;
3143 
3144 	/* Validate that ethertype is correct */
3145 	for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
3146 		if ((ib_spec->type == (IB_FLOW_SPEC_ETH | inner_bit)) &&
3147 		    ib_spec->eth.mask.ether_type) {
3148 			mask_valid = (ib_spec->eth.mask.ether_type ==
3149 				      htons(0xffff));
3150 			has_ethertype = true;
3151 			eth_type = ntohs(ib_spec->eth.val.ether_type);
3152 		} else if ((ib_spec->type == (IB_FLOW_SPEC_IPV4 | inner_bit)) ||
3153 			   (ib_spec->type == (IB_FLOW_SPEC_IPV6 | inner_bit))) {
3154 			ip_spec_type = ib_spec->type;
3155 		}
3156 		ib_spec = (void *)ib_spec + ib_spec->size;
3157 	}
3158 
3159 	type_valid = (!has_ethertype) || (!ip_spec_type);
3160 	if (!type_valid && mask_valid) {
3161 		ipv4_spec_valid = (eth_type == ETH_P_IP) &&
3162 			(ip_spec_type == (IB_FLOW_SPEC_IPV4 | inner_bit));
3163 		ipv6_spec_valid = (eth_type == ETH_P_IPV6) &&
3164 			(ip_spec_type == (IB_FLOW_SPEC_IPV6 | inner_bit));
3165 
3166 		type_valid = (ipv4_spec_valid) || (ipv6_spec_valid) ||
3167 			     (((eth_type == ETH_P_MPLS_UC) ||
3168 			       (eth_type == ETH_P_MPLS_MC)) && match_ipv);
3169 	}
3170 
3171 	return type_valid;
3172 }
3173 
3174 static bool is_valid_attr(struct mlx5_core_dev *mdev,
3175 			  const struct ib_flow_attr *flow_attr)
3176 {
3177 	return is_valid_ethertype(mdev, flow_attr, false) &&
3178 	       is_valid_ethertype(mdev, flow_attr, true);
3179 }
3180 
3181 static void put_flow_table(struct mlx5_ib_dev *dev,
3182 			   struct mlx5_ib_flow_prio *prio, bool ft_added)
3183 {
3184 	prio->refcount -= !!ft_added;
3185 	if (!prio->refcount) {
3186 		mlx5_destroy_flow_table(prio->flow_table);
3187 		prio->flow_table = NULL;
3188 	}
3189 }
3190 
3191 static void counters_clear_description(struct ib_counters *counters)
3192 {
3193 	struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
3194 
3195 	mutex_lock(&mcounters->mcntrs_mutex);
3196 	kfree(mcounters->counters_data);
3197 	mcounters->counters_data = NULL;
3198 	mcounters->cntrs_max_index = 0;
3199 	mutex_unlock(&mcounters->mcntrs_mutex);
3200 }
3201 
3202 static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
3203 {
3204 	struct mlx5_ib_flow_handler *handler = container_of(flow_id,
3205 							  struct mlx5_ib_flow_handler,
3206 							  ibflow);
3207 	struct mlx5_ib_flow_handler *iter, *tmp;
3208 	struct mlx5_ib_dev *dev = handler->dev;
3209 
3210 	mutex_lock(&dev->flow_db->lock);
3211 
3212 	list_for_each_entry_safe(iter, tmp, &handler->list, list) {
3213 		mlx5_del_flow_rules(iter->rule);
3214 		put_flow_table(dev, iter->prio, true);
3215 		list_del(&iter->list);
3216 		kfree(iter);
3217 	}
3218 
3219 	mlx5_del_flow_rules(handler->rule);
3220 	put_flow_table(dev, handler->prio, true);
3221 	if (handler->ibcounters &&
3222 	    atomic_read(&handler->ibcounters->usecnt) == 1)
3223 		counters_clear_description(handler->ibcounters);
3224 
3225 	mutex_unlock(&dev->flow_db->lock);
3226 	if (handler->flow_matcher)
3227 		atomic_dec(&handler->flow_matcher->usecnt);
3228 	kfree(handler);
3229 
3230 	return 0;
3231 }
3232 
3233 static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap)
3234 {
3235 	priority *= 2;
3236 	if (!dont_trap)
3237 		priority++;
3238 	return priority;
3239 }
3240 
3241 enum flow_table_type {
3242 	MLX5_IB_FT_RX,
3243 	MLX5_IB_FT_TX
3244 };
3245 
3246 #define MLX5_FS_MAX_TYPES	 6
3247 #define MLX5_FS_MAX_ENTRIES	 BIT(16)
3248 
3249 static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_flow_namespace *ns,
3250 					   struct mlx5_ib_flow_prio *prio,
3251 					   int priority,
3252 					   int num_entries, int num_groups,
3253 					   u32 flags)
3254 {
3255 	struct mlx5_flow_table *ft;
3256 
3257 	ft = mlx5_create_auto_grouped_flow_table(ns, priority,
3258 						 num_entries,
3259 						 num_groups,
3260 						 0, flags);
3261 	if (IS_ERR(ft))
3262 		return ERR_CAST(ft);
3263 
3264 	prio->flow_table = ft;
3265 	prio->refcount = 0;
3266 	return prio;
3267 }
3268 
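/*
 * Pick, and lazily create, the flow table for a flow attribute: the bypass
 * or egress namespace for NORMAL flows, the leftovers namespace for the
 * default rules, and the sniffer RX/TX tables for sniffer flows.  The table
 * size is capped by the device's log_max_ft_size.
 */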
3269 static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
3270 						struct ib_flow_attr *flow_attr,
3271 						enum flow_table_type ft_type)
3272 {
3273 	bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP;
3274 	struct mlx5_flow_namespace *ns = NULL;
3275 	struct mlx5_ib_flow_prio *prio;
3276 	struct mlx5_flow_table *ft;
3277 	int max_table_size;
3278 	int num_entries;
3279 	int num_groups;
3280 	bool esw_encap;
3281 	u32 flags = 0;
3282 	int priority;
3283 
3284 	max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3285 						       log_max_ft_size));
3286 	esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) !=
3287 		DEVLINK_ESWITCH_ENCAP_MODE_NONE;
3288 	if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
3289 		enum mlx5_flow_namespace_type fn_type;
3290 
3291 		if (flow_is_multicast_only(flow_attr) &&
3292 		    !dont_trap)
3293 			priority = MLX5_IB_FLOW_MCAST_PRIO;
3294 		else
3295 			priority = ib_prio_to_core_prio(flow_attr->priority,
3296 							dont_trap);
3297 		if (ft_type == MLX5_IB_FT_RX) {
3298 			fn_type = MLX5_FLOW_NAMESPACE_BYPASS;
3299 			prio = &dev->flow_db->prios[priority];
3300 			if (!dev->is_rep && !esw_encap &&
3301 			    MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap))
3302 				flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
3303 			if (!dev->is_rep && !esw_encap &&
3304 			    MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3305 					reformat_l3_tunnel_to_l2))
3306 				flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
3307 		} else {
3308 			max_table_size =
3309 				BIT(MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev,
3310 							      log_max_ft_size));
3311 			fn_type = MLX5_FLOW_NAMESPACE_EGRESS;
3312 			prio = &dev->flow_db->egress_prios[priority];
3313 			if (!dev->is_rep && !esw_encap &&
3314 			    MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat))
3315 				flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
3316 		}
3317 		ns = mlx5_get_flow_namespace(dev->mdev, fn_type);
3318 		num_entries = MLX5_FS_MAX_ENTRIES;
3319 		num_groups = MLX5_FS_MAX_TYPES;
3320 	} else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
3321 		   flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
3322 		ns = mlx5_get_flow_namespace(dev->mdev,
3323 					     MLX5_FLOW_NAMESPACE_LEFTOVERS);
3324 		build_leftovers_ft_param(&priority,
3325 					 &num_entries,
3326 					 &num_groups);
3327 		prio = &dev->flow_db->prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];
3328 	} else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
3329 		if (!MLX5_CAP_FLOWTABLE(dev->mdev,
3330 					allow_sniffer_and_nic_rx_shared_tir))
3331 			return ERR_PTR(-ENOTSUPP);
3332 
3333 		ns = mlx5_get_flow_namespace(dev->mdev, ft_type == MLX5_IB_FT_RX ?
3334 					     MLX5_FLOW_NAMESPACE_SNIFFER_RX :
3335 					     MLX5_FLOW_NAMESPACE_SNIFFER_TX);
3336 
3337 		prio = &dev->flow_db->sniffer[ft_type];
3338 		priority = 0;
3339 		num_entries = 1;
3340 		num_groups = 1;
3341 	}
3342 
3343 	if (!ns)
3344 		return ERR_PTR(-ENOTSUPP);
3345 
3346 	max_table_size = min_t(int, num_entries, max_table_size);
3347 
3348 	ft = prio->flow_table;
3349 	if (!ft)
3350 		return _get_prio(ns, prio, priority, max_table_size, num_groups,
3351 				 flags);
3352 
3353 	return prio;
3354 }
3355 
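/*
 * When the flow is created on top of an underlay QP (e.g. IPoIB offload),
 * add a bth_dst_qp match so the rule only applies to that QP's traffic,
 * provided the device can match on it.
 */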
3356 static void set_underlay_qp(struct mlx5_ib_dev *dev,
3357 			    struct mlx5_flow_spec *spec,
3358 			    u32 underlay_qpn)
3359 {
3360 	void *misc_params_c = MLX5_ADDR_OF(fte_match_param,
3361 					   spec->match_criteria,
3362 					   misc_parameters);
3363 	void *misc_params_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
3364 					   misc_parameters);
3365 
3366 	if (underlay_qpn &&
3367 	    MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3368 				      ft_field_support.bth_dst_qp)) {
3369 		MLX5_SET(fte_match_set_misc,
3370 			 misc_params_v, bth_dst_qp, underlay_qpn);
3371 		MLX5_SET(fte_match_set_misc,
3372 			 misc_params_c, bth_dst_qp, 0xffffff);
3373 	}
3374 }
3375 
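/*
 * read_counters callback for flow counters: query the HW flow counter for
 * the packet and byte counts.
 */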
3376 static int read_flow_counters(struct ib_device *ibdev,
3377 			      struct mlx5_read_counters_attr *read_attr)
3378 {
3379 	struct mlx5_fc *fc = read_attr->hw_cntrs_hndl;
3380 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
3381 
3382 	return mlx5_fc_query(dev->mdev, fc,
3383 			     &read_attr->out[IB_COUNTER_PACKETS],
3384 			     &read_attr->out[IB_COUNTER_BYTES]);
3385 }
3386 
3387 /* flow counters currently expose two counters: packets and bytes */
3388 #define FLOW_COUNTERS_NUM 2
3389 static int counters_set_description(struct ib_counters *counters,
3390 				    enum mlx5_ib_counters_type counters_type,
3391 				    struct mlx5_ib_flow_counters_desc *desc_data,
3392 				    u32 ncounters)
3393 {
3394 	struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
3395 	u32 cntrs_max_index = 0;
3396 	int i;
3397 
3398 	if (counters_type != MLX5_IB_COUNTERS_FLOW)
3399 		return -EINVAL;
3400 
3401 	/* init the fields for the object */
3402 	mcounters->type = counters_type;
3403 	mcounters->read_counters = read_flow_counters;
3404 	mcounters->counters_num = FLOW_COUNTERS_NUM;
3405 	mcounters->ncounters = ncounters;
3406 	/* each counter entry has both a description and an index */
3407 	for (i = 0; i < ncounters; i++) {
3408 		if (desc_data[i].description > IB_COUNTER_BYTES)
3409 			return -EINVAL;
3410 
3411 		if (cntrs_max_index <= desc_data[i].index)
3412 			cntrs_max_index = desc_data[i].index + 1;
3413 	}
3414 
3415 	mutex_lock(&mcounters->mcntrs_mutex);
3416 	mcounters->counters_data = desc_data;
3417 	mcounters->cntrs_max_index = cntrs_max_index;
3418 	mutex_unlock(&mcounters->mcntrs_mutex);
3419 
3420 	return 0;
3421 }
3422 
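/*
 * Bind user-supplied counter descriptions to an ib_counters object and
 * lazily create the underlying HW flow counter.  A flow may also reuse a
 * counters object that was already bound, in which case no new description
 * data is expected.
 */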
3423 #define MAX_COUNTERS_NUM (USHRT_MAX / (sizeof(u32) * 2))
3424 static int flow_counters_set_data(struct ib_counters *ibcounters,
3425 				  struct mlx5_ib_create_flow *ucmd)
3426 {
3427 	struct mlx5_ib_mcounters *mcounters = to_mcounters(ibcounters);
3428 	struct mlx5_ib_flow_counters_data *cntrs_data = NULL;
3429 	struct mlx5_ib_flow_counters_desc *desc_data = NULL;
3430 	bool hw_hndl = false;
3431 	int ret = 0;
3432 
3433 	if (ucmd && ucmd->ncounters_data != 0) {
3434 		cntrs_data = ucmd->data;
3435 		if (cntrs_data->ncounters > MAX_COUNTERS_NUM)
3436 			return -EINVAL;
3437 
3438 		desc_data = kcalloc(cntrs_data->ncounters,
3439 				    sizeof(*desc_data),
3440 				    GFP_KERNEL);
3441 		if (!desc_data)
3442 			return  -ENOMEM;
3443 
3444 		if (copy_from_user(desc_data,
3445 				   u64_to_user_ptr(cntrs_data->counters_data),
3446 				   sizeof(*desc_data) * cntrs_data->ncounters)) {
3447 			ret = -EFAULT;
3448 			goto free;
3449 		}
3450 	}
3451 
3452 	if (!mcounters->hw_cntrs_hndl) {
3453 		mcounters->hw_cntrs_hndl = mlx5_fc_create(
3454 			to_mdev(ibcounters->device)->mdev, false);
3455 		if (IS_ERR(mcounters->hw_cntrs_hndl)) {
3456 			ret = PTR_ERR(mcounters->hw_cntrs_hndl);
3457 			goto free;
3458 		}
3459 		hw_hndl = true;
3460 	}
3461 
3462 	if (desc_data) {
3463 		/* counters already bound to at least one flow */
3464 		if (mcounters->cntrs_max_index) {
3465 			ret = -EINVAL;
3466 			goto free_hndl;
3467 		}
3468 
3469 		ret = counters_set_description(ibcounters,
3470 					       MLX5_IB_COUNTERS_FLOW,
3471 					       desc_data,
3472 					       cntrs_data->ncounters);
3473 		if (ret)
3474 			goto free_hndl;
3475 
3476 	} else if (!mcounters->cntrs_max_index) {
3477 		/* counters are not bound yet; udata must have been passed */
3478 		ret = -EINVAL;
3479 		goto free_hndl;
3480 	}
3481 
3482 	return 0;
3483 
3484 free_hndl:
3485 	if (hw_hndl) {
3486 		mlx5_fc_destroy(to_mdev(ibcounters->device)->mdev,
3487 				mcounters->hw_cntrs_hndl);
3488 		mcounters->hw_cntrs_hndl = NULL;
3489 	}
3490 free:
3491 	kfree(desc_data);
3492 	return ret;
3493 }
3494 
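/*
 * For eswitch representors, restrict the rule to traffic from the rep's
 * vport: match on metadata register C0 when vport metadata matching is
 * enabled, otherwise match on the misc source_port field.
 */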
3495 static void mlx5_ib_set_rule_source_port(struct mlx5_ib_dev *dev,
3496 					 struct mlx5_flow_spec *spec,
3497 					 struct mlx5_eswitch_rep *rep)
3498 {
3499 	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
3500 	void *misc;
3501 
3502 	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
3503 		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
3504 				    misc_parameters_2);
3505 
3506 		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
3507 			 mlx5_eswitch_get_vport_metadata_for_match(esw,
3508 								   rep->vport));
3509 		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
3510 				    misc_parameters_2);
3511 
3512 		MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0);
3513 	} else {
3514 		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
3515 				    misc_parameters);
3516 
3517 		MLX5_SET(fte_match_set_misc, misc, source_port, rep->vport);
3518 
3519 		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
3520 				    misc_parameters);
3521 
3522 		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
3523 	}
3524 }
3525 
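/*
 * Core flow-rule creation: translate the verbs flow specs into an mlx5
 * flow spec, add optional underlay-QP and source-port matching, set up
 * counter/drop/forward actions, and install the rule in the flow table.
 */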
3526 static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
3527 						      struct mlx5_ib_flow_prio *ft_prio,
3528 						      const struct ib_flow_attr *flow_attr,
3529 						      struct mlx5_flow_destination *dst,
3530 						      u32 underlay_qpn,
3531 						      struct mlx5_ib_create_flow *ucmd)
3532 {
3533 	struct mlx5_flow_table	*ft = ft_prio->flow_table;
3534 	struct mlx5_ib_flow_handler *handler;
3535 	struct mlx5_flow_act flow_act = {};
3536 	struct mlx5_flow_spec *spec;
3537 	struct mlx5_flow_destination dest_arr[2] = {};
3538 	struct mlx5_flow_destination *rule_dst = dest_arr;
3539 	const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
3540 	unsigned int spec_index;
3541 	u32 prev_type = 0;
3542 	int err = 0;
3543 	int dest_num = 0;
3544 	bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;
3545 
3546 	if (!is_valid_attr(dev->mdev, flow_attr))
3547 		return ERR_PTR(-EINVAL);
3548 
3549 	if (dev->is_rep && is_egress)
3550 		return ERR_PTR(-EINVAL);
3551 
3552 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
3553 	handler = kzalloc(sizeof(*handler), GFP_KERNEL);
3554 	if (!handler || !spec) {
3555 		err = -ENOMEM;
3556 		goto free;
3557 	}
3558 
3559 	INIT_LIST_HEAD(&handler->list);
3560 	if (dst) {
3561 		memcpy(&dest_arr[0], dst, sizeof(*dst));
3562 		dest_num++;
3563 	}
3564 
3565 	for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
3566 		err = parse_flow_attr(dev->mdev, spec,
3567 				      ib_flow, flow_attr, &flow_act,
3568 				      prev_type);
3569 		if (err < 0)
3570 			goto free;
3571 
3572 		prev_type = ((union ib_flow_spec *)ib_flow)->type;
3573 		ib_flow += ((union ib_flow_spec *)ib_flow)->size;
3574 	}
3575 
3576 	if (!flow_is_multicast_only(flow_attr))
3577 		set_underlay_qp(dev, spec, underlay_qpn);
3578 
3579 	if (dev->is_rep) {
3580 		struct mlx5_eswitch_rep *rep;
3581 
3582 		rep = dev->port[flow_attr->port - 1].rep;
3583 		if (!rep) {
3584 			err = -EINVAL;
3585 			goto free;
3586 		}
3587 
3588 		mlx5_ib_set_rule_source_port(dev, spec, rep);
3589 	}
3590 
3591 	spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
3592 
3593 	if (is_egress &&
3594 	    !is_valid_spec(dev->mdev, spec, &flow_act, is_egress)) {
3595 		err = -EINVAL;
3596 		goto free;
3597 	}
3598 
3599 	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
3600 		struct mlx5_ib_mcounters *mcounters;
3601 
3602 		err = flow_counters_set_data(flow_act.counters, ucmd);
3603 		if (err)
3604 			goto free;
3605 
3606 		mcounters = to_mcounters(flow_act.counters);
3607 		handler->ibcounters = flow_act.counters;
3608 		dest_arr[dest_num].type =
3609 			MLX5_FLOW_DESTINATION_TYPE_COUNTER;
3610 		dest_arr[dest_num].counter_id =
3611 			mlx5_fc_id(mcounters->hw_cntrs_hndl);
3612 		dest_num++;
3613 	}
3614 
3615 	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
3616 		if (!(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT)) {
3617 			rule_dst = NULL;
3618 			dest_num = 0;
3619 		}
3620 	} else {
3621 		if (is_egress)
3622 			flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
3623 		else
3624 			flow_act.action |=
3625 				dest_num ?  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
3626 					MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
3627 	}
3628 
3629 	if ((spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG)  &&
3630 	    (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
3631 	     flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
3632 		mlx5_ib_warn(dev, "Flow tag %u and attribute type %x isn't allowed in leftovers\n",
3633 			     spec->flow_context.flow_tag, flow_attr->type);
3634 		err = -EINVAL;
3635 		goto free;
3636 	}
3637 	handler->rule = mlx5_add_flow_rules(ft, spec,
3638 					    &flow_act,
3639 					    rule_dst, dest_num);
3640 
3641 	if (IS_ERR(handler->rule)) {
3642 		err = PTR_ERR(handler->rule);
3643 		goto free;
3644 	}
3645 
3646 	ft_prio->refcount++;
3647 	handler->prio = ft_prio;
3648 	handler->dev = dev;
3649 
3650 	ft_prio->flow_table = ft;
3651 free:
3652 	if (err && handler) {
3653 		if (handler->ibcounters &&
3654 		    atomic_read(&handler->ibcounters->usecnt) == 1)
3655 			counters_clear_description(handler->ibcounters);
3656 		kfree(handler);
3657 	}
3658 	kvfree(spec);
3659 	return err ? ERR_PTR(err) : handler;
3660 }
3661 
3662 static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
3663 						     struct mlx5_ib_flow_prio *ft_prio,
3664 						     const struct ib_flow_attr *flow_attr,
3665 						     struct mlx5_flow_destination *dst)
3666 {
3667 	return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0, NULL);
3668 }
3669 
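/*
 * DONT_TRAP flows install two linked rules: one with no destination (so
 * matching packets continue to the next priority) and one forwarding to
 * the given destination.
 */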
3670 static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev,
3671 							  struct mlx5_ib_flow_prio *ft_prio,
3672 							  struct ib_flow_attr *flow_attr,
3673 							  struct mlx5_flow_destination *dst)
3674 {
3675 	struct mlx5_ib_flow_handler *handler_dst = NULL;
3676 	struct mlx5_ib_flow_handler *handler = NULL;
3677 
3678 	handler = create_flow_rule(dev, ft_prio, flow_attr, NULL);
3679 	if (!IS_ERR(handler)) {
3680 		handler_dst = create_flow_rule(dev, ft_prio,
3681 					       flow_attr, dst);
3682 		if (IS_ERR(handler_dst)) {
3683 			mlx5_del_flow_rules(handler->rule);
3684 			ft_prio->refcount--;
3685 			kfree(handler);
3686 			handler = handler_dst;
3687 		} else {
3688 			list_add(&handler_dst->list, &handler->list);
3689 		}
3690 	}
3691 
3692 	return handler;
3693 }
3694 enum {
3695 	LEFTOVERS_MC,
3696 	LEFTOVERS_UC,
3697 };
3698 
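/*
 * Leftovers rules catch traffic not matched by any other rule, using a
 * single-bit dst_mac mask to split multicast from unicast. The unicast
 * rule is only added for IB_FLOW_ATTR_ALL_DEFAULT.
 */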
3699 static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev,
3700 							  struct mlx5_ib_flow_prio *ft_prio,
3701 							  struct ib_flow_attr *flow_attr,
3702 							  struct mlx5_flow_destination *dst)
3703 {
3704 	struct mlx5_ib_flow_handler *handler_ucast = NULL;
3705 	struct mlx5_ib_flow_handler *handler = NULL;
3706 
3707 	static struct {
3708 		struct ib_flow_attr	flow_attr;
3709 		struct ib_flow_spec_eth eth_flow;
3710 	} leftovers_specs[] = {
3711 		[LEFTOVERS_MC] = {
3712 			.flow_attr = {
3713 				.num_of_specs = 1,
3714 				.size = sizeof(leftovers_specs[0])
3715 			},
3716 			.eth_flow = {
3717 				.type = IB_FLOW_SPEC_ETH,
3718 				.size = sizeof(struct ib_flow_spec_eth),
3719 				.mask = {.dst_mac = {0x1} },
3720 				.val =  {.dst_mac = {0x1} }
3721 			}
3722 		},
3723 		[LEFTOVERS_UC] = {
3724 			.flow_attr = {
3725 				.num_of_specs = 1,
3726 				.size = sizeof(leftovers_specs[0])
3727 			},
3728 			.eth_flow = {
3729 				.type = IB_FLOW_SPEC_ETH,
3730 				.size = sizeof(struct ib_flow_spec_eth),
3731 				.mask = {.dst_mac = {0x1} },
3732 				.val = {.dst_mac = {} }
3733 			}
3734 		}
3735 	};
3736 
3737 	handler = create_flow_rule(dev, ft_prio,
3738 				   &leftovers_specs[LEFTOVERS_MC].flow_attr,
3739 				   dst);
3740 	if (!IS_ERR(handler) &&
3741 	    flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) {
3742 		handler_ucast = create_flow_rule(dev, ft_prio,
3743 						 &leftovers_specs[LEFTOVERS_UC].flow_attr,
3744 						 dst);
3745 		if (IS_ERR(handler_ucast)) {
3746 			mlx5_del_flow_rules(handler->rule);
3747 			ft_prio->refcount--;
3748 			kfree(handler);
3749 			handler = handler_ucast;
3750 		} else {
3751 			list_add(&handler_ucast->list, &handler->list);
3752 		}
3753 	}
3754 
3755 	return handler;
3756 }
3757 
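/*
 * Sniffer flows install a match-all rule on both the RX and TX sniffer
 * tables; the TX handler is linked onto the RX handler's list so both are
 * torn down together.
 */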
3758 static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev,
3759 							struct mlx5_ib_flow_prio *ft_rx,
3760 							struct mlx5_ib_flow_prio *ft_tx,
3761 							struct mlx5_flow_destination *dst)
3762 {
3763 	struct mlx5_ib_flow_handler *handler_rx;
3764 	struct mlx5_ib_flow_handler *handler_tx;
3765 	int err;
3766 	static const struct ib_flow_attr flow_attr  = {
3767 		.num_of_specs = 0,
3768 		.size = sizeof(flow_attr)
3769 	};
3770 
3771 	handler_rx = create_flow_rule(dev, ft_rx, &flow_attr, dst);
3772 	if (IS_ERR(handler_rx)) {
3773 		err = PTR_ERR(handler_rx);
3774 		goto err;
3775 	}
3776 
3777 	handler_tx = create_flow_rule(dev, ft_tx, &flow_attr, dst);
3778 	if (IS_ERR(handler_tx)) {
3779 		err = PTR_ERR(handler_tx);
3780 		goto err_tx;
3781 	}
3782 
3783 	list_add(&handler_tx->list, &handler_rx->list);
3784 
3785 	return handler_rx;
3786 
3787 err_tx:
3788 	mlx5_del_flow_rules(handler_rx->rule);
3789 	ft_rx->refcount--;
3790 	kfree(handler_rx);
3791 err:
3792 	return ERR_PTR(err);
3793 }
3794 
3795 static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
3796 					   struct ib_flow_attr *flow_attr,
3797 					   int domain,
3798 					   struct ib_udata *udata)
3799 {
3800 	struct mlx5_ib_dev *dev = to_mdev(qp->device);
3801 	struct mlx5_ib_qp *mqp = to_mqp(qp);
3802 	struct mlx5_ib_flow_handler *handler = NULL;
3803 	struct mlx5_flow_destination *dst = NULL;
3804 	struct mlx5_ib_flow_prio *ft_prio_tx = NULL;
3805 	struct mlx5_ib_flow_prio *ft_prio;
3806 	bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;
3807 	struct mlx5_ib_create_flow *ucmd = NULL, ucmd_hdr;
3808 	size_t min_ucmd_sz, required_ucmd_sz;
3809 	int err;
3810 	int underlay_qpn;
3811 
3812 	if (udata && udata->inlen) {
3813 		min_ucmd_sz = offsetof(typeof(ucmd_hdr), reserved) +
3814 				sizeof(ucmd_hdr.reserved);
3815 		if (udata->inlen < min_ucmd_sz)
3816 			return ERR_PTR(-EOPNOTSUPP);
3817 
3818 		err = ib_copy_from_udata(&ucmd_hdr, udata, min_ucmd_sz);
3819 		if (err)
3820 			return ERR_PTR(err);
3821 
3822 		/* currently only a single counters data section is supported */
3823 		if (ucmd_hdr.ncounters_data > 1)
3824 			return ERR_PTR(-EINVAL);
3825 
3826 		required_ucmd_sz = min_ucmd_sz +
3827 			sizeof(struct mlx5_ib_flow_counters_data) *
3828 			ucmd_hdr.ncounters_data;
3829 		if (udata->inlen > required_ucmd_sz &&
3830 		    !ib_is_udata_cleared(udata, required_ucmd_sz,
3831 					 udata->inlen - required_ucmd_sz))
3832 			return ERR_PTR(-EOPNOTSUPP);
3833 
3834 		ucmd = kzalloc(required_ucmd_sz, GFP_KERNEL);
3835 		if (!ucmd)
3836 			return ERR_PTR(-ENOMEM);
3837 
3838 		err = ib_copy_from_udata(ucmd, udata, required_ucmd_sz);
3839 		if (err)
3840 			goto free_ucmd;
3841 	}
3842 
3843 	if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) {
3844 		err = -ENOMEM;
3845 		goto free_ucmd;
3846 	}
3847 
3848 	if (domain != IB_FLOW_DOMAIN_USER ||
3849 	    flow_attr->port > dev->num_ports ||
3850 	    (flow_attr->flags & ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP |
3851 				  IB_FLOW_ATTR_FLAGS_EGRESS))) {
3852 		err = -EINVAL;
3853 		goto free_ucmd;
3854 	}
3855 
3856 	if (is_egress &&
3857 	    (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
3858 	     flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
3859 		err = -EINVAL;
3860 		goto free_ucmd;
3861 	}
3862 
3863 	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
3864 	if (!dst) {
3865 		err = -ENOMEM;
3866 		goto free_ucmd;
3867 	}
3868 
3869 	mutex_lock(&dev->flow_db->lock);
3870 
3871 	ft_prio = get_flow_table(dev, flow_attr,
3872 				 is_egress ? MLX5_IB_FT_TX : MLX5_IB_FT_RX);
3873 	if (IS_ERR(ft_prio)) {
3874 		err = PTR_ERR(ft_prio);
3875 		goto unlock;
3876 	}
3877 	if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
3878 		ft_prio_tx = get_flow_table(dev, flow_attr, MLX5_IB_FT_TX);
3879 		if (IS_ERR(ft_prio_tx)) {
3880 			err = PTR_ERR(ft_prio_tx);
3881 			ft_prio_tx = NULL;
3882 			goto destroy_ft;
3883 		}
3884 	}
3885 
3886 	if (is_egress) {
3887 		dst->type = MLX5_FLOW_DESTINATION_TYPE_PORT;
3888 	} else {
3889 		dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
3890 		if (mqp->flags & MLX5_IB_QP_RSS)
3891 			dst->tir_num = mqp->rss_qp.tirn;
3892 		else
3893 			dst->tir_num = mqp->raw_packet_qp.rq.tirn;
3894 	}
3895 
3896 	if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
3897 		if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)  {
3898 			handler = create_dont_trap_rule(dev, ft_prio,
3899 							flow_attr, dst);
3900 		} else {
3901 			underlay_qpn = (mqp->flags & MLX5_IB_QP_UNDERLAY) ?
3902 					mqp->underlay_qpn : 0;
3903 			handler = _create_flow_rule(dev, ft_prio, flow_attr,
3904 						    dst, underlay_qpn, ucmd);
3905 		}
3906 	} else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
3907 		   flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
3908 		handler = create_leftovers_rule(dev, ft_prio, flow_attr,
3909 						dst);
3910 	} else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
3911 		handler = create_sniffer_rule(dev, ft_prio, ft_prio_tx, dst);
3912 	} else {
3913 		err = -EINVAL;
3914 		goto destroy_ft;
3915 	}
3916 
3917 	if (IS_ERR(handler)) {
3918 		err = PTR_ERR(handler);
3919 		handler = NULL;
3920 		goto destroy_ft;
3921 	}
3922 
3923 	mutex_unlock(&dev->flow_db->lock);
3924 	kfree(dst);
3925 	kfree(ucmd);
3926 
3927 	return &handler->ibflow;
3928 
3929 destroy_ft:
3930 	put_flow_table(dev, ft_prio, false);
3931 	if (ft_prio_tx)
3932 		put_flow_table(dev, ft_prio_tx, false);
3933 unlock:
3934 	mutex_unlock(&dev->flow_db->lock);
3935 	kfree(dst);
3936 free_ucmd:
3937 	kfree(ucmd);
3938 	return ERR_PTR(err);
3939 }
3940 
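/*
 * Pick the flow table priority for a raw (matcher-based) rule according to
 * the matcher's namespace type (bypass, egress or FDB), cap the table size
 * by the relevant log_max_ft_size capability, and create the flow table on
 * first use.
 */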
3941 static struct mlx5_ib_flow_prio *
3942 _get_flow_table(struct mlx5_ib_dev *dev,
3943 		struct mlx5_ib_flow_matcher *fs_matcher,
3944 		bool mcast)
3945 {
3946 	struct mlx5_flow_namespace *ns = NULL;
3947 	struct mlx5_ib_flow_prio *prio = NULL;
3948 	int max_table_size = 0;
3949 	bool esw_encap;
3950 	u32 flags = 0;
3951 	int priority;
3952 
3953 	if (mcast)
3954 		priority = MLX5_IB_FLOW_MCAST_PRIO;
3955 	else
3956 		priority = ib_prio_to_core_prio(fs_matcher->priority, false);
3957 
3958 	esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) !=
3959 		DEVLINK_ESWITCH_ENCAP_MODE_NONE;
3960 	if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) {
3961 		max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3962 					log_max_ft_size));
3963 		if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap) && !esw_encap)
3964 			flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
3965 		if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3966 					      reformat_l3_tunnel_to_l2) &&
3967 		    !esw_encap)
3968 			flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
3969 	} else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS) {
3970 		max_table_size = BIT(
3971 			MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, log_max_ft_size));
3972 		if (MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat) && !esw_encap)
3973 			flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
3974 	} else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB) {
3975 		max_table_size = BIT(
3976 			MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, log_max_ft_size));
3977 		if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, decap) && esw_encap)
3978 			flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
3979 		if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, reformat_l3_tunnel_to_l2) &&
3980 		    esw_encap)
3981 			flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
3982 		priority = FDB_BYPASS_PATH;
3983 	}
3984 
3985 	max_table_size = min_t(int, max_table_size, MLX5_FS_MAX_ENTRIES);
3986 
3987 	ns = mlx5_get_flow_namespace(dev->mdev, fs_matcher->ns_type);
3988 	if (!ns)
3989 		return ERR_PTR(-ENOTSUPP);
3990 
3991 	if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS)
3992 		prio = &dev->flow_db->prios[priority];
3993 	else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS)
3994 		prio = &dev->flow_db->egress_prios[priority];
3995 	else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB)
3996 		prio = &dev->flow_db->fdb;
3997 
3998 	if (!prio)
3999 		return ERR_PTR(-EINVAL);
4000 
4001 	if (prio->flow_table)
4002 		return prio;
4003 
4004 	return _get_prio(ns, prio, priority, max_table_size,
4005 			 MLX5_FS_MAX_TYPES, flags);
4006 }
4007 
4008 static struct mlx5_ib_flow_handler *
4009 _create_raw_flow_rule(struct mlx5_ib_dev *dev,
4010 		      struct mlx5_ib_flow_prio *ft_prio,
4011 		      struct mlx5_flow_destination *dst,
4012 		      struct mlx5_ib_flow_matcher  *fs_matcher,
4013 		      struct mlx5_flow_context *flow_context,
4014 		      struct mlx5_flow_act *flow_act,
4015 		      void *cmd_in, int inlen,
4016 		      int dst_num)
4017 {
4018 	struct mlx5_ib_flow_handler *handler;
4019 	struct mlx5_flow_spec *spec;
4020 	struct mlx5_flow_table *ft = ft_prio->flow_table;
4021 	int err = 0;
4022 
4023 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
4024 	handler = kzalloc(sizeof(*handler), GFP_KERNEL);
4025 	if (!handler || !spec) {
4026 		err = -ENOMEM;
4027 		goto free;
4028 	}
4029 
4030 	INIT_LIST_HEAD(&handler->list);
4031 
4032 	memcpy(spec->match_value, cmd_in, inlen);
4033 	memcpy(spec->match_criteria, fs_matcher->matcher_mask.match_params,
4034 	       fs_matcher->mask_len);
4035 	spec->match_criteria_enable = fs_matcher->match_criteria_enable;
4036 	spec->flow_context = *flow_context;
4037 
4038 	handler->rule = mlx5_add_flow_rules(ft, spec,
4039 					    flow_act, dst, dst_num);
4040 
4041 	if (IS_ERR(handler->rule)) {
4042 		err = PTR_ERR(handler->rule);
4043 		goto free;
4044 	}
4045 
4046 	ft_prio->refcount++;
4047 	handler->prio = ft_prio;
4048 	handler->dev = dev;
4049 	ft_prio->flow_table = ft;
4050 
4051 free:
4052 	if (err)
4053 		kfree(handler);
4054 	kvfree(spec);
4055 	return err ? ERR_PTR(err) : handler;
4056 }
4057 
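/*
 * A raw flow is treated as multicast when the outer-header destination MAC
 * or destination IPv4 address is multicast in both the match value and the
 * match mask.
 */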
4058 static bool raw_fs_is_multicast(struct mlx5_ib_flow_matcher *fs_matcher,
4059 				void *match_v)
4060 {
4061 	void *match_c;
4062 	void *match_v_set_lyr_2_4, *match_c_set_lyr_2_4;
4063 	void *dmac, *dmac_mask;
4064 	void *ipv4, *ipv4_mask;
4065 
4066 	if (!(fs_matcher->match_criteria_enable &
4067 	      (1 << MATCH_CRITERIA_ENABLE_OUTER_BIT)))
4068 		return false;
4069 
4070 	match_c = fs_matcher->matcher_mask.match_params;
4071 	match_v_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_v,
4072 					   outer_headers);
4073 	match_c_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_c,
4074 					   outer_headers);
4075 
4076 	dmac = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4,
4077 			    dmac_47_16);
4078 	dmac_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4,
4079 				 dmac_47_16);
4080 
4081 	if (is_multicast_ether_addr(dmac) &&
4082 	    is_multicast_ether_addr(dmac_mask))
4083 		return true;
4084 
4085 	ipv4 = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4,
4086 			    dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
4087 
4088 	ipv4_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4,
4089 				 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
4090 
4091 	if (ipv4_is_multicast(*(__be32 *)(ipv4)) &&
4092 	    ipv4_is_multicast(*(__be32 *)(ipv4_mask)))
4093 		return true;
4094 
4095 	return false;
4096 }
4097 
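/*
 * Add a raw flow rule described by a matcher and a match value: resolve
 * the flow table, build the destination list (TIR, flow table or port,
 * plus an optional counter) and install the rule.
 */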
4098 struct mlx5_ib_flow_handler *
4099 mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev,
4100 			struct mlx5_ib_flow_matcher *fs_matcher,
4101 			struct mlx5_flow_context *flow_context,
4102 			struct mlx5_flow_act *flow_act,
4103 			u32 counter_id,
4104 			void *cmd_in, int inlen, int dest_id,
4105 			int dest_type)
4106 {
4107 	struct mlx5_flow_destination *dst;
4108 	struct mlx5_ib_flow_prio *ft_prio;
4109 	struct mlx5_ib_flow_handler *handler;
4110 	int dst_num = 0;
4111 	bool mcast;
4112 	int err;
4113 
4114 	if (fs_matcher->flow_type != MLX5_IB_FLOW_TYPE_NORMAL)
4115 		return ERR_PTR(-EOPNOTSUPP);
4116 
4117 	if (fs_matcher->priority > MLX5_IB_FLOW_LAST_PRIO)
4118 		return ERR_PTR(-ENOMEM);
4119 
4120 	dst = kcalloc(2, sizeof(*dst), GFP_KERNEL);
4121 	if (!dst)
4122 		return ERR_PTR(-ENOMEM);
4123 
4124 	mcast = raw_fs_is_multicast(fs_matcher, cmd_in);
4125 	mutex_lock(&dev->flow_db->lock);
4126 
4127 	ft_prio = _get_flow_table(dev, fs_matcher, mcast);
4128 	if (IS_ERR(ft_prio)) {
4129 		err = PTR_ERR(ft_prio);
4130 		goto unlock;
4131 	}
4132 
4133 	if (dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR) {
4134 		dst[dst_num].type = dest_type;
4135 		dst[dst_num].tir_num = dest_id;
4136 		flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
4137 	} else if (dest_type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) {
4138 		dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM;
4139 		dst[dst_num].ft_num = dest_id;
4140 		flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
4141 	} else {
4142 		dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_PORT;
4143 		flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
4144 	}
4145 
4146 	dst_num++;
4147 
4148 	if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
4149 		dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
4150 		dst[dst_num].counter_id = counter_id;
4151 		dst_num++;
4152 	}
4153 
4154 	handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher,
4155 					flow_context, flow_act,
4156 					cmd_in, inlen, dst_num);
4157 
4158 	if (IS_ERR(handler)) {
4159 		err = PTR_ERR(handler);
4160 		goto destroy_ft;
4161 	}
4162 
4163 	mutex_unlock(&dev->flow_db->lock);
4164 	atomic_inc(&fs_matcher->usecnt);
4165 	handler->flow_matcher = fs_matcher;
4166 
4167 	kfree(dst);
4168 
4169 	return handler;
4170 
4171 destroy_ft:
4172 	put_flow_table(dev, ft_prio, false);
4173 unlock:
4174 	mutex_unlock(&dev->flow_db->lock);
4175 	kfree(dst);
4176 
4177 	return ERR_PTR(err);
4178 }
4179 
4180 static u32 mlx5_ib_flow_action_flags_to_accel_xfrm_flags(u32 mlx5_flags)
4181 {
4182 	u32 flags = 0;
4183 
4184 	if (mlx5_flags & MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA)
4185 		flags |= MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA;
4186 
4187 	return flags;
4188 }
4189 
4190 #define MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED	MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA
4191 static struct ib_flow_action *
4192 mlx5_ib_create_flow_action_esp(struct ib_device *device,
4193 			       const struct ib_flow_action_attrs_esp *attr,
4194 			       struct uverbs_attr_bundle *attrs)
4195 {
4196 	struct mlx5_ib_dev *mdev = to_mdev(device);
4197 	struct ib_uverbs_flow_action_esp_keymat_aes_gcm *aes_gcm;
4198 	struct mlx5_accel_esp_xfrm_attrs accel_attrs = {};
4199 	struct mlx5_ib_flow_action *action;
4200 	u64 action_flags;
4201 	u64 flags;
4202 	int err = 0;
4203 
4204 	err = uverbs_get_flags64(
4205 		&action_flags, attrs, MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
4206 		((MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED << 1) - 1));
4207 	if (err)
4208 		return ERR_PTR(err);
4209 
4210 	flags = mlx5_ib_flow_action_flags_to_accel_xfrm_flags(action_flags);
4211 
4212 	/* We currently support only a subset of the standard features: a
4213 	 * keymat of type AES_GCM, with icv_len == 16, iv_algo == SEQ and ESN
4214 	 * (with overlap). Full offload mode isn't supported.
4215 	 */
4216 	if (!attr->keymat || attr->replay || attr->encap ||
4217 	    attr->spi || attr->seq || attr->tfc_pad ||
4218 	    attr->hard_limit_pkts ||
4219 	    (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
4220 			     IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT)))
4221 		return ERR_PTR(-EOPNOTSUPP);
4222 
4223 	if (attr->keymat->protocol !=
4224 	    IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM)
4225 		return ERR_PTR(-EOPNOTSUPP);
4226 
4227 	aes_gcm = &attr->keymat->keymat.aes_gcm;
4228 
4229 	if (aes_gcm->icv_len != 16 ||
4230 	    aes_gcm->iv_algo != IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ)
4231 		return ERR_PTR(-EOPNOTSUPP);
4232 
4233 	action = kmalloc(sizeof(*action), GFP_KERNEL);
4234 	if (!action)
4235 		return ERR_PTR(-ENOMEM);
4236 
4237 	action->esp_aes_gcm.ib_flags = attr->flags;
4238 	memcpy(&accel_attrs.keymat.aes_gcm.aes_key, &aes_gcm->aes_key,
4239 	       sizeof(accel_attrs.keymat.aes_gcm.aes_key));
4240 	accel_attrs.keymat.aes_gcm.key_len = aes_gcm->key_len * 8;
4241 	memcpy(&accel_attrs.keymat.aes_gcm.salt, &aes_gcm->salt,
4242 	       sizeof(accel_attrs.keymat.aes_gcm.salt));
4243 	memcpy(&accel_attrs.keymat.aes_gcm.seq_iv, &aes_gcm->iv,
4244 	       sizeof(accel_attrs.keymat.aes_gcm.seq_iv));
4245 	accel_attrs.keymat.aes_gcm.icv_len = aes_gcm->icv_len * 8;
4246 	accel_attrs.keymat.aes_gcm.iv_algo = MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ;
4247 	accel_attrs.keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM;
4248 
4249 	accel_attrs.esn = attr->esn;
4250 	if (attr->flags & IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED)
4251 		accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED;
4252 	if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)
4253 		accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
4254 
4255 	if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT)
4256 		accel_attrs.action |= MLX5_ACCEL_ESP_ACTION_ENCRYPT;
4257 
4258 	action->esp_aes_gcm.ctx =
4259 		mlx5_accel_esp_create_xfrm(mdev->mdev, &accel_attrs, flags);
4260 	if (IS_ERR(action->esp_aes_gcm.ctx)) {
4261 		err = PTR_ERR(action->esp_aes_gcm.ctx);
4262 		goto err_parse;
4263 	}
4264 
4265 	action->esp_aes_gcm.ib_flags = attr->flags;
4266 
4267 	return &action->ib_action;
4268 
4269 err_parse:
4270 	kfree(action);
4271 	return ERR_PTR(err);
4272 }
4273 
4274 static int
4275 mlx5_ib_modify_flow_action_esp(struct ib_flow_action *action,
4276 			       const struct ib_flow_action_attrs_esp *attr,
4277 			       struct uverbs_attr_bundle *attrs)
4278 {
4279 	struct mlx5_ib_flow_action *maction = to_mflow_act(action);
4280 	struct mlx5_accel_esp_xfrm_attrs accel_attrs;
4281 	int err = 0;
4282 
4283 	if (attr->keymat || attr->replay || attr->encap ||
4284 	    attr->spi || attr->seq || attr->tfc_pad ||
4285 	    attr->hard_limit_pkts ||
4286 	    (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
4287 			     IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS |
4288 			     IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)))
4289 		return -EOPNOTSUPP;
4290 
4291 	/* Only the ESN value or the MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP can
4292 	 * be modified.
4293 	 */
4294 	if (!(maction->esp_aes_gcm.ib_flags &
4295 	      IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED) &&
4296 	    attr->flags & (IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
4297 			   IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW))
4298 		return -EINVAL;
4299 
4300 	memcpy(&accel_attrs, &maction->esp_aes_gcm.ctx->attrs,
4301 	       sizeof(accel_attrs));
4302 
4303 	accel_attrs.esn = attr->esn;
4304 	if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)
4305 		accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
4306 	else
4307 		accel_attrs.flags &= ~MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
4308 
4309 	err = mlx5_accel_esp_modify_xfrm(maction->esp_aes_gcm.ctx,
4310 					 &accel_attrs);
4311 	if (err)
4312 		return err;
4313 
4314 	maction->esp_aes_gcm.ib_flags &=
4315 		~IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW;
4316 	maction->esp_aes_gcm.ib_flags |=
4317 		attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW;
4318 
4319 	return 0;
4320 }
4321 
4322 static int mlx5_ib_destroy_flow_action(struct ib_flow_action *action)
4323 {
4324 	struct mlx5_ib_flow_action *maction = to_mflow_act(action);
4325 
4326 	switch (action->type) {
4327 	case IB_FLOW_ACTION_ESP:
4328 		/*
4329 		 * We only support aes_gcm for now, so we implicitly know this
4330 		 * is the underlying crypto.
4331 		 */
4332 		mlx5_accel_esp_destroy_xfrm(maction->esp_aes_gcm.ctx);
4333 		break;
4334 	case IB_FLOW_ACTION_UNSPECIFIED:
4335 		mlx5_ib_destroy_flow_action_raw(maction);
4336 		break;
4337 	default:
4338 		WARN_ON(true);
4339 		break;
4340 	}
4341 
4342 	kfree(maction);
4343 	return 0;
4344 }
4345 
4346 static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
4347 {
4348 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
4349 	struct mlx5_ib_qp *mqp = to_mqp(ibqp);
4350 	int err;
4351 	u16 uid;
4352 
4353 	uid = ibqp->pd ?
4354 		to_mpd(ibqp->pd)->uid : 0;
4355 
4356 	if (mqp->flags & MLX5_IB_QP_UNDERLAY) {
4357 		mlx5_ib_dbg(dev, "Attaching a multi cast group to underlay QP is not supported\n");
4358 		return -EOPNOTSUPP;
4359 	}
4360 
4361 	err = mlx5_cmd_attach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
4362 	if (err)
4363 		mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
4364 			     ibqp->qp_num, gid->raw);
4365 
4366 	return err;
4367 }
4368 
4369 static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
4370 {
4371 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
4372 	int err;
4373 	u16 uid;
4374 
4375 	uid = ibqp->pd ?
4376 		to_mpd(ibqp->pd)->uid : 0;
4377 	err = mlx5_cmd_detach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
4378 	if (err)
4379 		mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
4380 			     ibqp->qp_num, gid->raw);
4381 
4382 	return err;
4383 }
4384 
4385 static int init_node_data(struct mlx5_ib_dev *dev)
4386 {
4387 	int err;
4388 
4389 	err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc);
4390 	if (err)
4391 		return err;
4392 
4393 	dev->mdev->rev_id = dev->mdev->pdev->revision;
4394 
4395 	return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
4396 }
4397 
4398 static ssize_t fw_pages_show(struct device *device,
4399 			     struct device_attribute *attr, char *buf)
4400 {
4401 	struct mlx5_ib_dev *dev =
4402 		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
4403 
4404 	return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages);
4405 }
4406 static DEVICE_ATTR_RO(fw_pages);
4407 
4408 static ssize_t reg_pages_show(struct device *device,
4409 			      struct device_attribute *attr, char *buf)
4410 {
4411 	struct mlx5_ib_dev *dev =
4412 		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
4413 
4414 	return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
4415 }
4416 static DEVICE_ATTR_RO(reg_pages);
4417 
4418 static ssize_t hca_type_show(struct device *device,
4419 			     struct device_attribute *attr, char *buf)
4420 {
4421 	struct mlx5_ib_dev *dev =
4422 		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
4423 
4424 	return sprintf(buf, "MT%d\n", dev->mdev->pdev->device);
4425 }
4426 static DEVICE_ATTR_RO(hca_type);
4427 
4428 static ssize_t hw_rev_show(struct device *device,
4429 			   struct device_attribute *attr, char *buf)
4430 {
4431 	struct mlx5_ib_dev *dev =
4432 		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
4433 
4434 	return sprintf(buf, "%x\n", dev->mdev->rev_id);
4435 }
4436 static DEVICE_ATTR_RO(hw_rev);
4437 
4438 static ssize_t board_id_show(struct device *device,
4439 			     struct device_attribute *attr, char *buf)
4440 {
4441 	struct mlx5_ib_dev *dev =
4442 		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
4443 
4444 	return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
4445 		       dev->mdev->board_id);
4446 }
4447 static DEVICE_ATTR_RO(board_id);
4448 
4449 static struct attribute *mlx5_class_attributes[] = {
4450 	&dev_attr_hw_rev.attr,
4451 	&dev_attr_hca_type.attr,
4452 	&dev_attr_board_id.attr,
4453 	&dev_attr_fw_pages.attr,
4454 	&dev_attr_reg_pages.attr,
4455 	NULL,
4456 };
4457 
4458 static const struct attribute_group mlx5_attr_group = {
4459 	.attrs = mlx5_class_attributes,
4460 };
4461 
4462 static void pkey_change_handler(struct work_struct *work)
4463 {
4464 	struct mlx5_ib_port_resources *ports =
4465 		container_of(work, struct mlx5_ib_port_resources,
4466 			     pkey_change_work);
4467 
4468 	mutex_lock(&ports->devr->mutex);
4469 	mlx5_ib_gsi_pkey_change(ports->gsi);
4470 	mutex_unlock(&ports->devr->mutex);
4471 }
4472 
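/*
 * On a fatal device error, walk all QPs on this ibdev and invoke the
 * completion handler of every CQ that still has outstanding work, so that
 * pending completions are flushed to the consumers.
 */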
4473 static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
4474 {
4475 	struct mlx5_ib_qp *mqp;
4476 	struct mlx5_ib_cq *send_mcq, *recv_mcq;
4477 	struct mlx5_core_cq *mcq;
4478 	struct list_head cq_armed_list;
4479 	unsigned long flags_qp;
4480 	unsigned long flags_cq;
4481 	unsigned long flags;
4482 
4483 	INIT_LIST_HEAD(&cq_armed_list);
4484 
4485 	/* Go over the QP list residing on that ibdev, synced with QP create/destroy. */
4486 	spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
4487 	list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
4488 		spin_lock_irqsave(&mqp->sq.lock, flags_qp);
4489 		if (mqp->sq.tail != mqp->sq.head) {
4490 			send_mcq = to_mcq(mqp->ibqp.send_cq);
4491 			spin_lock_irqsave(&send_mcq->lock, flags_cq);
4492 			if (send_mcq->mcq.comp &&
4493 			    mqp->ibqp.send_cq->comp_handler) {
4494 				if (!send_mcq->mcq.reset_notify_added) {
4495 					send_mcq->mcq.reset_notify_added = 1;
4496 					list_add_tail(&send_mcq->mcq.reset_notify,
4497 						      &cq_armed_list);
4498 				}
4499 			}
4500 			spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
4501 		}
4502 		spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
4503 		spin_lock_irqsave(&mqp->rq.lock, flags_qp);
4504 		/* no handling is needed for SRQ */
4505 		if (!mqp->ibqp.srq) {
4506 			if (mqp->rq.tail != mqp->rq.head) {
4507 				recv_mcq = to_mcq(mqp->ibqp.recv_cq);
4508 				spin_lock_irqsave(&recv_mcq->lock, flags_cq);
4509 				if (recv_mcq->mcq.comp &&
4510 				    mqp->ibqp.recv_cq->comp_handler) {
4511 					if (!recv_mcq->mcq.reset_notify_added) {
4512 						recv_mcq->mcq.reset_notify_added = 1;
4513 						list_add_tail(&recv_mcq->mcq.reset_notify,
4514 							      &cq_armed_list);
4515 					}
4516 				}
4517 				spin_unlock_irqrestore(&recv_mcq->lock,
4518 						       flags_cq);
4519 			}
4520 		}
4521 		spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
4522 	}
4523 	/* At this point all in-flight post-sends have been flushed by the
4524 	 * lock/unlock of the locks above. Now arm all involved CQs.
4525 	 */
4526 	list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
4527 		mcq->comp(mcq, NULL);
4528 	}
4529 	spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
4530 }
4531 
4532 static void delay_drop_handler(struct work_struct *work)
4533 {
4534 	int err;
4535 	struct mlx5_ib_delay_drop *delay_drop =
4536 		container_of(work, struct mlx5_ib_delay_drop,
4537 			     delay_drop_work);
4538 
4539 	atomic_inc(&delay_drop->events_cnt);
4540 
4541 	mutex_lock(&delay_drop->lock);
4542 	err = mlx5_core_set_delay_drop(delay_drop->dev->mdev,
4543 				       delay_drop->timeout);
4544 	if (err) {
4545 		mlx5_ib_warn(delay_drop->dev, "Failed to set delay drop, timeout=%u\n",
4546 			     delay_drop->timeout);
4547 		delay_drop->activate = false;
4548 	}
4549 	mutex_unlock(&delay_drop->lock);
4550 }
4551 
4552 static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
4553 				 struct ib_event *ibev)
4554 {
4555 	u8 port = (eqe->data.port.port >> 4) & 0xf;
4556 
4557 	switch (eqe->sub_type) {
4558 	case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
4559 		if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
4560 					    IB_LINK_LAYER_ETHERNET)
4561 			schedule_work(&ibdev->delay_drop.delay_drop_work);
4562 		break;
4563 	default: /* do nothing */
4564 		return;
4565 	}
4566 }
4567 
4568 static int handle_port_change(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
4569 			      struct ib_event *ibev)
4570 {
4571 	u8 port = (eqe->data.port.port >> 4) & 0xf;
4572 
4573 	ibev->element.port_num = port;
4574 
4575 	switch (eqe->sub_type) {
4576 	case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
4577 	case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
4578 	case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
4579 		/* In RoCE, port up/down events are handled in
4580 		 * mlx5_netdev_event().
4581 		 */
4582 		if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
4583 					    IB_LINK_LAYER_ETHERNET)
4584 			return -EINVAL;
4585 
4586 		ibev->event = (eqe->sub_type == MLX5_PORT_CHANGE_SUBTYPE_ACTIVE) ?
4587 				IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
4588 		break;
4589 
4590 	case MLX5_PORT_CHANGE_SUBTYPE_LID:
4591 		ibev->event = IB_EVENT_LID_CHANGE;
4592 		break;
4593 
4594 	case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
4595 		ibev->event = IB_EVENT_PKEY_CHANGE;
4596 		schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
4597 		break;
4598 
4599 	case MLX5_PORT_CHANGE_SUBTYPE_GUID:
4600 		ibev->event = IB_EVENT_GID_CHANGE;
4601 		break;
4602 
4603 	case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
4604 		ibev->event = IB_EVENT_CLIENT_REREGISTER;
4605 		break;
4606 	default:
4607 		return -EINVAL;
4608 	}
4609 
4610 	return 0;
4611 }
4612 
4613 static void mlx5_ib_handle_event(struct work_struct *_work)
4614 {
4615 	struct mlx5_ib_event_work *work =
4616 		container_of(_work, struct mlx5_ib_event_work, work);
4617 	struct mlx5_ib_dev *ibdev;
4618 	struct ib_event ibev;
4619 	bool fatal = false;
4620 
4621 	if (work->is_slave) {
4622 		ibdev = mlx5_ib_get_ibdev_from_mpi(work->mpi);
4623 		if (!ibdev)
4624 			goto out;
4625 	} else {
4626 		ibdev = work->dev;
4627 	}
4628 
4629 	switch (work->event) {
4630 	case MLX5_DEV_EVENT_SYS_ERROR:
4631 		ibev.event = IB_EVENT_DEVICE_FATAL;
4632 		mlx5_ib_handle_internal_error(ibdev);
4633 		ibev.element.port_num  = (u8)(unsigned long)work->param;
4634 		fatal = true;
4635 		break;
4636 	case MLX5_EVENT_TYPE_PORT_CHANGE:
4637 		if (handle_port_change(ibdev, work->param, &ibev))
4638 			goto out;
4639 		break;
4640 	case MLX5_EVENT_TYPE_GENERAL_EVENT:
4641 		handle_general_event(ibdev, work->param, &ibev);
4642 		/* fall through */
4643 	default:
4644 		goto out;
4645 	}
4646 
4647 	ibev.device = &ibdev->ib_dev;
4648 
4649 	if (!rdma_is_port_valid(&ibdev->ib_dev, ibev.element.port_num)) {
4650 		mlx5_ib_warn(ibdev, "warning: event on port %d\n",  ibev.element.port_num);
4651 		goto out;
4652 	}
4653 
4654 	if (ibdev->ib_active)
4655 		ib_dispatch_event(&ibev);
4656 
4657 	if (fatal)
4658 		ibdev->ib_active = false;
4659 out:
4660 	kfree(work);
4661 }
4662 
4663 static int mlx5_ib_event(struct notifier_block *nb,
4664 			 unsigned long event, void *param)
4665 {
4666 	struct mlx5_ib_event_work *work;
4667 
4668 	work = kmalloc(sizeof(*work), GFP_ATOMIC);
4669 	if (!work)
4670 		return NOTIFY_DONE;
4671 
4672 	INIT_WORK(&work->work, mlx5_ib_handle_event);
4673 	work->dev = container_of(nb, struct mlx5_ib_dev, mdev_events);
4674 	work->is_slave = false;
4675 	work->param = param;
4676 	work->event = event;
4677 
4678 	queue_work(mlx5_ib_event_wq, &work->work);
4679 
4680 	return NOTIFY_OK;
4681 }
4682 
4683 static int mlx5_ib_event_slave_port(struct notifier_block *nb,
4684 				    unsigned long event, void *param)
4685 {
4686 	struct mlx5_ib_event_work *work;
4687 
4688 	work = kmalloc(sizeof(*work), GFP_ATOMIC);
4689 	if (!work)
4690 		return NOTIFY_DONE;
4691 
4692 	INIT_WORK(&work->work, mlx5_ib_handle_event);
4693 	work->mpi = container_of(nb, struct mlx5_ib_multiport_info, mdev_events);
4694 	work->is_slave = true;
4695 	work->param = param;
4696 	work->event = event;
4697 	queue_work(mlx5_ib_event_wq, &work->work);
4698 
4699 	return NOTIFY_OK;
4700 }
4701 
4702 static int set_has_smi_cap(struct mlx5_ib_dev *dev)
4703 {
4704 	struct mlx5_hca_vport_context vport_ctx;
4705 	int err;
4706 	int port;
4707 
4708 	for (port = 1; port <= ARRAY_SIZE(dev->mdev->port_caps); port++) {
4709 		dev->mdev->port_caps[port - 1].has_smi = false;
4710 		if (MLX5_CAP_GEN(dev->mdev, port_type) ==
4711 		    MLX5_CAP_PORT_TYPE_IB) {
4712 			if (MLX5_CAP_GEN(dev->mdev, ib_virt)) {
4713 				err = mlx5_query_hca_vport_context(dev->mdev, 0,
4714 								   port, 0,
4715 								   &vport_ctx);
4716 				if (err) {
4717 					mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
4718 						    port, err);
4719 					return err;
4720 				}
4721 				dev->mdev->port_caps[port - 1].has_smi =
4722 					vport_ctx.has_smi;
4723 			} else {
4724 				dev->mdev->port_caps[port - 1].has_smi = true;
4725 			}
4726 		}
4727 	}
4728 	return 0;
4729 }
4730 
4731 static void get_ext_port_caps(struct mlx5_ib_dev *dev)
4732 {
4733 	int port;
4734 
4735 	for (port = 1; port <= dev->num_ports; port++)
4736 		mlx5_query_ext_port_caps(dev, port);
4737 }
4738 
4739 static int __get_port_caps(struct mlx5_ib_dev *dev, u8 port)
4740 {
4741 	struct ib_device_attr *dprops = NULL;
4742 	struct ib_port_attr *pprops = NULL;
4743 	int err = -ENOMEM;
4744 	struct ib_udata uhw = {.inlen = 0, .outlen = 0};
4745 
4746 	pprops = kzalloc(sizeof(*pprops), GFP_KERNEL);
4747 	if (!pprops)
4748 		goto out;
4749 
4750 	dprops = kmalloc(sizeof(*dprops), GFP_KERNEL);
4751 	if (!dprops)
4752 		goto out;
4753 
4754 	err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw);
4755 	if (err) {
4756 		mlx5_ib_warn(dev, "query_device failed %d\n", err);
4757 		goto out;
4758 	}
4759 
4760 	err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
4761 	if (err) {
4762 		mlx5_ib_warn(dev, "query_port %d failed %d\n",
4763 			     port, err);
4764 		goto out;
4765 	}
4766 
4767 	dev->mdev->port_caps[port - 1].pkey_table_len =
4768 					dprops->max_pkeys;
4769 	dev->mdev->port_caps[port - 1].gid_table_len =
4770 					pprops->gid_tbl_len;
4771 	mlx5_ib_dbg(dev, "port %d: pkey_table_len %d, gid_table_len %d\n",
4772 		    port, dprops->max_pkeys, pprops->gid_tbl_len);
4773 
4774 out:
4775 	kfree(pprops);
4776 	kfree(dprops);
4777 
4778 	return err;
4779 }
4780 
4781 static int get_port_caps(struct mlx5_ib_dev *dev, u8 port)
4782 {
4783 	/* For representors use port 1, as this is the only native
4784 	 * port.
4785 	 */
4786 	if (dev->is_rep)
4787 		return __get_port_caps(dev, 1);
4788 	return __get_port_caps(dev, port);
4789 }
4790 
4791 static void destroy_umrc_res(struct mlx5_ib_dev *dev)
4792 {
4793 	int err;
4794 
4795 	err = mlx5_mr_cache_cleanup(dev);
4796 	if (err)
4797 		mlx5_ib_warn(dev, "mr cache cleanup failed\n");
4798 
4799 	if (dev->umrc.qp)
4800 		mlx5_ib_destroy_qp(dev->umrc.qp, NULL);
4801 	if (dev->umrc.cq)
4802 		ib_free_cq(dev->umrc.cq);
4803 	if (dev->umrc.pd)
4804 		ib_dealloc_pd(dev->umrc.pd);
4805 }
4806 
4807 enum {
4808 	MAX_UMR_WR = 128,
4809 };
4810 
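/*
 * Set up the resources used for UMR (user memory registration) work
 * requests: a dedicated PD, CQ and REG_UMR QP moved through
 * INIT -> RTR -> RTS, plus the MR cache.
 */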
4811 static int create_umr_res(struct mlx5_ib_dev *dev)
4812 {
4813 	struct ib_qp_init_attr *init_attr = NULL;
4814 	struct ib_qp_attr *attr = NULL;
4815 	struct ib_pd *pd;
4816 	struct ib_cq *cq;
4817 	struct ib_qp *qp;
4818 	int ret;
4819 
4820 	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
4821 	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
4822 	if (!attr || !init_attr) {
4823 		ret = -ENOMEM;
4824 		goto error_0;
4825 	}
4826 
4827 	pd = ib_alloc_pd(&dev->ib_dev, 0);
4828 	if (IS_ERR(pd)) {
4829 		mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
4830 		ret = PTR_ERR(pd);
4831 		goto error_0;
4832 	}
4833 
4834 	cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
4835 	if (IS_ERR(cq)) {
4836 		mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
4837 		ret = PTR_ERR(cq);
4838 		goto error_2;
4839 	}
4840 
4841 	init_attr->send_cq = cq;
4842 	init_attr->recv_cq = cq;
4843 	init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
4844 	init_attr->cap.max_send_wr = MAX_UMR_WR;
4845 	init_attr->cap.max_send_sge = 1;
4846 	init_attr->qp_type = MLX5_IB_QPT_REG_UMR;
4847 	init_attr->port_num = 1;
4848 	qp = mlx5_ib_create_qp(pd, init_attr, NULL);
4849 	if (IS_ERR(qp)) {
4850 		mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
4851 		ret = PTR_ERR(qp);
4852 		goto error_3;
4853 	}
4854 	qp->device     = &dev->ib_dev;
4855 	qp->real_qp    = qp;
4856 	qp->uobject    = NULL;
4857 	qp->qp_type    = MLX5_IB_QPT_REG_UMR;
4858 	qp->send_cq    = init_attr->send_cq;
4859 	qp->recv_cq    = init_attr->recv_cq;
4860 
4861 	attr->qp_state = IB_QPS_INIT;
4862 	attr->port_num = 1;
4863 	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
4864 				IB_QP_PORT, NULL);
4865 	if (ret) {
4866 		mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
4867 		goto error_4;
4868 	}
4869 
4870 	memset(attr, 0, sizeof(*attr));
4871 	attr->qp_state = IB_QPS_RTR;
4872 	attr->path_mtu = IB_MTU_256;
4873 
4874 	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
4875 	if (ret) {
4876 		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n");
4877 		goto error_4;
4878 	}
4879 
4880 	memset(attr, 0, sizeof(*attr));
4881 	attr->qp_state = IB_QPS_RTS;
4882 	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
4883 	if (ret) {
4884 		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n");
4885 		goto error_4;
4886 	}
4887 
4888 	dev->umrc.qp = qp;
4889 	dev->umrc.cq = cq;
4890 	dev->umrc.pd = pd;
4891 
4892 	sema_init(&dev->umrc.sem, MAX_UMR_WR);
4893 	ret = mlx5_mr_cache_init(dev);
4894 	if (ret) {
4895 		mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
4896 		goto error_4;
4897 	}
4898 
4899 	kfree(attr);
4900 	kfree(init_attr);
4901 
4902 	return 0;
4903 
4904 error_4:
4905 	mlx5_ib_destroy_qp(qp, NULL);
4906 	dev->umrc.qp = NULL;
4907 
4908 error_3:
4909 	ib_free_cq(cq);
4910 	dev->umrc.cq = NULL;
4911 
4912 error_2:
4913 	ib_dealloc_pd(pd);
4914 	dev->umrc.pd = NULL;
4915 
4916 error_0:
4917 	kfree(attr);
4918 	kfree(init_attr);
4919 	return ret;
4920 }
4921 
4922 static u8 mlx5_get_umr_fence(u8 umr_fence_cap)
4923 {
4924 	switch (umr_fence_cap) {
4925 	case MLX5_CAP_UMR_FENCE_NONE:
4926 		return MLX5_FENCE_MODE_NONE;
4927 	case MLX5_CAP_UMR_FENCE_SMALL:
4928 		return MLX5_FENCE_MODE_INITIATOR_SMALL;
4929 	default:
4930 		return MLX5_FENCE_MODE_STRONG_ORDERING;
4931 	}
4932 }
4933 
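/*
 * Allocate the device-global verbs resources (PD p0, CQ c0, XRC domains
 * x0/x1 and SRQs s0/s1) and initialize the per-port P_Key change work.
 */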
4934 static int create_dev_resources(struct mlx5_ib_resources *devr)
4935 {
4936 	struct ib_srq_init_attr attr;
4937 	struct mlx5_ib_dev *dev;
4938 	struct ib_device *ibdev;
4939 	struct ib_cq_init_attr cq_attr = {.cqe = 1};
4940 	int port;
4941 	int ret = 0;
4942 
4943 	dev = container_of(devr, struct mlx5_ib_dev, devr);
4944 	ibdev = &dev->ib_dev;
4945 
4946 	mutex_init(&devr->mutex);
4947 
4948 	devr->p0 = rdma_zalloc_drv_obj(ibdev, ib_pd);
4949 	if (!devr->p0)
4950 		return -ENOMEM;
4951 
4952 	devr->p0->device  = ibdev;
4953 	devr->p0->uobject = NULL;
4954 	atomic_set(&devr->p0->usecnt, 0);
4955 
4956 	ret = mlx5_ib_alloc_pd(devr->p0, NULL);
4957 	if (ret)
4958 		goto error0;
4959 
4960 	devr->c0 = rdma_zalloc_drv_obj(ibdev, ib_cq);
4961 	if (!devr->c0) {
4962 		ret = -ENOMEM;
4963 		goto error1;
4964 	}
4965 
4966 	devr->c0->device = &dev->ib_dev;
4967 	atomic_set(&devr->c0->usecnt, 0);
4968 
4969 	ret = mlx5_ib_create_cq(devr->c0, &cq_attr, NULL);
4970 	if (ret)
4971 		goto err_create_cq;
4972 
4973 	devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL);
4974 	if (IS_ERR(devr->x0)) {
4975 		ret = PTR_ERR(devr->x0);
4976 		goto error2;
4977 	}
4978 	devr->x0->device = &dev->ib_dev;
4979 	devr->x0->inode = NULL;
4980 	atomic_set(&devr->x0->usecnt, 0);
4981 	mutex_init(&devr->x0->tgt_qp_mutex);
4982 	INIT_LIST_HEAD(&devr->x0->tgt_qp_list);
4983 
4984 	devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL);
4985 	if (IS_ERR(devr->x1)) {
4986 		ret = PTR_ERR(devr->x1);
4987 		goto error3;
4988 	}
4989 	devr->x1->device = &dev->ib_dev;
4990 	devr->x1->inode = NULL;
4991 	atomic_set(&devr->x1->usecnt, 0);
4992 	mutex_init(&devr->x1->tgt_qp_mutex);
4993 	INIT_LIST_HEAD(&devr->x1->tgt_qp_list);
4994 
4995 	memset(&attr, 0, sizeof(attr));
4996 	attr.attr.max_sge = 1;
4997 	attr.attr.max_wr = 1;
4998 	attr.srq_type = IB_SRQT_XRC;
4999 	attr.ext.cq = devr->c0;
5000 	attr.ext.xrc.xrcd = devr->x0;
5001 
5002 	devr->s0 = rdma_zalloc_drv_obj(ibdev, ib_srq);
5003 	if (!devr->s0) {
5004 		ret = -ENOMEM;
5005 		goto error4;
5006 	}
5007 
5008 	devr->s0->device	= &dev->ib_dev;
5009 	devr->s0->pd		= devr->p0;
5010 	devr->s0->srq_type      = IB_SRQT_XRC;
5011 	devr->s0->ext.xrc.xrcd	= devr->x0;
5012 	devr->s0->ext.cq	= devr->c0;
5013 	ret = mlx5_ib_create_srq(devr->s0, &attr, NULL);
5014 	if (ret)
5015 		goto err_create;
5016 
5017 	atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt);
5018 	atomic_inc(&devr->s0->ext.cq->usecnt);
5019 	atomic_inc(&devr->p0->usecnt);
5020 	atomic_set(&devr->s0->usecnt, 0);
5021 
5022 	memset(&attr, 0, sizeof(attr));
5023 	attr.attr.max_sge = 1;
5024 	attr.attr.max_wr = 1;
5025 	attr.srq_type = IB_SRQT_BASIC;
5026 	devr->s1 = rdma_zalloc_drv_obj(ibdev, ib_srq);
5027 	if (!devr->s1) {
5028 		ret = -ENOMEM;
5029 		goto error5;
5030 	}
5031 
5032 	devr->s1->device	= &dev->ib_dev;
5033 	devr->s1->pd		= devr->p0;
5034 	devr->s1->srq_type      = IB_SRQT_BASIC;
5035 	devr->s1->ext.cq	= devr->c0;
5036 
5037 	ret = mlx5_ib_create_srq(devr->s1, &attr, NULL);
5038 	if (ret)
5039 		goto error6;
5040 
5041 	atomic_inc(&devr->p0->usecnt);
5042 	atomic_set(&devr->s1->usecnt, 0);
5043 
5044 	for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) {
5045 		INIT_WORK(&devr->ports[port].pkey_change_work,
5046 			  pkey_change_handler);
5047 		devr->ports[port].devr = devr;
5048 	}
5049 
5050 	return 0;
5051 
5052 error6:
5053 	kfree(devr->s1);
5054 error5:
5055 	mlx5_ib_destroy_srq(devr->s0, NULL);
5056 err_create:
5057 	kfree(devr->s0);
5058 error4:
5059 	mlx5_ib_dealloc_xrcd(devr->x1, NULL);
5060 error3:
5061 	mlx5_ib_dealloc_xrcd(devr->x0, NULL);
5062 error2:
5063 	mlx5_ib_destroy_cq(devr->c0, NULL);
5064 err_create_cq:
5065 	kfree(devr->c0);
5066 error1:
5067 	mlx5_ib_dealloc_pd(devr->p0, NULL);
5068 error0:
5069 	kfree(devr->p0);
5070 	return ret;
5071 }
5072 
5073 static void destroy_dev_resources(struct mlx5_ib_resources *devr)
5074 {
5075 	int port;
5076 
5077 	mlx5_ib_destroy_srq(devr->s1, NULL);
5078 	kfree(devr->s1);
5079 	mlx5_ib_destroy_srq(devr->s0, NULL);
5080 	kfree(devr->s0);
5081 	mlx5_ib_dealloc_xrcd(devr->x0, NULL);
5082 	mlx5_ib_dealloc_xrcd(devr->x1, NULL);
5083 	mlx5_ib_destroy_cq(devr->c0, NULL);
5084 	kfree(devr->c0);
5085 	mlx5_ib_dealloc_pd(devr->p0, NULL);
5086 	kfree(devr->p0);
5087 
5088 	/* Make sure no P_Key change work items are still executing */
5089 	for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
5090 		cancel_work_sync(&devr->ports[port].pkey_change_work);
5091 }
5092 
5093 static u32 get_core_cap_flags(struct ib_device *ibdev,
5094 			      struct mlx5_hca_vport_context *rep)
5095 {
5096 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
5097 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1);
5098 	u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type);
5099 	u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version);
5100 	bool raw_support = !mlx5_core_mp_enabled(dev->mdev);
5101 	u32 ret = 0;
5102 
5103 	if (rep->grh_required)
5104 		ret |= RDMA_CORE_CAP_IB_GRH_REQUIRED;
5105 
5106 	if (ll == IB_LINK_LAYER_INFINIBAND)
5107 		return ret | RDMA_CORE_PORT_IBA_IB;
5108 
5109 	if (raw_support)
5110 		ret |= RDMA_CORE_PORT_RAW_PACKET;
5111 
5112 	if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP))
5113 		return ret;
5114 
5115 	if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV6_CAP))
5116 		return ret;
5117 
5118 	if (roce_version_cap & MLX5_ROCE_VERSION_1_CAP)
5119 		ret |= RDMA_CORE_PORT_IBA_ROCE;
5120 
5121 	if (roce_version_cap & MLX5_ROCE_VERSION_2_CAP)
5122 		ret |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
5123 
5124 	return ret;
5125 }
5126 
5127 static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
5128 			       struct ib_port_immutable *immutable)
5129 {
5130 	struct ib_port_attr attr;
5131 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
5132 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num);
5133 	struct mlx5_hca_vport_context rep = {0};
5134 	int err;
5135 
5136 	err = ib_query_port(ibdev, port_num, &attr);
5137 	if (err)
5138 		return err;
5139 
5140 	if (ll == IB_LINK_LAYER_INFINIBAND) {
5141 		err = mlx5_query_hca_vport_context(dev->mdev, 0, port_num, 0,
5142 						   &rep);
5143 		if (err)
5144 			return err;
5145 	}
5146 
5147 	immutable->pkey_tbl_len = attr.pkey_tbl_len;
5148 	immutable->gid_tbl_len = attr.gid_tbl_len;
5149 	immutable->core_cap_flags = get_core_cap_flags(ibdev, &rep);
5150 	if ((ll == IB_LINK_LAYER_INFINIBAND) || MLX5_CAP_GEN(dev->mdev, roce))
5151 		immutable->max_mad_size = IB_MGMT_MAD_SIZE;
5152 
5153 	return 0;
5154 }
5155 
5156 static int mlx5_port_rep_immutable(struct ib_device *ibdev, u8 port_num,
5157 				   struct ib_port_immutable *immutable)
5158 {
5159 	struct ib_port_attr attr;
5160 	int err;
5161 
5162 	immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
5163 
5164 	err = ib_query_port(ibdev, port_num, &attr);
5165 	if (err)
5166 		return err;
5167 
5168 	immutable->pkey_tbl_len = attr.pkey_tbl_len;
5169 	immutable->gid_tbl_len = attr.gid_tbl_len;
5170 	immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
5171 
5172 	return 0;
5173 }
5174 
5175 static void get_dev_fw_str(struct ib_device *ibdev, char *str)
5176 {
5177 	struct mlx5_ib_dev *dev =
5178 		container_of(ibdev, struct mlx5_ib_dev, ib_dev);
5179 	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%04d",
5180 		 fw_rev_maj(dev->mdev), fw_rev_min(dev->mdev),
5181 		 fw_rev_sub(dev->mdev));
5182 }
5183 
5184 static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
5185 {
5186 	struct mlx5_core_dev *mdev = dev->mdev;
5187 	struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev,
5188 								 MLX5_FLOW_NAMESPACE_LAG);
5189 	struct mlx5_flow_table *ft;
5190 	int err;
5191 
5192 	if (!ns || !mlx5_lag_is_roce(mdev))
5193 		return 0;
5194 
5195 	err = mlx5_cmd_create_vport_lag(mdev);
5196 	if (err)
5197 		return err;
5198 
5199 	ft = mlx5_create_lag_demux_flow_table(ns, 0, 0);
5200 	if (IS_ERR(ft)) {
5201 		err = PTR_ERR(ft);
5202 		goto err_destroy_vport_lag;
5203 	}
5204 
5205 	dev->flow_db->lag_demux_ft = ft;
5206 	dev->lag_active = true;
5207 	return 0;
5208 
5209 err_destroy_vport_lag:
5210 	mlx5_cmd_destroy_vport_lag(mdev);
5211 	return err;
5212 }
5213 
5214 static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
5215 {
5216 	struct mlx5_core_dev *mdev = dev->mdev;
5217 
5218 	if (dev->lag_active) {
5219 		dev->lag_active = false;
5220 
5221 		mlx5_destroy_flow_table(dev->flow_db->lag_demux_ft);
5222 		dev->flow_db->lag_demux_ft = NULL;
5223 
5224 		mlx5_cmd_destroy_vport_lag(mdev);
5225 	}
5226 }
5227 
5228 static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
5229 {
5230 	int err;
5231 
5232 	dev->port[port_num].roce.nb.notifier_call = mlx5_netdev_event;
5233 	err = register_netdevice_notifier(&dev->port[port_num].roce.nb);
5234 	if (err) {
5235 		dev->port[port_num].roce.nb.notifier_call = NULL;
5236 		return err;
5237 	}
5238 
5239 	return 0;
5240 }
5241 
5242 static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
5243 {
5244 	if (dev->port[port_num].roce.nb.notifier_call) {
5245 		unregister_netdevice_notifier(&dev->port[port_num].roce.nb);
5246 		dev->port[port_num].roce.nb.notifier_call = NULL;
5247 	}
5248 }
5249 
5250 static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
5251 {
5252 	int err;
5253 
5254 	if (MLX5_CAP_GEN(dev->mdev, roce)) {
5255 		err = mlx5_nic_vport_enable_roce(dev->mdev);
5256 		if (err)
5257 			return err;
5258 	}
5259 
5260 	err = mlx5_eth_lag_init(dev);
5261 	if (err)
5262 		goto err_disable_roce;
5263 
5264 	return 0;
5265 
5266 err_disable_roce:
5267 	if (MLX5_CAP_GEN(dev->mdev, roce))
5268 		mlx5_nic_vport_disable_roce(dev->mdev);
5269 
5270 	return err;
5271 }
5272 
5273 static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
5274 {
5275 	mlx5_eth_lag_cleanup(dev);
5276 	if (MLX5_CAP_GEN(dev->mdev, roce))
5277 		mlx5_nic_vport_disable_roce(dev->mdev);
5278 }
5279 
5280 struct mlx5_ib_counter {
5281 	const char *name;
5282 	size_t offset;
5283 };
5284 
5285 #define INIT_Q_COUNTER(_name)		\
5286 	{ .name = #_name, .offset = MLX5_BYTE_OFF(query_q_counter_out, _name)}
5287 
5288 static const struct mlx5_ib_counter basic_q_cnts[] = {
5289 	INIT_Q_COUNTER(rx_write_requests),
5290 	INIT_Q_COUNTER(rx_read_requests),
5291 	INIT_Q_COUNTER(rx_atomic_requests),
5292 	INIT_Q_COUNTER(out_of_buffer),
5293 };
5294 
5295 static const struct mlx5_ib_counter out_of_seq_q_cnts[] = {
5296 	INIT_Q_COUNTER(out_of_sequence),
5297 };
5298 
5299 static const struct mlx5_ib_counter retrans_q_cnts[] = {
5300 	INIT_Q_COUNTER(duplicate_request),
5301 	INIT_Q_COUNTER(rnr_nak_retry_err),
5302 	INIT_Q_COUNTER(packet_seq_err),
5303 	INIT_Q_COUNTER(implied_nak_seq_err),
5304 	INIT_Q_COUNTER(local_ack_timeout_err),
5305 };
5306 
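/*
 * Congestion counters are 64-bit fields in query_cong_statistics_out; the
 * recorded offset points at the _high dword, i.e. the start of the
 * big-endian 64-bit value.  The values themselves are fetched through
 * mlx5_lag_query_cong_counters(), which is expected to aggregate them
 * across the ports of an active LAG.
 */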
5307 #define INIT_CONG_COUNTER(_name)		\
5308 	{ .name = #_name, .offset =	\
5309 		MLX5_BYTE_OFF(query_cong_statistics_out, _name ## _high)}
5310 
5311 static const struct mlx5_ib_counter cong_cnts[] = {
5312 	INIT_CONG_COUNTER(rp_cnp_ignored),
5313 	INIT_CONG_COUNTER(rp_cnp_handled),
5314 	INIT_CONG_COUNTER(np_ecn_marked_roce_packets),
5315 	INIT_CONG_COUNTER(np_cnp_sent),
5316 };
5317 
5318 static const struct mlx5_ib_counter extended_err_cnts[] = {
5319 	INIT_Q_COUNTER(resp_local_length_error),
5320 	INIT_Q_COUNTER(resp_cqe_error),
5321 	INIT_Q_COUNTER(req_cqe_error),
5322 	INIT_Q_COUNTER(req_remote_invalid_request),
5323 	INIT_Q_COUNTER(req_remote_access_errors),
5324 	INIT_Q_COUNTER(resp_remote_access_errors),
5325 	INIT_Q_COUNTER(resp_cqe_flush_error),
5326 	INIT_Q_COUNTER(req_cqe_flush_error),
5327 };
5328 
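/*
 * Extended physical-port counters come from the PPCNT register.  As with
 * the congestion counters, the offset points at the _high dword and
 * mlx5_ib_query_ext_ppcnt_counters() reads the full big-endian 64-bit
 * value from that position.
 */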
5329 #define INIT_EXT_PPCNT_COUNTER(_name)		\
5330 	{ .name = #_name, .offset =	\
5331 	MLX5_BYTE_OFF(ppcnt_reg, \
5332 		      counter_set.eth_extended_cntrs_grp_data_layout._name##_high)}
5333 
5334 static const struct mlx5_ib_counter ext_ppcnt_cnts[] = {
5335 	INIT_EXT_PPCNT_COUNTER(rx_icrc_encapsulated),
5336 };
5337 
5338 static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
5339 {
5340 	int i;
5341 
5342 	for (i = 0; i < dev->num_ports; i++) {
5343 		if (dev->port[i].cnts.set_id_valid)
5344 			mlx5_core_dealloc_q_counter(dev->mdev,
5345 						    dev->port[i].cnts.set_id);
5346 		kfree(dev->port[i].cnts.names);
5347 		kfree(dev->port[i].cnts.offsets);
5348 	}
5349 }
5350 
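/*
 * Size the per-port name/offset arrays.  The basic Q counters are always
 * present; the optional groups (out-of-sequence, retransmission, enhanced
 * error, congestion and extended PPCNT counters) are counted only when the
 * corresponding capability bit is set, so the layout may differ between
 * devices but is fixed for the lifetime of a given device.
 */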
5351 static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
5352 				    struct mlx5_ib_counters *cnts)
5353 {
5354 	u32 num_counters;
5355 
5356 	num_counters = ARRAY_SIZE(basic_q_cnts);
5357 
5358 	if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt))
5359 		num_counters += ARRAY_SIZE(out_of_seq_q_cnts);
5360 
5361 	if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters))
5362 		num_counters += ARRAY_SIZE(retrans_q_cnts);
5363 
5364 	if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters))
5365 		num_counters += ARRAY_SIZE(extended_err_cnts);
5366 
5367 	cnts->num_q_counters = num_counters;
5368 
5369 	if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
5370 		cnts->num_cong_counters = ARRAY_SIZE(cong_cnts);
5371 		num_counters += ARRAY_SIZE(cong_cnts);
5372 	}
5373 	if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
5374 		cnts->num_ext_ppcnt_counters = ARRAY_SIZE(ext_ppcnt_cnts);
5375 		num_counters += ARRAY_SIZE(ext_ppcnt_cnts);
5376 	}
5377 	cnts->names = kcalloc(num_counters, sizeof(*cnts->names), GFP_KERNEL);
5378 	if (!cnts->names)
5379 		return -ENOMEM;
5380 
5381 	cnts->offsets = kcalloc(num_counters,
5382 				sizeof(*cnts->offsets), GFP_KERNEL);
5383 	if (!cnts->offsets)
5384 		goto err_names;
5385 
5386 	return 0;
5387 
5388 err_names:
5389 	kfree(cnts->names);
5390 	cnts->names = NULL;
5391 	return -ENOMEM;
5392 }
5393 
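/*
 * Populate the name/offset arrays in exactly the same order, and under the
 * same capability checks, that __mlx5_ib_alloc_counters() used to size
 * them, so index j cannot overrun the allocation.
 */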
5394 static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
5395 				  const char **names,
5396 				  size_t *offsets)
5397 {
5398 	int i;
5399 	int j = 0;
5400 
5401 	for (i = 0; i < ARRAY_SIZE(basic_q_cnts); i++, j++) {
5402 		names[j] = basic_q_cnts[i].name;
5403 		offsets[j] = basic_q_cnts[i].offset;
5404 	}
5405 
5406 	if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) {
5407 		for (i = 0; i < ARRAY_SIZE(out_of_seq_q_cnts); i++, j++) {
5408 			names[j] = out_of_seq_q_cnts[i].name;
5409 			offsets[j] = out_of_seq_q_cnts[i].offset;
5410 		}
5411 	}
5412 
5413 	if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) {
5414 		for (i = 0; i < ARRAY_SIZE(retrans_q_cnts); i++, j++) {
5415 			names[j] = retrans_q_cnts[i].name;
5416 			offsets[j] = retrans_q_cnts[i].offset;
5417 		}
5418 	}
5419 
5420 	if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) {
5421 		for (i = 0; i < ARRAY_SIZE(extended_err_cnts); i++, j++) {
5422 			names[j] = extended_err_cnts[i].name;
5423 			offsets[j] = extended_err_cnts[i].offset;
5424 		}
5425 	}
5426 
5427 	if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
5428 		for (i = 0; i < ARRAY_SIZE(cong_cnts); i++, j++) {
5429 			names[j] = cong_cnts[i].name;
5430 			offsets[j] = cong_cnts[i].offset;
5431 		}
5432 	}
5433 
5434 	if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
5435 		for (i = 0; i < ARRAY_SIZE(ext_ppcnt_cnts); i++, j++) {
5436 			names[j] = ext_ppcnt_cnts[i].name;
5437 			offsets[j] = ext_ppcnt_cnts[i].offset;
5438 		}
5439 	}
5440 }
5441 
5442 static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
5443 {
5444 	int err = 0;
5445 	int i;
5446 	bool is_shared;
5447 
5448 	is_shared = MLX5_CAP_GEN(dev->mdev, log_max_uctx) != 0;
5449 
5450 	for (i = 0; i < dev->num_ports; i++) {
5451 		err = __mlx5_ib_alloc_counters(dev, &dev->port[i].cnts);
5452 		if (err)
5453 			goto err_alloc;
5454 
5455 		mlx5_ib_fill_counters(dev, dev->port[i].cnts.names,
5456 				      dev->port[i].cnts.offsets);
5457 
5458 		err = mlx5_cmd_alloc_q_counter(dev->mdev,
5459 					       &dev->port[i].cnts.set_id,
5460 					       is_shared ?
5461 					       MLX5_SHARED_RESOURCE_UID : 0);
5462 		if (err) {
5463 			mlx5_ib_warn(dev,
5464 				     "couldn't allocate queue counter for port %d, err %d\n",
5465 				     i + 1, err);
5466 			goto err_alloc;
5467 		}
5468 		dev->port[i].cnts.set_id_valid = true;
5469 	}
5470 
5471 	return 0;
5472 
5473 err_alloc:
5474 	mlx5_ib_dealloc_counters(dev);
5475 	return err;
5476 }
5477 
5478 static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
5479 						    u8 port_num)
5480 {
5481 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
5482 	struct mlx5_ib_port *port = &dev->port[port_num - 1];
5483 
5484 	/* We support only per port stats */
5485 	if (port_num == 0)
5486 		return NULL;
5487 
5488 	return rdma_alloc_hw_stats_struct(port->cnts.names,
5489 					  port->cnts.num_q_counters +
5490 					  port->cnts.num_cong_counters +
5491 					  port->cnts.num_ext_ppcnt_counters,
5492 					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
5493 }
5494 
5495 static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev,
5496 				    struct mlx5_ib_port *port,
5497 				    struct rdma_hw_stats *stats,
5498 				    u16 set_id)
5499 {
5500 	int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
5501 	void *out;
5502 	__be32 val;
5503 	int ret, i;
5504 
5505 	out = kvzalloc(outlen, GFP_KERNEL);
5506 	if (!out)
5507 		return -ENOMEM;
5508 
5509 	ret = mlx5_core_query_q_counter(mdev, set_id, 0, out, outlen);
5510 	if (ret)
5511 		goto free;
5512 
5513 	for (i = 0; i < port->cnts.num_q_counters; i++) {
5514 		val = *(__be32 *)(out + port->cnts.offsets[i]);
5515 		stats->value[i] = (u64)be32_to_cpu(val);
5516 	}
5517 
5518 free:
5519 	kvfree(out);
5520 	return ret;
5521 }
5522 
5523 static int mlx5_ib_query_ext_ppcnt_counters(struct mlx5_ib_dev *dev,
5524 					  struct mlx5_ib_port *port,
5525 					  struct rdma_hw_stats *stats)
5526 {
5527 	int offset = port->cnts.num_q_counters + port->cnts.num_cong_counters;
5528 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
5529 	int ret, i;
5530 	void *out;
5531 
5532 	out = kvzalloc(sz, GFP_KERNEL);
5533 	if (!out)
5534 		return -ENOMEM;
5535 
5536 	ret = mlx5_cmd_query_ext_ppcnt_counters(dev->mdev, out);
5537 	if (ret)
5538 		goto free;
5539 
5540 	for (i = 0; i < port->cnts.num_ext_ppcnt_counters; i++) {
5541 		stats->value[i + offset] =
5542 			be64_to_cpup((__be64 *)(out +
5543 				    port->cnts.offsets[i + offset]));
5544 	}
5545 
5546 free:
5547 	kvfree(out);
5548 	return ret;
5549 }
5550 
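/*
 * Gather all counter groups into the rdma_hw_stats buffer, whose layout
 * follows mlx5_ib_fill_counters(): Q counters, then congestion counters,
 * then extended PPCNT counters.  Q counters are per IB device and are read
 * from the master mdev; congestion counters are read from the native
 * port's mdev and are skipped entirely while that port is not affiliated.
 */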
5551 static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
5552 				struct rdma_hw_stats *stats,
5553 				u8 port_num, int index)
5554 {
5555 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
5556 	struct mlx5_ib_port *port = &dev->port[port_num - 1];
5557 	struct mlx5_core_dev *mdev;
5558 	int ret, num_counters;
5559 	u8 mdev_port_num;
5560 
5561 	if (!stats)
5562 		return -EINVAL;
5563 
5564 	num_counters = port->cnts.num_q_counters +
5565 		       port->cnts.num_cong_counters +
5566 		       port->cnts.num_ext_ppcnt_counters;
5567 
5568 	/* q_counters are per IB device, query the master mdev */
5569 	ret = mlx5_ib_query_q_counters(dev->mdev, port, stats,
5570 				       port->cnts.set_id);
5571 	if (ret)
5572 		return ret;
5573 
5574 	if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
5575 		ret = mlx5_ib_query_ext_ppcnt_counters(dev, port, stats);
5576 		if (ret)
5577 			return ret;
5578 	}
5579 
5580 	if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
5581 		mdev = mlx5_ib_get_native_port_mdev(dev, port_num,
5582 						    &mdev_port_num);
5583 		if (!mdev) {
5584 			/* If the port is not affiliated yet, it is in the
5585 			 * down state and has no counters, so the values
5586 			 * would read as zero; no need to query the HCA.
5587 			 */
5588 			goto done;
5589 		}
5590 		ret = mlx5_lag_query_cong_counters(dev->mdev,
5591 						   stats->value +
5592 						   port->cnts.num_q_counters,
5593 						   port->cnts.num_cong_counters,
5594 						   port->cnts.offsets +
5595 						   port->cnts.num_q_counters);
5596 
5597 		mlx5_ib_put_native_port_mdev(dev, port_num);
5598 		if (ret)
5599 			return ret;
5600 	}
5601 
5602 done:
5603 	return num_counters;
5604 }
5605 
5606 static struct rdma_hw_stats *
5607 mlx5_ib_counter_alloc_stats(struct rdma_counter *counter)
5608 {
5609 	struct mlx5_ib_dev *dev = to_mdev(counter->device);
5610 	struct mlx5_ib_port *port = &dev->port[counter->port - 1];
5611 
5612 	/* Q counters sit at the start of the counters array; expose only those */
5613 	return rdma_alloc_hw_stats_struct(port->cnts.names,
5614 					  port->cnts.num_q_counters,
5615 					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
5616 }
5617 
5618 static int mlx5_ib_counter_update_stats(struct rdma_counter *counter)
5619 {
5620 	struct mlx5_ib_dev *dev = to_mdev(counter->device);
5621 	struct mlx5_ib_port *port = &dev->port[counter->port - 1];
5622 
5623 	return mlx5_ib_query_q_counters(dev->mdev, port,
5624 					counter->stats, counter->id);
5625 }
5626 
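/*
 * Bind a QP to an rdma_counter.  The firmware counter set is allocated
 * lazily on the first bind (counter->id == 0); if attaching the QP fails,
 * the newly allocated set is released again.  mlx5_ib_counter_dealloc()
 * frees the set when the rdma core releases the counter.
 */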
5627 static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
5628 				   struct ib_qp *qp)
5629 {
5630 	struct mlx5_ib_dev *dev = to_mdev(qp->device);
5631 	u16 cnt_set_id = 0;
5632 	int err;
5633 
5634 	if (!counter->id) {
5635 		err = mlx5_cmd_alloc_q_counter(dev->mdev,
5636 					       &cnt_set_id,
5637 					       MLX5_SHARED_RESOURCE_UID);
5638 		if (err)
5639 			return err;
5640 		counter->id = cnt_set_id;
5641 	}
5642 
5643 	err = mlx5_ib_qp_set_counter(qp, counter);
5644 	if (err)
5645 		goto fail_set_counter;
5646 
5647 	return 0;
5648 
5649 fail_set_counter:
5650 	mlx5_core_dealloc_q_counter(dev->mdev, cnt_set_id);
5651 	counter->id = 0;
5652 
5653 	return err;
5654 }
5655 
5656 static int mlx5_ib_counter_unbind_qp(struct ib_qp *qp)
5657 {
5658 	return mlx5_ib_qp_set_counter(qp, NULL);
5659 }
5660 
5661 static int mlx5_ib_counter_dealloc(struct rdma_counter *counter)
5662 {
5663 	struct mlx5_ib_dev *dev = to_mdev(counter->device);
5664 
5665 	return mlx5_core_dealloc_q_counter(dev->mdev, counter->id);
5666 }
5667 
5668 static int mlx5_ib_rn_get_params(struct ib_device *device, u8 port_num,
5669 				 enum rdma_netdev_t type,
5670 				 struct rdma_netdev_alloc_params *params)
5671 {
5672 	if (type != RDMA_NETDEV_IPOIB)
5673 		return -EOPNOTSUPP;
5674 
5675 	return mlx5_rdma_rn_get_params(to_mdev(device)->mdev, device, params);
5676 }
5677 
5678 static void delay_drop_debugfs_cleanup(struct mlx5_ib_dev *dev)
5679 {
5680 	if (!dev->delay_drop.dbg)
5681 		return;
5682 	debugfs_remove_recursive(dev->delay_drop.dbg->dir_debugfs);
5683 	kfree(dev->delay_drop.dbg);
5684 	dev->delay_drop.dbg = NULL;
5685 }
5686 
5687 static void cancel_delay_drop(struct mlx5_ib_dev *dev)
5688 {
5689 	if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
5690 		return;
5691 
5692 	cancel_work_sync(&dev->delay_drop.delay_drop_work);
5693 	delay_drop_debugfs_cleanup(dev);
5694 }
5695 
5696 static ssize_t delay_drop_timeout_read(struct file *filp, char __user *buf,
5697 				       size_t count, loff_t *pos)
5698 {
5699 	struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
5700 	char lbuf[20];
5701 	int len;
5702 
5703 	len = snprintf(lbuf, sizeof(lbuf), "%u\n", delay_drop->timeout);
5704 	return simple_read_from_buffer(buf, count, pos, lbuf, len);
5705 }
5706 
5707 static ssize_t delay_drop_timeout_write(struct file *filp, const char __user *buf,
5708 					size_t count, loff_t *pos)
5709 {
5710 	struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
5711 	u32 timeout;
5712 	u32 var;
5713 
5714 	if (kstrtouint_from_user(buf, count, 0, &var))
5715 		return -EFAULT;
5716 
5717 	timeout = min_t(u32, roundup(var, 100), MLX5_MAX_DELAY_DROP_TIMEOUT_MS *
5718 			1000);
5719 	if (timeout != var)
5720 		mlx5_ib_dbg(delay_drop->dev, "Round delay drop timeout to %u usec\n",
5721 			    timeout);
5722 
5723 	delay_drop->timeout = timeout;
5724 
5725 	return count;
5726 }
5727 
5728 static const struct file_operations fops_delay_drop_timeout = {
5729 	.owner	= THIS_MODULE,
5730 	.open	= simple_open,
5731 	.write	= delay_drop_timeout_write,
5732 	.read	= delay_drop_timeout_read,
5733 };
5734 
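/*
 * Expose delay-drop state under a "delay_drop" directory in the device's
 * mlx5 debugfs root: read-only event and RQ counts plus a writable
 * "timeout" file served by fops_delay_drop_timeout above (values are
 * rounded up to 100 usec steps and capped at
 * MLX5_MAX_DELAY_DROP_TIMEOUT_MS).  Nothing is created when debugfs is
 * disabled, and that is not treated as an error.
 */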
5735 static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev)
5736 {
5737 	struct mlx5_ib_dbg_delay_drop *dbg;
5738 
5739 	if (!mlx5_debugfs_root)
5740 		return 0;
5741 
5742 	dbg = kzalloc(sizeof(*dbg), GFP_KERNEL);
5743 	if (!dbg)
5744 		return -ENOMEM;
5745 
5746 	dev->delay_drop.dbg = dbg;
5747 
5748 	dbg->dir_debugfs =
5749 		debugfs_create_dir("delay_drop",
5750 				   dev->mdev->priv.dbg_root);
5751 	if (!dbg->dir_debugfs)
5752 		goto out_debugfs;
5753 
5754 	dbg->events_cnt_debugfs =
5755 		debugfs_create_atomic_t("num_timeout_events", 0400,
5756 					dbg->dir_debugfs,
5757 					&dev->delay_drop.events_cnt);
5758 	if (!dbg->events_cnt_debugfs)
5759 		goto out_debugfs;
5760 
5761 	dbg->rqs_cnt_debugfs =
5762 		debugfs_create_atomic_t("num_rqs", 0400,
5763 					dbg->dir_debugfs,
5764 					&dev->delay_drop.rqs_cnt);
5765 	if (!dbg->rqs_cnt_debugfs)
5766 		goto out_debugfs;
5767 
5768 	dbg->timeout_debugfs =
5769 		debugfs_create_file("timeout", 0600,
5770 				    dbg->dir_debugfs,
5771 				    &dev->delay_drop,
5772 				    &fops_delay_drop_timeout);
5773 	if (!dbg->timeout_debugfs)
5774 		goto out_debugfs;
5775 
5776 	return 0;
5777 
5778 out_debugfs:
5779 	delay_drop_debugfs_cleanup(dev);
5780 	return -ENOMEM;
5781 }
5782 
5783 static void init_delay_drop(struct mlx5_ib_dev *dev)
5784 {
5785 	if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
5786 		return;
5787 
5788 	mutex_init(&dev->delay_drop.lock);
5789 	dev->delay_drop.dev = dev;
5790 	dev->delay_drop.activate = false;
5791 	dev->delay_drop.timeout = MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000;
5792 	INIT_WORK(&dev->delay_drop.delay_drop_work, delay_drop_handler);
5793 	atomic_set(&dev->delay_drop.rqs_cnt, 0);
5794 	atomic_set(&dev->delay_drop.events_cnt, 0);
5795 
5796 	if (delay_drop_debugfs_init(dev))
5797 		mlx5_ib_warn(dev, "Failed to init delay drop debugfs\n");
5798 }
5799 
5800 /* The mlx5_ib_multiport_mutex should be held when calling this function */
5801 static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
5802 				      struct mlx5_ib_multiport_info *mpi)
5803 {
5804 	u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
5805 	struct mlx5_ib_port *port = &ibdev->port[port_num];
5806 	int comps;
5807 	int err;
5808 	int i;
5809 
5810 	mlx5_ib_cleanup_cong_debugfs(ibdev, port_num);
5811 
5812 	spin_lock(&port->mp.mpi_lock);
5813 	if (!mpi->ibdev) {
5814 		spin_unlock(&port->mp.mpi_lock);
5815 		return;
5816 	}
5817 
5818 	mpi->ibdev = NULL;
5819 
5820 	spin_unlock(&port->mp.mpi_lock);
5821 	if (mpi->mdev_events.notifier_call)
5822 		mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events);
5823 	mpi->mdev_events.notifier_call = NULL;
5824 	mlx5_remove_netdev_notifier(ibdev, port_num);
5825 	spin_lock(&port->mp.mpi_lock);
5826 
5827 	comps = mpi->mdev_refcnt;
5828 	if (comps) {
5829 		mpi->unaffiliate = true;
5830 		init_completion(&mpi->unref_comp);
5831 		spin_unlock(&port->mp.mpi_lock);
5832 
5833 		for (i = 0; i < comps; i++)
5834 			wait_for_completion(&mpi->unref_comp);
5835 
5836 		spin_lock(&port->mp.mpi_lock);
5837 		mpi->unaffiliate = false;
5838 	}
5839 
5840 	port->mp.mpi = NULL;
5841 
5842 	list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
5843 
5844 	spin_unlock(&port->mp.mpi_lock);
5845 
5846 	err = mlx5_nic_vport_unaffiliate_multiport(mpi->mdev);
5847 
5848 	mlx5_ib_dbg(ibdev, "unaffiliated port %d\n", port_num + 1);
5849 	/* Only log the failure here; the pointers were already cleared and
5850 	 * the mpi was put back on the unaffiliated list above.
5851 	 */
5852 	if (err)
5853 		mlx5_ib_err(ibdev, "Failed to unaffiliate port %u\n",
5854 			    port_num + 1);
5855 
5856 	ibdev->port[port_num].roce.last_port_state = IB_PORT_DOWN;
5857 }
5858 
5859 /* The mlx5_ib_multiport_mutex should be held when calling this function */
5860 static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
5861 				    struct mlx5_ib_multiport_info *mpi)
5862 {
5863 	u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
5864 	int err;
5865 
5866 	spin_lock(&ibdev->port[port_num].mp.mpi_lock);
5867 	if (ibdev->port[port_num].mp.mpi) {
5868 		mlx5_ib_dbg(ibdev, "port %d already affiliated.\n",
5869 			    port_num + 1);
5870 		spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
5871 		return false;
5872 	}
5873 
5874 	ibdev->port[port_num].mp.mpi = mpi;
5875 	mpi->ibdev = ibdev;
5876 	mpi->mdev_events.notifier_call = NULL;
5877 	spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
5878 
5879 	err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev);
5880 	if (err)
5881 		goto unbind;
5882 
5883 	err = get_port_caps(ibdev, mlx5_core_native_port_num(mpi->mdev));
5884 	if (err)
5885 		goto unbind;
5886 
5887 	err = mlx5_add_netdev_notifier(ibdev, port_num);
5888 	if (err) {
5889 		mlx5_ib_err(ibdev, "failed adding netdev notifier for port %u\n",
5890 			    port_num + 1);
5891 		goto unbind;
5892 	}
5893 
5894 	mpi->mdev_events.notifier_call = mlx5_ib_event_slave_port;
5895 	mlx5_notifier_register(mpi->mdev, &mpi->mdev_events);
5896 
5897 	mlx5_ib_init_cong_debugfs(ibdev, port_num);
5898 
5899 	return true;
5900 
5901 unbind:
5902 	mlx5_ib_unbind_slave_port(ibdev, mpi);
5903 	return false;
5904 }
5905 
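/*
 * On a multi-port HCA exposed through several PCI functions, the function
 * owning the native port acts as the multiport master and represents all
 * ports in a single IB device.  Walk the unaffiliated list and bind any
 * slave whose system image GUID matches ours; slaves that show up later
 * are bound from mlx5_ib_add_slave_port() instead.
 */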
5906 static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
5907 {
5908 	int port_num = mlx5_core_native_port_num(dev->mdev) - 1;
5909 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
5910 							  port_num + 1);
5911 	struct mlx5_ib_multiport_info *mpi;
5912 	int err;
5913 	int i;
5914 
5915 	if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
5916 		return 0;
5917 
5918 	err = mlx5_query_nic_vport_system_image_guid(dev->mdev,
5919 						     &dev->sys_image_guid);
5920 	if (err)
5921 		return err;
5922 
5923 	err = mlx5_nic_vport_enable_roce(dev->mdev);
5924 	if (err)
5925 		return err;
5926 
5927 	mutex_lock(&mlx5_ib_multiport_mutex);
5928 	for (i = 0; i < dev->num_ports; i++) {
5929 		bool bound = false;
5930 
5931 		/* build a stub multiport info struct for the native port. */
5932 		if (i == port_num) {
5933 			mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
5934 			if (!mpi) {
5935 				mutex_unlock(&mlx5_ib_multiport_mutex);
5936 				mlx5_nic_vport_disable_roce(dev->mdev);
5937 				return -ENOMEM;
5938 			}
5939 
5940 			mpi->is_master = true;
5941 			mpi->mdev = dev->mdev;
5942 			mpi->sys_image_guid = dev->sys_image_guid;
5943 			dev->port[i].mp.mpi = mpi;
5944 			mpi->ibdev = dev;
5945 			mpi = NULL;
5946 			continue;
5947 		}
5948 
5949 		list_for_each_entry(mpi, &mlx5_ib_unaffiliated_port_list,
5950 				    list) {
5951 			if (dev->sys_image_guid == mpi->sys_image_guid &&
5952 			    (mlx5_core_native_port_num(mpi->mdev) - 1) == i) {
5953 				bound = mlx5_ib_bind_slave_port(dev, mpi);
5954 			}
5955 
5956 			if (bound) {
5957 				dev_dbg(mpi->mdev->device,
5958 					"removing port from unaffiliated list.\n");
5959 				mlx5_ib_dbg(dev, "port %d bound\n", i + 1);
5960 				list_del(&mpi->list);
5961 				break;
5962 			}
5963 		}
5964 		if (!bound) {
5965 			get_port_caps(dev, i + 1);
5966 			mlx5_ib_dbg(dev, "no free port found for port %d\n",
5967 				    i + 1);
5968 		}
5969 	}
5970 
5971 	list_add_tail(&dev->ib_dev_list, &mlx5_ib_dev_list);
5972 	mutex_unlock(&mlx5_ib_multiport_mutex);
5973 	return err;
5974 }
5975 
5976 static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
5977 {
5978 	int port_num = mlx5_core_native_port_num(dev->mdev) - 1;
5979 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
5980 							  port_num + 1);
5981 	int i;
5982 
5983 	if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
5984 		return;
5985 
5986 	mutex_lock(&mlx5_ib_multiport_mutex);
5987 	for (i = 0; i < dev->num_ports; i++) {
5988 		if (dev->port[i].mp.mpi) {
5989 			/* Destroy the native port stub */
5990 			if (i == port_num) {
5991 				kfree(dev->port[i].mp.mpi);
5992 				dev->port[i].mp.mpi = NULL;
5993 			} else {
5994 				mlx5_ib_dbg(dev, "unbinding port_num: %d\n", i + 1);
5995 				mlx5_ib_unbind_slave_port(dev, dev->port[i].mp.mpi);
5996 			}
5997 		}
5998 	}
5999 
6000 	mlx5_ib_dbg(dev, "removing from devlist\n");
6001 	list_del(&dev->ib_dev_list);
6002 	mutex_unlock(&mlx5_ib_multiport_mutex);
6003 
6004 	mlx5_nic_vport_disable_roce(dev->mdev);
6005 }
6006 
6007 ADD_UVERBS_ATTRIBUTES_SIMPLE(
6008 	mlx5_ib_dm,
6009 	UVERBS_OBJECT_DM,
6010 	UVERBS_METHOD_DM_ALLOC,
6011 	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
6012 			    UVERBS_ATTR_TYPE(u64),
6013 			    UA_MANDATORY),
6014 	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
6015 			    UVERBS_ATTR_TYPE(u16),
6016 			    UA_OPTIONAL),
6017 	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
6018 			     enum mlx5_ib_uapi_dm_type,
6019 			     UA_OPTIONAL));
6020 
6021 ADD_UVERBS_ATTRIBUTES_SIMPLE(
6022 	mlx5_ib_flow_action,
6023 	UVERBS_OBJECT_FLOW_ACTION,
6024 	UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
6025 	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
6026 			     enum mlx5_ib_uapi_flow_action_flags));
6027 
6028 static const struct uapi_definition mlx5_ib_defs[] = {
6029 #if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
6030 	UAPI_DEF_CHAIN(mlx5_ib_devx_defs),
6031 	UAPI_DEF_CHAIN(mlx5_ib_flow_defs),
6032 #endif
6033 
6034 	UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
6035 				&mlx5_ib_flow_action),
6036 	UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DM, &mlx5_ib_dm),
6037 	{}
6038 };
6039 
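/*
 * Read back an ib_counters object used for flow counters.  The raw values
 * are fetched through the read_counters() callback into a scratch buffer
 * and then scattered into the caller's buffer according to the
 * (index, description) pairs recorded when the counters were attached to a
 * flow, accumulating when several descriptions map to the same user index.
 */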
6040 static int mlx5_ib_read_counters(struct ib_counters *counters,
6041 				 struct ib_counters_read_attr *read_attr,
6042 				 struct uverbs_attr_bundle *attrs)
6043 {
6044 	struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
6045 	struct mlx5_read_counters_attr mread_attr = {};
6046 	struct mlx5_ib_flow_counters_desc *desc;
6047 	int ret, i;
6048 
6049 	mutex_lock(&mcounters->mcntrs_mutex);
6050 	if (mcounters->cntrs_max_index > read_attr->ncounters) {
6051 		ret = -EINVAL;
6052 		goto err_bound;
6053 	}
6054 
6055 	mread_attr.out = kcalloc(mcounters->counters_num, sizeof(u64),
6056 				 GFP_KERNEL);
6057 	if (!mread_attr.out) {
6058 		ret = -ENOMEM;
6059 		goto err_bound;
6060 	}
6061 
6062 	mread_attr.hw_cntrs_hndl = mcounters->hw_cntrs_hndl;
6063 	mread_attr.flags = read_attr->flags;
6064 	ret = mcounters->read_counters(counters->device, &mread_attr);
6065 	if (ret)
6066 		goto err_read;
6067 
6068 	/* Walk the counter descriptions and accumulate each hardware value
6069 	 * into the user buffer slot selected by its index.
6070 	 */
6071 	desc = mcounters->counters_data;
6072 	for (i = 0; i < mcounters->ncounters; i++)
6073 		read_attr->counters_buff[desc[i].index] += mread_attr.out[desc[i].description];
6074 
6075 err_read:
6076 	kfree(mread_attr.out);
6077 err_bound:
6078 	mutex_unlock(&mcounters->mcntrs_mutex);
6079 	return ret;
6080 }
6081 
6082 static int mlx5_ib_destroy_counters(struct ib_counters *counters)
6083 {
6084 	struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
6085 
6086 	counters_clear_description(counters);
6087 	if (mcounters->hw_cntrs_hndl)
6088 		mlx5_fc_destroy(to_mdev(counters->device)->mdev,
6089 				mcounters->hw_cntrs_hndl);
6090 
6091 	kfree(mcounters);
6092 
6093 	return 0;
6094 }
6095 
6096 static struct ib_counters *mlx5_ib_create_counters(struct ib_device *device,
6097 						   struct uverbs_attr_bundle *attrs)
6098 {
6099 	struct mlx5_ib_mcounters *mcounters;
6100 
6101 	mcounters = kzalloc(sizeof(*mcounters), GFP_KERNEL);
6102 	if (!mcounters)
6103 		return ERR_PTR(-ENOMEM);
6104 
6105 	mutex_init(&mcounters->mcntrs_mutex);
6106 
6107 	return &mcounters->ibcntrs;
6108 }
6109 
6110 static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
6111 {
6112 	mlx5_ib_cleanup_multiport_master(dev);
6113 	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
6114 		srcu_barrier(&dev->mr_srcu);
6115 		cleanup_srcu_struct(&dev->mr_srcu);
6116 	}
6117 
6118 	WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES));
6119 }
6120 
6121 static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
6122 {
6123 	struct mlx5_core_dev *mdev = dev->mdev;
6124 	int err;
6125 	int i;
6126 
6127 	for (i = 0; i < dev->num_ports; i++) {
6128 		spin_lock_init(&dev->port[i].mp.mpi_lock);
6129 		rwlock_init(&dev->port[i].roce.netdev_lock);
6130 		dev->port[i].roce.dev = dev;
6131 		dev->port[i].roce.native_port_num = i + 1;
6132 		dev->port[i].roce.last_port_state = IB_PORT_DOWN;
6133 	}
6134 
6135 	mlx5_ib_internal_fill_odp_caps(dev);
6136 
6137 	err = mlx5_ib_init_multiport_master(dev);
6138 	if (err)
6139 		return err;
6140 
6141 	err = set_has_smi_cap(dev);
6142 	if (err)
6143 		return err;
6144 
6145 	if (!mlx5_core_mp_enabled(mdev)) {
6146 		for (i = 1; i <= dev->num_ports; i++) {
6147 			err = get_port_caps(dev, i);
6148 			if (err)
6149 				break;
6150 		}
6151 	} else {
6152 		err = get_port_caps(dev, mlx5_core_native_port_num(mdev));
6153 	}
6154 	if (err)
6155 		goto err_mp;
6156 
6157 	if (mlx5_use_mad_ifc(dev))
6158 		get_ext_port_caps(dev);
6159 
6160 	dev->ib_dev.node_type		= RDMA_NODE_IB_CA;
6161 	dev->ib_dev.local_dma_lkey	= 0 /* not supported for now */;
6162 	dev->ib_dev.phys_port_cnt	= dev->num_ports;
6163 	dev->ib_dev.num_comp_vectors    = mlx5_comp_vectors_count(mdev);
6164 	dev->ib_dev.dev.parent		= mdev->device;
6165 
6166 	mutex_init(&dev->cap_mask_mutex);
6167 	INIT_LIST_HEAD(&dev->qp_list);
6168 	spin_lock_init(&dev->reset_flow_resource_lock);
6169 
6170 	spin_lock_init(&dev->dm.lock);
6171 	dev->dm.dev = mdev;
6172 
6173 	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
6174 		err = init_srcu_struct(&dev->mr_srcu);
6175 		if (err)
6176 			goto err_mp;
6177 	}
6178 
6179 	return 0;
6180 
6181 err_mp:
6182 	mlx5_ib_cleanup_multiport_master(dev);
6183 
6184 	return err;
6185 }
6186 
6187 static int mlx5_ib_stage_flow_db_init(struct mlx5_ib_dev *dev)
6188 {
6189 	dev->flow_db = kzalloc(sizeof(*dev->flow_db), GFP_KERNEL);
6190 
6191 	if (!dev->flow_db)
6192 		return -ENOMEM;
6193 
6194 	mutex_init(&dev->flow_db->lock);
6195 
6196 	return 0;
6197 }
6198 
6199 static void mlx5_ib_stage_flow_db_cleanup(struct mlx5_ib_dev *dev)
6200 {
6201 	kfree(dev->flow_db);
6202 }
6203 
6204 static const struct ib_device_ops mlx5_ib_dev_ops = {
6205 	.owner = THIS_MODULE,
6206 	.driver_id = RDMA_DRIVER_MLX5,
6207 	.uverbs_abi_ver	= MLX5_IB_UVERBS_ABI_VERSION,
6208 
6209 	.add_gid = mlx5_ib_add_gid,
6210 	.alloc_mr = mlx5_ib_alloc_mr,
6211 	.alloc_mr_integrity = mlx5_ib_alloc_mr_integrity,
6212 	.alloc_pd = mlx5_ib_alloc_pd,
6213 	.alloc_ucontext = mlx5_ib_alloc_ucontext,
6214 	.attach_mcast = mlx5_ib_mcg_attach,
6215 	.check_mr_status = mlx5_ib_check_mr_status,
6216 	.create_ah = mlx5_ib_create_ah,
6217 	.create_counters = mlx5_ib_create_counters,
6218 	.create_cq = mlx5_ib_create_cq,
6219 	.create_flow = mlx5_ib_create_flow,
6220 	.create_qp = mlx5_ib_create_qp,
6221 	.create_srq = mlx5_ib_create_srq,
6222 	.dealloc_pd = mlx5_ib_dealloc_pd,
6223 	.dealloc_ucontext = mlx5_ib_dealloc_ucontext,
6224 	.del_gid = mlx5_ib_del_gid,
6225 	.dereg_mr = mlx5_ib_dereg_mr,
6226 	.destroy_ah = mlx5_ib_destroy_ah,
6227 	.destroy_counters = mlx5_ib_destroy_counters,
6228 	.destroy_cq = mlx5_ib_destroy_cq,
6229 	.destroy_flow = mlx5_ib_destroy_flow,
6230 	.destroy_flow_action = mlx5_ib_destroy_flow_action,
6231 	.destroy_qp = mlx5_ib_destroy_qp,
6232 	.destroy_srq = mlx5_ib_destroy_srq,
6233 	.detach_mcast = mlx5_ib_mcg_detach,
6234 	.disassociate_ucontext = mlx5_ib_disassociate_ucontext,
6235 	.drain_rq = mlx5_ib_drain_rq,
6236 	.drain_sq = mlx5_ib_drain_sq,
6237 	.get_dev_fw_str = get_dev_fw_str,
6238 	.get_dma_mr = mlx5_ib_get_dma_mr,
6239 	.get_link_layer = mlx5_ib_port_link_layer,
6240 	.map_mr_sg = mlx5_ib_map_mr_sg,
6241 	.map_mr_sg_pi = mlx5_ib_map_mr_sg_pi,
6242 	.mmap = mlx5_ib_mmap,
6243 	.modify_cq = mlx5_ib_modify_cq,
6244 	.modify_device = mlx5_ib_modify_device,
6245 	.modify_port = mlx5_ib_modify_port,
6246 	.modify_qp = mlx5_ib_modify_qp,
6247 	.modify_srq = mlx5_ib_modify_srq,
6248 	.poll_cq = mlx5_ib_poll_cq,
6249 	.post_recv = mlx5_ib_post_recv,
6250 	.post_send = mlx5_ib_post_send,
6251 	.post_srq_recv = mlx5_ib_post_srq_recv,
6252 	.process_mad = mlx5_ib_process_mad,
6253 	.query_ah = mlx5_ib_query_ah,
6254 	.query_device = mlx5_ib_query_device,
6255 	.query_gid = mlx5_ib_query_gid,
6256 	.query_pkey = mlx5_ib_query_pkey,
6257 	.query_qp = mlx5_ib_query_qp,
6258 	.query_srq = mlx5_ib_query_srq,
6259 	.read_counters = mlx5_ib_read_counters,
6260 	.reg_user_mr = mlx5_ib_reg_user_mr,
6261 	.req_notify_cq = mlx5_ib_arm_cq,
6262 	.rereg_user_mr = mlx5_ib_rereg_user_mr,
6263 	.resize_cq = mlx5_ib_resize_cq,
6264 
6265 	INIT_RDMA_OBJ_SIZE(ib_ah, mlx5_ib_ah, ibah),
6266 	INIT_RDMA_OBJ_SIZE(ib_cq, mlx5_ib_cq, ibcq),
6267 	INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd),
6268 	INIT_RDMA_OBJ_SIZE(ib_srq, mlx5_ib_srq, ibsrq),
6269 	INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx5_ib_ucontext, ibucontext),
6270 };
6271 
6272 static const struct ib_device_ops mlx5_ib_dev_flow_ipsec_ops = {
6273 	.create_flow_action_esp = mlx5_ib_create_flow_action_esp,
6274 	.modify_flow_action_esp = mlx5_ib_modify_flow_action_esp,
6275 };
6276 
6277 static const struct ib_device_ops mlx5_ib_dev_ipoib_enhanced_ops = {
6278 	.rdma_netdev_get_params = mlx5_ib_rn_get_params,
6279 };
6280 
6281 static const struct ib_device_ops mlx5_ib_dev_sriov_ops = {
6282 	.get_vf_config = mlx5_ib_get_vf_config,
6283 	.get_vf_stats = mlx5_ib_get_vf_stats,
6284 	.set_vf_guid = mlx5_ib_set_vf_guid,
6285 	.set_vf_link_state = mlx5_ib_set_vf_link_state,
6286 };
6287 
6288 static const struct ib_device_ops mlx5_ib_dev_mw_ops = {
6289 	.alloc_mw = mlx5_ib_alloc_mw,
6290 	.dealloc_mw = mlx5_ib_dealloc_mw,
6291 };
6292 
6293 static const struct ib_device_ops mlx5_ib_dev_xrc_ops = {
6294 	.alloc_xrcd = mlx5_ib_alloc_xrcd,
6295 	.dealloc_xrcd = mlx5_ib_dealloc_xrcd,
6296 };
6297 
6298 static const struct ib_device_ops mlx5_ib_dev_dm_ops = {
6299 	.alloc_dm = mlx5_ib_alloc_dm,
6300 	.dealloc_dm = mlx5_ib_dealloc_dm,
6301 	.reg_dm_mr = mlx5_ib_reg_dm_mr,
6302 };
6303 
6304 static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
6305 {
6306 	struct mlx5_core_dev *mdev = dev->mdev;
6307 	int err;
6308 
6309 	dev->ib_dev.uverbs_cmd_mask	=
6310 		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
6311 		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
6312 		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
6313 		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
6314 		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
6315 		(1ull << IB_USER_VERBS_CMD_CREATE_AH)		|
6316 		(1ull << IB_USER_VERBS_CMD_DESTROY_AH)		|
6317 		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
6318 		(1ull << IB_USER_VERBS_CMD_REREG_MR)		|
6319 		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
6320 		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
6321 		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
6322 		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
6323 		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
6324 		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
6325 		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
6326 		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
6327 		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
6328 		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)	|
6329 		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST)	|
6330 		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
6331 		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
6332 		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
6333 		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)		|
6334 		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)		|
6335 		(1ull << IB_USER_VERBS_CMD_OPEN_QP);
6336 	dev->ib_dev.uverbs_ex_cmd_mask =
6337 		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE)	|
6338 		(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ)	|
6339 		(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP)	|
6340 		(1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP)	|
6341 		(1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ)	|
6342 		(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW)	|
6343 		(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
6344 
6345 	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
6346 	    IS_ENABLED(CONFIG_MLX5_CORE_IPOIB))
6347 		ib_set_device_ops(&dev->ib_dev,
6348 				  &mlx5_ib_dev_ipoib_enhanced_ops);
6349 
6350 	if (mlx5_core_is_pf(mdev))
6351 		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_sriov_ops);
6352 
6353 	dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));
6354 
6355 	if (MLX5_CAP_GEN(mdev, imaicl)) {
6356 		dev->ib_dev.uverbs_cmd_mask |=
6357 			(1ull << IB_USER_VERBS_CMD_ALLOC_MW)	|
6358 			(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
6359 		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_mw_ops);
6360 	}
6361 
6362 	if (MLX5_CAP_GEN(mdev, xrc)) {
6363 		dev->ib_dev.uverbs_cmd_mask |=
6364 			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
6365 			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
6366 		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_xrc_ops);
6367 	}
6368 
6369 	if (MLX5_CAP_DEV_MEM(mdev, memic) ||
6370 	    MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
6371 	    MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM)
6372 		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_dm_ops);
6373 
6374 	if (mlx5_accel_ipsec_device_caps(dev->mdev) &
6375 	    MLX5_ACCEL_IPSEC_CAP_DEVICE)
6376 		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_flow_ipsec_ops);
6377 	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_ops);
6378 
6379 	if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS))
6380 		dev->ib_dev.driver_def = mlx5_ib_defs;
6381 
6382 	err = init_node_data(dev);
6383 	if (err)
6384 		return err;
6385 
6386 	if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
6387 	    (MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) ||
6388 	     MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
6389 		mutex_init(&dev->lb.mutex);
6390 
6391 	dev->ib_dev.use_cq_dim = true;
6392 
6393 	return 0;
6394 }
6395 
6396 static const struct ib_device_ops mlx5_ib_dev_port_ops = {
6397 	.get_port_immutable = mlx5_port_immutable,
6398 	.query_port = mlx5_ib_query_port,
6399 };
6400 
6401 static int mlx5_ib_stage_non_default_cb(struct mlx5_ib_dev *dev)
6402 {
6403 	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_ops);
6404 	return 0;
6405 }
6406 
6407 static const struct ib_device_ops mlx5_ib_dev_port_rep_ops = {
6408 	.get_port_immutable = mlx5_port_rep_immutable,
6409 	.query_port = mlx5_ib_rep_query_port,
6410 };
6411 
6412 static int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev)
6413 {
6414 	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_rep_ops);
6415 	return 0;
6416 }
6417 
6418 static const struct ib_device_ops mlx5_ib_dev_common_roce_ops = {
6419 	.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table,
6420 	.create_wq = mlx5_ib_create_wq,
6421 	.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table,
6422 	.destroy_wq = mlx5_ib_destroy_wq,
6423 	.get_netdev = mlx5_ib_get_netdev,
6424 	.modify_wq = mlx5_ib_modify_wq,
6425 };
6426 
6427 static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev)
6428 {
6429 	u8 port_num;
6430 
6431 	dev->ib_dev.uverbs_ex_cmd_mask |=
6432 			(1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
6433 			(1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
6434 			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
6435 			(1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
6436 			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
6437 	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_common_roce_ops);
6438 
6439 	port_num = mlx5_core_native_port_num(dev->mdev) - 1;
6440 
6441 	/* Register only for native ports */
6442 	return mlx5_add_netdev_notifier(dev, port_num);
6443 }
6444 
6445 static void mlx5_ib_stage_common_roce_cleanup(struct mlx5_ib_dev *dev)
6446 {
6447 	u8 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
6448 
6449 	mlx5_remove_netdev_notifier(dev, port_num);
6450 }
6451 
6452 static int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev)
6453 {
6454 	struct mlx5_core_dev *mdev = dev->mdev;
6455 	enum rdma_link_layer ll;
6456 	int port_type_cap;
6457 	int err = 0;
6458 
6459 	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
6460 	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
6461 
6462 	if (ll == IB_LINK_LAYER_ETHERNET)
6463 		err = mlx5_ib_stage_common_roce_init(dev);
6464 
6465 	return err;
6466 }
6467 
6468 static void mlx5_ib_stage_rep_roce_cleanup(struct mlx5_ib_dev *dev)
6469 {
6470 	mlx5_ib_stage_common_roce_cleanup(dev);
6471 }
6472 
6473 static int mlx5_ib_stage_roce_init(struct mlx5_ib_dev *dev)
6474 {
6475 	struct mlx5_core_dev *mdev = dev->mdev;
6476 	enum rdma_link_layer ll;
6477 	int port_type_cap;
6478 	int err;
6479 
6480 	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
6481 	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
6482 
6483 	if (ll == IB_LINK_LAYER_ETHERNET) {
6484 		err = mlx5_ib_stage_common_roce_init(dev);
6485 		if (err)
6486 			return err;
6487 
6488 		err = mlx5_enable_eth(dev);
6489 		if (err)
6490 			goto cleanup;
6491 	}
6492 
6493 	return 0;
6494 cleanup:
6495 	mlx5_ib_stage_common_roce_cleanup(dev);
6496 
6497 	return err;
6498 }
6499 
6500 static void mlx5_ib_stage_roce_cleanup(struct mlx5_ib_dev *dev)
6501 {
6502 	struct mlx5_core_dev *mdev = dev->mdev;
6503 	enum rdma_link_layer ll;
6504 	int port_type_cap;
6505 
6506 	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
6507 	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
6508 
6509 	if (ll == IB_LINK_LAYER_ETHERNET) {
6510 		mlx5_disable_eth(dev);
6511 		mlx5_ib_stage_common_roce_cleanup(dev);
6512 	}
6513 }
6514 
6515 static int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev)
6516 {
6517 	return create_dev_resources(&dev->devr);
6518 }
6519 
6520 static void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)
6521 {
6522 	destroy_dev_resources(&dev->devr);
6523 }
6524 
6525 static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
6526 {
6527 	return mlx5_ib_odp_init_one(dev);
6528 }
6529 
6530 static void mlx5_ib_stage_odp_cleanup(struct mlx5_ib_dev *dev)
6531 {
6532 	mlx5_ib_odp_cleanup_one(dev);
6533 }
6534 
6535 static const struct ib_device_ops mlx5_ib_dev_hw_stats_ops = {
6536 	.alloc_hw_stats = mlx5_ib_alloc_hw_stats,
6537 	.get_hw_stats = mlx5_ib_get_hw_stats,
6538 	.counter_bind_qp = mlx5_ib_counter_bind_qp,
6539 	.counter_unbind_qp = mlx5_ib_counter_unbind_qp,
6540 	.counter_dealloc = mlx5_ib_counter_dealloc,
6541 	.counter_alloc_stats = mlx5_ib_counter_alloc_stats,
6542 	.counter_update_stats = mlx5_ib_counter_update_stats,
6543 };
6544 
6545 static int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev)
6546 {
6547 	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
6548 		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_hw_stats_ops);
6549 
6550 		return mlx5_ib_alloc_counters(dev);
6551 	}
6552 
6553 	return 0;
6554 }
6555 
6556 static void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev)
6557 {
6558 	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
6559 		mlx5_ib_dealloc_counters(dev);
6560 }
6561 
6562 static int mlx5_ib_stage_cong_debugfs_init(struct mlx5_ib_dev *dev)
6563 {
6564 	mlx5_ib_init_cong_debugfs(dev,
6565 				  mlx5_core_native_port_num(dev->mdev) - 1);
6566 	return 0;
6567 }
6568 
6569 static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev)
6570 {
6571 	mlx5_ib_cleanup_cong_debugfs(dev,
6572 				     mlx5_core_native_port_num(dev->mdev) - 1);
6573 }
6574 
6575 static int mlx5_ib_stage_uar_init(struct mlx5_ib_dev *dev)
6576 {
6577 	dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
6578 	return PTR_ERR_OR_ZERO(dev->mdev->priv.uar);
6579 }
6580 
6581 static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev)
6582 {
6583 	mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
6584 }
6585 
6586 static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
6587 {
6588 	int err;
6589 
6590 	err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
6591 	if (err)
6592 		return err;
6593 
6594 	err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
6595 	if (err)
6596 		mlx5_free_bfreg(dev->mdev, &dev->bfreg);
6597 
6598 	return err;
6599 }
6600 
6601 static void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev)
6602 {
6603 	mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
6604 	mlx5_free_bfreg(dev->mdev, &dev->bfreg);
6605 }
6606 
6607 static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
6608 {
6609 	const char *name;
6610 
6611 	rdma_set_device_sysfs_group(&dev->ib_dev, &mlx5_attr_group);
6612 	if (!mlx5_lag_is_roce(dev->mdev))
6613 		name = "mlx5_%d";
6614 	else
6615 		name = "mlx5_bond_%d";
6616 	return ib_register_device(&dev->ib_dev, name);
6617 }
6618 
6619 static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
6620 {
6621 	destroy_umrc_res(dev);
6622 }
6623 
6624 static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
6625 {
6626 	ib_unregister_device(&dev->ib_dev);
6627 }
6628 
6629 static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
6630 {
6631 	return create_umr_res(dev);
6632 }
6633 
6634 static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
6635 {
6636 	init_delay_drop(dev);
6637 
6638 	return 0;
6639 }
6640 
6641 static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev)
6642 {
6643 	cancel_delay_drop(dev);
6644 }
6645 
6646 static int mlx5_ib_stage_dev_notifier_init(struct mlx5_ib_dev *dev)
6647 {
6648 	dev->mdev_events.notifier_call = mlx5_ib_event;
6649 	mlx5_notifier_register(dev->mdev, &dev->mdev_events);
6650 	return 0;
6651 }
6652 
6653 static void mlx5_ib_stage_dev_notifier_cleanup(struct mlx5_ib_dev *dev)
6654 {
6655 	mlx5_notifier_unregister(dev->mdev, &dev->mdev_events);
6656 }
6657 
6658 static int mlx5_ib_stage_devx_init(struct mlx5_ib_dev *dev)
6659 {
6660 	int uid;
6661 
6662 	uid = mlx5_ib_devx_create(dev, false);
6663 	if (uid > 0) {
6664 		dev->devx_whitelist_uid = uid;
6665 		mlx5_ib_devx_init_event_table(dev);
6666 	}
6667 
6668 	return 0;
6669 }
6670 static void mlx5_ib_stage_devx_cleanup(struct mlx5_ib_dev *dev)
6671 {
6672 	if (dev->devx_whitelist_uid) {
6673 		mlx5_ib_devx_cleanup_event_table(dev);
6674 		mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid);
6675 	}
6676 }
6677 
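/*
 * Device setup is expressed as an ordered list of stages (see pf_profile
 * and uplink_rep_profile below).  __mlx5_ib_add() runs every stage's init
 * callback in order and, on failure, unwinds the stages that already
 * completed; __mlx5_ib_remove() performs the same unwind for a fully
 * initialized device.  Either callback may be left NULL, e.g.:
 *
 *	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
 *		     mlx5_ib_stage_caps_init,
 *		     NULL),
 */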
6678 void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
6679 		      const struct mlx5_ib_profile *profile,
6680 		      int stage)
6681 {
6682 	/* Number of stages to clean up */
6683 	while (stage) {
6684 		stage--;
6685 		if (profile->stage[stage].cleanup)
6686 			profile->stage[stage].cleanup(dev);
6687 	}
6688 
6689 	kfree(dev->port);
6690 	ib_dealloc_device(&dev->ib_dev);
6691 }
6692 
6693 void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
6694 		    const struct mlx5_ib_profile *profile)
6695 {
6696 	int err;
6697 	int i;
6698 
6699 	for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
6700 		if (profile->stage[i].init) {
6701 			err = profile->stage[i].init(dev);
6702 			if (err)
6703 				goto err_out;
6704 		}
6705 	}
6706 
6707 	dev->profile = profile;
6708 	dev->ib_active = true;
6709 
6710 	return dev;
6711 
6712 err_out:
6713 	__mlx5_ib_remove(dev, profile, i);
6714 
6715 	return NULL;
6716 }
6717 
6718 static const struct mlx5_ib_profile pf_profile = {
6719 	STAGE_CREATE(MLX5_IB_STAGE_INIT,
6720 		     mlx5_ib_stage_init_init,
6721 		     mlx5_ib_stage_init_cleanup),
6722 	STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
6723 		     mlx5_ib_stage_flow_db_init,
6724 		     mlx5_ib_stage_flow_db_cleanup),
6725 	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
6726 		     mlx5_ib_stage_caps_init,
6727 		     NULL),
6728 	STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
6729 		     mlx5_ib_stage_non_default_cb,
6730 		     NULL),
6731 	STAGE_CREATE(MLX5_IB_STAGE_ROCE,
6732 		     mlx5_ib_stage_roce_init,
6733 		     mlx5_ib_stage_roce_cleanup),
6734 	STAGE_CREATE(MLX5_IB_STAGE_SRQ,
6735 		     mlx5_init_srq_table,
6736 		     mlx5_cleanup_srq_table),
6737 	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
6738 		     mlx5_ib_stage_dev_res_init,
6739 		     mlx5_ib_stage_dev_res_cleanup),
6740 	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
6741 		     mlx5_ib_stage_dev_notifier_init,
6742 		     mlx5_ib_stage_dev_notifier_cleanup),
6743 	STAGE_CREATE(MLX5_IB_STAGE_ODP,
6744 		     mlx5_ib_stage_odp_init,
6745 		     mlx5_ib_stage_odp_cleanup),
6746 	STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
6747 		     mlx5_ib_stage_counters_init,
6748 		     mlx5_ib_stage_counters_cleanup),
6749 	STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
6750 		     mlx5_ib_stage_cong_debugfs_init,
6751 		     mlx5_ib_stage_cong_debugfs_cleanup),
6752 	STAGE_CREATE(MLX5_IB_STAGE_UAR,
6753 		     mlx5_ib_stage_uar_init,
6754 		     mlx5_ib_stage_uar_cleanup),
6755 	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
6756 		     mlx5_ib_stage_bfrag_init,
6757 		     mlx5_ib_stage_bfrag_cleanup),
6758 	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
6759 		     NULL,
6760 		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
6761 	STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
6762 		     mlx5_ib_stage_devx_init,
6763 		     mlx5_ib_stage_devx_cleanup),
6764 	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
6765 		     mlx5_ib_stage_ib_reg_init,
6766 		     mlx5_ib_stage_ib_reg_cleanup),
6767 	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
6768 		     mlx5_ib_stage_post_ib_reg_umr_init,
6769 		     NULL),
6770 	STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
6771 		     mlx5_ib_stage_delay_drop_init,
6772 		     mlx5_ib_stage_delay_drop_cleanup),
6773 };
6774 
6775 const struct mlx5_ib_profile uplink_rep_profile = {
6776 	STAGE_CREATE(MLX5_IB_STAGE_INIT,
6777 		     mlx5_ib_stage_init_init,
6778 		     mlx5_ib_stage_init_cleanup),
6779 	STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
6780 		     mlx5_ib_stage_flow_db_init,
6781 		     mlx5_ib_stage_flow_db_cleanup),
6782 	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
6783 		     mlx5_ib_stage_caps_init,
6784 		     NULL),
6785 	STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
6786 		     mlx5_ib_stage_rep_non_default_cb,
6787 		     NULL),
6788 	STAGE_CREATE(MLX5_IB_STAGE_ROCE,
6789 		     mlx5_ib_stage_rep_roce_init,
6790 		     mlx5_ib_stage_rep_roce_cleanup),
6791 	STAGE_CREATE(MLX5_IB_STAGE_SRQ,
6792 		     mlx5_init_srq_table,
6793 		     mlx5_cleanup_srq_table),
6794 	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
6795 		     mlx5_ib_stage_dev_res_init,
6796 		     mlx5_ib_stage_dev_res_cleanup),
6797 	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
6798 		     mlx5_ib_stage_dev_notifier_init,
6799 		     mlx5_ib_stage_dev_notifier_cleanup),
6800 	STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
6801 		     mlx5_ib_stage_counters_init,
6802 		     mlx5_ib_stage_counters_cleanup),
6803 	STAGE_CREATE(MLX5_IB_STAGE_UAR,
6804 		     mlx5_ib_stage_uar_init,
6805 		     mlx5_ib_stage_uar_cleanup),
6806 	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
6807 		     mlx5_ib_stage_bfrag_init,
6808 		     mlx5_ib_stage_bfrag_cleanup),
6809 	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
6810 		     NULL,
6811 		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
6812 	STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
6813 		     mlx5_ib_stage_devx_init,
6814 		     mlx5_ib_stage_devx_cleanup),
6815 	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
6816 		     mlx5_ib_stage_ib_reg_init,
6817 		     mlx5_ib_stage_ib_reg_cleanup),
6818 	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
6819 		     mlx5_ib_stage_post_ib_reg_umr_init,
6820 		     NULL),
6821 };
6822 
6823 static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev)
6824 {
6825 	struct mlx5_ib_multiport_info *mpi;
6826 	struct mlx5_ib_dev *dev;
6827 	bool bound = false;
6828 	int err;
6829 
6830 	mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
6831 	if (!mpi)
6832 		return NULL;
6833 
6834 	mpi->mdev = mdev;
6835 
6836 	err = mlx5_query_nic_vport_system_image_guid(mdev,
6837 						     &mpi->sys_image_guid);
6838 	if (err) {
6839 		kfree(mpi);
6840 		return NULL;
6841 	}
6842 
6843 	mutex_lock(&mlx5_ib_multiport_mutex);
6844 	list_for_each_entry(dev, &mlx5_ib_dev_list, ib_dev_list) {
6845 		if (dev->sys_image_guid == mpi->sys_image_guid)
6846 			bound = mlx5_ib_bind_slave_port(dev, mpi);
6847 
6848 		if (bound) {
6849 			rdma_roce_rescan_device(&dev->ib_dev);
6850 			break;
6851 		}
6852 	}
6853 
6854 	if (!bound) {
6855 		list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
6856 		dev_dbg(mdev->device,
6857 			"no suitable IB device found to bind to, added to unaffiliated list.\n");
6858 	}
6859 	mutex_unlock(&mlx5_ib_multiport_mutex);
6860 
6861 	return mpi;
6862 }
6863 
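/*
 * Entry point from the mlx5 core interface.  Depending on the function we
 * either register the switchdev vport representors (eswitch offloads
 * mode), attach as a multiport slave to an existing IB device, or allocate
 * a full mlx5_ib_dev and run it through pf_profile.  Whatever is returned
 * here comes back as 'context' in mlx5_ib_remove().
 */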
6864 static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
6865 {
6866 	enum rdma_link_layer ll;
6867 	struct mlx5_ib_dev *dev;
6868 	int port_type_cap;
6869 	int num_ports;
6870 
6871 	printk_once(KERN_INFO "%s", mlx5_version);
6872 
6873 	if (MLX5_ESWITCH_MANAGER(mdev) &&
6874 	    mlx5_ib_eswitch_mode(mdev->priv.eswitch) == MLX5_ESWITCH_OFFLOADS) {
6875 		if (!mlx5_core_mp_enabled(mdev))
6876 			mlx5_ib_register_vport_reps(mdev);
6877 		return mdev;
6878 	}
6879 
6880 	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
6881 	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
6882 
6883 	if (mlx5_core_is_mp_slave(mdev) && ll == IB_LINK_LAYER_ETHERNET)
6884 		return mlx5_ib_add_slave_port(mdev);
6885 
6886 	num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
6887 			MLX5_CAP_GEN(mdev, num_vhca_ports));
6888 	dev = ib_alloc_device(mlx5_ib_dev, ib_dev);
6889 	if (!dev)
6890 		return NULL;
6891 	dev->port = kcalloc(num_ports, sizeof(*dev->port),
6892 			     GFP_KERNEL);
6893 	if (!dev->port) {
6894 		ib_dealloc_device(&dev->ib_dev);
6895 		return NULL;
6896 	}
6897 
6898 	dev->mdev = mdev;
6899 	dev->num_ports = num_ports;
6900 
6901 	return __mlx5_ib_add(dev, &pf_profile);
6902 }
6903 
6904 static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
6905 {
6906 	struct mlx5_ib_multiport_info *mpi;
6907 	struct mlx5_ib_dev *dev;
6908 
6909 	if (MLX5_ESWITCH_MANAGER(mdev) && context == mdev) {
6910 		mlx5_ib_unregister_vport_reps(mdev);
6911 		return;
6912 	}
6913 
6914 	if (mlx5_core_is_mp_slave(mdev)) {
6915 		mpi = context;
6916 		mutex_lock(&mlx5_ib_multiport_mutex);
6917 		if (mpi->ibdev)
6918 			mlx5_ib_unbind_slave_port(mpi->ibdev, mpi);
6919 		list_del(&mpi->list);
6920 		mutex_unlock(&mlx5_ib_multiport_mutex);
6921 		return;
6922 	}
6923 
6924 	dev = context;
6925 	__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
6926 }
6927 
6928 static struct mlx5_interface mlx5_ib_interface = {
6929 	.add            = mlx5_ib_add,
6930 	.remove         = mlx5_ib_remove,
6931 	.protocol	= MLX5_INTERFACE_PROTOCOL_IB,
6932 };
6933 
6934 unsigned long mlx5_ib_get_xlt_emergency_page(void)
6935 {
6936 	mutex_lock(&xlt_emergency_page_mutex);
6937 	return xlt_emergency_page;
6938 }
6939 
6940 void mlx5_ib_put_xlt_emergency_page(void)
6941 {
6942 	mutex_unlock(&xlt_emergency_page_mutex);
6943 }
6944 
6945 static int __init mlx5_ib_init(void)
6946 {
6947 	int err;
6948 
6949 	xlt_emergency_page = __get_free_page(GFP_KERNEL);
6950 	if (!xlt_emergency_page)
6951 		return -ENOMEM;
6952 
6953 	mutex_init(&xlt_emergency_page_mutex);
6954 
6955 	mlx5_ib_event_wq = alloc_ordered_workqueue("mlx5_ib_event_wq", 0);
6956 	if (!mlx5_ib_event_wq) {
6957 		free_page(xlt_emergency_page);
6958 		return -ENOMEM;
6959 	}
6960 
6961 	mlx5_ib_odp_init();
6962 
6963 	err = mlx5_register_interface(&mlx5_ib_interface);
6964 
6965 	return err;
6966 }
6967 
6968 static void __exit mlx5_ib_cleanup(void)
6969 {
6970 	mlx5_unregister_interface(&mlx5_ib_interface);
6971 	destroy_workqueue(mlx5_ib_event_wq);
6972 	mutex_destroy(&xlt_emergency_page_mutex);
6973 	free_page(xlt_emergency_page);
6974 }
6975 
6976 module_init(mlx5_ib_init);
6977 module_exit(mlx5_ib_cleanup);
6978