/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/export.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"

#define MLX4_MAC_VALID		(1ull << 63)

#define MLX4_VLAN_VALID		(1u << 31)
#define MLX4_VLAN_MASK		0xfff

#define MLX4_STATS_TRAFFIC_COUNTERS_MASK	0xfULL
#define MLX4_STATS_TRAFFIC_DROPS_MASK		0xc0ULL
#define MLX4_STATS_ERROR_COUNTERS_MASK		0x1ffc30ULL
#define MLX4_STATS_PORT_COUNTERS_MASK		0x1fe00000ULL

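/*
 * The driver keeps a reference-counted shadow copy of each port's MAC and
 * VLAN tables (struct mlx4_mac_table / struct mlx4_vlan_table in mlx4.h).
 * The registration helpers below update the shadow copy under table->mutex
 * and then push the whole table to firmware with a SET_PORT command.
 */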
void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
{
	int i;

	mutex_init(&table->mutex);
	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		table->entries[i] = 0;
		table->refs[i] = 0;
	}
	table->max   = 1 << dev->caps.log_num_macs;
	table->total = 0;
}

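/*
 * The first MLX4_VLAN_REGULAR entries of the VLAN table are reserved for
 * special indexes (e.g. MLX4_NO_VLAN_IDX and MLX4_VLAN_MISS_IDX used by
 * mlx4_SET_PORT_qpn_calc() below), so they are excluded from table->max
 * and skipped by __mlx4_register_vlan().
 */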
void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
{
	int i;

	mutex_init(&table->mutex);
	for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
		table->entries[i] = 0;
		table->refs[i] = 0;
	}
	table->max   = (1 << dev->caps.log_num_vlans) - MLX4_VLAN_REGULAR;
	table->total = 0;
}

void mlx4_init_roce_gid_table(struct mlx4_dev *dev,
			      struct mlx4_roce_gid_table *table)
{
	int i;

	mutex_init(&table->mutex);
	for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++)
		memset(table->roce_gids[i].raw, 0, MLX4_ROCE_GID_ENTRY_SIZE);
}

static int validate_index(struct mlx4_dev *dev,
			  struct mlx4_mac_table *table, int index)
{
	int err = 0;

	if (index < 0 || index >= table->max || !table->entries[index]) {
		mlx4_warn(dev, "No valid MAC entry for the given index\n");
		err = -EINVAL;
	}
	return err;
}

static int find_index(struct mlx4_dev *dev,
		      struct mlx4_mac_table *table, u64 mac)
{
	int i;

	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if ((mac & MLX4_MAC_MASK) ==
		    (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
			return i;
	}
	/* MAC not found */
	return -EINVAL;
}

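/*
 * Push the whole shadow MAC table to firmware.  The SET_PORT input
 * modifier encodes the table selector in bits 15:8 and the port number
 * in bits 7:0.
 */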
static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
				   __be64 *entries)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_mod;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);

	in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;

	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_table *table = &info->mac_table;
	int i;

	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if (!table->refs[i])
			continue;

		if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
			*idx = i;
			return 0;
		}
	}

	return -ENOENT;
}
EXPORT_SYMBOL_GPL(mlx4_find_cached_mac);

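/*
 * Low-level (non-wrapped) MAC registration: returns the MAC table index
 * (>= 0) on success or a negative errno.  If the MAC is already present,
 * its reference count is incremented and the existing index is returned.
 */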
int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_table *table = &info->mac_table;
	int i, err = 0;
	int free = -1;

	mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d\n",
		 (unsigned long long) mac, port);

	mutex_lock(&table->mutex);
	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if (free < 0 && !table->entries[i]) {
			free = i;
			continue;
		}

		if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
			/* MAC already registered, increment ref count */
			err = i;
			++table->refs[i];
			goto out;
		}
	}

	mlx4_dbg(dev, "Free MAC index is %d\n", free);

	if (table->total == table->max) {
		/* No free mac entries */
		err = -ENOSPC;
		goto out;
	}

	/* Register new MAC */
	table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);

	err = mlx4_set_port_mac_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
			 (unsigned long long) mac);
		table->entries[free] = 0;
		goto out;
	}
	table->refs[free] = 1;
	err = free;
	++table->total;
out:
	mutex_unlock(&table->mutex);
	return err;
}
EXPORT_SYMBOL_GPL(__mlx4_register_mac);

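/*
 * Multi-function devices register MACs through the resource-tracker
 * wrapper (ALLOC_RES/RES_MAC).  The newer command format carries the port
 * in the upper byte of the input modifier; if the master rejects it with
 * -EINVAL, the slave retries with the old REG_MAC format and remembers
 * that choice via MLX4_FLAG_OLD_REG_MAC.
 */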
int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	u64 out_param = 0;
	int err = -EINVAL;

	if (mlx4_is_mfunc(dev)) {
		if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
			err = mlx4_cmd_imm(dev, mac, &out_param,
					   ((u32) port) << 8 | (u32) RES_MAC,
					   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
					   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		}
		if (err && err == -EINVAL && mlx4_is_slave(dev)) {
			/* retry using old REG_MAC format */
			set_param_l(&out_param, port);
			err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
					   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
					   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
			if (!err)
				dev->flags |= MLX4_FLAG_OLD_REG_MAC;
		}
		if (err)
			return err;

		return get_param_l(&out_param);
	}
	return __mlx4_register_mac(dev, port, mac);
}
EXPORT_SYMBOL_GPL(mlx4_register_mac);

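/*
 * Each port owns a contiguous block of QPs in the ETH_ADDR reserved-QP
 * region, one QP per possible MAC index (2^log_num_macs), hence the
 * per-port offset below.
 */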
int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port)
{
	return dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
			(port - 1) * (1 << dev->caps.log_num_macs);
}
EXPORT_SYMBOL_GPL(mlx4_get_base_qpn);

void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	struct mlx4_port_info *info;
	struct mlx4_mac_table *table;
	int index;

	if (port < 1 || port > dev->caps.num_ports) {
		mlx4_warn(dev, "invalid port number (%d), aborting...\n", port);
		return;
	}
	info = &mlx4_priv(dev)->port[port];
	table = &info->mac_table;
	mutex_lock(&table->mutex);
	index = find_index(dev, table, mac);

	if (validate_index(dev, table, index))
		goto out;
	if (--table->refs[index]) {
		mlx4_dbg(dev, "Have more references for index %d, no need to modify mac table\n",
			 index);
		goto out;
	}

	table->entries[index] = 0;
	mlx4_set_port_mac_table(dev, port, table->entries);
	--table->total;
out:
	mutex_unlock(&table->mutex);
}
EXPORT_SYMBOL_GPL(__mlx4_unregister_mac);

void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	u64 out_param = 0;

	if (mlx4_is_mfunc(dev)) {
		if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
			(void) mlx4_cmd_imm(dev, mac, &out_param,
					    ((u32) port) << 8 | (u32) RES_MAC,
					    RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
					    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		} else {
			/* use old unregister mac format */
			set_param_l(&out_param, port);
			(void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
					    RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
					    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		}
		return;
	}
	__mlx4_unregister_mac(dev, port, mac);
}
EXPORT_SYMBOL_GPL(mlx4_unregister_mac);

int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_table *table = &info->mac_table;
	int index = qpn - info->base_qpn;
	int err = 0;

	/* CX1 (ConnectX-1) doesn't support multi-function */
	mutex_lock(&table->mutex);

	err = validate_index(dev, table, index);
	if (err)
		goto out;

	table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);

	err = mlx4_set_port_mac_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
			 (unsigned long long) new_mac);
		table->entries[index] = 0;
	}
out:
	mutex_unlock(&table->mutex);
	return err;
}
EXPORT_SYMBOL_GPL(__mlx4_replace_mac);

static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
				    __be32 *entries)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_mod;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
	in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}

int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
{
	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
	int i;

	for (i = 0; i < MLX4_MAX_VLAN_NUM; ++i) {
		if (table->refs[i] &&
		    (vid == (MLX4_VLAN_MASK &
			      be32_to_cpu(table->entries[i])))) {
			/* VLAN found in the shadow table, return its index */
			*idx = i;
			return 0;
		}
	}

	return -ENOENT;
}
EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);

int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
				int *index)
{
	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
	int i, err = 0;
	int free = -1;

	mutex_lock(&table->mutex);

	if (table->total == table->max) {
		/* No free vlan entries */
		err = -ENOSPC;
		goto out;
	}

	for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
		if (free < 0 && (table->refs[i] == 0)) {
			free = i;
			continue;
		}

		if (table->refs[i] &&
		    (vlan == (MLX4_VLAN_MASK &
			      be32_to_cpu(table->entries[i])))) {
			/* VLAN already registered, increment reference count */
			*index = i;
			++table->refs[i];
			goto out;
		}
	}

	if (free < 0) {
		err = -ENOMEM;
		goto out;
	}

	/* Register new VLAN */
	table->refs[free] = 1;
	table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);

	err = mlx4_set_port_vlan_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_warn(dev, "Failed adding vlan: %u\n", vlan);
		table->refs[free] = 0;
		table->entries[free] = 0;
		goto out;
	}

	*index = free;
	++table->total;
out:
	mutex_unlock(&table->mutex);
	return err;
}

int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
{
	u64 out_param = 0;
	int err;

	if (vlan > 4095)
		return -EINVAL;

	if (mlx4_is_mfunc(dev)) {
		err = mlx4_cmd_imm(dev, vlan, &out_param,
				   ((u32) port) << 8 | (u32) RES_VLAN,
				   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (!err)
			*index = get_param_l(&out_param);

		return err;
	}
	return __mlx4_register_vlan(dev, port, vlan, index);
}
EXPORT_SYMBOL_GPL(mlx4_register_vlan);

void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
{
	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
	int index;

	mutex_lock(&table->mutex);
	if (mlx4_find_cached_vlan(dev, port, vlan, &index)) {
		mlx4_warn(dev, "vlan 0x%x is not in the vlan table\n", vlan);
		goto out;
	}

	if (index < MLX4_VLAN_REGULAR) {
		mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
		goto out;
	}

	if (--table->refs[index]) {
		mlx4_dbg(dev, "Have %d more references for index %d, no need to modify vlan table\n",
			 table->refs[index], index);
		goto out;
	}
	table->entries[index] = 0;
	mlx4_set_port_vlan_table(dev, port, table->entries);
	--table->total;
out:
	mutex_unlock(&table->mutex);
}

void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
{
	u64 out_param = 0;

	if (mlx4_is_mfunc(dev)) {
		(void) mlx4_cmd_imm(dev, vlan, &out_param,
				    ((u32) port) << 8 | (u32) RES_VLAN,
				    RES_OP_RESERVE_AND_MAP,
				    MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
				    MLX4_CMD_WRAPPED);
		return;
	}
	__mlx4_unregister_vlan(dev, port, vlan);
}
EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);

int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
{
	struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
	u8 *inbuf, *outbuf;
	int err;

	inmailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(inmailbox))
		return PTR_ERR(inmailbox);

	outmailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(outmailbox)) {
		mlx4_free_cmd_mailbox(dev, inmailbox);
		return PTR_ERR(outmailbox);
	}

	inbuf = inmailbox->buf;
	outbuf = outmailbox->buf;
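
	/*
	 * Hand-build an IB management datagram (MAD): bytes 0-3 are the base
	 * version, management class, class version and method, all 1 here
	 * (a subnet-management Get); the 16-bit attribute ID at offset 16 is
	 * 0x0015 (PortInfo) and the attribute modifier at offset 20 is the
	 * port number.  On success the capability mask is read back from
	 * offset 84 of the response.
	 */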
	inbuf[0] = 1;
	inbuf[1] = 1;
	inbuf[2] = 1;
	inbuf[3] = 1;
	*(__be16 *) (&inbuf[16]) = cpu_to_be16(0x0015);
	*(__be32 *) (&inbuf[20]) = cpu_to_be32(port);

	err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);
	if (!err)
		*caps = *(__be32 *) (outbuf + 84);
	mlx4_free_cmd_mailbox(dev, inmailbox);
	mlx4_free_cmd_mailbox(dev, outmailbox);
	return err;
}
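
/* All-zero GID entry, used below as a "free slot" sentinel for memcmp(). */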
static struct mlx4_roce_gid_entry zgid_entry;

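/*
 * RoCE GID table layout: the first MLX4_ROCE_PF_GIDS entries belong to the
 * PF; the remaining entries are split as evenly as possible between the VFs
 * active on the port, with the first (remainder) VFs getting one extra GID
 * each.  mlx4_get_slave_num_gids() and mlx4_get_base_gid_ix() implement
 * that split; slave_gid is first converted from a global slave number to a
 * per-port VF ordinal.
 */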
int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port)
{
	int vfs;
	int slave_gid = slave;
	unsigned i;
	struct mlx4_slaves_pport slaves_pport;
	struct mlx4_active_ports actv_ports;
	unsigned max_port_p_one;

	if (slave == 0)
		return MLX4_ROCE_PF_GIDS;

	/* Slave is a VF */
	slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
	actv_ports = mlx4_get_active_ports(dev, slave);
	max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) +
		bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1;

	for (i = 1; i < max_port_p_one; i++) {
		struct mlx4_active_ports exclusive_ports;
		struct mlx4_slaves_pport slaves_pport_actv;
		bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
		set_bit(i - 1, exclusive_ports.ports);
		if (i == port)
			continue;
		slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
				    dev, &exclusive_ports);
		slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
					   dev->num_vfs + 1);
	}
	vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1;
	if (slave_gid <= ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) % vfs))
		return ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs) + 1;
	return (MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs;
}

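/* First GID table index owned by @slave on @port (see layout note above). */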
int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port)
{
	int gids;
	unsigned i;
	int slave_gid = slave;
	int vfs;

	struct mlx4_slaves_pport slaves_pport;
	struct mlx4_active_ports actv_ports;
	unsigned max_port_p_one;

	if (slave == 0)
		return 0;

	slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
	actv_ports = mlx4_get_active_ports(dev, slave);
	max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) +
		bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1;

	for (i = 1; i < max_port_p_one; i++) {
		struct mlx4_active_ports exclusive_ports;
		struct mlx4_slaves_pport slaves_pport_actv;
		bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
		set_bit(i - 1, exclusive_ports.ports);
		if (i == port)
			continue;
		slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
				    dev, &exclusive_ports);
		slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
					   dev->num_vfs + 1);
	}
	gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
	vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1;
	if (slave_gid <= gids % vfs)
		return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave_gid - 1);

	return MLX4_ROCE_PF_GIDS + (gids % vfs) +
		((gids / vfs) * (slave_gid - 1));
}
EXPORT_SYMBOL_GPL(mlx4_get_base_gid_ix);

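/*
 * Clear the GID range owned by @slave in the port's shadow GID table and
 * push the updated table to firmware via SET_PORT/MLX4_SET_PORT_GID_TABLE.
 */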
static int mlx4_reset_roce_port_gids(struct mlx4_dev *dev, int slave,
				     int port, struct mlx4_cmd_mailbox *mailbox)
{
	struct mlx4_roce_gid_entry *gid_entry_mbox;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int num_gids, base, offset;
	int i, err;

	num_gids = mlx4_get_slave_num_gids(dev, slave, port);
	base = mlx4_get_base_gid_ix(dev, slave, port);

	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);

	mutex_lock(&(priv->port[port].gid_table.mutex));
	/* Zero-out gids belonging to that slave in the port GID table */
	for (i = 0, offset = base; i < num_gids; offset++, i++)
		memcpy(priv->port[port].gid_table.roce_gids[offset].raw,
		       zgid_entry.raw, MLX4_ROCE_GID_ENTRY_SIZE);

	/* Now, copy roce port gids table to mailbox for passing to FW */
	gid_entry_mbox = (struct mlx4_roce_gid_entry *)mailbox->buf;
	for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
		memcpy(gid_entry_mbox->raw,
		       priv->port[port].gid_table.roce_gids[i].raw,
		       MLX4_ROCE_GID_ENTRY_SIZE);

	err = mlx4_cmd(dev, mailbox->dma,
		       ((u32)port) | (MLX4_SET_PORT_GID_TABLE << 8), 1,
		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_NATIVE);
	mutex_unlock(&(priv->port[port].gid_table.mutex));
	return err;
}

void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave)
{
	struct mlx4_active_ports actv_ports;
	struct mlx4_cmd_mailbox *mailbox;
	int num_eth_ports, err;
	int i;

	if (slave < 0 || slave > dev->num_vfs)
		return;

	actv_ports = mlx4_get_active_ports(dev, slave);

	for (i = 0, num_eth_ports = 0; i < dev->caps.num_ports; i++) {
		if (test_bit(i, actv_ports.ports)) {
			if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH)
				continue;
			num_eth_ports++;
		}
	}

	if (!num_eth_ports)
		return;

	/* have ETH ports.  Alloc mailbox for SET_PORT command */
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return;

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (test_bit(i, actv_ports.ports)) {
			if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH)
				continue;
			err = mlx4_reset_roce_port_gids(dev, slave, i + 1, mailbox);
			if (err)
				mlx4_warn(dev, "Could not reset ETH port GID table for slave %d, port %d (%d)\n",
					  slave, i + 1, err);
		}
	}

	mlx4_free_cmd_mailbox(dev, mailbox);
}

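/*
 * Common SET_PORT handling for the PF and for wrapped slave commands.
 * For Ethernet ports, slaves other than the PF may only issue the GENERAL
 * (MTU) and GID_TABLE modifiers: MTU requests are clamped and folded into
 * the port-wide maximum, and GID_TABLE requests are checked for duplicates
 * before being merged into the port's shadow GID table.  For IB ports the
 * capability mask is replaced by the aggregate of all slaves' masks before
 * the command is forwarded to firmware.
 */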
static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
				u8 op_mod, struct mlx4_cmd_mailbox *inbox)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_port_info *port_info;
	struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
	struct mlx4_slave_state *slave_st = &master->slave_state[slave];
	struct mlx4_set_port_rqp_calc_context *qpn_context;
	struct mlx4_set_port_general_context *gen_context;
	struct mlx4_roce_gid_entry *gid_entry_tbl, *gid_entry_mbox, *gid_entry_mb1;
	int reset_qkey_viols;
	int port;
	int is_eth;
	int num_gids;
	int base;
	u32 in_modifier;
	u32 promisc;
	u16 mtu, prev_mtu;
	int err;
	int i, j;
	int offset;
	__be32 agg_cap_mask;
	__be32 slave_cap_mask;
	__be32 new_cap_mask;

	port = in_mod & 0xff;
	in_modifier = in_mod >> 8;
	is_eth = op_mod;
	port_info = &priv->port[port];

	/* Slaves cannot perform SET_PORT operations except changing MTU */
	if (is_eth) {
		if (slave != dev->caps.function &&
		    in_modifier != MLX4_SET_PORT_GENERAL &&
		    in_modifier != MLX4_SET_PORT_GID_TABLE) {
			mlx4_warn(dev, "denying SET_PORT for slave:%d\n",
					slave);
			return -EINVAL;
		}
		switch (in_modifier) {
		case MLX4_SET_PORT_RQP_CALC:
			qpn_context = inbox->buf;
			qpn_context->base_qpn =
				cpu_to_be32(port_info->base_qpn);
			qpn_context->n_mac = 0x7;
			promisc = be32_to_cpu(qpn_context->promisc) >>
				SET_PORT_PROMISC_SHIFT;
			qpn_context->promisc = cpu_to_be32(
				promisc << SET_PORT_PROMISC_SHIFT |
				port_info->base_qpn);
			promisc = be32_to_cpu(qpn_context->mcast) >>
				SET_PORT_MC_PROMISC_SHIFT;
			qpn_context->mcast = cpu_to_be32(
				promisc << SET_PORT_MC_PROMISC_SHIFT |
				port_info->base_qpn);
			break;
		case MLX4_SET_PORT_GENERAL:
			gen_context = inbox->buf;
			/* MTU is configured as the max MTU among all
			 * the functions on the port. */
			mtu = be16_to_cpu(gen_context->mtu);
			mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port] +
				    ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
			prev_mtu = slave_st->mtu[port];
			slave_st->mtu[port] = mtu;
			if (mtu > master->max_mtu[port])
				master->max_mtu[port] = mtu;
			if (mtu < prev_mtu && prev_mtu ==
						master->max_mtu[port]) {
				slave_st->mtu[port] = mtu;
				master->max_mtu[port] = mtu;
				for (i = 0; i < dev->num_slaves; i++) {
					master->max_mtu[port] =
					max(master->max_mtu[port],
					    master->slave_state[i].mtu[port]);
				}
			}

			gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
			break;
		case MLX4_SET_PORT_GID_TABLE:
			/* A guest may own multiple GID entries, so loop over
			 * the GIDs assigned to this guest.
			 * 1. Check that the slave passed no duplicate GIDs
			 */
			num_gids = mlx4_get_slave_num_gids(dev, slave, port);
			base = mlx4_get_base_gid_ix(dev, slave, port);
			gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
			for (i = 0; i < num_gids; gid_entry_mbox++, i++) {
				if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw,
					    sizeof(zgid_entry)))
					continue;
				gid_entry_mb1 = gid_entry_mbox + 1;
				for (j = i + 1; j < num_gids; gid_entry_mb1++, j++) {
					if (!memcmp(gid_entry_mb1->raw,
						    zgid_entry.raw, sizeof(zgid_entry)))
						continue;
					if (!memcmp(gid_entry_mb1->raw, gid_entry_mbox->raw,
						    sizeof(gid_entry_mbox->raw))) {
						/* found duplicate */
						return -EINVAL;
					}
				}
			}

			/* 2. Check that the new GIDs do not duplicate OTHER
			 *    entries already in the port GID table
			 */

			mutex_lock(&(priv->port[port].gid_table.mutex));
			for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
				if (i >= base && i < base + num_gids)
					continue; /* don't compare to slave's current gids */
				gid_entry_tbl = &priv->port[port].gid_table.roce_gids[i];
				if (!memcmp(gid_entry_tbl->raw, zgid_entry.raw, sizeof(zgid_entry)))
					continue;
				gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
				for (j = 0; j < num_gids; gid_entry_mbox++, j++) {
					if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw,
						    sizeof(zgid_entry)))
						continue;
					if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw,
						    sizeof(gid_entry_tbl->raw))) {
						/* found duplicate */
						mlx4_warn(dev, "requested gid entry for slave:%d is a duplicate of gid at index %d\n",
							  slave, i);
						mutex_unlock(&(priv->port[port].gid_table.mutex));
						return -EINVAL;
					}
				}
			}

			/* insert slave GIDs with memcpy, starting at slave's base index */
			gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
			for (i = 0, offset = base; i < num_gids; gid_entry_mbox++, offset++, i++)
				memcpy(priv->port[port].gid_table.roce_gids[offset].raw,
				       gid_entry_mbox->raw, MLX4_ROCE_GID_ENTRY_SIZE);

			/* Now, copy roce port gids table to current mailbox for passing to FW */
			gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
			for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
				memcpy(gid_entry_mbox->raw,
				       priv->port[port].gid_table.roce_gids[i].raw,
				       MLX4_ROCE_GID_ENTRY_SIZE);

			err = mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod,
				       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				       MLX4_CMD_NATIVE);
			mutex_unlock(&(priv->port[port].gid_table.mutex));
			return err;
		}

		return mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod,
				MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_NATIVE);
	}

	/* For IB, we only consider:
	 * - The capability mask, which is set to the aggregate of all
	 *   slave function capabilities
	 * - The QKey violation counter - reset according to each request.
	 */

	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		reset_qkey_viols = (*(u8 *) inbox->buf) & 0x40;
		new_cap_mask = ((__be32 *) inbox->buf)[2];
	} else {
		reset_qkey_viols = ((u8 *) inbox->buf)[3] & 0x1;
		new_cap_mask = ((__be32 *) inbox->buf)[1];
	}

	/* slave may not set the IS_SM capability for the port */
	if (slave != mlx4_master_func_num(dev) &&
	    (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_IS_SM))
		return -EINVAL;

	/* No DEV_MGMT in multifunc mode */
	if (mlx4_is_mfunc(dev) &&
	    (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_DEV_MGMT_SUP))
		return -EINVAL;

	agg_cap_mask = 0;
	slave_cap_mask =
		priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
	priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = new_cap_mask;
	for (i = 0; i < dev->num_slaves; i++)
		agg_cap_mask |=
			priv->mfunc.master.slave_state[i].ib_cap_mask[port];

	/* only clear mailbox for guests.  Master may be setting
	 * MTU or PKEY table size
	 */
	if (slave != dev->caps.function)
		memset(inbox->buf, 0, 256);
	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) inbox->buf	   |= !!reset_qkey_viols << 6;
		((__be32 *) inbox->buf)[2] = agg_cap_mask;
	} else {
		((u8 *) inbox->buf)[3]     |= !!reset_qkey_viols;
		((__be32 *) inbox->buf)[1] = agg_cap_mask;
	}

	err = mlx4_cmd(dev, inbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
	if (err)
		priv->mfunc.master.slave_state[slave].ib_cap_mask[port] =
			slave_cap_mask;
	return err;
}

int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int port = mlx4_slave_convert_port(
			dev, slave, vhcr->in_modifier & 0xFF);

	if (port < 0)
		return -EINVAL;

	vhcr->in_modifier = (vhcr->in_modifier & ~0xFF) |
			    (port & 0xFF);

	return mlx4_common_set_port(dev, slave, vhcr->in_modifier,
				    vhcr->op_modifier, inbox);
}

/* bit locations for set port command with zero op modifier */
enum {
	MLX4_SET_PORT_VL_CAP	 = 4, /* bits 7:4 */
	MLX4_SET_PORT_MTU_CAP	 = 12, /* bits 15:12 */
	MLX4_CHANGE_PORT_PKEY_TBL_SZ = 20,
	MLX4_CHANGE_PORT_VL_CAP	 = 21,
	MLX4_CHANGE_PORT_MTU_CAP = 22,
};

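/*
 * IB-port SET_PORT.  Firmware may reject a VL capability it cannot support
 * with -ENOMEM, so the command is retried with successively smaller vl_cap
 * values (8, 4, 2, 1) until it is accepted or fails with a different error.
 */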
int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err, vl_cap, pkey_tbl_flag = 0;

	if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
		return 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];

	if (pkey_tbl_sz >= 0 && mlx4_is_master(dev)) {
		pkey_tbl_flag = 1;
		((__be16 *) mailbox->buf)[20] = cpu_to_be16(pkey_tbl_sz);
	}

	/* IB VL CAP enum isn't used by the firmware, just numerical values */
	for (vl_cap = 8; vl_cap >= 1; vl_cap >>= 1) {
		((__be32 *) mailbox->buf)[0] = cpu_to_be32(
			(1 << MLX4_CHANGE_PORT_MTU_CAP) |
			(1 << MLX4_CHANGE_PORT_VL_CAP)  |
			(pkey_tbl_flag << MLX4_CHANGE_PORT_PKEY_TBL_SZ) |
			(dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) |
			(vl_cap << MLX4_SET_PORT_VL_CAP));
		err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
				MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
		if (err != -ENOMEM)
			break;
	}

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

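/*
 * Note on the pause settings below: bit 7 of pptx/pprx enables global
 * pause, but it is forced off whenever per-priority flow control
 * (pfctx/pfcrx) is requested, hence the (pptx * (!pfctx)) << 7 encoding.
 */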
int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
			  u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_port_general_context *context;
	int err;
	u32 in_mod;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	context = mailbox->buf;
	context->flags = SET_PORT_GEN_ALL_VALID;
	context->mtu = cpu_to_be16(mtu);
	context->pptx = (pptx * (!pfctx)) << 7;
	context->pfctx = pfctx;
	context->pprx = (pprx * (!pfcrx)) << 7;
	context->pfcrx = pfcrx;

	in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B,  MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_general);

int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
			   u8 promisc)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_port_rqp_calc_context *context;
	int err;
	u32 in_mod;
	u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
		MCAST_DIRECT : MCAST_DEFAULT;

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
		return 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	context = mailbox->buf;
	context->base_qpn = cpu_to_be32(base_qpn);
	context->n_mac = dev->caps.log_num_macs;
	context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
				       base_qpn);
	context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
				     base_qpn);
	context->intra_no_vlan = 0;
	context->no_vlan = MLX4_NO_VLAN_IDX;
	context->intra_vlan_miss = 0;
	context->vlan_miss = MLX4_VLAN_MISS_IDX;

	in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B,  MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);

int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_port_prio2tc_context *context;
	int err;
	u32 in_mod;
	int i;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	context = mailbox->buf;
	for (i = 0; i < MLX4_NUM_UP; i += 2)
		context->prio2tc[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1];

	in_mod = MLX4_SET_PORT_PRIO2TC << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_PRIO2TC);

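/*
 * Program the per-traffic-class transmit scheduler: bandwidth share,
 * priority group and rate limit for each TC.  Rate limits up to
 * MLX4_MAX_100M_UNITS_VAL are programmed in 100 Mbps units; larger values
 * are divided by 10 and programmed in 1 Gbps units.  (The bw_precentage
 * field name below is spelled that way in the hardware context struct.)
 */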
int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
		u8 *pg, u16 *ratelimit)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_port_scheduler_context *context;
	int err;
	u32 in_mod;
	int i;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	context = mailbox->buf;

	for (i = 0; i < MLX4_NUM_TC; i++) {
		struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i];
		u16 r;

		if (ratelimit && ratelimit[i]) {
			if (ratelimit[i] <= MLX4_MAX_100M_UNITS_VAL) {
				r = ratelimit[i];
				tc->max_bw_units =
					htons(MLX4_RATELIMIT_100M_UNITS);
			} else {
				r = ratelimit[i]/10;
				tc->max_bw_units =
					htons(MLX4_RATELIMIT_1G_UNITS);
			}
			tc->max_bw_value = htons(r);
		} else {
			tc->max_bw_value = htons(MLX4_RATELIMIT_DEFAULT);
			tc->max_bw_units = htons(MLX4_RATELIMIT_1G_UNITS);
		}

		tc->pg = htons(pg[i]);
		tc->bw_precentage = htons(tc_tx_bw[i]);
	}

	in_mod = MLX4_SET_PORT_SCHEDULER << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER);

enum {
	VXLAN_ENABLE_MODIFY	= 1 << 7,
	VXLAN_STEERING_MODIFY	= 1 << 6,

	VXLAN_ENABLE		= 1 << 7,
};

struct mlx4_set_port_vxlan_context {
	u32	reserved1;
	u8	modify_flags;
	u8	reserved2;
	u8	enable_flags;
	u8	steering;
};

int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable)
{
	int err;
	u32 in_mod;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_port_vxlan_context  *context;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	context = mailbox->buf;
	memset(context, 0, sizeof(*context));

	context->modify_flags = VXLAN_ENABLE_MODIFY | VXLAN_STEERING_MODIFY;
	if (enable)
		context->enable_flags = VXLAN_ENABLE;
	context->steering  = steering;

	in_mod = MLX4_SET_PORT_VXLAN << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_VXLAN);

int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	return 0;
}

int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
			u64 mac, u64 clear, u8 mode)
{
	return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
			MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B,
			MLX4_CMD_WRAPPED);
}
EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR);

int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	return 0;
}

int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave,
			       u32 in_mod, struct mlx4_cmd_mailbox *outbox)
{
	return mlx4_cmd_box(dev, 0, outbox->dma, in_mod, 0,
			    MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
			    MLX4_CMD_NATIVE);
}

int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	if (slave != dev->caps.function)
		return 0;
	return mlx4_common_dump_eth_stats(dev, slave,
					  vhcr->in_modifier, outbox);
}

void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap)
{
	if (!mlx4_is_mfunc(dev)) {
		*stats_bitmap = 0;
		return;
	}

	*stats_bitmap = (MLX4_STATS_TRAFFIC_COUNTERS_MASK |
			 MLX4_STATS_TRAFFIC_DROPS_MASK |
			 MLX4_STATS_PORT_COUNTERS_MASK);

	if (mlx4_is_master(dev))
		*stats_bitmap |= MLX4_STATS_ERROR_COUNTERS_MASK;
}
EXPORT_SYMBOL(mlx4_set_stats_bitmap);

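/*
 * Reverse lookup: map a GID found in the port's table back to the slave
 * that owns it, inverting the distribution implemented by
 * mlx4_get_slave_num_gids()/mlx4_get_base_gid_ix() above.
 */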
int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
				 int *slave_id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, found_ix = -1;
	int vf_gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
	struct mlx4_slaves_pport slaves_pport;
	unsigned num_vfs;
	int slave_gid;

	if (!mlx4_is_mfunc(dev))
		return -EINVAL;

	slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
	num_vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1;

	for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
		if (!memcmp(priv->port[port].gid_table.roce_gids[i].raw, gid,
			    MLX4_ROCE_GID_ENTRY_SIZE)) {
			found_ix = i;
			break;
		}
	}

	if (found_ix >= 0) {
		/* Calculate a slave_gid which is the slave number in the gid
		 * table and not a globally unique slave number.
		 */
		if (found_ix < MLX4_ROCE_PF_GIDS)
			slave_gid = 0;
		else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % num_vfs) *
			 (vf_gids / num_vfs + 1))
			slave_gid = ((found_ix - MLX4_ROCE_PF_GIDS) /
				     (vf_gids / num_vfs + 1)) + 1;
		else
			slave_gid =
			((found_ix - MLX4_ROCE_PF_GIDS -
			  ((vf_gids % num_vfs) * ((vf_gids / num_vfs + 1)))) /
			 (vf_gids / num_vfs)) + vf_gids % num_vfs + 1;

		/* Calculate the globally unique slave id */
		if (slave_gid) {
			struct mlx4_active_ports exclusive_ports;
			struct mlx4_active_ports actv_ports;
			struct mlx4_slaves_pport slaves_pport_actv;
			unsigned max_port_p_one;
			int num_vfs_before = 0;
			int candidate_slave_gid;

			/* Calculate how many VFs are on the previous port, if exists */
			for (i = 1; i < port; i++) {
				bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
				set_bit(i - 1, exclusive_ports.ports);
				slaves_pport_actv =
					mlx4_phys_to_slaves_pport_actv(
							dev, &exclusive_ports);
				num_vfs_before += bitmap_weight(
						slaves_pport_actv.slaves,
						dev->num_vfs + 1);
			}

			/* candidate_slave_gid isn't necessarily the correct slave, but
			 * it has the same number of ports and is assigned to the same
			 * ports as the real slave we're looking for. On dual port VF,
			 * slave_gid = [single port VFs on port <port>] +
			 * [offset of the current slave from the first dual port VF] +
			 * 1 (for the PF).
			 */
			candidate_slave_gid = slave_gid + num_vfs_before;

			actv_ports = mlx4_get_active_ports(dev, candidate_slave_gid);
			max_port_p_one = find_first_bit(
				actv_ports.ports, dev->caps.num_ports) +
				bitmap_weight(actv_ports.ports,
					      dev->caps.num_ports) + 1;

			/* Calculate the real slave number */
			for (i = 1; i < max_port_p_one; i++) {
				if (i == port)
					continue;
				bitmap_zero(exclusive_ports.ports,
					    dev->caps.num_ports);
				set_bit(i - 1, exclusive_ports.ports);
				slaves_pport_actv =
					mlx4_phys_to_slaves_pport_actv(
						dev, &exclusive_ports);
				slave_gid += bitmap_weight(
						slaves_pport_actv.slaves,
						dev->num_vfs + 1);
			}
		}
		*slave_id = slave_gid;
	}

	return (found_ix >= 0) ? 0 : -EINVAL;
}
EXPORT_SYMBOL(mlx4_get_slave_from_roce_gid);

int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id,
				 u8 *gid)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (!mlx4_is_master(dev))
		return -EINVAL;

	memcpy(gid, priv->port[port].gid_table.roce_gids[slave_id].raw,
	       MLX4_ROCE_GID_ENTRY_SIZE);
	return 0;
}
EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave);