1 /*
2  * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/errno.h>
34 #include <linux/if_ether.h>
35 #include <linux/if_vlan.h>
36 #include <linux/export.h>
37 
38 #include <linux/mlx4/cmd.h>
39 
40 #include "mlx4.h"
41 
42 #define MLX4_MAC_VALID		(1ull << 63)
43 
44 #define MLX4_VLAN_VALID		(1u << 31)
45 #define MLX4_VLAN_MASK		0xfff
46 
47 #define MLX4_STATS_TRAFFIC_COUNTERS_MASK	0xfULL
48 #define MLX4_STATS_TRAFFIC_DROPS_MASK		0xc0ULL
49 #define MLX4_STATS_ERROR_COUNTERS_MASK		0x1ffc30ULL
50 #define MLX4_STATS_PORT_COUNTERS_MASK		0x1fe00000ULL
51 
52 void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
53 {
54 	int i;
55 
56 	mutex_init(&table->mutex);
57 	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
58 		table->entries[i] = 0;
59 		table->refs[i]	 = 0;
60 	}
61 	table->max   = 1 << dev->caps.log_num_macs;
62 	table->total = 0;
63 }
64 
65 void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
66 {
67 	int i;
68 
69 	mutex_init(&table->mutex);
70 	for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
71 		table->entries[i] = 0;
72 		table->refs[i]	 = 0;
73 	}
74 	table->max   = (1 << dev->caps.log_num_vlans) - MLX4_VLAN_REGULAR;
75 	table->total = 0;
76 }
77 
78 void mlx4_init_roce_gid_table(struct mlx4_dev *dev,
79 			      struct mlx4_roce_gid_table *table)
80 {
81 	int i;
82 
83 	mutex_init(&table->mutex);
84 	for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++)
85 		memset(table->roce_gids[i].raw, 0, MLX4_ROCE_GID_ENTRY_SIZE);
86 }
87 
88 static int validate_index(struct mlx4_dev *dev,
89 			  struct mlx4_mac_table *table, int index)
90 {
91 	int err = 0;
92 
93 	if (index < 0 || index >= table->max || !table->entries[index]) {
94 		mlx4_warn(dev, "No valid MAC entry for the given index\n");
95 		err = -EINVAL;
96 	}
97 	return err;
98 }
99 
100 static int find_index(struct mlx4_dev *dev,
101 		      struct mlx4_mac_table *table, u64 mac)
102 {
103 	int i;
104 
105 	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
106 		if (table->refs[i] &&
107 		    (MLX4_MAC_MASK & mac) ==
108 		    (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
109 			return i;
110 	}
111 	/* MAC not found */
112 	return -EINVAL;
113 }
114 
115 static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
116 				   __be64 *entries)
117 {
118 	struct mlx4_cmd_mailbox *mailbox;
119 	u32 in_mod;
120 	int err;
121 
122 	mailbox = mlx4_alloc_cmd_mailbox(dev);
123 	if (IS_ERR(mailbox))
124 		return PTR_ERR(mailbox);
125 
126 	memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
127 
128 	in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
129 
130 	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
131 		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
132 
133 	mlx4_free_cmd_mailbox(dev, mailbox);
134 	return err;
135 }
136 
137 int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx)
138 {
139 	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
140 	struct mlx4_mac_table *table = &info->mac_table;
141 	int i;
142 
143 	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
144 		if (!table->refs[i])
145 			continue;
146 
147 		if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
148 			*idx = i;
149 			return 0;
150 		}
151 	}
152 
153 	return -ENOENT;
154 }
155 EXPORT_SYMBOL_GPL(mlx4_find_cached_mac);
156 
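/*
 * Register @mac in the per-port MAC table.  Entries are reference
 * counted: registering a MAC that is already present only bumps its
 * refcount.  On success the table index is returned; as can be seen in
 * __mlx4_replace_mac() below, index and QP number are related by
 * qpn = info->base_qpn + index.
 */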
157 int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
158 {
159 	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
160 	struct mlx4_mac_table *table = &info->mac_table;
161 	int i, err = 0;
162 	int free = -1;
163 
164 	mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d\n",
165 		 (unsigned long long) mac, port);
166 
167 	mutex_lock(&table->mutex);
168 	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
169 		if (!table->refs[i]) {
170 			if (free < 0)
171 				free = i;
172 			continue;
173 		}
174 
175 		if ((MLX4_MAC_MASK & mac) ==
176 		     (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
177 			/* MAC already registered, increment ref count */
178 			err = i;
179 			++table->refs[i];
180 			goto out;
181 		}
182 	}
183 
184 	mlx4_dbg(dev, "Free MAC index is %d\n", free);
185 
186 	if (table->total == table->max) {
187 		/* No free MAC entries */
188 		err = -ENOSPC;
189 		goto out;
190 	}
191 
192 	/* Register new MAC */
193 	table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);
194 
195 	err = mlx4_set_port_mac_table(dev, port, table->entries);
196 	if (unlikely(err)) {
197 		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
198 			 (unsigned long long) mac);
199 		table->entries[free] = 0;
200 		goto out;
201 	}
202 	table->refs[free] = 1;
203 	err = free;
204 	++table->total;
205 out:
206 	mutex_unlock(&table->mutex);
207 	return err;
208 }
209 EXPORT_SYMBOL_GPL(__mlx4_register_mac);
210 
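/*
 * Multi-function devices must not touch the MAC table directly; the MAC
 * is registered through a wrapped MLX4_CMD_ALLOC_RES(RES_MAC) command
 * handled by the PF, and the low dword of out_param carries the
 * allocated index back to the caller.  If the new format is rejected
 * with -EINVAL, a slave retries with the old REG_MAC format (port
 * passed through out_param rather than the in_modifier) and caches that
 * choice in MLX4_FLAG_OLD_REG_MAC.
 */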
211 int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
212 {
213 	u64 out_param = 0;
214 	int err = -EINVAL;
215 
216 	if (mlx4_is_mfunc(dev)) {
217 		if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
218 			err = mlx4_cmd_imm(dev, mac, &out_param,
219 					   ((u32) port) << 8 | (u32) RES_MAC,
220 					   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
221 					   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
222 		}
223 		if (err && err == -EINVAL && mlx4_is_slave(dev)) {
224 			/* retry using old REG_MAC format */
225 			set_param_l(&out_param, port);
226 			err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
227 					   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
228 					   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
229 			if (!err)
230 				dev->flags |= MLX4_FLAG_OLD_REG_MAC;
231 		}
232 		if (err)
233 			return err;
234 
235 		return get_param_l(&out_param);
236 	}
237 	return __mlx4_register_mac(dev, port, mac);
238 }
239 EXPORT_SYMBOL_GPL(mlx4_register_mac);
240 
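/*
 * Each port owns a contiguous block of 1 << log_num_macs QPs inside the
 * MLX4_QP_REGION_ETH_ADDR reserved-QP region; port N's block starts at
 * the region base plus (N - 1) block sizes.  Adding a MAC table index
 * to this base QPN yields the QP used for that MAC.
 */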
241 int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port)
242 {
243 	return dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
244 			(port - 1) * (1 << dev->caps.log_num_macs);
245 }
246 EXPORT_SYMBOL_GPL(mlx4_get_base_qpn);
247 
248 void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
249 {
250 	struct mlx4_port_info *info;
251 	struct mlx4_mac_table *table;
252 	int index;
253 
254 	if (port < 1 || port > dev->caps.num_ports) {
255 		mlx4_warn(dev, "invalid port number (%d), aborting...\n", port);
256 		return;
257 	}
258 	info = &mlx4_priv(dev)->port[port];
259 	table = &info->mac_table;
260 	mutex_lock(&table->mutex);
261 	index = find_index(dev, table, mac);
262 
263 	if (validate_index(dev, table, index))
264 		goto out;
265 	if (--table->refs[index]) {
266 		mlx4_dbg(dev, "Have more references for index %d, no need to modify mac table\n",
267 			 index);
268 		goto out;
269 	}
270 
271 	table->entries[index] = 0;
272 	mlx4_set_port_mac_table(dev, port, table->entries);
273 	--table->total;
274 out:
275 	mutex_unlock(&table->mutex);
276 }
277 EXPORT_SYMBOL_GPL(__mlx4_unregister_mac);
278 
279 void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
280 {
281 	u64 out_param = 0;
282 
283 	if (mlx4_is_mfunc(dev)) {
284 		if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
285 			(void) mlx4_cmd_imm(dev, mac, &out_param,
286 					    ((u32) port) << 8 | (u32) RES_MAC,
287 					    RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
288 					    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
289 		} else {
290 			/* use the old unregister MAC format */
291 			set_param_l(&out_param, port);
292 			(void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
293 					    RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
294 					    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
295 		}
296 		return;
297 	}
298 	__mlx4_unregister_mac(dev, port, mac);
299 	return;
300 }
301 EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
302 
303 int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
304 {
305 	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
306 	struct mlx4_mac_table *table = &info->mac_table;
307 	int index = qpn - info->base_qpn;
308 	int err = 0;
309 
310 	/* CX1 doesn't support multi-functions */
311 	mutex_lock(&table->mutex);
312 
313 	err = validate_index(dev, table, index);
314 	if (err)
315 		goto out;
316 
317 	table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);
318 
319 	err = mlx4_set_port_mac_table(dev, port, table->entries);
320 	if (unlikely(err)) {
321 		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
322 			 (unsigned long long) new_mac);
323 		table->entries[index] = 0;
324 	}
325 out:
326 	mutex_unlock(&table->mutex);
327 	return err;
328 }
329 EXPORT_SYMBOL_GPL(__mlx4_replace_mac);
330 
331 static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
332 				    __be32 *entries)
333 {
334 	struct mlx4_cmd_mailbox *mailbox;
335 	u32 in_mod;
336 	int err;
337 
338 	mailbox = mlx4_alloc_cmd_mailbox(dev);
339 	if (IS_ERR(mailbox))
340 		return PTR_ERR(mailbox);
341 
342 	memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
343 	in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
344 	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
345 		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
346 
347 	mlx4_free_cmd_mailbox(dev, mailbox);
348 
349 	return err;
350 }
351 
352 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
353 {
354 	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
355 	int i;
356 
357 	for (i = 0; i < MLX4_MAX_VLAN_NUM; ++i) {
358 		if (table->refs[i] &&
359 		    (vid == (MLX4_VLAN_MASK &
360 			      be32_to_cpu(table->entries[i])))) {
361 			/* VLAN found in the table, return its index */
362 			*idx = i;
363 			return 0;
364 		}
365 	}
366 
367 	return -ENOENT;
368 }
369 EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);
370 
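/*
 * VLAN table indices below MLX4_VLAN_REGULAR are reserved for special
 * entries (the no-VLAN and VLAN-miss indices programmed by
 * mlx4_SET_PORT_qpn_calc() below), so regular registrations start
 * searching at MLX4_VLAN_REGULAR and table->max already excludes the
 * reserved slots.
 */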
371 int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
372 				int *index)
373 {
374 	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
375 	int i, err = 0;
376 	int free = -1;
377 
378 	mutex_lock(&table->mutex);
379 
380 	if (table->total == table->max) {
381 		/* No free VLAN entries */
382 		err = -ENOSPC;
383 		goto out;
384 	}
385 
386 	for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
387 		if (free < 0 && (table->refs[i] == 0)) {
388 			free = i;
389 			continue;
390 		}
391 
392 		if (table->refs[i] &&
393 		    (vlan == (MLX4_VLAN_MASK &
394 			      be32_to_cpu(table->entries[i])))) {
395 			/* VLAN already registered, increase its reference count */
396 			*index = i;
397 			++table->refs[i];
398 			goto out;
399 		}
400 	}
401 
402 	if (free < 0) {
403 		err = -ENOMEM;
404 		goto out;
405 	}
406 
407 	/* Register new VLAN */
408 	table->refs[free] = 1;
409 	table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
410 
411 	err = mlx4_set_port_vlan_table(dev, port, table->entries);
412 	if (unlikely(err)) {
413 		mlx4_warn(dev, "Failed adding vlan: %u\n", vlan);
414 		table->refs[free] = 0;
415 		table->entries[free] = 0;
416 		goto out;
417 	}
418 
419 	*index = free;
420 	++table->total;
421 out:
422 	mutex_unlock(&table->mutex);
423 	return err;
424 }
425 
426 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
427 {
428 	u64 out_param = 0;
429 	int err;
430 
431 	if (vlan > 4095)
432 		return -EINVAL;
433 
434 	if (mlx4_is_mfunc(dev)) {
435 		err = mlx4_cmd_imm(dev, vlan, &out_param,
436 				   ((u32) port) << 8 | (u32) RES_VLAN,
437 				   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
438 				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
439 		if (!err)
440 			*index = get_param_l(&out_param);
441 
442 		return err;
443 	}
444 	return __mlx4_register_vlan(dev, port, vlan, index);
445 }
446 EXPORT_SYMBOL_GPL(mlx4_register_vlan);
447 
448 void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
449 {
450 	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
451 	int index;
452 
453 	mutex_lock(&table->mutex);
454 	if (mlx4_find_cached_vlan(dev, port, vlan, &index)) {
455 		mlx4_warn(dev, "vlan 0x%x is not in the vlan table\n", vlan);
456 		goto out;
457 	}
458 
459 	if (index < MLX4_VLAN_REGULAR) {
460 		mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
461 		goto out;
462 	}
463 
464 	if (--table->refs[index]) {
465 		mlx4_dbg(dev, "Have %d more references for index %d, no need to modify vlan table\n",
466 			 table->refs[index], index);
467 		goto out;
468 	}
469 	table->entries[index] = 0;
470 	mlx4_set_port_vlan_table(dev, port, table->entries);
471 	--table->total;
472 out:
473 	mutex_unlock(&table->mutex);
474 }
475 
476 void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
477 {
478 	u64 out_param = 0;
479 
480 	if (mlx4_is_mfunc(dev)) {
481 		(void) mlx4_cmd_imm(dev, vlan, &out_param,
482 				    ((u32) port) << 8 | (u32) RES_VLAN,
483 				    RES_OP_RESERVE_AND_MAP,
484 				    MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
485 				    MLX4_CMD_WRAPPED);
486 		return;
487 	}
488 	__mlx4_unregister_vlan(dev, port, vlan);
489 }
490 EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
491 
492 int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
493 {
494 	struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
495 	u8 *inbuf, *outbuf;
496 	int err;
497 
498 	inmailbox = mlx4_alloc_cmd_mailbox(dev);
499 	if (IS_ERR(inmailbox))
500 		return PTR_ERR(inmailbox);
501 
502 	outmailbox = mlx4_alloc_cmd_mailbox(dev);
503 	if (IS_ERR(outmailbox)) {
504 		mlx4_free_cmd_mailbox(dev, inmailbox);
505 		return PTR_ERR(outmailbox);
506 	}
507 
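	/*
	 * Build a minimal MAD_IFC SubnGet(PortInfo) request by hand:
	 * bytes 0-3 are base_version, mgmt_class, class_version and
	 * method (all 1), attr_id 0x0015 selects PortInfo and the
	 * attribute modifier selects the port.  In the response, the
	 * port capability mask sits at byte 84: the SMP payload starts
	 * at byte 64 and the capability mask is at offset 20 of
	 * PortInfo.
	 */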
508 	inbuf = inmailbox->buf;
509 	outbuf = outmailbox->buf;
510 	inbuf[0] = 1;
511 	inbuf[1] = 1;
512 	inbuf[2] = 1;
513 	inbuf[3] = 1;
514 	*(__be16 *) (&inbuf[16]) = cpu_to_be16(0x0015);
515 	*(__be32 *) (&inbuf[20]) = cpu_to_be32(port);
516 
517 	err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
518 			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
519 			   MLX4_CMD_NATIVE);
520 	if (!err)
521 		*caps = *(__be32 *) (outbuf + 84);
522 	mlx4_free_cmd_mailbox(dev, inmailbox);
523 	mlx4_free_cmd_mailbox(dev, outmailbox);
524 	return err;
525 }
526 static struct mlx4_roce_gid_entry zgid_entry;
527 
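/*
 * RoCE GIDs are statically partitioned per port: the PF owns the first
 * MLX4_ROCE_PF_GIDS entries and the remaining entries are split as
 * evenly as possible between the VFs active on the port, with the first
 * (remainder) VFs getting one extra GID.  For example, assuming
 * MLX4_ROCE_MAX_GIDS = 128 and MLX4_ROCE_PF_GIDS = 16, five VFs on a
 * port would get 23, 23, 22, 22 and 22 GIDs respectively.  The loop
 * below adjusts the global slave number so that GID blocks are handed
 * out in per-port order (see the fuller explanation in
 * mlx4_get_slave_from_roce_gid()).
 */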
528 int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port)
529 {
530 	int vfs;
531 	int slave_gid = slave;
532 	unsigned i;
533 	struct mlx4_slaves_pport slaves_pport;
534 	struct mlx4_active_ports actv_ports;
535 	unsigned max_port_p_one;
536 
537 	if (slave == 0)
538 		return MLX4_ROCE_PF_GIDS;
539 
540 	/* Slave is a VF */
541 	slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
542 	actv_ports = mlx4_get_active_ports(dev, slave);
543 	max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) +
544 		bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1;
545 
546 	for (i = 1; i < max_port_p_one; i++) {
547 		struct mlx4_active_ports exclusive_ports;
548 		struct mlx4_slaves_pport slaves_pport_actv;
549 		bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
550 		set_bit(i - 1, exclusive_ports.ports);
551 		if (i == port)
552 			continue;
553 		slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
554 				    dev, &exclusive_ports);
555 		slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
556 					   dev->persist->num_vfs + 1);
557 	}
558 	vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1;
559 	if (slave_gid <= ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) % vfs))
560 		return ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs) + 1;
561 	return (MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs;
562 }
563 
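/*
 * Return the first GID table index owned by @slave on @port, using the
 * same split as mlx4_get_slave_num_gids() above: the PF block of
 * MLX4_ROCE_PF_GIDS entries comes first, followed by one consecutive
 * block per VF in per-port order.
 */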
564 int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port)
565 {
566 	int gids;
567 	unsigned i;
568 	int slave_gid = slave;
569 	int vfs;
570 
571 	struct mlx4_slaves_pport slaves_pport;
572 	struct mlx4_active_ports actv_ports;
573 	unsigned max_port_p_one;
574 
575 	if (slave == 0)
576 		return 0;
577 
578 	slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
579 	actv_ports = mlx4_get_active_ports(dev, slave);
580 	max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) +
581 		bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1;
582 
583 	for (i = 1; i < max_port_p_one; i++) {
584 		struct mlx4_active_ports exclusive_ports;
585 		struct mlx4_slaves_pport slaves_pport_actv;
586 		bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
587 		set_bit(i - 1, exclusive_ports.ports);
588 		if (i == port)
589 			continue;
590 		slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
591 				    dev, &exclusive_ports);
592 		slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
593 					   dev->persist->num_vfs + 1);
594 	}
595 	gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
596 	vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1;
597 	if (slave_gid <= gids % vfs)
598 		return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave_gid - 1);
599 
600 	return MLX4_ROCE_PF_GIDS + (gids % vfs) +
601 		((gids / vfs) * (slave_gid - 1));
602 }
603 EXPORT_SYMBOL_GPL(mlx4_get_base_gid_ix);
604 
605 static int mlx4_reset_roce_port_gids(struct mlx4_dev *dev, int slave,
606 				     int port, struct mlx4_cmd_mailbox *mailbox)
607 {
608 	struct mlx4_roce_gid_entry *gid_entry_mbox;
609 	struct mlx4_priv *priv = mlx4_priv(dev);
610 	int num_gids, base, offset;
611 	int i, err;
612 
613 	num_gids = mlx4_get_slave_num_gids(dev, slave, port);
614 	base = mlx4_get_base_gid_ix(dev, slave, port);
615 
616 	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
617 
618 	mutex_lock(&(priv->port[port].gid_table.mutex));
619 	/* Zero-out gids belonging to that slave in the port GID table */
620 	for (i = 0, offset = base; i < num_gids; offset++, i++)
621 		memcpy(priv->port[port].gid_table.roce_gids[offset].raw,
622 		       zgid_entry.raw, MLX4_ROCE_GID_ENTRY_SIZE);
623 
624 	/* Now, copy roce port gids table to mailbox for passing to FW */
625 	gid_entry_mbox = (struct mlx4_roce_gid_entry *)mailbox->buf;
626 	for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
627 		memcpy(gid_entry_mbox->raw,
628 		       priv->port[port].gid_table.roce_gids[i].raw,
629 		       MLX4_ROCE_GID_ENTRY_SIZE);
630 
631 	err = mlx4_cmd(dev, mailbox->dma,
632 		       ((u32)port) | (MLX4_SET_PORT_GID_TABLE << 8), 1,
633 		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
634 		       MLX4_CMD_NATIVE);
635 	mutex_unlock(&(priv->port[port].gid_table.mutex));
636 	return err;
637 }
638 
639 
640 void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave)
641 {
642 	struct mlx4_active_ports actv_ports;
643 	struct mlx4_cmd_mailbox *mailbox;
644 	int num_eth_ports, err;
645 	int i;
646 
647 	if (slave < 0 || slave > dev->persist->num_vfs)
648 		return;
649 
650 	actv_ports = mlx4_get_active_ports(dev, slave);
651 
652 	for (i = 0, num_eth_ports = 0; i < dev->caps.num_ports; i++) {
653 		if (test_bit(i, actv_ports.ports)) {
654 			if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH)
655 				continue;
656 			num_eth_ports++;
657 		}
658 	}
659 
660 	if (!num_eth_ports)
661 		return;
662 
663 	/* We have ETH ports; allocate a mailbox for the SET_PORT command */
664 	mailbox = mlx4_alloc_cmd_mailbox(dev);
665 	if (IS_ERR(mailbox))
666 		return;
667 
668 	for (i = 0; i < dev->caps.num_ports; i++) {
669 		if (test_bit(i, actv_ports.ports)) {
670 			if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH)
671 				continue;
672 			err = mlx4_reset_roce_port_gids(dev, slave, i + 1, mailbox);
673 			if (err)
674 				mlx4_warn(dev, "Could not reset ETH port GID table for slave %d, port %d (%d)\n",
675 					  slave, i + 1, err);
676 		}
677 	}
678 
679 	mlx4_free_cmd_mailbox(dev, mailbox);
680 	return;
681 }
682 
683 static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
684 				u8 op_mod, struct mlx4_cmd_mailbox *inbox)
685 {
686 	struct mlx4_priv *priv = mlx4_priv(dev);
687 	struct mlx4_port_info *port_info;
688 	struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
689 	struct mlx4_slave_state *slave_st = &master->slave_state[slave];
690 	struct mlx4_set_port_rqp_calc_context *qpn_context;
691 	struct mlx4_set_port_general_context *gen_context;
692 	struct mlx4_roce_gid_entry *gid_entry_tbl, *gid_entry_mbox, *gid_entry_mb1;
693 	int reset_qkey_viols;
694 	int port;
695 	int is_eth;
696 	int num_gids;
697 	int base;
698 	u32 in_modifier;
699 	u32 promisc;
700 	u16 mtu, prev_mtu;
701 	int err;
702 	int i, j;
703 	int offset;
704 	__be32 agg_cap_mask;
705 	__be32 slave_cap_mask;
706 	__be32 new_cap_mask;
707 
708 	port = in_mod & 0xff;
709 	in_modifier = in_mod >> 8;
710 	is_eth = op_mod;
711 	port_info = &priv->port[port];
712 
713 	/* Slaves cannot perform SET_PORT operations except changing MTU */
714 	if (is_eth) {
715 		if (slave != dev->caps.function &&
716 		    in_modifier != MLX4_SET_PORT_GENERAL &&
717 		    in_modifier != MLX4_SET_PORT_GID_TABLE) {
718 			mlx4_warn(dev, "denying SET_PORT for slave:%d\n",
719 					slave);
720 			return -EINVAL;
721 		}
722 		switch (in_modifier) {
723 		case MLX4_SET_PORT_RQP_CALC:
724 			qpn_context = inbox->buf;
725 			qpn_context->base_qpn =
726 				cpu_to_be32(port_info->base_qpn);
727 			qpn_context->n_mac = 0x7;
728 			promisc = be32_to_cpu(qpn_context->promisc) >>
729 				SET_PORT_PROMISC_SHIFT;
730 			qpn_context->promisc = cpu_to_be32(
731 				promisc << SET_PORT_PROMISC_SHIFT |
732 				port_info->base_qpn);
733 			promisc = be32_to_cpu(qpn_context->mcast) >>
734 				SET_PORT_MC_PROMISC_SHIFT;
735 			qpn_context->mcast = cpu_to_be32(
736 				promisc << SET_PORT_MC_PROMISC_SHIFT |
737 				port_info->base_qpn);
738 			break;
739 		case MLX4_SET_PORT_GENERAL:
740 			gen_context = inbox->buf;
741 			/* MTU is configured as the max MTU among all the
742 			 * functions on the port. */
743 			mtu = be16_to_cpu(gen_context->mtu);
744 			mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port] +
745 				    ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
746 			prev_mtu = slave_st->mtu[port];
747 			slave_st->mtu[port] = mtu;
748 			if (mtu > master->max_mtu[port])
749 				master->max_mtu[port] = mtu;
750 			if (mtu < prev_mtu && prev_mtu ==
751 						master->max_mtu[port]) {
752 				slave_st->mtu[port] = mtu;
753 				master->max_mtu[port] = mtu;
754 				for (i = 0; i < dev->num_slaves; i++) {
755 					master->max_mtu[port] =
756 					max(master->max_mtu[port],
757 					    master->slave_state[i].mtu[port]);
758 				}
759 			}
760 
761 			gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
762 			break;
763 		case MLX4_SET_PORT_GID_TABLE:
764 			/* A guest may own multiple GID entries, so loop over all
765 			 * of the GIDs the guest passed in:
766 			 * 1. Check that the GIDs passed by the slave contain no duplicates
767 			 */
768 			num_gids = mlx4_get_slave_num_gids(dev, slave, port);
769 			base = mlx4_get_base_gid_ix(dev, slave, port);
770 			gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
771 			for (i = 0; i < num_gids; gid_entry_mbox++, i++) {
772 				if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw,
773 					    sizeof(zgid_entry)))
774 					continue;
775 				gid_entry_mb1 = gid_entry_mbox + 1;
776 				for (j = i + 1; j < num_gids; gid_entry_mb1++, j++) {
777 					if (!memcmp(gid_entry_mb1->raw,
778 						    zgid_entry.raw, sizeof(zgid_entry)))
779 						continue;
780 					if (!memcmp(gid_entry_mb1->raw, gid_entry_mbox->raw,
781 						    sizeof(gid_entry_mbox->raw))) {
782 						/* found duplicate */
783 						return -EINVAL;
784 					}
785 				}
786 			}
787 
788 			/* 2. Check that the passed GIDs do not duplicate any of
789 			 *    the OTHER entries in the port GID table
790 			 */
791 
792 			mutex_lock(&(priv->port[port].gid_table.mutex));
793 			for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
794 				if (i >= base && i < base + num_gids)
795 					continue; /* don't compare to slave's current gids */
796 				gid_entry_tbl = &priv->port[port].gid_table.roce_gids[i];
797 				if (!memcmp(gid_entry_tbl->raw, zgid_entry.raw, sizeof(zgid_entry)))
798 					continue;
799 				gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
800 				for (j = 0; j < num_gids; gid_entry_mbox++, j++) {
801 					if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw,
802 						    sizeof(zgid_entry)))
803 						continue;
804 					if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw,
805 						    sizeof(gid_entry_tbl->raw))) {
806 						/* found duplicate */
807 						mlx4_warn(dev, "requested gid entry for slave:%d is a duplicate of gid at index %d\n",
808 							  slave, i);
809 						mutex_unlock(&(priv->port[port].gid_table.mutex));
810 						return -EINVAL;
811 					}
812 				}
813 			}
814 
815 			/* insert slave GIDs with memcpy, starting at slave's base index */
816 			gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
817 			for (i = 0, offset = base; i < num_gids; gid_entry_mbox++, offset++, i++)
818 				memcpy(priv->port[port].gid_table.roce_gids[offset].raw,
819 				       gid_entry_mbox->raw, MLX4_ROCE_GID_ENTRY_SIZE);
820 
821 			/* Now, copy roce port gids table to current mailbox for passing to FW */
822 			gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
823 			for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
824 				memcpy(gid_entry_mbox->raw,
825 				       priv->port[port].gid_table.roce_gids[i].raw,
826 				       MLX4_ROCE_GID_ENTRY_SIZE);
827 
828 			err = mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod,
829 				       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
830 				       MLX4_CMD_NATIVE);
831 			mutex_unlock(&(priv->port[port].gid_table.mutex));
832 			return err;
833 		}
834 
835 		return mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod,
836 				MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
837 				MLX4_CMD_NATIVE);
838 	}
839 
840 	/* For IB, we only consider:
841 	 * - The capability mask, which is set to the aggregate of all
842 	 *   slave function capabilities
843 	 * - The QKey violation counter, which is reset according to each request.
844 	 */
845 
846 	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
847 		reset_qkey_viols = (*(u8 *) inbox->buf) & 0x40;
848 		new_cap_mask = ((__be32 *) inbox->buf)[2];
849 	} else {
850 		reset_qkey_viols = ((u8 *) inbox->buf)[3] & 0x1;
851 		new_cap_mask = ((__be32 *) inbox->buf)[1];
852 	}
853 
854 	/* slave may not set the IS_SM capability for the port */
855 	if (slave != mlx4_master_func_num(dev) &&
856 	    (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_IS_SM))
857 		return -EINVAL;
858 
859 	/* No DEV_MGMT in multifunc mode */
860 	if (mlx4_is_mfunc(dev) &&
861 	    (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_DEV_MGMT_SUP))
862 		return -EINVAL;
863 
864 	agg_cap_mask = 0;
865 	slave_cap_mask =
866 		priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
867 	priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = new_cap_mask;
868 	for (i = 0; i < dev->num_slaves; i++)
869 		agg_cap_mask |=
870 			priv->mfunc.master.slave_state[i].ib_cap_mask[port];
871 
872 	/* only clear mailbox for guests.  Master may be setting
873 	* MTU or PKEY table size
874 	*/
875 	if (slave != dev->caps.function)
876 		memset(inbox->buf, 0, 256);
877 	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
878 		*(u8 *) inbox->buf	   |= !!reset_qkey_viols << 6;
879 		((__be32 *) inbox->buf)[2] = agg_cap_mask;
880 	} else {
881 		((u8 *) inbox->buf)[3]     |= !!reset_qkey_viols;
882 		((__be32 *) inbox->buf)[1] = agg_cap_mask;
883 	}
884 
885 	err = mlx4_cmd(dev, inbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
886 		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
887 	if (err)
888 		priv->mfunc.master.slave_state[slave].ib_cap_mask[port] =
889 			slave_cap_mask;
890 	return err;
891 }
892 
893 int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
894 			  struct mlx4_vhcr *vhcr,
895 			  struct mlx4_cmd_mailbox *inbox,
896 			  struct mlx4_cmd_mailbox *outbox,
897 			  struct mlx4_cmd_info *cmd)
898 {
899 	int port = mlx4_slave_convert_port(
900 			dev, slave, vhcr->in_modifier & 0xFF);
901 
902 	if (port < 0)
903 		return -EINVAL;
904 
905 	vhcr->in_modifier = (vhcr->in_modifier & ~0xFF) |
906 			    (port & 0xFF);
907 
908 	return mlx4_common_set_port(dev, slave, vhcr->in_modifier,
909 				    vhcr->op_modifier, inbox);
910 }
911 
912 /* bit locations for set port command with zero op modifier */
913 enum {
914 	MLX4_SET_PORT_VL_CAP	 = 4, /* bits 7:4 */
915 	MLX4_SET_PORT_MTU_CAP	 = 12, /* bits 15:12 */
916 	MLX4_CHANGE_PORT_PKEY_TBL_SZ = 20,
917 	MLX4_CHANGE_PORT_VL_CAP	 = 21,
918 	MLX4_CHANGE_PORT_MTU_CAP = 22,
919 };
920 
921 int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz)
922 {
923 	struct mlx4_cmd_mailbox *mailbox;
924 	int err, vl_cap, pkey_tbl_flag = 0;
925 
926 	if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
927 		return 0;
928 
929 	mailbox = mlx4_alloc_cmd_mailbox(dev);
930 	if (IS_ERR(mailbox))
931 		return PTR_ERR(mailbox);
932 
933 	((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
934 
935 	if (pkey_tbl_sz >= 0 && mlx4_is_master(dev)) {
936 		pkey_tbl_flag = 1;
937 		((__be16 *) mailbox->buf)[20] = cpu_to_be16(pkey_tbl_sz);
938 	}
939 
940 	/* IB VL CAP enum isn't used by the firmware, just numerical values */
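	/* Request the largest VL capability (8) first and halve it on
	 * each iteration; the command is retried only while the
	 * firmware keeps failing it with -ENOMEM.
	 */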
941 	for (vl_cap = 8; vl_cap >= 1; vl_cap >>= 1) {
942 		((__be32 *) mailbox->buf)[0] = cpu_to_be32(
943 			(1 << MLX4_CHANGE_PORT_MTU_CAP) |
944 			(1 << MLX4_CHANGE_PORT_VL_CAP)  |
945 			(pkey_tbl_flag << MLX4_CHANGE_PORT_PKEY_TBL_SZ) |
946 			(dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) |
947 			(vl_cap << MLX4_SET_PORT_VL_CAP));
948 		err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
949 				MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
950 		if (err != -ENOMEM)
951 			break;
952 	}
953 
954 	mlx4_free_cmd_mailbox(dev, mailbox);
955 	return err;
956 }
957 
958 int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
959 			  u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
960 {
961 	struct mlx4_cmd_mailbox *mailbox;
962 	struct mlx4_set_port_general_context *context;
963 	int err;
964 	u32 in_mod;
965 
966 	mailbox = mlx4_alloc_cmd_mailbox(dev);
967 	if (IS_ERR(mailbox))
968 		return PTR_ERR(mailbox);
969 	context = mailbox->buf;
970 	context->flags = SET_PORT_GEN_ALL_VALID;
971 	context->mtu = cpu_to_be16(mtu);
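	/*
	 * Global pause and per-priority flow control are not enabled
	 * together: the pause enable bit (bit 7 of pptx/pprx) is set
	 * only when the corresponding PFC bitmap (pfctx/pfcrx) is zero.
	 */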
972 	context->pptx = (pptx * (!pfctx)) << 7;
973 	context->pfctx = pfctx;
974 	context->pprx = (pprx * (!pfcrx)) << 7;
975 	context->pfcrx = pfcrx;
976 
977 	in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
978 	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
979 		       MLX4_CMD_TIME_CLASS_B,  MLX4_CMD_WRAPPED);
980 
981 	mlx4_free_cmd_mailbox(dev, mailbox);
982 	return err;
983 }
984 EXPORT_SYMBOL(mlx4_SET_PORT_general);
985 
986 int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
987 			   u8 promisc)
988 {
989 	struct mlx4_cmd_mailbox *mailbox;
990 	struct mlx4_set_port_rqp_calc_context *context;
991 	int err;
992 	u32 in_mod;
993 	u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
994 		MCAST_DIRECT : MCAST_DEFAULT;
995 
996 	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
997 		return 0;
998 
999 	mailbox = mlx4_alloc_cmd_mailbox(dev);
1000 	if (IS_ERR(mailbox))
1001 		return PTR_ERR(mailbox);
1002 	context = mailbox->buf;
1003 	context->base_qpn = cpu_to_be32(base_qpn);
1004 	context->n_mac = dev->caps.log_num_macs;
1005 	context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
1006 				       base_qpn);
1007 	context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
1008 				     base_qpn);
1009 	context->intra_no_vlan = 0;
1010 	context->no_vlan = MLX4_NO_VLAN_IDX;
1011 	context->intra_vlan_miss = 0;
1012 	context->vlan_miss = MLX4_VLAN_MISS_IDX;
1013 
1014 	in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
1015 	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
1016 		       MLX4_CMD_TIME_CLASS_B,  MLX4_CMD_WRAPPED);
1017 
1018 	mlx4_free_cmd_mailbox(dev, mailbox);
1019 	return err;
1020 }
1021 EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);
1022 
1023 int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc)
1024 {
1025 	struct mlx4_cmd_mailbox *mailbox;
1026 	struct mlx4_set_port_prio2tc_context *context;
1027 	int err;
1028 	u32 in_mod;
1029 	int i;
1030 
1031 	mailbox = mlx4_alloc_cmd_mailbox(dev);
1032 	if (IS_ERR(mailbox))
1033 		return PTR_ERR(mailbox);
1034 	context = mailbox->buf;
1035 	for (i = 0; i < MLX4_NUM_UP; i += 2)
1036 		context->prio2tc[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1];
1037 
1038 	in_mod = MLX4_SET_PORT_PRIO2TC << 8 | port;
1039 	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
1040 		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
1041 
1042 	mlx4_free_cmd_mailbox(dev, mailbox);
1043 	return err;
1044 }
1045 EXPORT_SYMBOL(mlx4_SET_PORT_PRIO2TC);
1046 
1047 int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
1048 		u8 *pg, u16 *ratelimit)
1049 {
1050 	struct mlx4_cmd_mailbox *mailbox;
1051 	struct mlx4_set_port_scheduler_context *context;
1052 	int err;
1053 	u32 in_mod;
1054 	int i;
1055 
1056 	mailbox = mlx4_alloc_cmd_mailbox(dev);
1057 	if (IS_ERR(mailbox))
1058 		return PTR_ERR(mailbox);
1059 	context = mailbox->buf;
1060 
1061 	for (i = 0; i < MLX4_NUM_TC; i++) {
1062 		struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i];
1063 		u16 r;
1064 
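		/*
		 * The firmware takes each limit as a (value, units) pair:
		 * limits up to MLX4_MAX_100M_UNITS_VAL are sent in
		 * MLX4_RATELIMIT_100M_UNITS, larger ones are divided by 10
		 * and sent in MLX4_RATELIMIT_1G_UNITS; a zero limit falls
		 * back to MLX4_RATELIMIT_DEFAULT.
		 */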
1065 		if (ratelimit && ratelimit[i]) {
1066 			if (ratelimit[i] <= MLX4_MAX_100M_UNITS_VAL) {
1067 				r = ratelimit[i];
1068 				tc->max_bw_units =
1069 					htons(MLX4_RATELIMIT_100M_UNITS);
1070 			} else {
1071 				r = ratelimit[i]/10;
1072 				tc->max_bw_units =
1073 					htons(MLX4_RATELIMIT_1G_UNITS);
1074 			}
1075 			tc->max_bw_value = htons(r);
1076 		} else {
1077 			tc->max_bw_value = htons(MLX4_RATELIMIT_DEFAULT);
1078 			tc->max_bw_units = htons(MLX4_RATELIMIT_1G_UNITS);
1079 		}
1080 
1081 		tc->pg = htons(pg[i]);
1082 		tc->bw_precentage = htons(tc_tx_bw[i]);
1083 	}
1084 
1085 	in_mod = MLX4_SET_PORT_SCHEDULER << 8 | port;
1086 	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
1087 		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
1088 
1089 	mlx4_free_cmd_mailbox(dev, mailbox);
1090 	return err;
1091 }
1092 EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER);
1093 
1094 enum {
1095 	VXLAN_ENABLE_MODIFY	= 1 << 7,
1096 	VXLAN_STEERING_MODIFY	= 1 << 6,
1097 
1098 	VXLAN_ENABLE		= 1 << 7,
1099 };
1100 
1101 struct mlx4_set_port_vxlan_context {
1102 	u32	reserved1;
1103 	u8	modify_flags;
1104 	u8	reserved2;
1105 	u8	enable_flags;
1106 	u8	steering;
1107 };
1108 
1109 int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable)
1110 {
1111 	int err;
1112 	u32 in_mod;
1113 	struct mlx4_cmd_mailbox *mailbox;
1114 	struct mlx4_set_port_vxlan_context  *context;
1115 
1116 	mailbox = mlx4_alloc_cmd_mailbox(dev);
1117 	if (IS_ERR(mailbox))
1118 		return PTR_ERR(mailbox);
1119 	context = mailbox->buf;
1120 	memset(context, 0, sizeof(*context));
1121 
1122 	context->modify_flags = VXLAN_ENABLE_MODIFY | VXLAN_STEERING_MODIFY;
1123 	if (enable)
1124 		context->enable_flags = VXLAN_ENABLE;
1125 	context->steering  = steering;
1126 
1127 	in_mod = MLX4_SET_PORT_VXLAN << 8 | port;
1128 	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
1129 		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
1130 
1131 	mlx4_free_cmd_mailbox(dev, mailbox);
1132 	return err;
1133 }
1134 EXPORT_SYMBOL(mlx4_SET_PORT_VXLAN);
1135 
1136 int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
1137 				struct mlx4_vhcr *vhcr,
1138 				struct mlx4_cmd_mailbox *inbox,
1139 				struct mlx4_cmd_mailbox *outbox,
1140 				struct mlx4_cmd_info *cmd)
1141 {
1142 	int err = 0;
1143 
1144 	return err;
1145 }
1146 
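/*
 * SET_MCAST_FLTR takes its arguments in the 64-bit immediate: the MAC
 * address in the low 48 bits and the 'clear' flag in bit 63, with the
 * port in the input modifier and the filter mode in the op modifier.
 */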
1147 int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
1148 			u64 mac, u64 clear, u8 mode)
1149 {
1150 	return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
1151 			MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B,
1152 			MLX4_CMD_WRAPPED);
1153 }
1154 EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR);
1155 
1156 int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
1157 			       struct mlx4_vhcr *vhcr,
1158 			       struct mlx4_cmd_mailbox *inbox,
1159 			       struct mlx4_cmd_mailbox *outbox,
1160 			       struct mlx4_cmd_info *cmd)
1161 {
1162 	int err = 0;
1163 
1164 	return err;
1165 }
1166 
1167 int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave,
1168 			       u32 in_mod, struct mlx4_cmd_mailbox *outbox)
1169 {
1170 	return mlx4_cmd_box(dev, 0, outbox->dma, in_mod, 0,
1171 			    MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
1172 			    MLX4_CMD_NATIVE);
1173 }
1174 
1175 int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
1176 				struct mlx4_vhcr *vhcr,
1177 				struct mlx4_cmd_mailbox *inbox,
1178 				struct mlx4_cmd_mailbox *outbox,
1179 				struct mlx4_cmd_info *cmd)
1180 {
1181 	if (slave != dev->caps.function)
1182 		return 0;
1183 	return mlx4_common_dump_eth_stats(dev, slave,
1184 					  vhcr->in_modifier, outbox);
1185 }
1186 
1187 void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap)
1188 {
1189 	if (!mlx4_is_mfunc(dev)) {
1190 		*stats_bitmap = 0;
1191 		return;
1192 	}
1193 
1194 	*stats_bitmap = (MLX4_STATS_TRAFFIC_COUNTERS_MASK |
1195 			 MLX4_STATS_TRAFFIC_DROPS_MASK |
1196 			 MLX4_STATS_PORT_COUNTERS_MASK);
1197 
1198 	if (mlx4_is_master(dev))
1199 		*stats_bitmap |= MLX4_STATS_ERROR_COUNTERS_MASK;
1200 }
1201 EXPORT_SYMBOL(mlx4_set_stats_bitmap);
1202 
1203 int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
1204 				 int *slave_id)
1205 {
1206 	struct mlx4_priv *priv = mlx4_priv(dev);
1207 	int i, found_ix = -1;
1208 	int vf_gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
1209 	struct mlx4_slaves_pport slaves_pport;
1210 	unsigned num_vfs;
1211 	int slave_gid;
1212 
1213 	if (!mlx4_is_mfunc(dev))
1214 		return -EINVAL;
1215 
1216 	slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
1217 	num_vfs = bitmap_weight(slaves_pport.slaves,
1218 				dev->persist->num_vfs + 1) - 1;
1219 
1220 	for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
1221 		if (!memcmp(priv->port[port].gid_table.roce_gids[i].raw, gid,
1222 			    MLX4_ROCE_GID_ENTRY_SIZE)) {
1223 			found_ix = i;
1224 			break;
1225 		}
1226 	}
1227 
1228 	if (found_ix >= 0) {
1229 		/* Calculate a slave_gid which is the slave number in the gid
1230 		 * table and not a globally unique slave number.
1231 		 */
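		/*
		 * This inverts the split done in mlx4_get_base_gid_ix():
		 * the first (vf_gids % num_vfs) VFs own
		 * (vf_gids / num_vfs + 1) consecutive GIDs each and the
		 * remaining VFs own (vf_gids / num_vfs) each, so the owning
		 * VF can be derived from the entry index alone.
		 */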
1232 		if (found_ix < MLX4_ROCE_PF_GIDS)
1233 			slave_gid = 0;
1234 		else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % num_vfs) *
1235 			 (vf_gids / num_vfs + 1))
1236 			slave_gid = ((found_ix - MLX4_ROCE_PF_GIDS) /
1237 				     (vf_gids / num_vfs + 1)) + 1;
1238 		else
1239 			slave_gid =
1240 			((found_ix - MLX4_ROCE_PF_GIDS -
1241 			  ((vf_gids % num_vfs) * ((vf_gids / num_vfs + 1)))) /
1242 			 (vf_gids / num_vfs)) + vf_gids % num_vfs + 1;
1243 
1244 		/* Calculate the globally unique slave id */
1245 		if (slave_gid) {
1246 			struct mlx4_active_ports exclusive_ports;
1247 			struct mlx4_active_ports actv_ports;
1248 			struct mlx4_slaves_pport slaves_pport_actv;
1249 			unsigned max_port_p_one;
1250 			int num_vfs_before = 0;
1251 			int candidate_slave_gid;
1252 
1253 			/* Calculate how many VFs are on the previous ports, if any */
1254 			for (i = 1; i < port; i++) {
1255 				bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
1256 				set_bit(i - 1, exclusive_ports.ports);
1257 				slaves_pport_actv =
1258 					mlx4_phys_to_slaves_pport_actv(
1259 							dev, &exclusive_ports);
1260 				num_vfs_before += bitmap_weight(
1261 						slaves_pport_actv.slaves,
1262 						dev->persist->num_vfs + 1);
1263 			}
1264 
1265 			/* candidate_slave_gid isn't necessarily the correct slave, but
1266 			 * it has the same number of ports and is assigned to the same
1267 			 * ports as the real slave we're looking for. On dual port VF,
1268 			 * slave_gid = [single port VFs on port <port>] +
1269 			 * [offset of the current slave from the first dual port VF] +
1270 			 * 1 (for the PF).
1271 			 */
1272 			candidate_slave_gid = slave_gid + num_vfs_before;
1273 
1274 			actv_ports = mlx4_get_active_ports(dev, candidate_slave_gid);
1275 			max_port_p_one = find_first_bit(
1276 				actv_ports.ports, dev->caps.num_ports) +
1277 				bitmap_weight(actv_ports.ports,
1278 					      dev->caps.num_ports) + 1;
1279 
1280 			/* Calculate the real slave number */
1281 			for (i = 1; i < max_port_p_one; i++) {
1282 				if (i == port)
1283 					continue;
1284 				bitmap_zero(exclusive_ports.ports,
1285 					    dev->caps.num_ports);
1286 				set_bit(i - 1, exclusive_ports.ports);
1287 				slaves_pport_actv =
1288 					mlx4_phys_to_slaves_pport_actv(
1289 						dev, &exclusive_ports);
1290 				slave_gid += bitmap_weight(
1291 						slaves_pport_actv.slaves,
1292 						dev->persist->num_vfs + 1);
1293 			}
1294 		}
1295 		*slave_id = slave_gid;
1296 	}
1297 
1298 	return (found_ix >= 0) ? 0 : -EINVAL;
1299 }
1300 EXPORT_SYMBOL(mlx4_get_slave_from_roce_gid);
1301 
1302 int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id,
1303 				 u8 *gid)
1304 {
1305 	struct mlx4_priv *priv = mlx4_priv(dev);
1306 
1307 	if (!mlx4_is_master(dev))
1308 		return -EINVAL;
1309 
1310 	memcpy(gid, priv->port[port].gid_table.roce_gids[slave_id].raw,
1311 	       MLX4_ROCE_GID_ENTRY_SIZE);
1312 	return 0;
1313 }
1314 EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave);
1315 
1316 /* Cable Module Info */
1317 #define MODULE_INFO_MAX_READ 48
1318 
1319 #define I2C_ADDR_LOW  0x50
1320 #define I2C_ADDR_HIGH 0x51
1321 #define I2C_PAGE_SIZE 256
1322 
1323 /* Module Info Data */
1324 struct mlx4_cable_info {
1325 	u8	i2c_addr;
1326 	u8	page_num;
1327 	__be16	dev_mem_address;
1328 	__be16	reserved1;
1329 	__be16	size;
1330 	__be32	reserved2[2];
1331 	u8	data[MODULE_INFO_MAX_READ];
1332 };
1333 
1334 enum cable_info_err {
1335 	 CABLE_INF_INV_PORT      = 0x1,
1336 	 CABLE_INF_OP_NOSUP      = 0x2,
1337 	 CABLE_INF_NOT_CONN      = 0x3,
1338 	 CABLE_INF_NO_EEPRM      = 0x4,
1339 	 CABLE_INF_PAGE_ERR      = 0x5,
1340 	 CABLE_INF_INV_ADDR      = 0x6,
1341 	 CABLE_INF_I2C_ADDR      = 0x7,
1342 	 CABLE_INF_QSFP_VIO      = 0x8,
1343 	 CABLE_INF_I2C_BUSY      = 0x9,
1344 };
1345 
1346 #define MAD_STATUS_2_CABLE_ERR(mad_status) ((mad_status >> 8) & 0xFF)
1347 
1348 static inline const char *cable_info_mad_err_str(u16 mad_status)
1349 {
1350 	u8 err = MAD_STATUS_2_CABLE_ERR(mad_status);
1351 
1352 	switch (err) {
1353 	case CABLE_INF_INV_PORT:
1354 		return "invalid port selected";
1355 	case CABLE_INF_OP_NOSUP:
1356 		return "operation not supported for this port (the port is of type CX4 or internal)";
1357 	case CABLE_INF_NOT_CONN:
1358 		return "cable is not connected";
1359 	case CABLE_INF_NO_EEPRM:
1360 		return "the connected cable has no EEPROM (passive copper cable)";
1361 	case CABLE_INF_PAGE_ERR:
1362 		return "page number is greater than 15";
1363 	case CABLE_INF_INV_ADDR:
1364 		return "invalid device_address or size (that is, size equals 0 or address+size is greater than 256)";
1365 	case CABLE_INF_I2C_ADDR:
1366 		return "invalid I2C slave address";
1367 	case CABLE_INF_QSFP_VIO:
1368 		return "at least one cable violates the QSFP specification and ignores the modsel signal";
1369 	case CABLE_INF_I2C_BUSY:
1370 		return "I2C bus is constantly busy";
1371 	}
1372 	return "Unknown Error";
1373 }
1374 
1375 /**
1376  * mlx4_get_module_info - Read cable module eeprom data
1377  * @dev: mlx4_dev.
1378  * @port: port number.
1379  * @offset: byte offset in eeprom to start reading data from.
1380  * @size: num of bytes to read.
1381  * @data: output buffer to put the requested data into.
1382  *
1383  * Reads cable module EEPROM data and places the result in the
1384  * @data buffer.
1385  * Returns the number of bytes read on success, or a negative error
1386  * code.
1387  */
1388 int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
1389 			 u16 offset, u16 size, u8 *data)
1390 {
1391 	struct mlx4_cmd_mailbox *inbox, *outbox;
1392 	struct mlx4_mad_ifc *inmad, *outmad;
1393 	struct mlx4_cable_info *cable_info;
1394 	u16 i2c_addr;
1395 	int ret;
1396 
1397 	if (size > MODULE_INFO_MAX_READ)
1398 		size = MODULE_INFO_MAX_READ;
1399 
1400 	inbox = mlx4_alloc_cmd_mailbox(dev);
1401 	if (IS_ERR(inbox))
1402 		return PTR_ERR(inbox);
1403 
1404 	outbox = mlx4_alloc_cmd_mailbox(dev);
1405 	if (IS_ERR(outbox)) {
1406 		mlx4_free_cmd_mailbox(dev, inbox);
1407 		return PTR_ERR(outbox);
1408 	}
1409 
1410 	inmad = (struct mlx4_mad_ifc *)(inbox->buf);
1411 	outmad = (struct mlx4_mad_ifc *)(outbox->buf);
1412 
1413 	inmad->method = 0x1; /* Get */
1414 	inmad->class_version = 0x1;
1415 	inmad->mgmt_class = 0x1;
1416 	inmad->base_version = 0x1;
1417 	inmad->attr_id = cpu_to_be16(0xFF60); /* Module Info */
1418 
1419 	if (offset < I2C_PAGE_SIZE && offset + size > I2C_PAGE_SIZE)
1420 		/* Cross-page reads are not allowed;
1421 		 * read only up to offset 256 of the low page
1422 		 */
1423 		size -= offset + size - I2C_PAGE_SIZE;
1424 
1425 	i2c_addr = I2C_ADDR_LOW;
1426 	if (offset >= I2C_PAGE_SIZE) {
1427 		/* Reset offset to high page */
1428 		i2c_addr = I2C_ADDR_HIGH;
1429 		offset -= I2C_PAGE_SIZE;
1430 	}
1431 
1432 	cable_info = (struct mlx4_cable_info *)inmad->data;
1433 	cable_info->dev_mem_address = cpu_to_be16(offset);
1434 	cable_info->page_num = 0;
1435 	cable_info->i2c_addr = i2c_addr;
1436 	cable_info->size = cpu_to_be16(size);
1437 
1438 	ret = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
1439 			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
1440 			   MLX4_CMD_NATIVE);
1441 	if (ret)
1442 		goto out;
1443 
1444 	if (be16_to_cpu(outmad->status)) {
1445 		/* MAD returned with bad status */
1446 		ret = be16_to_cpu(outmad->status);
1447 		mlx4_warn(dev,
1448 			  "MLX4_CMD_MAD_IFC Get Module info attr(%x) port(%d) i2c_addr(%x) offset(%d) size(%d): Response Mad Status(%x) - %s\n",
1449 			  0xFF60, port, i2c_addr, offset, size,
1450 			  ret, cable_info_mad_err_str(ret));
1451 
1452 		if (i2c_addr == I2C_ADDR_HIGH &&
1453 		    MAD_STATUS_2_CABLE_ERR(ret) == CABLE_INF_I2C_ADDR)
1454 			/* Some SFP cables do not support i2c slave
1455 			 * address 0x51 (high page), abort silently.
1456 			 */
1457 			ret = 0;
1458 		else
1459 			ret = -ret;
1460 		goto out;
1461 	}
1462 	cable_info = (struct mlx4_cable_info *)outmad->data;
1463 	memcpy(data, cable_info->data, size);
1464 	ret = size;
1465 out:
1466 	mlx4_free_cmd_mailbox(dev, inbox);
1467 	mlx4_free_cmd_mailbox(dev, outbox);
1468 	return ret;
1469 }
1470 EXPORT_SYMBOL(mlx4_get_module_info);
1471