/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/export.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"

#define MLX4_MAC_VALID		(1ull << 63)

#define MLX4_VLAN_VALID		(1u << 31)
#define MLX4_VLAN_MASK		0xfff

#define MLX4_STATS_TRAFFIC_COUNTERS_MASK	0xfULL
#define MLX4_STATS_TRAFFIC_DROPS_MASK		0xc0ULL
#define MLX4_STATS_ERROR_COUNTERS_MASK		0x1ffc30ULL
#define MLX4_STATS_PORT_COUNTERS_MASK		0x1fe00000ULL

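/*
 * Per-port MAC and VLAN tables shadow the tables programmed into the
 * device.  Each entry carries a reference count so the same address or
 * VLAN ID can be registered by several users while being written to
 * hardware only once.
 */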
void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
{
	int i;

	mutex_init(&table->mutex);
	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		table->entries[i] = 0;
		table->refs[i]	 = 0;
	}
	table->max   = 1 << dev->caps.log_num_macs;
	table->total = 0;
}

void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
{
	int i;

	mutex_init(&table->mutex);
	for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
		table->entries[i] = 0;
		table->refs[i]	 = 0;
	}
	table->max   = (1 << dev->caps.log_num_vlans) - MLX4_VLAN_REGULAR;
	table->total = 0;
}

static int validate_index(struct mlx4_dev *dev,
			  struct mlx4_mac_table *table, int index)
{
	int err = 0;

	if (index < 0 || index >= table->max || !table->entries[index]) {
		mlx4_warn(dev, "No valid MAC entry for the given index\n");
		err = -EINVAL;
	}
	return err;
}

static int find_index(struct mlx4_dev *dev,
		      struct mlx4_mac_table *table, u64 mac)
{
	int i;

	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if ((mac & MLX4_MAC_MASK) ==
		    (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
			return i;
	}
	/* MAC not found */
	return -EINVAL;
}

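/*
 * Write the shadow MAC table for @port to the device: the whole table
 * is copied into a command mailbox and pushed with SET_PORT using the
 * MAC-table input modifier.
 */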
static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
				   __be64 *entries)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_mod;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);

	in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;

	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_table *table = &info->mac_table;
	int i;

	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if (!table->refs[i])
			continue;

		if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
			*idx = i;
			return 0;
		}
	}

	return -ENOENT;
}
EXPORT_SYMBOL_GPL(mlx4_find_cached_mac);

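/*
 * Add @mac to the port's MAC table, or take another reference if it is
 * already present.  Returns the table index on success or a negative
 * errno on failure.  The table mutex is taken here.
 */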
int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_table *table = &info->mac_table;
	int i, err = 0;
	int free = -1;

	mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d\n",
		 (unsigned long long) mac, port);

	mutex_lock(&table->mutex);
	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if (free < 0 && !table->entries[i]) {
			free = i;
			continue;
		}

		if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
			/* MAC already registered, increment ref count */
			err = i;
			++table->refs[i];
			goto out;
		}
	}

	mlx4_dbg(dev, "Free MAC index is %d\n", free);

	if (table->total == table->max) {
		/* No free mac entries */
		err = -ENOSPC;
		goto out;
	}

	/* Register new MAC */
	table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);

	err = mlx4_set_port_mac_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
			 (unsigned long long) mac);
		table->entries[free] = 0;
		goto out;
	}
	table->refs[free] = 1;
	err = free;
	++table->total;
out:
	mutex_unlock(&table->mutex);
	return err;
}
EXPORT_SYMBOL_GPL(__mlx4_register_mac);

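/*
 * On multi-function devices MAC registration goes through the master's
 * resource tracker (ALLOC_RES/RES_MAC); if that fails with -EINVAL on a
 * slave, the old REG_MAC format is retried and remembered via
 * MLX4_FLAG_OLD_REG_MAC.  Single-function devices update the table
 * directly.
 */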
int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	u64 out_param = 0;
	int err = -EINVAL;

	if (mlx4_is_mfunc(dev)) {
		if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
			err = mlx4_cmd_imm(dev, mac, &out_param,
					   ((u32) port) << 8 | (u32) RES_MAC,
					   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
					   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		}
		if (err == -EINVAL && mlx4_is_slave(dev)) {
			/* retry using old REG_MAC format */
			set_param_l(&out_param, port);
			err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
					   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
					   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
			if (!err)
				dev->flags |= MLX4_FLAG_OLD_REG_MAC;
		}
		if (err)
			return err;

		return get_param_l(&out_param);
	}
	return __mlx4_register_mac(dev, port, mac);
}
EXPORT_SYMBOL_GPL(mlx4_register_mac);

int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port)
{
	return dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
			(port - 1) * (1 << dev->caps.log_num_macs);
}
EXPORT_SYMBOL_GPL(mlx4_get_base_qpn);

void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_table *table = &info->mac_table;
	int index;

	mutex_lock(&table->mutex);
	index = find_index(dev, table, mac);

	if (validate_index(dev, table, index))
		goto out;
	if (--table->refs[index]) {
		mlx4_dbg(dev, "Have more references for index %d, no need to modify mac table\n",
			 index);
		goto out;
	}

	table->entries[index] = 0;
	mlx4_set_port_mac_table(dev, port, table->entries);
	--table->total;
out:
	mutex_unlock(&table->mutex);
}
EXPORT_SYMBOL_GPL(__mlx4_unregister_mac);

void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	u64 out_param = 0;

	if (mlx4_is_mfunc(dev)) {
		if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
			(void) mlx4_cmd_imm(dev, mac, &out_param,
					    ((u32) port) << 8 | (u32) RES_MAC,
					    RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
					    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		} else {
			/* use old unregister mac format */
			set_param_l(&out_param, port);
			(void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
					    RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
					    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		}
		return;
	}
	__mlx4_unregister_mac(dev, port, mac);
}
EXPORT_SYMBOL_GPL(mlx4_unregister_mac);

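/*
 * Replace the MAC behind an already registered table entry.  The table
 * index is derived from the QP number, since each MAC index maps to one
 * QP in the ETH_ADDR QP region (see mlx4_get_base_qpn()).
 */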
int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_table *table = &info->mac_table;
	int index = qpn - info->base_qpn;
	int err = 0;

	/* CX1 doesn't support multi-functions */
	mutex_lock(&table->mutex);

	err = validate_index(dev, table, index);
	if (err)
		goto out;

	table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);

	err = mlx4_set_port_mac_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
			 (unsigned long long) new_mac);
		table->entries[index] = 0;
	}
out:
	mutex_unlock(&table->mutex);
	return err;
}
EXPORT_SYMBOL_GPL(__mlx4_replace_mac);

static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
				    __be32 *entries)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_mod;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
	in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}

int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
{
	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
	int i;

	for (i = 0; i < MLX4_MAX_VLAN_NUM; ++i) {
		if (table->refs[i] &&
		    (vid == (MLX4_VLAN_MASK &
			      be32_to_cpu(table->entries[i])))) {
			/* VLAN already registered, return its index */
			*idx = i;
			return 0;
		}
	}

	return -ENOENT;
}
EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);

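/*
 * Add @vlan to the port's VLAN table, or take another reference if it
 * is already present.  The first MLX4_VLAN_REGULAR indices are skipped;
 * they are reserved for special entries such as the no-vlan and
 * vlan-miss indices.  On success *index holds the table index.
 */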
int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
				int *index)
{
	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
	int i, err = 0;
	int free = -1;

	mutex_lock(&table->mutex);

	if (table->total == table->max) {
		/* No free vlan entries */
		err = -ENOSPC;
		goto out;
	}

	for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
		if (free < 0 && (table->refs[i] == 0)) {
			free = i;
			continue;
		}

		if (table->refs[i] &&
		    (vlan == (MLX4_VLAN_MASK &
			      be32_to_cpu(table->entries[i])))) {
			/* VLAN already registered, increase reference count */
			*index = i;
			++table->refs[i];
			goto out;
		}
	}

	if (free < 0) {
		err = -ENOMEM;
		goto out;
	}

	/* Register new VLAN */
	table->refs[free] = 1;
	table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);

	err = mlx4_set_port_vlan_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_warn(dev, "Failed adding vlan: %u\n", vlan);
		table->refs[free] = 0;
		table->entries[free] = 0;
		goto out;
	}

	*index = free;
	++table->total;
out:
	mutex_unlock(&table->mutex);
	return err;
}

int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
{
	u64 out_param = 0;
	int err;

	if (vlan > 4095)
		return -EINVAL;

	if (mlx4_is_mfunc(dev)) {
		err = mlx4_cmd_imm(dev, vlan, &out_param,
				   ((u32) port) << 8 | (u32) RES_VLAN,
				   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (!err)
			*index = get_param_l(&out_param);

		return err;
	}
	return __mlx4_register_vlan(dev, port, vlan, index);
}
EXPORT_SYMBOL_GPL(mlx4_register_vlan);

void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
{
	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
	int index;

	mutex_lock(&table->mutex);
	if (mlx4_find_cached_vlan(dev, port, vlan, &index)) {
		mlx4_warn(dev, "vlan 0x%x is not in the vlan table\n", vlan);
		goto out;
	}

	if (index < MLX4_VLAN_REGULAR) {
		mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
		goto out;
	}

	if (--table->refs[index]) {
		mlx4_dbg(dev, "Have %d more references for index %d, no need to modify vlan table\n",
			 table->refs[index], index);
		goto out;
	}
	table->entries[index] = 0;
	mlx4_set_port_vlan_table(dev, port, table->entries);
	--table->total;
out:
	mutex_unlock(&table->mutex);
}

void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
{
	u64 out_param = 0;

	if (mlx4_is_mfunc(dev)) {
		(void) mlx4_cmd_imm(dev, vlan, &out_param,
				    ((u32) port) << 8 | (u32) RES_VLAN,
				    RES_OP_RESERVE_AND_MAP,
				    MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
				    MLX4_CMD_WRAPPED);
		return;
	}
	__mlx4_unregister_vlan(dev, port, vlan);
}
EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);

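/*
 * Read the port capability mask over the built-in MAD interface.  The
 * inbox is a hand-built SMP MAD (attribute 0x0015, which should be
 * PortInfo); the capability mask is then picked out of the returned MAD
 * at a fixed offset.
 */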
int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
{
	struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
	u8 *inbuf, *outbuf;
	int err;

	inmailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(inmailbox))
		return PTR_ERR(inmailbox);

	outmailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(outmailbox)) {
		mlx4_free_cmd_mailbox(dev, inmailbox);
		return PTR_ERR(outmailbox);
	}

	inbuf = inmailbox->buf;
	outbuf = outmailbox->buf;
	inbuf[0] = 1;
	inbuf[1] = 1;
	inbuf[2] = 1;
	inbuf[3] = 1;
	*(__be16 *) (&inbuf[16]) = cpu_to_be16(0x0015);
	*(__be32 *) (&inbuf[20]) = cpu_to_be32(port);

	err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);
	if (!err)
		*caps = *(__be32 *) (outbuf + 84);
	mlx4_free_cmd_mailbox(dev, inmailbox);
	mlx4_free_cmd_mailbox(dev, outmailbox);
	return err;
}

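/*
 * SET_PORT handling on behalf of a slave.  For Ethernet ports a slave
 * may only touch the general context: its MTU request is folded into
 * the port MTU, which is kept at the maximum requested by any function.
 * For IB ports only the capability mask (taken as the aggregate over
 * all slaves) and the QKey violation counter reset bit are honoured.
 */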
static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
				u8 op_mod, struct mlx4_cmd_mailbox *inbox)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_port_info *port_info;
	struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
	struct mlx4_slave_state *slave_st = &master->slave_state[slave];
	struct mlx4_set_port_rqp_calc_context *qpn_context;
	struct mlx4_set_port_general_context *gen_context;
	int reset_qkey_viols;
	int port;
	int is_eth;
	u32 in_modifier;
	u32 promisc;
	u16 mtu, prev_mtu;
	int err;
	int i;
	__be32 agg_cap_mask;
	__be32 slave_cap_mask;
	__be32 new_cap_mask;

	port = in_mod & 0xff;
	in_modifier = in_mod >> 8;
	is_eth = op_mod;
	port_info = &priv->port[port];

	/* Slaves cannot perform SET_PORT operations except changing MTU */
	if (is_eth) {
		if (slave != dev->caps.function &&
		    in_modifier != MLX4_SET_PORT_GENERAL) {
			mlx4_warn(dev, "denying SET_PORT for slave:%d\n",
					slave);
			return -EINVAL;
		}
		switch (in_modifier) {
		case MLX4_SET_PORT_RQP_CALC:
			qpn_context = inbox->buf;
			qpn_context->base_qpn =
				cpu_to_be32(port_info->base_qpn);
			qpn_context->n_mac = 0x7;
			promisc = be32_to_cpu(qpn_context->promisc) >>
				SET_PORT_PROMISC_SHIFT;
			qpn_context->promisc = cpu_to_be32(
				promisc << SET_PORT_PROMISC_SHIFT |
				port_info->base_qpn);
			promisc = be32_to_cpu(qpn_context->mcast) >>
				SET_PORT_MC_PROMISC_SHIFT;
			qpn_context->mcast = cpu_to_be32(
				promisc << SET_PORT_MC_PROMISC_SHIFT |
				port_info->base_qpn);
			break;
		case MLX4_SET_PORT_GENERAL:
			gen_context = inbox->buf;
			/* MTU is configured as the max MTU among all
			 * the functions on the port. */
			mtu = be16_to_cpu(gen_context->mtu);
			mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port] +
				    ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
			prev_mtu = slave_st->mtu[port];
			slave_st->mtu[port] = mtu;
			if (mtu > master->max_mtu[port])
				master->max_mtu[port] = mtu;
			if (mtu < prev_mtu && prev_mtu ==
						master->max_mtu[port]) {
				slave_st->mtu[port] = mtu;
				master->max_mtu[port] = mtu;
				for (i = 0; i < dev->num_slaves; i++) {
					master->max_mtu[port] =
					max(master->max_mtu[port],
					    master->slave_state[i].mtu[port]);
				}
			}

			gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
			break;
		}
		return mlx4_cmd(dev, inbox->dma, in_mod, op_mod,
				MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_NATIVE);
	}

	/* For IB, we only consider:
	 * - The capability mask, which is set to the aggregate of all
	 *   slave function capabilities
	 * - The QKey violation counter - reset according to each request.
	 */

	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		reset_qkey_viols = (*(u8 *) inbox->buf) & 0x40;
		new_cap_mask = ((__be32 *) inbox->buf)[2];
	} else {
		reset_qkey_viols = ((u8 *) inbox->buf)[3] & 0x1;
		new_cap_mask = ((__be32 *) inbox->buf)[1];
	}

	/* slave may not set the IS_SM capability for the port */
	if (slave != mlx4_master_func_num(dev) &&
	    (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_IS_SM))
		return -EINVAL;

	/* No DEV_MGMT in multifunc mode */
	if (mlx4_is_mfunc(dev) &&
	    (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_DEV_MGMT_SUP))
		return -EINVAL;

	agg_cap_mask = 0;
	slave_cap_mask =
		priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
	priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = new_cap_mask;
	for (i = 0; i < dev->num_slaves; i++)
		agg_cap_mask |=
			priv->mfunc.master.slave_state[i].ib_cap_mask[port];

	/* only clear mailbox for guests.  Master may be setting
	 * MTU or PKEY table size
	 */
	if (slave != dev->caps.function)
		memset(inbox->buf, 0, 256);
	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) inbox->buf	   |= !!reset_qkey_viols << 6;
		((__be32 *) inbox->buf)[2] = agg_cap_mask;
	} else {
		((u8 *) inbox->buf)[3]     |= !!reset_qkey_viols;
		((__be32 *) inbox->buf)[1] = agg_cap_mask;
	}

	err = mlx4_cmd(dev, inbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
	if (err)
		priv->mfunc.master.slave_state[slave].ib_cap_mask[port] =
			slave_cap_mask;
	return err;
}

int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	return mlx4_common_set_port(dev, slave, vhcr->in_modifier,
				    vhcr->op_modifier, inbox);
}

/* bit locations for set port command with zero op modifier */
enum {
	MLX4_SET_PORT_VL_CAP	 = 4, /* bits 7:4 */
	MLX4_SET_PORT_MTU_CAP	 = 12, /* bits 15:12 */
	MLX4_CHANGE_PORT_PKEY_TBL_SZ = 20,
	MLX4_CHANGE_PORT_VL_CAP	 = 21,
	MLX4_CHANGE_PORT_MTU_CAP = 22,
};

int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err, vl_cap, pkey_tbl_flag = 0;

	if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
		return 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];

	if (pkey_tbl_sz >= 0 && mlx4_is_master(dev)) {
		pkey_tbl_flag = 1;
		((__be16 *) mailbox->buf)[20] = cpu_to_be16(pkey_tbl_sz);
	}

	/* IB VL CAP enum isn't used by the firmware, just numerical values */
	for (vl_cap = 8; vl_cap >= 1; vl_cap >>= 1) {
		((__be32 *) mailbox->buf)[0] = cpu_to_be32(
			(1 << MLX4_CHANGE_PORT_MTU_CAP) |
			(1 << MLX4_CHANGE_PORT_VL_CAP)  |
			(pkey_tbl_flag << MLX4_CHANGE_PORT_PKEY_TBL_SZ) |
			(dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) |
			(vl_cap << MLX4_SET_PORT_VL_CAP));
		err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
				MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
		if (err != -ENOMEM)
			break;
	}

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

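/*
 * Configure the general port context: MTU, global pause and
 * per-priority flow control.  Global pause (pptx/pprx) is only enabled
 * when the corresponding PFC mask is zero, since the two modes are
 * mutually exclusive on a port.
 */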
int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
			  u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_port_general_context *context;
	int err;
	u32 in_mod;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	context = mailbox->buf;
	context->flags = SET_PORT_GEN_ALL_VALID;
	context->mtu = cpu_to_be16(mtu);
	context->pptx = (pptx * (!pfctx)) << 7;
	context->pfctx = pfctx;
	context->pprx = (pprx * (!pfcrx)) << 7;
	context->pfcrx = pfcrx;

	in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B,  MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_general);

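/*
 * Program the RX QP calculation context: base QPN, log of the number of
 * MACs per port, and unicast/multicast promiscuous modes.  Only used in
 * A0 steering mode; with other steering modes this call is a no-op.
 */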
int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
			   u8 promisc)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_port_rqp_calc_context *context;
	int err;
	u32 in_mod;
	u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
		MCAST_DIRECT : MCAST_DEFAULT;

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
		return 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	context = mailbox->buf;
	context->base_qpn = cpu_to_be32(base_qpn);
	context->n_mac = dev->caps.log_num_macs;
	context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
				       base_qpn);
	context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
				     base_qpn);
	context->intra_no_vlan = 0;
	context->no_vlan = MLX4_NO_VLAN_IDX;
	context->intra_vlan_miss = 0;
	context->vlan_miss = MLX4_VLAN_MISS_IDX;

	in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B,  MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);

int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_port_prio2tc_context *context;
	int err;
	u32 in_mod;
	int i;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	context = mailbox->buf;
	for (i = 0; i < MLX4_NUM_UP; i += 2)
		context->prio2tc[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1];

	in_mod = MLX4_SET_PORT_PRIO2TC << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_PRIO2TC);

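/*
 * Program the ETS scheduler for the port: per-TC priority group,
 * bandwidth percentage and rate limit.  A zero (or missing) ratelimit
 * entry falls back to MLX4_RATELIMIT_DEFAULT.
 */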
int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
		u8 *pg, u16 *ratelimit)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_port_scheduler_context *context;
	int err;
	u32 in_mod;
	int i;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	context = mailbox->buf;

	for (i = 0; i < MLX4_NUM_TC; i++) {
		struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i];
		u16 r = ratelimit && ratelimit[i] ? ratelimit[i] :
			MLX4_RATELIMIT_DEFAULT;

		tc->pg = htons(pg[i]);
		tc->bw_precentage = htons(tc_tx_bw[i]);

		tc->max_bw_units = htons(MLX4_RATELIMIT_UNITS);
		tc->max_bw_value = htons(r);
	}

	in_mod = MLX4_SET_PORT_SCHEDULER << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER);

enum {
	VXLAN_ENABLE_MODIFY	= 1 << 7,
	VXLAN_STEERING_MODIFY	= 1 << 6,

	VXLAN_ENABLE		= 1 << 7,
};

struct mlx4_set_port_vxlan_context {
	u32	reserved1;
	u8	modify_flags;
	u8	reserved2;
	u8	enable_flags;
	u8	steering;
};

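/*
 * Enable VXLAN handling on the port and select the steering mode used
 * for tunnelled traffic.
 */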
int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering)
{
	int err;
	u32 in_mod;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_port_vxlan_context  *context;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	context = mailbox->buf;
	memset(context, 0, sizeof(*context));

	context->modify_flags = VXLAN_ENABLE_MODIFY | VXLAN_STEERING_MODIFY;
	context->enable_flags = VXLAN_ENABLE;
	context->steering  = steering;

	in_mod = MLX4_SET_PORT_VXLAN << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_VXLAN);

int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	return 0;
}

int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
			u64 mac, u64 clear, u8 mode)
{
	return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
			MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B,
			MLX4_CMD_WRAPPED);
}
EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR);

int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	return 0;
}

int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave,
			       u32 in_mod, struct mlx4_cmd_mailbox *outbox)
{
	return mlx4_cmd_box(dev, 0, outbox->dma, in_mod, 0,
			    MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
			    MLX4_CMD_NATIVE);
}

int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	if (slave != dev->caps.function)
		return 0;
	return mlx4_common_dump_eth_stats(dev, slave,
					  vhcr->in_modifier, outbox);
}

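/*
 * Select which DUMP_ETH_STATS counter groups a function reports.  For
 * non-multifunction devices the bitmap is left clear; otherwise slaves
 * get the traffic, drop and port counter groups, and the master
 * additionally gets the error counter group.
 */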
void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap)
{
	if (!mlx4_is_mfunc(dev)) {
		*stats_bitmap = 0;
		return;
	}

	*stats_bitmap = (MLX4_STATS_TRAFFIC_COUNTERS_MASK |
			 MLX4_STATS_TRAFFIC_DROPS_MASK |
			 MLX4_STATS_PORT_COUNTERS_MASK);

	if (mlx4_is_master(dev))
		*stats_bitmap |= MLX4_STATS_ERROR_COUNTERS_MASK;
}
EXPORT_SYMBOL(mlx4_set_stats_bitmap);