1 /*
2  * Copyright (c) 2011 Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  *
32  */
33 
34 #include <linux/dcbnl.h>
35 #include <linux/math64.h>
36 
37 #include "mlx4_en.h"
38 #include "fw_qos.h"
39 
/* CEE DCB operational state, as reported by getstate/setstate */
enum {
	MLX4_CEE_STATE_DOWN   = 0,
	MLX4_CEE_STATE_UP     = 1,
};
44 
/* Definitions for QCN
 */

/* Firmware mailbox layout for 802.1Qau QCN reaction-point parameters.
 * All fields are big-endian, as exchanged with the device via
 * MLX4_CMD_CONGESTION_CTRL_OPCODE; do not reorder or resize fields.
 * The reservedN arrays pad the structure to the layout the firmware expects.
 */
struct mlx4_congestion_control_mb_prio_802_1_qau_params {
	__be32 modify_enable_high;	/* modify-enable bits for the upper params */
	__be32 modify_enable_low;	/* modify-enable bits for the lower params */
	__be32 reserved1;
	__be32 extended_enable;		/* bit 31: RPG enable, bit 30: CN tag (see setqcn) */
	__be32 rppp_max_rps;
	__be32 rpg_time_reset;
	__be32 rpg_byte_reset;
	__be32 rpg_threshold;
	__be32 rpg_max_rate;
	__be32 rpg_ai_rate;
	__be32 rpg_hai_rate;
	__be32 rpg_gd;
	__be32 rpg_min_dec_fac;
	__be32 rpg_min_rate;
	__be32 max_time_rise;
	__be32 max_byte_rise;
	__be32 max_qdelta;
	__be32 min_qoffset;
	__be32 gd_coefficient;
	__be32 reserved2[5];
	__be32 cp_sample_base;
	__be32 reserved3[39];
};
72 
/* Firmware mailbox layout for 802.1Qau QCN statistics.
 * Big-endian fields returned by the CONGESTION_CONTROL_GET_STATISTICS
 * command (see mlx4_en_dcbnl_ieee_getqcnstats); layout is fixed by firmware.
 */
struct mlx4_congestion_control_mb_prio_802_1_qau_statistics {
	__be64 rppp_rp_centiseconds;
	__be32 reserved1;
	__be32 ignored_cnm;
	__be32 rppp_created_rps;
	__be32 estimated_total_rate;
	__be32 max_active_rate_limiter_index;
	__be32 dropped_cnms_busy_fw;
	__be32 reserved2;
	__be32 cnms_handled_successfully;
	__be32 min_total_limiters_rate;
	__be32 max_total_limiters_rate;
	__be32 reserved3[4];
};
87 
88 static u8 mlx4_en_dcbnl_getcap(struct net_device *dev, int capid, u8 *cap)
89 {
90 	struct mlx4_en_priv *priv = netdev_priv(dev);
91 
92 	switch (capid) {
93 	case DCB_CAP_ATTR_PFC:
94 		*cap = true;
95 		break;
96 	case DCB_CAP_ATTR_DCBX:
97 		*cap = priv->dcbx_cap;
98 		break;
99 	case DCB_CAP_ATTR_PFC_TCS:
100 		*cap = 1 <<  mlx4_max_tc(priv->mdev->dev);
101 		break;
102 	default:
103 		*cap = false;
104 		break;
105 	}
106 
107 	return 0;
108 }
109 
110 static u8 mlx4_en_dcbnl_getpfcstate(struct net_device *netdev)
111 {
112 	struct mlx4_en_priv *priv = netdev_priv(netdev);
113 
114 	return priv->cee_config.pfc_state;
115 }
116 
117 static void mlx4_en_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
118 {
119 	struct mlx4_en_priv *priv = netdev_priv(netdev);
120 
121 	priv->cee_config.pfc_state = state;
122 }
123 
124 static void mlx4_en_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
125 				      u8 *setting)
126 {
127 	struct mlx4_en_priv *priv = netdev_priv(netdev);
128 
129 	*setting = priv->cee_config.dcb_pfc[priority];
130 }
131 
132 static void mlx4_en_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
133 				      u8 setting)
134 {
135 	struct mlx4_en_priv *priv = netdev_priv(netdev);
136 
137 	priv->cee_config.dcb_pfc[priority] = setting;
138 	priv->cee_config.pfc_state = true;
139 }
140 
141 static int mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
142 {
143 	struct mlx4_en_priv *priv = netdev_priv(netdev);
144 
145 	if (!(priv->flags & MLX4_EN_FLAG_DCB_ENABLED))
146 		return -EINVAL;
147 
148 	if (tcid == DCB_NUMTCS_ATTR_PFC)
149 		*num = mlx4_max_tc(priv->mdev->dev);
150 	else
151 		*num = 0;
152 
153 	return 0;
154 }
155 
156 static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
157 {
158 	struct mlx4_en_priv *priv = netdev_priv(netdev);
159 	struct mlx4_en_dev *mdev = priv->mdev;
160 
161 	if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
162 		return 1;
163 
164 	if (priv->cee_config.pfc_state) {
165 		int tc;
166 
167 		priv->prof->rx_pause = 0;
168 		priv->prof->tx_pause = 0;
169 		for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) {
170 			u8 tc_mask = 1 << tc;
171 
172 			switch (priv->cee_config.dcb_pfc[tc]) {
173 			case pfc_disabled:
174 				priv->prof->tx_ppp &= ~tc_mask;
175 				priv->prof->rx_ppp &= ~tc_mask;
176 				break;
177 			case pfc_enabled_full:
178 				priv->prof->tx_ppp |= tc_mask;
179 				priv->prof->rx_ppp |= tc_mask;
180 				break;
181 			case pfc_enabled_tx:
182 				priv->prof->tx_ppp |= tc_mask;
183 				priv->prof->rx_ppp &= ~tc_mask;
184 				break;
185 			case pfc_enabled_rx:
186 				priv->prof->tx_ppp &= ~tc_mask;
187 				priv->prof->rx_ppp |= tc_mask;
188 				break;
189 			default:
190 				break;
191 			}
192 		}
193 		en_dbg(DRV, priv, "Set pfc on\n");
194 	} else {
195 		priv->prof->rx_pause = 1;
196 		priv->prof->tx_pause = 1;
197 		en_dbg(DRV, priv, "Set pfc off\n");
198 	}
199 
200 	if (mlx4_SET_PORT_general(mdev->dev, priv->port,
201 				  priv->rx_skb_size + ETH_FCS_LEN,
202 				  priv->prof->tx_pause,
203 				  priv->prof->tx_ppp,
204 				  priv->prof->rx_pause,
205 				  priv->prof->rx_ppp)) {
206 		en_err(priv, "Failed setting pause params\n");
207 		return 1;
208 	}
209 
210 	return 0;
211 }
212 
213 static u8 mlx4_en_dcbnl_get_state(struct net_device *dev)
214 {
215 	struct mlx4_en_priv *priv = netdev_priv(dev);
216 
217 	if (priv->flags & MLX4_EN_FLAG_DCB_ENABLED)
218 		return MLX4_CEE_STATE_UP;
219 
220 	return MLX4_CEE_STATE_DOWN;
221 }
222 
223 static u8 mlx4_en_dcbnl_set_state(struct net_device *dev, u8 state)
224 {
225 	struct mlx4_en_priv *priv = netdev_priv(dev);
226 	int num_tcs = 0;
227 
228 	if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
229 		return 1;
230 
231 	if (!!(state) == !!(priv->flags & MLX4_EN_FLAG_DCB_ENABLED))
232 		return 0;
233 
234 	if (state) {
235 		priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
236 		num_tcs = IEEE_8021QAZ_MAX_TCS;
237 	} else {
238 		priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
239 	}
240 
241 	if (mlx4_en_alloc_tx_queue_per_tc(dev, num_tcs))
242 		return 1;
243 
244 	return 0;
245 }
246 
247 /* On success returns a non-zero 802.1p user priority bitmap
248  * otherwise returns 0 as the invalid user priority bitmap to
249  * indicate an error.
250  */
251 static int mlx4_en_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
252 {
253 	struct mlx4_en_priv *priv = netdev_priv(netdev);
254 	struct dcb_app app = {
255 				.selector = idtype,
256 				.protocol = id,
257 			     };
258 	if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
259 		return 0;
260 
261 	return dcb_getapp(netdev, &app);
262 }
263 
264 static int mlx4_en_dcbnl_setapp(struct net_device *netdev, u8 idtype,
265 				u16 id, u8 up)
266 {
267 	struct mlx4_en_priv *priv = netdev_priv(netdev);
268 	struct dcb_app app;
269 
270 	if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
271 		return -EINVAL;
272 
273 	memset(&app, 0, sizeof(struct dcb_app));
274 	app.selector = idtype;
275 	app.protocol = id;
276 	app.priority = up;
277 
278 	return dcb_setapp(netdev, &app);
279 }
280 
281 static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev,
282 				   struct ieee_ets *ets)
283 {
284 	struct mlx4_en_priv *priv = netdev_priv(dev);
285 	struct ieee_ets *my_ets = &priv->ets;
286 
287 	if (!my_ets)
288 		return -EINVAL;
289 
290 	ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
291 	ets->cbs = my_ets->cbs;
292 	memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
293 	memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
294 	memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
295 
296 	return 0;
297 }
298 
299 static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets)
300 {
301 	int i;
302 	int total_ets_bw = 0;
303 	int has_ets_tc = 0;
304 
305 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
306 		if (ets->prio_tc[i] >= MLX4_EN_NUM_UP_HIGH) {
307 			en_err(priv, "Bad priority in UP <=> TC mapping. TC: %d, UP: %d\n",
308 					i, ets->prio_tc[i]);
309 			return -EINVAL;
310 		}
311 
312 		switch (ets->tc_tsa[i]) {
313 		case IEEE_8021QAZ_TSA_VENDOR:
314 		case IEEE_8021QAZ_TSA_STRICT:
315 			break;
316 		case IEEE_8021QAZ_TSA_ETS:
317 			has_ets_tc = 1;
318 			total_ets_bw += ets->tc_tx_bw[i];
319 			break;
320 		default:
321 			en_err(priv, "TC[%d]: Not supported TSA: %d\n",
322 					i, ets->tc_tsa[i]);
323 			return -EOPNOTSUPP;
324 		}
325 	}
326 
327 	if (has_ets_tc && total_ets_bw != MLX4_EN_BW_MAX) {
328 		en_err(priv, "Bad ETS BW sum: %d. Should be exactly 100%%\n",
329 				total_ets_bw);
330 		return -EINVAL;
331 	}
332 
333 	return 0;
334 }
335 
/* Program the port TX scheduler via the SET_PORT_SCHEDULER command.
 *
 * Either @ets or @ratelimit may be NULL; the cached configuration
 * (priv->ets / priv->maxrate) is used in its place.
 */
static int mlx4_en_config_port_scheduler(struct mlx4_en_priv *priv,
		struct ieee_ets *ets, u16 *ratelimit)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int num_strict = 0;
	int i;
	__u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS] = { 0 };
	__u8 pg[IEEE_8021QAZ_MAX_TCS] = { 0 };

	/* GNU "?:" extension: fall back to cached values when NULL */
	ets = ets ?: &priv->ets;
	ratelimit = ratelimit ?: priv->maxrate;

	/* higher TC means higher priority => lower pg */
	for (i = IEEE_8021QAZ_MAX_TCS - 1; i >= 0; i--) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_VENDOR:
			pg[i] = MLX4_EN_TC_VENDOR;
			tc_tx_bw[i] = MLX4_EN_BW_MAX;
			break;
		case IEEE_8021QAZ_TSA_STRICT:
			/* The loop runs from the highest TC downwards, so
			 * strict TCs are numbered in descending TC order:
			 * a higher TC gets a lower pg value.
			 */
			pg[i] = num_strict++;
			tc_tx_bw[i] = MLX4_EN_BW_MAX;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			pg[i] = MLX4_EN_TC_ETS;
			/* never pass 0 bandwidth; clamp to the minimum */
			tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX4_EN_BW_MIN;
			break;
		}
	}

	return mlx4_SET_PORT_SCHEDULER(mdev->dev, priv->port, tc_tx_bw, pg,
			ratelimit);
}
369 
370 static int
371 mlx4_en_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
372 {
373 	struct mlx4_en_priv *priv = netdev_priv(dev);
374 	struct mlx4_en_dev *mdev = priv->mdev;
375 	int err;
376 
377 	err = mlx4_en_ets_validate(priv, ets);
378 	if (err)
379 		return err;
380 
381 	err = mlx4_SET_PORT_PRIO2TC(mdev->dev, priv->port, ets->prio_tc);
382 	if (err)
383 		return err;
384 
385 	err = mlx4_en_config_port_scheduler(priv, ets, NULL);
386 	if (err)
387 		return err;
388 
389 	memcpy(&priv->ets, ets, sizeof(priv->ets));
390 
391 	return 0;
392 }
393 
394 static int mlx4_en_dcbnl_ieee_getpfc(struct net_device *dev,
395 		struct ieee_pfc *pfc)
396 {
397 	struct mlx4_en_priv *priv = netdev_priv(dev);
398 
399 	pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
400 	pfc->pfc_en = priv->prof->tx_ppp;
401 
402 	return 0;
403 }
404 
405 static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
406 		struct ieee_pfc *pfc)
407 {
408 	struct mlx4_en_priv *priv = netdev_priv(dev);
409 	struct mlx4_en_port_profile *prof = priv->prof;
410 	struct mlx4_en_dev *mdev = priv->mdev;
411 	int err;
412 
413 	en_dbg(DRV, priv, "cap: 0x%x en: 0x%x mbc: 0x%x delay: %d\n",
414 			pfc->pfc_cap,
415 			pfc->pfc_en,
416 			pfc->mbc,
417 			pfc->delay);
418 
419 	prof->rx_pause = !pfc->pfc_en;
420 	prof->tx_pause = !pfc->pfc_en;
421 	prof->rx_ppp = pfc->pfc_en;
422 	prof->tx_ppp = pfc->pfc_en;
423 
424 	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
425 				    priv->rx_skb_size + ETH_FCS_LEN,
426 				    prof->tx_pause,
427 				    prof->tx_ppp,
428 				    prof->rx_pause,
429 				    prof->rx_ppp);
430 	if (err)
431 		en_err(priv, "Failed setting pause params\n");
432 	else
433 		mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
434 						prof->rx_ppp, prof->rx_pause,
435 						prof->tx_ppp, prof->tx_pause);
436 
437 	return err;
438 }
439 
440 static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev)
441 {
442 	struct mlx4_en_priv *priv = netdev_priv(dev);
443 
444 	return priv->dcbx_cap;
445 }
446 
447 static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
448 {
449 	struct mlx4_en_priv *priv = netdev_priv(dev);
450 	struct ieee_ets ets = {0};
451 	struct ieee_pfc pfc = {0};
452 
453 	if (mode == priv->dcbx_cap)
454 		return 0;
455 
456 	if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
457 	    ((mode & DCB_CAP_DCBX_VER_IEEE) &&
458 	     (mode & DCB_CAP_DCBX_VER_CEE)) ||
459 	    !(mode & DCB_CAP_DCBX_HOST))
460 		goto err;
461 
462 	priv->dcbx_cap = mode;
463 
464 	ets.ets_cap = IEEE_8021QAZ_MAX_TCS;
465 	pfc.pfc_cap = IEEE_8021QAZ_MAX_TCS;
466 
467 	if (mode & DCB_CAP_DCBX_VER_IEEE) {
468 		if (mlx4_en_dcbnl_ieee_setets(dev, &ets))
469 			goto err;
470 		if (mlx4_en_dcbnl_ieee_setpfc(dev, &pfc))
471 			goto err;
472 	} else if (mode & DCB_CAP_DCBX_VER_CEE) {
473 		if (mlx4_en_dcbnl_set_all(dev))
474 			goto err;
475 	} else {
476 		if (mlx4_en_dcbnl_ieee_setets(dev, &ets))
477 			goto err;
478 		if (mlx4_en_dcbnl_ieee_setpfc(dev, &pfc))
479 			goto err;
480 		if (mlx4_en_alloc_tx_queue_per_tc(dev, 0))
481 			goto err;
482 	}
483 
484 	return 0;
485 err:
486 	return 1;
487 }
488 
489 #define MLX4_RATELIMIT_UNITS_IN_KB 100000 /* rate-limit HW unit in Kbps */
490 static int mlx4_en_dcbnl_ieee_getmaxrate(struct net_device *dev,
491 				   struct ieee_maxrate *maxrate)
492 {
493 	struct mlx4_en_priv *priv = netdev_priv(dev);
494 	int i;
495 
496 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
497 		maxrate->tc_maxrate[i] =
498 			priv->maxrate[i] * MLX4_RATELIMIT_UNITS_IN_KB;
499 
500 	return 0;
501 }
502 
503 static int mlx4_en_dcbnl_ieee_setmaxrate(struct net_device *dev,
504 		struct ieee_maxrate *maxrate)
505 {
506 	struct mlx4_en_priv *priv = netdev_priv(dev);
507 	u16 tmp[IEEE_8021QAZ_MAX_TCS];
508 	int i, err;
509 
510 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
511 		/* Convert from Kbps into HW units, rounding result up.
512 		 * Setting to 0, means unlimited BW.
513 		 */
514 		tmp[i] = div_u64(maxrate->tc_maxrate[i] +
515 				 MLX4_RATELIMIT_UNITS_IN_KB - 1,
516 				 MLX4_RATELIMIT_UNITS_IN_KB);
517 	}
518 
519 	err = mlx4_en_config_port_scheduler(priv, NULL, tmp);
520 	if (err)
521 		return err;
522 
523 	memcpy(priv->maxrate, tmp, sizeof(priv->maxrate));
524 
525 	return 0;
526 }
527 
/* Bit positions inside the extended_enable mailbox field */
#define RPG_ENABLE_BIT	31
#define CN_TAG_BIT	30

/* Read per-TC QCN (802.1Qau) parameters from firmware.
 * Issues one GET_PARAMS mailbox command per traffic class and unpacks the
 * big-endian reply into @qcn. Returns 0 or a negative errno.
 */
static int mlx4_en_dcbnl_ieee_getqcn(struct net_device *dev,
				     struct ieee_qcn *qcn)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_congestion_control_mb_prio_802_1_qau_params *hw_qcn;
	struct mlx4_cmd_mailbox *mailbox_out = NULL;
	u64 mailbox_in_dma = 0;	/* no input mailbox for GET_PARAMS */
	u32 inmod = 0;
	int i, err;

	if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
		return -EOPNOTSUPP;

	mailbox_out = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
	if (IS_ERR(mailbox_out))
		return -ENOMEM;
	hw_qcn =
	(struct mlx4_congestion_control_mb_prio_802_1_qau_params *)
	mailbox_out->buf;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		/* input modifier: port | priority bitmask | algorithm */
		inmod = priv->port | ((1 << i) << 8) |
			 (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);
		err = mlx4_cmd_box(priv->mdev->dev, mailbox_in_dma,
				   mailbox_out->dma,
				   inmod, MLX4_CONGESTION_CONTROL_GET_PARAMS,
				   MLX4_CMD_CONGESTION_CTRL_OPCODE,
				   MLX4_CMD_TIME_CLASS_C,
				   MLX4_CMD_NATIVE);
		if (err) {
			mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
			return err;
		}

		qcn->rpg_enable[i] =
			be32_to_cpu(hw_qcn->extended_enable) >> RPG_ENABLE_BIT;
		qcn->rppp_max_rps[i] =
			be32_to_cpu(hw_qcn->rppp_max_rps);
		qcn->rpg_time_reset[i] =
			be32_to_cpu(hw_qcn->rpg_time_reset);
		qcn->rpg_byte_reset[i] =
			be32_to_cpu(hw_qcn->rpg_byte_reset);
		qcn->rpg_threshold[i] =
			be32_to_cpu(hw_qcn->rpg_threshold);
		qcn->rpg_max_rate[i] =
			be32_to_cpu(hw_qcn->rpg_max_rate);
		qcn->rpg_ai_rate[i] =
			be32_to_cpu(hw_qcn->rpg_ai_rate);
		qcn->rpg_hai_rate[i] =
			be32_to_cpu(hw_qcn->rpg_hai_rate);
		qcn->rpg_gd[i] =
			be32_to_cpu(hw_qcn->rpg_gd);
		qcn->rpg_min_dec_fac[i] =
			be32_to_cpu(hw_qcn->rpg_min_dec_fac);
		qcn->rpg_min_rate[i] =
			be32_to_cpu(hw_qcn->rpg_min_rate);
		/* CNDD state is tracked by the driver, not the firmware */
		qcn->cndd_state_machine[i] =
			priv->cndd_state[i];
	}
	mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
	return 0;
}
593 
/* Write per-TC QCN (802.1Qau) parameters to firmware.
 * Issues one SET_PARAMS mailbox command per traffic class; the
 * modify-enable masks must be set so firmware accepts the new values.
 * Returns 0 or a negative errno.
 */
static int mlx4_en_dcbnl_ieee_setqcn(struct net_device *dev,
				     struct ieee_qcn *qcn)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_congestion_control_mb_prio_802_1_qau_params *hw_qcn;
	struct mlx4_cmd_mailbox *mailbox_in = NULL;
	u64 mailbox_in_dma = 0;
	u32 inmod = 0;
	int i, err;
#define MODIFY_ENABLE_HIGH_MASK 0xc0000000
#define MODIFY_ENABLE_LOW_MASK 0xffc00000

	if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
		return -EOPNOTSUPP;

	mailbox_in = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
	if (IS_ERR(mailbox_in))
		return -ENOMEM;

	mailbox_in_dma = mailbox_in->dma;
	hw_qcn =
	(struct mlx4_congestion_control_mb_prio_802_1_qau_params *)mailbox_in->buf;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		/* input modifier: port | priority bitmask | algorithm */
		inmod = priv->port | ((1 << i) << 8) |
			 (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);

		/* Before updating QCN parameter,
		 * need to set it's modify enable bit to 1
		 */

		hw_qcn->modify_enable_high = cpu_to_be32(
						MODIFY_ENABLE_HIGH_MASK);
		hw_qcn->modify_enable_low = cpu_to_be32(MODIFY_ENABLE_LOW_MASK);

		hw_qcn->extended_enable = cpu_to_be32(qcn->rpg_enable[i] << RPG_ENABLE_BIT);
		hw_qcn->rppp_max_rps = cpu_to_be32(qcn->rppp_max_rps[i]);
		hw_qcn->rpg_time_reset = cpu_to_be32(qcn->rpg_time_reset[i]);
		hw_qcn->rpg_byte_reset = cpu_to_be32(qcn->rpg_byte_reset[i]);
		hw_qcn->rpg_threshold = cpu_to_be32(qcn->rpg_threshold[i]);
		hw_qcn->rpg_max_rate = cpu_to_be32(qcn->rpg_max_rate[i]);
		hw_qcn->rpg_ai_rate = cpu_to_be32(qcn->rpg_ai_rate[i]);
		hw_qcn->rpg_hai_rate = cpu_to_be32(qcn->rpg_hai_rate[i]);
		hw_qcn->rpg_gd = cpu_to_be32(qcn->rpg_gd[i]);
		hw_qcn->rpg_min_dec_fac = cpu_to_be32(qcn->rpg_min_dec_fac[i]);
		hw_qcn->rpg_min_rate = cpu_to_be32(qcn->rpg_min_rate[i]);
		/* CNDD state is cached driver-side; only the CN tag bit is
		 * forwarded to firmware (when interior-ready)
		 */
		priv->cndd_state[i] = qcn->cndd_state_machine[i];
		if (qcn->cndd_state_machine[i] == DCB_CNDD_INTERIOR_READY)
			hw_qcn->extended_enable |= cpu_to_be32(1 << CN_TAG_BIT);

		err = mlx4_cmd(priv->mdev->dev, mailbox_in_dma, inmod,
			       MLX4_CONGESTION_CONTROL_SET_PARAMS,
			       MLX4_CMD_CONGESTION_CTRL_OPCODE,
			       MLX4_CMD_TIME_CLASS_C,
			       MLX4_CMD_NATIVE);
		if (err) {
			mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_in);
			return err;
		}
	}
	mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_in);
	return 0;
}
656 
/* Read per-TC QCN (802.1Qau) statistics from firmware.
 * Issues one GET_STATISTICS mailbox command per traffic class and copies
 * the counters of interest into @qcn_stats. Returns 0 or a negative errno.
 */
static int mlx4_en_dcbnl_ieee_getqcnstats(struct net_device *dev,
					  struct ieee_qcn_stats *qcn_stats)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_congestion_control_mb_prio_802_1_qau_statistics *hw_qcn_stats;
	struct mlx4_cmd_mailbox *mailbox_out = NULL;
	u64 mailbox_in_dma = 0;	/* no input mailbox for GET_STATISTICS */
	u32 inmod = 0;
	int i, err;

	if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
		return -EOPNOTSUPP;

	mailbox_out = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
	if (IS_ERR(mailbox_out))
		return -ENOMEM;

	hw_qcn_stats =
	(struct mlx4_congestion_control_mb_prio_802_1_qau_statistics *)
	mailbox_out->buf;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		/* input modifier: port | priority bitmask | algorithm */
		inmod = priv->port | ((1 << i) << 8) |
			 (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);
		err = mlx4_cmd_box(priv->mdev->dev, mailbox_in_dma,
				   mailbox_out->dma, inmod,
				   MLX4_CONGESTION_CONTROL_GET_STATISTICS,
				   MLX4_CMD_CONGESTION_CTRL_OPCODE,
				   MLX4_CMD_TIME_CLASS_C,
				   MLX4_CMD_NATIVE);
		if (err) {
			mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
			return err;
		}
		qcn_stats->rppp_rp_centiseconds[i] =
			be64_to_cpu(hw_qcn_stats->rppp_rp_centiseconds);
		qcn_stats->rppp_created_rps[i] =
			be32_to_cpu(hw_qcn_stats->rppp_created_rps);
	}
	mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
	return 0;
}
699 
/* Full DCB netlink ops: IEEE 802.1Qaz (ETS/PFC/maxrate/QCN) plus the
 * legacy CEE interface and DCBX mode selection.
 */
const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = {
	.ieee_getets		= mlx4_en_dcbnl_ieee_getets,
	.ieee_setets		= mlx4_en_dcbnl_ieee_setets,
	.ieee_getmaxrate	= mlx4_en_dcbnl_ieee_getmaxrate,
	.ieee_setmaxrate	= mlx4_en_dcbnl_ieee_setmaxrate,
	.ieee_getqcn		= mlx4_en_dcbnl_ieee_getqcn,
	.ieee_setqcn		= mlx4_en_dcbnl_ieee_setqcn,
	.ieee_getqcnstats	= mlx4_en_dcbnl_ieee_getqcnstats,
	.ieee_getpfc		= mlx4_en_dcbnl_ieee_getpfc,
	.ieee_setpfc		= mlx4_en_dcbnl_ieee_setpfc,

	.getstate	= mlx4_en_dcbnl_get_state,
	.setstate	= mlx4_en_dcbnl_set_state,
	.getpfccfg	= mlx4_en_dcbnl_get_pfc_cfg,
	.setpfccfg	= mlx4_en_dcbnl_set_pfc_cfg,
	.setall		= mlx4_en_dcbnl_set_all,
	.getcap		= mlx4_en_dcbnl_getcap,
	.getnumtcs	= mlx4_en_dcbnl_getnumtcs,
	.getpfcstate	= mlx4_en_dcbnl_getpfcstate,
	.setpfcstate	= mlx4_en_dcbnl_setpfcstate,
	.getapp		= mlx4_en_dcbnl_getapp,
	.setapp		= mlx4_en_dcbnl_setapp,

	.getdcbx	= mlx4_en_dcbnl_getdcbx,
	.setdcbx	= mlx4_en_dcbnl_setdcbx,
};
726 
/* Reduced DCB netlink ops: PFC and app-table support without ETS/QCN
 * (and without .getcap/.getstate — presumably for devices lacking the
 * full QoS capability set; NOTE(review): confirm against the code that
 * selects between the two ops tables).
 */
const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops = {
	.ieee_getpfc	= mlx4_en_dcbnl_ieee_getpfc,
	.ieee_setpfc	= mlx4_en_dcbnl_ieee_setpfc,

	.setstate	= mlx4_en_dcbnl_set_state,
	.getpfccfg	= mlx4_en_dcbnl_get_pfc_cfg,
	.setpfccfg	= mlx4_en_dcbnl_set_pfc_cfg,
	.setall		= mlx4_en_dcbnl_set_all,
	.getnumtcs	= mlx4_en_dcbnl_getnumtcs,
	.getpfcstate	= mlx4_en_dcbnl_getpfcstate,
	.setpfcstate	= mlx4_en_dcbnl_setpfcstate,
	.getapp		= mlx4_en_dcbnl_getapp,
	.setapp		= mlx4_en_dcbnl_setapp,

	.getdcbx	= mlx4_en_dcbnl_getdcbx,
	.setdcbx	= mlx4_en_dcbnl_setdcbx,
};
744