1 /*
2  * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 #include "port_buffer.h"
33 
34 int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
35 			    struct mlx5e_port_buffer *port_buffer)
36 {
37 	u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
38 	struct mlx5_core_dev *mdev = priv->mdev;
39 	int sz = MLX5_ST_SZ_BYTES(pbmc_reg);
40 	u32 total_used = 0;
41 	void *buffer;
42 	void *out;
43 	int err;
44 	int i;
45 
46 	out = kzalloc(sz, GFP_KERNEL);
47 	if (!out)
48 		return -ENOMEM;
49 
50 	err = mlx5e_port_query_pbmc(mdev, out);
51 	if (err)
52 		goto out;
53 
54 	for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
55 		buffer = MLX5_ADDR_OF(pbmc_reg, out, buffer[i]);
56 		port_buffer->buffer[i].lossy =
57 			MLX5_GET(bufferx_reg, buffer, lossy);
58 		port_buffer->buffer[i].epsb =
59 			MLX5_GET(bufferx_reg, buffer, epsb);
60 		port_buffer->buffer[i].size =
61 			MLX5_GET(bufferx_reg, buffer, size) * port_buff_cell_sz;
62 		port_buffer->buffer[i].xon =
63 			MLX5_GET(bufferx_reg, buffer, xon_threshold) * port_buff_cell_sz;
64 		port_buffer->buffer[i].xoff =
65 			MLX5_GET(bufferx_reg, buffer, xoff_threshold) * port_buff_cell_sz;
66 		total_used += port_buffer->buffer[i].size;
67 
68 		mlx5e_dbg(HW, priv, "buffer %d: size=%d, xon=%d, xoff=%d, epsb=%d, lossy=%d\n", i,
69 			  port_buffer->buffer[i].size,
70 			  port_buffer->buffer[i].xon,
71 			  port_buffer->buffer[i].xoff,
72 			  port_buffer->buffer[i].epsb,
73 			  port_buffer->buffer[i].lossy);
74 	}
75 
76 	port_buffer->headroom_size = total_used;
77 	port_buffer->port_buffer_size =
78 		MLX5_GET(pbmc_reg, out, port_buffer_size) * port_buff_cell_sz;
79 	port_buffer->spare_buffer_size =
80 		port_buffer->port_buffer_size - total_used;
81 
82 	mlx5e_dbg(HW, priv, "total buffer size=%d, spare buffer size=%d\n",
83 		  port_buffer->port_buffer_size,
84 		  port_buffer->spare_buffer_size);
85 out:
86 	kfree(out);
87 	return err;
88 }
89 
/* Snapshot of one firmware shared-buffer pool; fields mirror the
 * SBPR register fields read in mlx5e_port_query_pool().
 */
struct mlx5e_buffer_pool {
	u32 infi_size;		/* SBPR infi_size flag */
	u32 size;		/* SBPR pool size */
	u32 buff_occupancy;	/* SBPR current occupancy */
};
95 
96 static int mlx5e_port_query_pool(struct mlx5_core_dev *mdev,
97 				 struct mlx5e_buffer_pool *buffer_pool,
98 				 u32 desc, u8 dir, u8 pool_idx)
99 {
100 	u32 out[MLX5_ST_SZ_DW(sbpr_reg)] = {};
101 	int err;
102 
103 	err = mlx5e_port_query_sbpr(mdev, desc, dir, pool_idx, out,
104 				    sizeof(out));
105 	if (err)
106 		return err;
107 
108 	buffer_pool->size = MLX5_GET(sbpr_reg, out, size);
109 	buffer_pool->infi_size = MLX5_GET(sbpr_reg, out, infi_size);
110 	buffer_pool->buff_occupancy = MLX5_GET(sbpr_reg, out, buff_occupancy);
111 
112 	return err;
113 }
114 
/* Traffic direction selectors for SBPR/SBCM register accesses. */
enum {
	MLX5_INGRESS_DIR = 0,
	MLX5_EGRESS_DIR = 1,
};

/* Shared buffer pool indices used by this driver. */
enum {
	MLX5_LOSSY_POOL = 0,
	MLX5_LOSSLESS_POOL = 1,
};

/* No limit on usage of shared buffer pool (max_buff=0) */
#define MLX5_SB_POOL_NO_THRESHOLD  0
/* Shared buffer pool usage threshold when calculated
 * dynamically in alpha units. alpha=13 is equivalent to
 * HW_alpha of  [(1/128) * 2 ^ (alpha-1)] = 32, where HW_alpha
 * equates to the following portion of the shared buffer pool:
 * [32 / (1 + n * 32)] While *n* is the number of buffers
 * that are using the shared buffer pool.
 */
#define MLX5_SB_POOL_THRESHOLD 13
135 
/* Shared buffer class management parameters */
struct mlx5_sbcm_params {
	u8 pool_idx;	/* which shared pool the buffer is bound to */
	u8 max_buff;	/* usage threshold, in alpha units (0 = no limit) */
	u8 infi_size;	/* nonzero: pool usage is unbounded */
};

/* Zero-sized buffer: bound to the lossy pool with no usage at all. */
static const struct mlx5_sbcm_params sbcm_default = {
	.pool_idx = MLX5_LOSSY_POOL,
	.max_buff = MLX5_SB_POOL_NO_THRESHOLD,
	.infi_size = 0,
};

/* Lossy buffer: may use the lossy pool without restriction. */
static const struct mlx5_sbcm_params sbcm_lossy = {
	.pool_idx = MLX5_LOSSY_POOL,
	.max_buff = MLX5_SB_POOL_NO_THRESHOLD,
	.infi_size = 1,
};

/* Lossless buffer sharing the lossless pool with others: dynamic threshold. */
static const struct mlx5_sbcm_params sbcm_lossless = {
	.pool_idx = MLX5_LOSSLESS_POOL,
	.max_buff = MLX5_SB_POOL_THRESHOLD,
	.infi_size = 0,
};

/* Sole lossless buffer: whole lossless pool available, no threshold. */
static const struct mlx5_sbcm_params sbcm_lossless_no_threshold = {
	.pool_idx = MLX5_LOSSLESS_POOL,
	.max_buff = MLX5_SB_POOL_NO_THRESHOLD,
	.infi_size = 1,
};
166 
167 /**
168  * select_sbcm_params() - selects the shared buffer pool configuration
169  *
170  * @buffer: <input> port buffer to retrieve params of
171  * @lossless_buff_count: <input> number of lossless buffers in total
172  *
173  * The selection is based on the following rules:
174  * 1. If buffer size is 0, no shared buffer pool is used.
175  * 2. If buffer is lossy, use lossy shared buffer pool.
176  * 3. If there are more than 1 lossless buffers, use lossless shared buffer pool
177  *    with threshold.
178  * 4. If there is only 1 lossless buffer, use lossless shared buffer pool
179  *    without threshold.
180  *
181  * @return const struct mlx5_sbcm_params* selected values
182  */
183 static const struct mlx5_sbcm_params *
184 select_sbcm_params(struct mlx5e_bufferx_reg *buffer, u8 lossless_buff_count)
185 {
186 	if (buffer->size == 0)
187 		return &sbcm_default;
188 
189 	if (buffer->lossy)
190 		return &sbcm_lossy;
191 
192 	if (lossless_buff_count > 1)
193 		return &sbcm_lossless;
194 
195 	return &sbcm_lossless_no_threshold;
196 }
197 
198 static int port_update_pool_cfg(struct mlx5_core_dev *mdev,
199 				struct mlx5e_port_buffer *port_buffer)
200 {
201 	const struct mlx5_sbcm_params *p;
202 	u8 lossless_buff_count = 0;
203 	int err;
204 	int i;
205 
206 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
207 		return 0;
208 
209 	for (i = 0; i < MLX5E_MAX_BUFFER; i++)
210 		lossless_buff_count += ((port_buffer->buffer[i].size) &&
211 				       (!(port_buffer->buffer[i].lossy)));
212 
213 	for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
214 		p = select_sbcm_params(&port_buffer->buffer[i], lossless_buff_count);
215 		err = mlx5e_port_set_sbcm(mdev, 0, i,
216 					  MLX5_INGRESS_DIR,
217 					  p->infi_size,
218 					  p->max_buff,
219 					  p->pool_idx);
220 		if (err)
221 			return err;
222 	}
223 
224 	return 0;
225 }
226 
227 static int port_update_shared_buffer(struct mlx5_core_dev *mdev,
228 				     u32 current_headroom_size,
229 				     u32 new_headroom_size)
230 {
231 	struct mlx5e_buffer_pool lossless_ipool;
232 	struct mlx5e_buffer_pool lossy_epool;
233 	u32 lossless_ipool_size;
234 	u32 shared_buffer_size;
235 	u32 total_buffer_size;
236 	u32 lossy_epool_size;
237 	int err;
238 
239 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
240 		return 0;
241 
242 	err = mlx5e_port_query_pool(mdev, &lossy_epool, 0, MLX5_EGRESS_DIR,
243 				    MLX5_LOSSY_POOL);
244 	if (err)
245 		return err;
246 
247 	err = mlx5e_port_query_pool(mdev, &lossless_ipool, 0, MLX5_INGRESS_DIR,
248 				    MLX5_LOSSLESS_POOL);
249 	if (err)
250 		return err;
251 
252 	total_buffer_size = current_headroom_size + lossy_epool.size +
253 			    lossless_ipool.size;
254 	shared_buffer_size = total_buffer_size - new_headroom_size;
255 
256 	if (shared_buffer_size < 4) {
257 		pr_err("Requested port buffer is too large, not enough space left for shared buffer\n");
258 		return -EINVAL;
259 	}
260 
261 	/* Total shared buffer size is split in a ratio of 3:1 between
262 	 * lossy and lossless pools respectively.
263 	 */
264 	lossy_epool_size = (shared_buffer_size / 4) * 3;
265 	lossless_ipool_size = shared_buffer_size / 4;
266 
267 	mlx5e_port_set_sbpr(mdev, 0, MLX5_EGRESS_DIR, MLX5_LOSSY_POOL, 0,
268 			    lossy_epool_size);
269 	mlx5e_port_set_sbpr(mdev, 0, MLX5_INGRESS_DIR, MLX5_LOSSLESS_POOL, 0,
270 			    lossless_ipool_size);
271 	return 0;
272 }
273 
/* Program @port_buffer into the device: rebuild the PBMC register image,
 * resize the shared pools to match the new headroom, update pool bindings,
 * and finally write the PBMC register. Buffer sizes/thresholds held in
 * bytes are converted to cell units before being programmed.
 */
static int port_set_buffer(struct mlx5e_priv *priv,
			   struct mlx5e_port_buffer *port_buffer)
{
	u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
	struct mlx5_core_dev *mdev = priv->mdev;
	int sz = MLX5_ST_SZ_BYTES(pbmc_reg);
	u32 new_headroom_size = 0;
	u32 current_headroom_size;
	void *in;
	int err;
	int i;

	current_headroom_size = port_buffer->headroom_size;

	in = kzalloc(sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	/* Start from the current PBMC contents so fields we do not set
	 * (e.g. epsb, port_buffer_size) are preserved on write-back.
	 */
	err = mlx5e_port_query_pbmc(mdev, in);
	if (err)
		goto out;

	for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
		void *buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]);
		u64 size = port_buffer->buffer[i].size;
		u64 xoff = port_buffer->buffer[i].xoff;
		u64 xon = port_buffer->buffer[i].xon;

		/* Accumulate headroom in bytes BEFORE do_div() converts
		 * `size` to cell units in place.
		 */
		new_headroom_size += size;
		do_div(size, port_buff_cell_sz);
		do_div(xoff, port_buff_cell_sz);
		do_div(xon, port_buff_cell_sz);
		MLX5_SET(bufferx_reg, buffer, size, size);
		MLX5_SET(bufferx_reg, buffer, lossy, port_buffer->buffer[i].lossy);
		MLX5_SET(bufferx_reg, buffer, xoff_threshold, xoff);
		MLX5_SET(bufferx_reg, buffer, xon_threshold, xon);
	}

	/* Shared buffer accounting works in cell units. */
	new_headroom_size /= port_buff_cell_sz;
	current_headroom_size /= port_buff_cell_sz;
	/* Grow/shrink the shared pools before programming the new headroom. */
	err = port_update_shared_buffer(priv->mdev, current_headroom_size,
					new_headroom_size);
	if (err)
		goto out;

	err = port_update_pool_cfg(priv->mdev, port_buffer);
	if (err)
		goto out;

	err = mlx5e_port_set_pbmc(mdev, in);
out:
	kfree(in);
	return err;
}
328 
329 /* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B])
330  * minimum speed value is 40Gbps
331  */
332 static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
333 {
334 	u32 speed;
335 	u32 xoff;
336 	int err;
337 
338 	err = mlx5e_port_linkspeed(priv->mdev, &speed);
339 	if (err)
340 		speed = SPEED_40000;
341 	speed = max_t(u32, speed, SPEED_40000);
342 
343 	xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;
344 
345 	mlx5e_dbg(HW, priv, "%s: xoff=%d\n", __func__, xoff);
346 	return xoff;
347 }
348 
349 static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
350 				 u32 xoff, unsigned int max_mtu, u16 port_buff_cell_sz)
351 {
352 	int i;
353 
354 	for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
355 		if (port_buffer->buffer[i].lossy) {
356 			port_buffer->buffer[i].xoff = 0;
357 			port_buffer->buffer[i].xon  = 0;
358 			continue;
359 		}
360 
361 		if (port_buffer->buffer[i].size <
362 		    (xoff + max_mtu + port_buff_cell_sz)) {
363 			pr_err("buffer_size[%d]=%d is not enough for lossless buffer\n",
364 			       i, port_buffer->buffer[i].size);
365 			return -ENOMEM;
366 		}
367 
368 		port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff;
369 		port_buffer->buffer[i].xon  =
370 			port_buffer->buffer[i].xoff - max_mtu;
371 	}
372 
373 	return 0;
374 }
375 
376 /**
377  *	update_buffer_lossy	- Update buffer configuration based on pfc
378  *	@mdev: port function core device
379  *	@max_mtu: netdev's max_mtu
380  *	@pfc_en: <input> current pfc configuration
381  *	@buffer: <input> current prio to buffer mapping
382  *	@xoff:   <input> xoff value
383  *	@port_buff_cell_sz: <input> port buffer cell_size
384  *	@port_buffer: <output> port receive buffer configuration
385  *	@change: <output>
386  *
387  *	Update buffer configuration based on pfc configuration and
388  *	priority to buffer mapping.
389  *	Buffer's lossy bit is changed to:
390  *		lossless if there is at least one PFC enabled priority
391  *		mapped to this buffer lossy if all priorities mapped to
392  *		this buffer are PFC disabled
393  *
394  *	@return: 0 if no error,
395  *	sets change to true if buffer configuration was modified.
396  */
397 static int update_buffer_lossy(struct mlx5_core_dev *mdev,
398 			       unsigned int max_mtu,
399 			       u8 pfc_en, u8 *buffer, u32 xoff, u16 port_buff_cell_sz,
400 			       struct mlx5e_port_buffer *port_buffer,
401 			       bool *change)
402 {
403 	bool changed = false;
404 	u8 lossy_count;
405 	u8 prio_count;
406 	u8 lossy;
407 	int prio;
408 	int err;
409 	int i;
410 
411 	for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
412 		prio_count = 0;
413 		lossy_count = 0;
414 
415 		for (prio = 0; prio < MLX5E_MAX_PRIORITY; prio++) {
416 			if (buffer[prio] != i)
417 				continue;
418 
419 			prio_count++;
420 			lossy_count += !(pfc_en & (1 << prio));
421 		}
422 
423 		if (lossy_count == prio_count)
424 			lossy = 1;
425 		else /* lossy_count < prio_count */
426 			lossy = 0;
427 
428 		if (lossy != port_buffer->buffer[i].lossy) {
429 			port_buffer->buffer[i].lossy = lossy;
430 			changed = true;
431 		}
432 	}
433 
434 	if (changed) {
435 		err = port_update_pool_cfg(mdev, port_buffer);
436 		if (err)
437 			return err;
438 
439 		err = update_xoff_threshold(port_buffer, xoff, max_mtu, port_buff_cell_sz);
440 		if (err)
441 			return err;
442 
443 		*change = true;
444 	}
445 
446 	return 0;
447 }
448 
449 static int fill_pfc_en(struct mlx5_core_dev *mdev, u8 *pfc_en)
450 {
451 	u32 g_rx_pause, g_tx_pause;
452 	int err;
453 
454 	err = mlx5_query_port_pause(mdev, &g_rx_pause, &g_tx_pause);
455 	if (err)
456 		return err;
457 
458 	/* If global pause enabled, set all active buffers to lossless.
459 	 * Otherwise, check PFC setting.
460 	 */
461 	if (g_rx_pause || g_tx_pause)
462 		*pfc_en = 0xff;
463 	else
464 		err = mlx5_query_port_pfc(mdev, pfc_en, NULL);
465 
466 	return err;
467 }
468 
/* Floor for the MTU used in threshold math, so buffers sized for jumbo
 * frames remain valid even when the netdev max_mtu is smaller.
 */
#define MINIMUM_MAX_MTU 9216
/**
 * mlx5e_port_manual_buffer_config - apply manual port buffer changes
 * @priv: mlx5e private state
 * @change: bitmask of MLX5E_PORT_BUFFER_* flags selecting what changed
 * @mtu: current MTU, used to derive the xoff headroom
 * @pfc: requested PFC configuration (consulted for MLX5E_PORT_BUFFER_PFC)
 * @buffer_size: requested per-buffer sizes (for MLX5E_PORT_BUFFER_SIZE)
 * @prio2buffer: requested prio->buffer map (for MLX5E_PORT_BUFFER_PRIO2BUFFER)
 *
 * Reads the current port buffer configuration, folds in each requested
 * change (recomputing lossy bits and xoff/xon thresholds as needed),
 * and writes the result back to the device. Returns 0 or negative errno.
 */
int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
				    u32 change, unsigned int mtu,
				    struct ieee_pfc *pfc,
				    u32 *buffer_size,
				    u8 *prio2buffer)
{
	u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
	struct mlx5e_port_buffer port_buffer;
	u32 xoff = calculate_xoff(priv, mtu);
	bool update_prio2buffer = false;
	u8 buffer[MLX5E_MAX_PRIORITY];
	bool update_buffer = false;
	unsigned int max_mtu;
	u32 total_used = 0;
	u8 curr_pfc_en;
	int err;
	int i;

	mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change);
	max_mtu = max_t(unsigned int, priv->netdev->max_mtu, MINIMUM_MAX_MTU);

	/* Start from the device's current configuration. */
	err = mlx5e_port_query_buffer(priv, &port_buffer);
	if (err)
		return err;

	/* Cable length affects xoff (already folded into the value above);
	 * thresholds must be recomputed.
	 */
	if (change & MLX5E_PORT_BUFFER_CABLE_LEN) {
		update_buffer = true;
		err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz);
		if (err)
			return err;
	}

	/* New PFC mask: re-derive lossy bits from the current prio map. */
	if (change & MLX5E_PORT_BUFFER_PFC) {
		mlx5e_dbg(HW, priv, "%s: requested PFC per priority bitmask: 0x%x\n",
			  __func__, pfc->pfc_en);
		err = mlx5e_port_query_priority2buffer(priv->mdev, buffer);
		if (err)
			return err;

		err = update_buffer_lossy(priv->mdev, max_mtu, pfc->pfc_en, buffer, xoff,
					  port_buff_cell_sz, &port_buffer,
					  &update_buffer);
		if (err)
			return err;
	}

	/* New prio map: re-derive lossy bits from the effective PFC mask
	 * (global pause counts as all-lossless, see fill_pfc_en()).
	 */
	if (change & MLX5E_PORT_BUFFER_PRIO2BUFFER) {
		update_prio2buffer = true;
		for (i = 0; i < MLX5E_MAX_BUFFER; i++)
			mlx5e_dbg(HW, priv, "%s: requested to map prio[%d] to buffer %d\n",
				  __func__, i, prio2buffer[i]);

		err = fill_pfc_en(priv->mdev, &curr_pfc_en);
		if (err)
			return err;

		err = update_buffer_lossy(priv->mdev, max_mtu, curr_pfc_en, prio2buffer, xoff,
					  port_buff_cell_sz, &port_buffer, &update_buffer);
		if (err)
			return err;
	}

	/* Explicit sizes: validate against the total, then recompute
	 * thresholds for the new sizes.
	 */
	if (change & MLX5E_PORT_BUFFER_SIZE) {
		for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
			mlx5e_dbg(HW, priv, "%s: buffer[%d]=%d\n", __func__, i, buffer_size[i]);
			if (!port_buffer.buffer[i].lossy && !buffer_size[i]) {
				mlx5e_dbg(HW, priv, "%s: lossless buffer[%d] size cannot be zero\n",
					  __func__, i);
				return -EINVAL;
			}

			port_buffer.buffer[i].size = buffer_size[i];
			total_used += buffer_size[i];
		}

		mlx5e_dbg(HW, priv, "%s: total buffer requested=%d\n", __func__, total_used);

		if (total_used > port_buffer.port_buffer_size)
			return -EINVAL;

		update_buffer = true;
		err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz);
		if (err)
			return err;
	}

	/* Need to update buffer configuration if xoff value is changed */
	if (!update_buffer && xoff != priv->dcbx.xoff) {
		update_buffer = true;
		err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz);
		if (err)
			return err;
	}
	priv->dcbx.xoff = xoff;

	/* Apply the settings */
	if (update_buffer) {
		err = port_set_buffer(priv, &port_buffer);
		if (err)
			return err;
	}

	if (update_prio2buffer)
		err = mlx5e_port_set_priority2buffer(priv->mdev, prio2buffer);

	return err;
}
577