// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2021 Marvell International Ltd.
 */

#include <linux/types.h>
#include <linux/crc8.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_iro_hsi.h"
#include "qed_reg_addr.h"

#define CDU_VALIDATION_DEFAULT_CFG CDU_CONTEXT_VALIDATION_DEFAULT_CFG

static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES] = {
	{400, 336, 352, 368, 304, 384, 416, 352},	/* region 3 offsets */
	{528, 496, 416, 512, 448, 512, 544, 480},	/* region 4 offsets */
	{608, 544, 496, 576, 576, 592, 624, 560}	/* region 5 offsets */
};

static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = {
	{240, 240, 112, 0, 0, 0, 0, 96}	/* region 1 offsets */
};

/* General constants */
#define QM_PQ_MEM_4KB(pq_size)	(pq_size ? DIV_ROUND_UP((pq_size + 1) *	\
							QM_PQ_ELEMENT_SIZE, \
							0x1000) : 0)
#define QM_PQ_SIZE_256B(pq_size)	(pq_size ? DIV_ROUND_UP(pq_size, \
								0x100) - 1 : 0)
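
/* Illustrative arithmetic (not from the original sources), assuming
 * QM_PQ_ELEMENT_SIZE is 4 bytes: a hypothetical pq_size of 4000 CIDs gives
 * QM_PQ_MEM_4KB(4000) = DIV_ROUND_UP(4001 * 4, 0x1000) = 4 pages of 4KB, and
 * QM_PQ_SIZE_256B(4000) = DIV_ROUND_UP(4000, 0x100) - 1 = 15.
 */
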
#define QM_INVALID_PQ_ID		0xffff

/* Max link speed (in Mbps) */
#define QM_MAX_LINK_SPEED               100000

/* Feature enable */
#define QM_BYPASS_EN	1
#define QM_BYTE_CRD_EN	1

/* Initial VOQ byte credit */
#define QM_INITIAL_VOQ_BYTE_CRD         98304
/* Other PQ constants */
#define QM_OTHER_PQS_PER_PF	4

/* VOQ constants */
#define MAX_NUM_VOQS	(MAX_NUM_PORTS_K2 * NUM_TCS_4PORT_K2)
#define VOQS_BIT_MASK	(BIT(MAX_NUM_VOQS) - 1)

/* WFQ constants */

/* PF WFQ increment value, 0x9000 = 4*9*1024 */
#define QM_PF_WFQ_INC_VAL(weight)       ((weight) * 0x9000)

/* PF WFQ upper bound, in bytes: 10 * burst size of 1 ms at 50 Gbps */
#define QM_PF_WFQ_UPPER_BOUND           62500000

/* PF WFQ max increment value, 0.7 * upper bound */
#define QM_PF_WFQ_MAX_INC_VAL           ((QM_PF_WFQ_UPPER_BOUND * 7) / 10)

/* Number of VOQs in E5 PF WFQ credit register (QmWfqCrd) */
#define QM_PF_WFQ_CRD_E5_NUM_VOQS       16

/* VP WFQ increment value */
#define QM_VP_WFQ_INC_VAL(weight)       ((weight) * QM_VP_WFQ_MIN_INC_VAL)

/* VP WFQ min increment value */
#define QM_VP_WFQ_MIN_INC_VAL           10800

/* VP WFQ max increment value, 2^30 */
#define QM_VP_WFQ_MAX_INC_VAL           0x40000000

/* VP WFQ bypass threshold */
#define QM_VP_WFQ_BYPASS_THRESH         (QM_VP_WFQ_MIN_INC_VAL - 100)

/* VP RL credit task cost */
#define QM_VP_RL_CRD_TASK_COST          9700

/* Bit of VOQ in VP WFQ PQ map */
#define QM_VP_WFQ_PQ_VOQ_SHIFT          0

/* Bit of PF in VP WFQ PQ map */
#define QM_VP_WFQ_PQ_PF_SHIFT   5

/* RL constants */

/* Period in us */
#define QM_RL_PERIOD	5

/* Period in 25MHz cycles */
#define QM_RL_PERIOD_CLK_25M	(25 * QM_RL_PERIOD)

/* RL increment value - rate is specified in Mbps */
#define QM_RL_INC_VAL(rate)                     ({	\
						typeof(rate) __rate = (rate); \
						max_t(u32,		\
						(u32)(((__rate ? __rate : \
						100000) *		\
						QM_RL_PERIOD *		\
						101) / (8 * 100)), 1); })
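
/* Worked example (illustrative): at a rate of 25000 Mbps, a 5 us period
 * carries 25000 * 5 / 8 = 15625 bytes; with the 1% margin (101 / 100) the
 * increment becomes 15781. A rate of 0 falls back to 100000 Mbps, which
 * yields 63125.
 */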

/* PF RL upper bound is set to 10 * burst size of 1 ms at 50 Gbps */
#define QM_PF_RL_UPPER_BOUND	62500000

/* Max PF RL increment value is 0.7 * upper bound */
#define QM_PF_RL_MAX_INC_VAL	((QM_PF_RL_UPPER_BOUND * 7) / 10)

/* QCN RL upper bound, speed is in Mbps */
#define QM_GLOBAL_RL_UPPER_BOUND(speed)         ((u32)max_t( \
		u32,					    \
		(u32)(((speed) *			    \
		       QM_RL_PERIOD * 101) / (8 * 100)),    \
		QM_VP_RL_CRD_TASK_COST			    \
		+ 1000))
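
/* Illustrative values: at 10000 Mbps the byte-credit term is
 * (10000 * 5 * 101) / 800 = 6312, so the bound is clamped to
 * QM_VP_RL_CRD_TASK_COST + 1000 = 10700; at 100000 Mbps the byte-credit
 * term (63125) dominates.
 */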

/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF	1
#define QM_OPPOR_FW_STOP_DEF	0
#define QM_OPPOR_PQ_EMPTY_DEF	1

/* Command Queue constants */

/* Pure LB CmdQ lines (+spare) */
#define PBF_CMDQ_PURE_LB_LINES	150

#define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
	(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
	 (ext_voq) * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
		PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))

#define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \
	(PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
	 (ext_voq) * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
		PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))

/* Returns the VOQ line credit for the specified number of PBF command lines.
 * PBF lines are specified in 256b units.
 */
#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
	((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
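
/* Illustrative: for the 150 pure LB lines defined above, this yields
 * (150 - 4) * 2 = 292 line credits, with the sign bit set.
 */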

/* BTB: blocks constants (block size = 256B) */

/* 256B blocks in 9700B packet */
#define BTB_JUMBO_PKT_BLOCKS	38

/* Headroom per-port */
#define BTB_HEADROOM_BLOCKS	BTB_JUMBO_PKT_BLOCKS
#define BTB_PURE_LB_FACTOR	10

/* Scaled by BTB_PURE_LB_FACTOR (hence really 0.7) */
#define BTB_PURE_LB_RATIO	7

/* QM stop command constants */
#define QM_STOP_PQ_MASK_WIDTH		32
#define QM_STOP_CMD_ADDR		2
#define QM_STOP_CMD_STRUCT_SIZE		2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET	0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT	0
#define QM_STOP_CMD_PAUSE_MASK_MASK	-1
#define QM_STOP_CMD_GROUP_ID_OFFSET	1
#define QM_STOP_CMD_GROUP_ID_SHIFT	16
#define QM_STOP_CMD_GROUP_ID_MASK	15
#define QM_STOP_CMD_PQ_TYPE_OFFSET	1
#define QM_STOP_CMD_PQ_TYPE_SHIFT	24
#define QM_STOP_CMD_PQ_TYPE_MASK	1
#define QM_STOP_CMD_MAX_POLL_COUNT	100
#define QM_STOP_CMD_POLL_PERIOD_US	500

/* QM command macros */
#define QM_CMD_STRUCT_SIZE(cmd)	cmd ## _STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, value) \
	SET_FIELD(var[cmd ## _ ## field ## _OFFSET], \
		  cmd ## _ ## field, \
		  value)

#define QM_INIT_TX_PQ_MAP(p_hwfn, map, pq_id, vp_pq_id, rl_valid,	      \
			  rl_id, ext_voq, wrr)				      \
	do {								      \
		u32 __reg = 0;						      \
									      \
		BUILD_BUG_ON(sizeof((map).reg) != sizeof(__reg));	      \
		memset(&(map), 0, sizeof(map));				      \
		SET_FIELD(__reg, QM_RF_PQ_MAP_PQ_VALID, 1);	      \
		SET_FIELD(__reg, QM_RF_PQ_MAP_RL_VALID,	      \
			  !!(rl_valid));				      \
		SET_FIELD(__reg, QM_RF_PQ_MAP_VP_PQ_ID, (vp_pq_id)); \
		SET_FIELD(__reg, QM_RF_PQ_MAP_RL_ID, (rl_id));	      \
		SET_FIELD(__reg, QM_RF_PQ_MAP_VOQ, (ext_voq));	      \
		SET_FIELD(__reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,      \
			  (wrr));					      \
									      \
		STORE_RT_REG((p_hwfn), QM_REG_TXPQMAP_RT_OFFSET + (pq_id),    \
			     __reg);					      \
		(map).reg = cpu_to_le32(__reg);				      \
	} while (0)

#define WRITE_PQ_INFO_TO_RAM	1
#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
	(((vp) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | \
	((rl_valid ? 1 : 0) << 22) | (((rl) & 255) << 24) | \
	(((rl) >> 8) << 9))

#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
	(XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + \
	XSTORM_PQ_INFO_OFFSET(pq_id))

/******************** INTERNAL IMPLEMENTATION *********************/

/* Returns the external VOQ number */
static u8 qed_get_ext_voq(struct qed_hwfn *p_hwfn,
			  u8 port_id, u8 tc, u8 max_phys_tcs_per_port)
{
	if (tc == PURE_LB_TC)
		return NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + port_id;
	else
		return port_id * max_phys_tcs_per_port + tc;
}
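
/* Illustrative mapping: with max_phys_tcs_per_port = 4, port 1 / TC 2 maps
 * to VOQ 1 * 4 + 2 = 6, while port 1's pure LB TC maps to VOQ
 * NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + 1, past all the physical-TC VOQs.
 */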

/* Prepare PF RL enable/disable runtime init values */
static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
	if (pf_rl_en) {
		u8 num_ext_voqs = MAX_NUM_VOQS;
		u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;

		/* Enable RLs for all VOQs */
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLPFVOQENABLE_RT_OFFSET,
			     (u32)voq_bit_mask);

		/* Write RL period */
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLPFPERIODTIMER_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);

		/* Set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
				     QM_PF_RL_UPPER_BOUND);
	}
}

/* Prepare PF WFQ enable/disable runtime init values */
static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);

	/* Set credit threshold for QM bypass flow */
	if (pf_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn,
			     QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
			     QM_PF_WFQ_UPPER_BOUND);
}

/* Prepare global RL enable/disable runtime init values */
static void qed_enable_global_rl(struct qed_hwfn *p_hwfn, bool global_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
		     global_rl_en ? 1 : 0);
	if (global_rl_en) {
		/* Write RL period (use timer 0 only) */
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);

		/* Set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
				     QM_GLOBAL_RL_UPPER_BOUND(10000) - 1);
	}
}

/* Prepare VPORT WFQ enable/disable runtime init values */
static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
		     vport_wfq_en ? 1 : 0);

	/* Set credit threshold for QM bypass flow */
	if (vport_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn,
			     QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
			     QM_VP_WFQ_BYPASS_THRESH);
}

/* Prepare runtime init values to allocate PBF command queue lines for
 * the specified VOQ.
 */
static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
				       u8 ext_voq, u16 cmdq_lines)
{
	u32 qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);

	OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq),
			 (u32)cmdq_lines);
	STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq,
		     qm_line_crd);
	STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq,
		     qm_line_crd);
}

/* Prepare runtime init values to allocate PBF command queue lines. */
static void
qed_cmdq_lines_rt_init(struct qed_hwfn *p_hwfn,
		       u8 max_ports_per_engine,
		       u8 max_phys_tcs_per_port,
		       struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
	u8 tc, ext_voq, port_id, num_tcs_in_port;
	u8 num_ext_voqs = MAX_NUM_VOQS;

	/* Clear PBF lines of all VOQs */
	for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
		STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0);

	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		u16 phys_lines, phys_lines_per_tc;

		if (!port_params[port_id].active)
			continue;

		/* Find number of command queue lines to divide between the
		 * active physical TCs.
		 */
		phys_lines = port_params[port_id].num_pbf_cmd_lines;
		phys_lines -= PBF_CMDQ_PURE_LB_LINES;

		/* Find #lines per active physical TC */
		num_tcs_in_port = 0;
		for (tc = 0; tc < max_phys_tcs_per_port; tc++)
			if (((port_params[port_id].active_phys_tcs >>
			      tc) & 0x1) == 1)
				num_tcs_in_port++;
		phys_lines_per_tc = phys_lines / num_tcs_in_port;

		/* Init registers per active TC */
		for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
			ext_voq = qed_get_ext_voq(p_hwfn,
						  port_id,
						  tc, max_phys_tcs_per_port);
			if (((port_params[port_id].active_phys_tcs >>
			      tc) & 0x1) == 1)
				qed_cmdq_lines_voq_rt_init(p_hwfn,
							   ext_voq,
							   phys_lines_per_tc);
		}

		/* Init registers for pure LB TC */
		ext_voq = qed_get_ext_voq(p_hwfn,
					  port_id,
					  PURE_LB_TC, max_phys_tcs_per_port);
		qed_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
					   PBF_CMDQ_PURE_LB_LINES);
	}
}

/* Prepare runtime init values to allocate guaranteed BTB blocks for the
 * specified port. The guaranteed BTB space is divided between the TCs as
 * follows (shared space is currently not used):
 * 1. Parameters:
 *    B - BTB blocks for this port
 *    C - Number of physical TCs for this port
 * 2. Calculation:
 *    a. 38 blocks (9700B jumbo frame) are allocated for global per-port
 *	 headroom.
 *    b. B = B - 38 (remainder after global headroom allocation).
 *    c. MAX(38, B / (C + 0.7)) blocks are allocated for the pure LB VOQ.
 *    d. B = B - MAX(38, B / (C + 0.7)) (remainder after pure LB allocation).
 *    e. B / C blocks are allocated for each physical TC.
 * Assumptions:
 * - MTU is up to 9700 bytes (38 blocks)
 * - All TCs are considered symmetrical (same rate and packet size)
 * - No optimization for lossy TC (all are considered lossless). Shared space
 *   is not enabled or allocated for any TC.
 */
static void
qed_btb_blocks_rt_init(struct qed_hwfn *p_hwfn,
		       u8 max_ports_per_engine,
		       u8 max_phys_tcs_per_port,
		       struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
	u32 usable_blocks, pure_lb_blocks, phys_blocks;
	u8 tc, ext_voq, port_id, num_tcs_in_port;

	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		if (!port_params[port_id].active)
			continue;

		/* Subtract headroom blocks */
		usable_blocks = port_params[port_id].num_btb_blocks -
				BTB_HEADROOM_BLOCKS;

		/* Find blocks per physical TC. Use a factor to avoid
		 * floating-point arithmetic.
		 */
		num_tcs_in_port = 0;
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
			if (((port_params[port_id].active_phys_tcs >>
			      tc) & 0x1) == 1)
				num_tcs_in_port++;

		pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
				 (num_tcs_in_port * BTB_PURE_LB_FACTOR +
				  BTB_PURE_LB_RATIO);
		pure_lb_blocks = max_t(u32, BTB_JUMBO_PKT_BLOCKS,
				       pure_lb_blocks / BTB_PURE_LB_FACTOR);
		phys_blocks = (usable_blocks - pure_lb_blocks) /
			      num_tcs_in_port;

		/* Init physical TCs */
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
			if (((port_params[port_id].active_phys_tcs >>
			      tc) & 0x1) == 1) {
				ext_voq =
					qed_get_ext_voq(p_hwfn,
							port_id,
							tc,
							max_phys_tcs_per_port);
				STORE_RT_REG(p_hwfn,
					     PBF_BTB_GUARANTEED_RT_OFFSET
					     (ext_voq), phys_blocks);
			}
		}

		/* Init pure LB TC */
		ext_voq = qed_get_ext_voq(p_hwfn,
					  port_id,
					  PURE_LB_TC, max_phys_tcs_per_port);
		STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
			     pure_lb_blocks);
	}
}
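
/* Worked example (hypothetical numbers, following the integer arithmetic
 * above): a port with 1126 BTB blocks and 2 active physical TCs has
 * 1126 - 38 = 1088 usable blocks; the pure LB VOQ gets
 * max(38, ((1088 * 10) / 27) / 10) = 40 blocks, and each physical TC gets
 * (1088 - 40) / 2 = 524 guaranteed blocks.
 */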

/* Prepare runtime init values for all global RLs.
 * Each RL is configured for the max link speed (100 Gbps).
 */
static int qed_global_rl_rt_init(struct qed_hwfn *p_hwfn)
{
	u32 upper_bound = QM_GLOBAL_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) |
			  (u32)QM_RL_CRD_REG_SIGN_BIT;
	u32 inc_val;
	u16 rl_id;

	/* Go over all global RLs */
	for (rl_id = 0; rl_id < MAX_QM_GLOBAL_RLS; rl_id++) {
		inc_val = QM_RL_INC_VAL(QM_MAX_LINK_SPEED);

		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLCRD_RT_OFFSET + rl_id,
			     (u32)QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + rl_id,
			     upper_bound);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLINCVAL_RT_OFFSET + rl_id, inc_val);
	}

	return 0;
}

/* Returns the upper bound for the specified Vport RL parameters.
 * link_speed is in Mbps.
 * Returns 0 in case of error.
 */
static u32 qed_get_vport_rl_upper_bound(enum init_qm_rl_type vport_rl_type,
					u32 link_speed)
{
	switch (vport_rl_type) {
	case QM_RL_TYPE_NORMAL:
		return QM_INITIAL_VOQ_BYTE_CRD;
	case QM_RL_TYPE_QCN:
		return QM_GLOBAL_RL_UPPER_BOUND(link_speed);
	default:
		return 0;
	}
}

/* Prepare VPORT RL runtime init values.
 * Return -1 on error.
 */
static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
				u16 start_rl,
				u16 num_rls,
				u32 link_speed,
				struct init_qm_rl_params *rl_params)
{
	u16 i, rl_id;

	if (num_rls && start_rl + num_rls >= MAX_QM_GLOBAL_RLS) {
		DP_NOTICE(p_hwfn, "Invalid rate limiter configuration\n");
		return -1;
	}

	/* Go over all PF VPORTs */
	for (i = 0, rl_id = start_rl; i < num_rls; i++, rl_id++) {
		u32 upper_bound, inc_val;

		upper_bound =
		    qed_get_vport_rl_upper_bound((enum init_qm_rl_type)
						 rl_params[i].vport_rl_type,
						 link_speed);

		inc_val =
		    QM_RL_INC_VAL(rl_params[i].vport_rl ?
				  rl_params[i].vport_rl : link_speed);
		if (inc_val > upper_bound) {
			DP_NOTICE(p_hwfn,
				  "Invalid RL rate limit configuration\n");
			return -1;
		}

		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + rl_id,
			     (u32)QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + rl_id,
			     upper_bound | (u32)QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + rl_id,
			     inc_val);
	}

	return 0;
}

/* Prepare Tx PQ mapping runtime init values for the specified PF */
static int qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_qm_pf_rt_init_params *p_params,
				 u32 base_mem_addr_4kb)
{
	u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
	struct init_qm_vport_params *vport_params = p_params->vport_params;
	u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
	u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group;
	struct init_qm_pq_params *pq_params = p_params->pq_params;
	u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;

	num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;

	first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
	last_pq_group = (p_params->start_pq + num_pqs - 1) /
			QM_PF_QUEUE_GROUP_SIZE;

	pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
	vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
	mem_addr_4kb = base_mem_addr_4kb;

	/* Set mapping from PQ group to PF */
	for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
		STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
			     (u32)(p_params->pf_id));

	/* Set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
		     QM_PQ_SIZE_256B(p_params->num_pf_cids));
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
		     QM_PQ_SIZE_256B(p_params->num_vf_cids));

	/* Go over all Tx PQs */
	for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
		u16 *p_first_tx_pq_id, vport_id_in_pf;
		struct qm_rf_pq_map tx_pq_map;
		u8 tc_id = pq_params[i].tc_id;
		bool is_vf_pq;
		u8 ext_voq;

		ext_voq = qed_get_ext_voq(p_hwfn,
					  pq_params[i].port_id,
					  tc_id,
					  p_params->max_phys_tcs_per_port);
		is_vf_pq = (i >= p_params->num_pf_pqs);

		/* Update first Tx PQ of VPORT/TC */
		vport_id_in_pf = pq_params[i].vport_id - p_params->start_vport;
		p_first_tx_pq_id =
		    &vport_params[vport_id_in_pf].first_tx_pq_id[tc_id];
		if (*p_first_tx_pq_id == QM_INVALID_PQ_ID) {
			u32 map_val =
				(ext_voq << QM_VP_WFQ_PQ_VOQ_SHIFT) |
				(p_params->pf_id << QM_VP_WFQ_PQ_PF_SHIFT);

			/* Create new VP PQ */
			*p_first_tx_pq_id = pq_id;

			/* Map VP PQ to VOQ and PF */
			STORE_RT_REG(p_hwfn,
				     QM_REG_WFQVPMAP_RT_OFFSET +
				     *p_first_tx_pq_id,
				     map_val);
		}

		/* Prepare PQ map entry */
		QM_INIT_TX_PQ_MAP(p_hwfn,
				  tx_pq_map,
				  pq_id,
				  *p_first_tx_pq_id,
				  pq_params[i].rl_valid,
				  pq_params[i].rl_id,
				  ext_voq, pq_params[i].wrr_group);

		/* Set PQ base address */
		STORE_RT_REG(p_hwfn,
			     QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);

		/* Clear PQ pointer table entry (64 bit) */
		if (p_params->is_pf_loading)
			for (j = 0; j < 2; j++)
				STORE_RT_REG(p_hwfn,
					     QM_REG_PTRTBLTX_RT_OFFSET +
					     (pq_id * 2) + j, 0);

		/* Write PQ info to RAM */
		if (WRITE_PQ_INFO_TO_RAM != 0) {
			u32 pq_info = 0;

			pq_info = PQ_INFO_ELEMENT(*p_first_tx_pq_id,
						  p_params->pf_id,
						  tc_id,
						  pq_params[i].port_id,
						  pq_params[i].rl_valid,
						  pq_params[i].rl_id);
			qed_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
			       pq_info);
		}

		/* If VF PQ, add indication to PQ VF mask */
		if (is_vf_pq) {
			tx_pq_vf_mask[pq_id /
				      QM_PF_QUEUE_GROUP_SIZE] |=
			    BIT((pq_id % QM_PF_QUEUE_GROUP_SIZE));
			mem_addr_4kb += vport_pq_mem_4kb;
		} else {
			mem_addr_4kb += pq_mem_4kb;
		}
	}

	/* Store Tx PQ VF mask to size select register */
	for (i = 0; i < num_tx_pq_vf_masks; i++)
		if (tx_pq_vf_mask[i])
			STORE_RT_REG(p_hwfn,
				     QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i,
				     tx_pq_vf_mask[i]);

	return 0;
}

/* Prepare Other PQ mapping runtime init values for the specified PF */
static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
				     u8 pf_id,
				     bool is_pf_loading,
				     u32 num_pf_cids,
				     u32 num_tids, u32 base_mem_addr_4kb)
{
	u32 pq_size, pq_mem_4kb, mem_addr_4kb;
	u16 i, j, pq_id, pq_group;

	/* A single other PQ group is used in each PF, where PQ group i is used
	 * in PF i.
	 */
	pq_group = pf_id;
	pq_size = num_pf_cids + num_tids;
	pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
	mem_addr_4kb = base_mem_addr_4kb;

	/* Map PQ group to PF */
	STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
		     (u32)(pf_id));

	/* Set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
		     QM_PQ_SIZE_256B(pq_size));

	for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
	     i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
		/* Set PQ base address */
		STORE_RT_REG(p_hwfn,
			     QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);

		/* Clear PQ pointer table entry */
		if (is_pf_loading)
			for (j = 0; j < 2; j++)
				STORE_RT_REG(p_hwfn,
					     QM_REG_PTRTBLOTHER_RT_OFFSET +
					     (pq_id * 2) + j, 0);

		mem_addr_4kb += pq_mem_4kb;
	}
}

/* Prepare PF WFQ runtime init values for the specified PF.
 * Return -1 on error.
 */
static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
			      struct qed_qm_pf_rt_init_params *p_params)
{
	u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
	struct init_qm_pq_params *pq_params = p_params->pq_params;
	u32 inc_val, crd_reg_offset;
	u8 ext_voq;
	u16 i;

	inc_val = QM_PF_WFQ_INC_VAL(p_params->pf_wfq);
	if (!inc_val || inc_val > QM_PF_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
		return -1;
	}

	for (i = 0; i < num_tx_pqs; i++) {
		ext_voq = qed_get_ext_voq(p_hwfn,
					  pq_params[i].port_id,
					  pq_params[i].tc_id,
					  p_params->max_phys_tcs_per_port);
		crd_reg_offset =
			(p_params->pf_id < MAX_NUM_PFS_BB ?
			 QM_REG_WFQPFCRD_RT_OFFSET :
			 QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
			ext_voq * MAX_NUM_PFS_BB +
			(p_params->pf_id % MAX_NUM_PFS_BB);
		OVERWRITE_RT_REG(p_hwfn,
				 crd_reg_offset, (u32)QM_WFQ_CRD_REG_SIGN_BIT);
	}

	STORE_RT_REG(p_hwfn,
		     QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
		     QM_PF_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
		     inc_val);

	return 0;
}

/* Prepare PF RL runtime init values for the specified PF.
 * Return -1 on error.
 */
static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
{
	u32 inc_val = QM_RL_INC_VAL(pf_rl);

	if (inc_val > QM_PF_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
		return -1;
	}

	STORE_RT_REG(p_hwfn,
		     QM_REG_RLPFCRD_RT_OFFSET + pf_id,
		     (u32)QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn,
		     QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
		     QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);

	return 0;
}

/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
			      u16 num_vports,
			      struct init_qm_vport_params *vport_params)
{
	u16 vport_pq_id, wfq, i;
	u32 inc_val;
	u8 tc;

	/* Go over all PF VPORTs */
	for (i = 0; i < num_vports; i++) {
		/* Each VPORT can have several VPORT PQ IDs for various TCs */
		for (tc = 0; tc < NUM_OF_TCS; tc++) {
			/* Check if VPORT/TC is valid */
			vport_pq_id = vport_params[i].first_tx_pq_id[tc];
			if (vport_pq_id == QM_INVALID_PQ_ID)
				continue;

			/* Find WFQ weight (per VPORT or per VPORT+TC) */
			wfq = vport_params[i].wfq;
			wfq = wfq ? wfq : vport_params[i].tc_wfq[tc];
			inc_val = QM_VP_WFQ_INC_VAL(wfq);
			if (inc_val > QM_VP_WFQ_MAX_INC_VAL) {
				DP_NOTICE(p_hwfn,
					  "Invalid VPORT WFQ weight configuration\n");
				return -1;
			}

			/* Config registers */
			STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET +
				     vport_pq_id,
				     (u32)QM_WFQ_CRD_REG_SIGN_BIT);
			STORE_RT_REG(p_hwfn, QM_REG_WFQVPUPPERBOUND_RT_OFFSET +
				     vport_pq_id,
				     inc_val | QM_WFQ_CRD_REG_SIGN_BIT);
			STORE_RT_REG(p_hwfn, QM_REG_WFQVPWEIGHT_RT_OFFSET +
				     vport_pq_id, inc_val);
		}
	}

	return 0;
}

static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt)
{
	u32 reg_val, i;

	for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val;
	     i++) {
		udelay(QM_STOP_CMD_POLL_PERIOD_US);
		reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
	}

	/* Check for a timeout while waiting for SDM command ready */
	if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "Timeout when waiting for QM SDM command ready signal\n");
		return false;
	}

	return true;
}

static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb)
{
	if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
		return false;

	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);

	return qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
}

/******************** INTERFACE IMPLEMENTATION *********************/

u32 qed_qm_pf_mem_size(u32 num_pf_cids,
		       u32 num_vf_cids,
		       u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
{
	return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
	       QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
	       QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
}

int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
			  struct qed_qm_common_rt_init_params *p_params)
{
	u32 mask = 0;

	/* Init AFullOprtnstcCrdMask */
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_LINEVOQ,
		  QM_OPPOR_LINE_VOQ_DEF);
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ, QM_BYTE_CRD_EN);
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFWFQ,
		  p_params->pf_wfq_en ? 1 : 0);
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPWFQ,
		  p_params->vport_wfq_en ? 1 : 0);
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFRL,
		  p_params->pf_rl_en ? 1 : 0);
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPQCNRL,
		  p_params->global_rl_en ? 1 : 0);
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_FWPAUSE, QM_OPPOR_FW_STOP_DEF);
	SET_FIELD(mask,
		  QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY, QM_OPPOR_PQ_EMPTY_DEF);
	STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);

	/* Enable/disable PF RL */
	qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en);

	/* Enable/disable PF WFQ */
	qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en);

	/* Enable/disable global RL */
	qed_enable_global_rl(p_hwfn, p_params->global_rl_en);

	/* Enable/disable VPORT WFQ */
	qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en);

	/* Init PBF CMDQ line credit */
	qed_cmdq_lines_rt_init(p_hwfn,
			       p_params->max_ports_per_engine,
			       p_params->max_phys_tcs_per_port,
			       p_params->port_params);

	/* Init BTB blocks in PBF */
	qed_btb_blocks_rt_init(p_hwfn,
			       p_params->max_ports_per_engine,
			       p_params->max_phys_tcs_per_port,
			       p_params->port_params);

	qed_global_rl_rt_init(p_hwfn);

	return 0;
}

int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      struct qed_qm_pf_rt_init_params *p_params)
{
	struct init_qm_vport_params *vport_params = p_params->vport_params;
	u32 other_mem_size_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids +
					       p_params->num_tids) *
				 QM_OTHER_PQS_PER_PF;
	u16 i;
	u8 tc;

	/* Clear first Tx PQ ID array for each VPORT */
	for (i = 0; i < p_params->num_vports; i++)
		for (tc = 0; tc < NUM_OF_TCS; tc++)
			vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;

	/* Map Other PQs (if any) */
	qed_other_pq_map_rt_init(p_hwfn,
				 p_params->pf_id,
				 p_params->is_pf_loading, p_params->num_pf_cids,
				 p_params->num_tids, 0);

	/* Map Tx PQs */
	if (qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb))
		return -1;

	/* Init PF WFQ */
	if (p_params->pf_wfq)
		if (qed_pf_wfq_rt_init(p_hwfn, p_params))
			return -1;

	/* Init PF RL */
	if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
		return -1;

	/* Init VPORT WFQ */
	if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params))
		return -1;

	/* Set VPORT RL */
	if (qed_vport_rl_rt_init(p_hwfn, p_params->start_rl,
				 p_params->num_rls, p_params->link_speed,
				 p_params->rl_params))
		return -1;

	return 0;
}

int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
{
	u32 inc_val = QM_PF_WFQ_INC_VAL(pf_wfq);

	if (!inc_val || inc_val > QM_PF_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
		return -1;
	}

	qed_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);

	return 0;
}

int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl)
{
	u32 inc_val = QM_RL_INC_VAL(pf_rl);

	if (inc_val > QM_PF_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
		return -1;
	}

	qed_wr(p_hwfn,
	       p_ptt, QM_REG_RLPFCRD + pf_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);

	return 0;
}

int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq)
{
	int result = 0;
	u16 vport_pq_id;
	u8 tc;

	for (tc = 0; tc < NUM_OF_TCS && !result; tc++) {
		vport_pq_id = first_tx_pq_id[tc];
		if (vport_pq_id != QM_INVALID_PQ_ID)
			result = qed_init_vport_tc_wfq(p_hwfn, p_ptt,
						       vport_pq_id, wfq);
	}

	return result;
}

int qed_init_vport_tc_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			  u16 first_tx_pq_id, u16 wfq)
{
	u32 inc_val;

	if (first_tx_pq_id == QM_INVALID_PQ_ID)
		return -1;

	inc_val = QM_VP_WFQ_INC_VAL(wfq);
	if (!inc_val || inc_val > QM_VP_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid VPORT WFQ configuration.\n");
		return -1;
	}

	qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPCRD + first_tx_pq_id * 4,
	       (u32)QM_WFQ_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPUPPERBOUND + first_tx_pq_id * 4,
	       inc_val | QM_WFQ_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPWEIGHT + first_tx_pq_id * 4,
	       inc_val);

	return 0;
}

int qed_init_global_rl(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit,
		       enum init_qm_rl_type vport_rl_type)
{
	u32 inc_val, upper_bound;

	upper_bound =
	    (vport_rl_type ==
	     QM_RL_TYPE_QCN) ? QM_GLOBAL_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) :
	    QM_INITIAL_VOQ_BYTE_CRD;
	inc_val = QM_RL_INC_VAL(rate_limit);
	if (inc_val > upper_bound) {
		DP_NOTICE(p_hwfn, "Invalid VPORT rate limit configuration.\n");
		return -1;
	}

	qed_wr(p_hwfn, p_ptt,
	       QM_REG_RLGLBLCRD + rl_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn,
	       p_ptt,
	       QM_REG_RLGLBLUPPERBOUND + rl_id * 4,
	       upper_bound | (u32)QM_RL_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + rl_id * 4, inc_val);

	return 0;
}

bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  bool is_release_cmd,
			  bool is_tx_pq, u16 start_pq, u16 num_pqs)
{
	u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
	u32 pq_mask = 0, last_pq, pq_id;

	last_pq = start_pq + num_pqs - 1;

	/* Set command's PQ type */
	QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);

	/* Go over requested PQs */
	for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
		/* Set PQ bit in mask (stop command only) */
		if (!is_release_cmd)
			pq_mask |= BIT((pq_id % QM_STOP_PQ_MASK_WIDTH));

		/* If last PQ or end of PQ mask, write command */
		if ((pq_id == last_pq) ||
		    (pq_id % QM_STOP_PQ_MASK_WIDTH ==
		     (QM_STOP_PQ_MASK_WIDTH - 1))) {
			QM_CMD_SET_FIELD(cmd_arr,
					 QM_STOP_CMD, PAUSE_MASK, pq_mask);
			QM_CMD_SET_FIELD(cmd_arr,
					 QM_STOP_CMD,
					 GROUP_ID,
					 pq_id / QM_STOP_PQ_MASK_WIDTH);
			if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR,
					     cmd_arr[0], cmd_arr[1]))
				return false;
			pq_mask = 0;
		}
	}

	return true;
}
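
/* Illustrative: a stop command with start_pq = 30 and num_pqs = 4 issues two
 * commands (group 0 with mask bits 30..31 set, then group 1 with mask bits
 * 0..1 set), since the 32-bit PQ mask boundary is crossed at PQ 32.
 */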

#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
	do { \
		typeof(var) *__p_var = &(var); \
		typeof(offset) __offset = offset; \
		*__p_var = (*__p_var & ~BIT(__offset)) | \
			   ((enable) ? BIT(__offset) : 0); \
	} while (0)

#define PRS_ETH_TUNN_OUTPUT_FORMAT     0xF4DAB910
#define PRS_ETH_OUTPUT_FORMAT          0xFFFF4910

#define ARR_REG_WR(dev, ptt, addr, arr,	arr_size) \
	do { \
		u32 i; \
		\
		for (i = 0; i < (arr_size); i++) \
			qed_wr(dev, ptt, \
			       ((addr) + (4 * i)), \
			       ((u32 *)&(arr))[i]); \
	} while (0)

/**
 * qed_dmae_to_grc() - Internal function for writing from host to
 * wide-bus registers (split registers are not supported yet).
 *
 * @p_hwfn: HW device data.
 * @p_ptt: PTT window used for writing the registers.
 * @p_data: Pointer to source data.
 * @addr: Destination register address.
 * @len_in_dwords: Data length in dwords (u32).
 *
 * Return: Length of the written data in dwords (u32) or -1 on invalid
 *         input.
 */
static int qed_dmae_to_grc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			   __le32 *p_data, u32 addr, u32 len_in_dwords)
{
	struct qed_dmae_params params = { 0 };
	u32 *data_cpu;
	int rc;

	if (!p_data)
		return -1;

	/* Set DMAE params */
	SET_FIELD(params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 1);

	/* Execute DMAE command */
	rc = qed_dmae_host2grc(p_hwfn, p_ptt,
			       (u64)(uintptr_t)(p_data),
			       addr, len_in_dwords, &params);

	/* If the DMAE write failed, fall back to writing via GRC */
	if (rc) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_DEBUG,
			   "Failed writing to chip using DMAE, using GRC instead\n");

		/* Swap to CPU byteorder and write to registers using GRC */
		data_cpu = (__force u32 *)p_data;
		le32_to_cpu_array(data_cpu, len_in_dwords);

		ARR_REG_WR(p_hwfn, p_ptt, addr, data_cpu, len_in_dwords);
		cpu_to_le32_array(data_cpu, len_in_dwords);
	}

	return len_in_dwords;
}

void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt, u16 dest_port)
{
	/* Update PRS register */
	qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);

	/* Update NIG register */
	qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);

	/* Update PBF register */
	qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}

void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, bool vxlan_enable)
{
	u32 reg_val;
	u8 shift;

	/* Update PRS register */
	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_FIELD(reg_val,
		  PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE, vxlan_enable);
	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) {
		reg_val =
		    qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0);

		/* Update output format only if tunnel blocks not included. */
		if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
			qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
			       (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
	}

	/* Update NIG register */
	reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT;
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable);
	qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

	/* Update DORQ register */
	qed_wr(p_hwfn,
	       p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN, vxlan_enable ? 1 : 0);
}

void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt,
			bool eth_gre_enable, bool ip_gre_enable)
{
	u32 reg_val;
	u8 shift;

	/* Update PRS register */
	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_FIELD(reg_val,
		  PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE,
		  eth_gre_enable);
	SET_FIELD(reg_val,
		  PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE,
		  ip_gre_enable);
	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) {
		reg_val =
		    qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0);

		/* Update output format only if tunnel blocks not included. */
		if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
			qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
			       (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
	}

	/* Update NIG register */
	reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT;
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_gre_enable);
	shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT;
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable);
	qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

	/* Update DORQ registers */
	qed_wr(p_hwfn,
	       p_ptt,
	       DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN, eth_gre_enable ? 1 : 0);
	qed_wr(p_hwfn,
	       p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN, ip_gre_enable ? 1 : 0);
}

void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt, u16 dest_port)
{
	/* Update PRS register */
	qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);

	/* Update NIG register */
	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);

	/* Update PBF register */
	qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
}

void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt,
			   bool eth_geneve_enable, bool ip_geneve_enable)
{
	u32 reg_val;

	/* Update PRS register */
	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_FIELD(reg_val,
		  PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE,
		  eth_geneve_enable);
	SET_FIELD(reg_val,
		  PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE,
		  ip_geneve_enable);
	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) {
		reg_val =
		    qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0);

		/* Update output format only if tunnel blocks not included. */
		if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
			qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
			       (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
	}

	/* Update NIG register */
	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
	       eth_geneve_enable ? 1 : 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);

	/* EDPM with GENEVE tunnel is not supported in BB */
	if (QED_IS_BB_B0(p_hwfn->cdev))
		return;

	/* Update DORQ registers */
	qed_wr(p_hwfn,
	       p_ptt,
	       DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2,
	       eth_geneve_enable ? 1 : 0);
	qed_wr(p_hwfn,
	       p_ptt,
	       DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2,
	       ip_geneve_enable ? 1 : 0);
}

#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET      3
#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT   0xC8DAB910

void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt, bool enable)
{
	u32 reg_val, cfg_mask;

	/* read PRS config register */
	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_MSG_INFO);

	/* set VXLAN_NO_L2_ENABLE mask */
	cfg_mask = BIT(PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET);

	if (enable) {
		/* set VXLAN_NO_L2_ENABLE flag */
		reg_val |= cfg_mask;

		/* update PRS FIC register */
		qed_wr(p_hwfn,
		       p_ptt,
		       PRS_REG_OUTPUT_FORMAT_4_0,
		       (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT);
	} else {
		/* clear VXLAN_NO_L2_ENABLE flag */
		reg_val &= ~cfg_mask;
	}

	/* write PRS config register */
	qed_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, reg_val);
}

#define T_ETH_PACKET_ACTION_GFT_EVENTID  23
#define PARSER_ETH_CONN_GFT_ACTION_CM_HDR  272
#define T_ETH_PACKET_MATCH_RFS_EVENTID 25
#define PARSER_ETH_CONN_CM_HDR 0
#define CAM_LINE_SIZE sizeof(u32)
#define RAM_LINE_SIZE sizeof(u64)
#define REG_SIZE sizeof(u32)

void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id)
{
	struct regpair ram_line = { 0 };

	/* Disable GFT search for the PF */
	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);

	/* Clean RAM & CAM for the next GFT session */

	/* Zero the CAM line */
	qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);

	/* Zero the RAM line */
	qed_dmae_to_grc(p_hwfn, p_ptt, &ram_line.lo,
			PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
			sizeof(ram_line) / REG_SIZE);
}

void qed_gft_config(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    u16 pf_id,
		    bool tcp,
		    bool udp,
		    bool ipv4, bool ipv6, enum gft_profile_type profile_type)
{
	struct regpair ram_line;
	u32 search_non_ip_as_gft;
	u32 reg_val, cam_line;
	u32 lo = 0, hi = 0;

	if (!ipv6 && !ipv4)
		DP_NOTICE(p_hwfn,
			  "gft_config: must accept at least one of - ipv4 or ipv6\n");
	if (!tcp && !udp)
		DP_NOTICE(p_hwfn,
			  "gft_config: must accept at least one of - udp or tcp\n");
	if (profile_type >= MAX_GFT_PROFILE_TYPE)
		DP_NOTICE(p_hwfn, "gft_config: unsupported gft_profile_type\n");

	/* Set the RFS event ID to be awakened in Tstorm by PRS */
	reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID <<
		  PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
	reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
	qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val);

	/* On a match, load only the cid in PRS, not the context. */
	qed_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);

	/* Do not use tenant ID exist bit for gft search */
	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TENANT_ID, 0);

	/* Set Cam */
	cam_line = 0;
	SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1);

	/* Filters are per PF */
	SET_FIELD(cam_line,
		  GFT_CAM_LINE_MAPPED_PF_ID_MASK,
		  GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
	SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id);

	if (!(tcp && udp)) {
		SET_FIELD(cam_line,
			  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
			  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
		if (tcp)
			SET_FIELD(cam_line,
				  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
				  GFT_PROFILE_TCP_PROTOCOL);
		else
			SET_FIELD(cam_line,
				  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
				  GFT_PROFILE_UDP_PROTOCOL);
	}

	if (!(ipv4 && ipv6)) {
		SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
		if (ipv4)
			SET_FIELD(cam_line,
				  GFT_CAM_LINE_MAPPED_IP_VERSION,
				  GFT_PROFILE_IPV4);
		else
			SET_FIELD(cam_line,
				  GFT_CAM_LINE_MAPPED_IP_VERSION,
				  GFT_PROFILE_IPV6);
	}

	/* Write characteristics to cam */
	qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
	       cam_line);
	cam_line =
	    qed_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);

	/* Write line to RAM - compare to filter 4 tuple */

	/* Search non-IP as GFT */
	search_non_ip_as_gft = 0;

	/* Tunnel type */
	SET_FIELD(lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
	SET_FIELD(lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);

	if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
		SET_FIELD(hi, GFT_RAM_LINE_DST_IP, 1);
		SET_FIELD(hi, GFT_RAM_LINE_SRC_IP, 1);
		SET_FIELD(hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
		SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1);
		SET_FIELD(lo, GFT_RAM_LINE_SRC_PORT, 1);
		SET_FIELD(lo, GFT_RAM_LINE_DST_PORT, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) {
		SET_FIELD(hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
		SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1);
		SET_FIELD(lo, GFT_RAM_LINE_DST_PORT, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) {
		SET_FIELD(hi, GFT_RAM_LINE_DST_IP, 1);
		SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) {
		SET_FIELD(hi, GFT_RAM_LINE_SRC_IP, 1);
		SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) {
		SET_FIELD(lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);

		/* Allow tunneled traffic without inner IP */
		search_non_ip_as_gft = 1;
	}

	ram_line.lo = cpu_to_le32(lo);
	ram_line.hi = cpu_to_le32(hi);

	qed_wr(p_hwfn,
	       p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT, search_non_ip_as_gft);
	qed_dmae_to_grc(p_hwfn, p_ptt, &ram_line.lo,
			PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
			sizeof(ram_line) / REG_SIZE);

	/* Set default profile so that no filter match will happen */
	ram_line.lo = cpu_to_le32(0xffffffff);
	ram_line.hi = cpu_to_le32(0x3ff);
	qed_dmae_to_grc(p_hwfn, p_ptt, &ram_line.lo,
			PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
			PRS_GFT_CAM_LINES_NO_MATCH,
			sizeof(ram_line) / REG_SIZE);

	/* Enable gft search */
	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
}

DECLARE_CRC8_TABLE(cdu_crc8_table);

/* Calculate and return CDU validation byte per connection type/region/cid */
static u8 qed_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
{
	const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
	u8 crc, validation_byte = 0;
	static u8 crc8_table_valid; /* automatically initialized to 0 */
	u32 validation_string = 0;
	__be32 data_to_crc;

	if (!crc8_table_valid) {
		crc8_populate_msb(cdu_crc8_table, 0x07);
		crc8_table_valid = 1;
	}

	/* The CRC is calculated on the string-to-compress:
	 * [31:8]  = {CID[31:20],CID[11:0]}
	 * [7:4]   = Region
	 * [3:0]   = Type
	 */
	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
		validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);

	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
		validation_string |= ((region & 0xF) << 4);

	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
		validation_string |= (conn_type & 0xF);

	/* Convert to big-endian and calculate CRC8 */
	data_to_crc = cpu_to_be32(validation_string);
	crc = crc8(cdu_crc8_table, (u8 *)&data_to_crc, sizeof(data_to_crc),
		   CRC8_INIT_VALUE);

	/* The validation byte [7:0] is composed:
	 * for type A validation
	 * [7]          = active configuration bit
	 * [6:0]        = crc[6:0]
	 *
	 * for type B validation
	 * [7]          = active configuration bit
	 * [6:3]        = connection_type[3:0]
	 * [2:0]        = crc[2:0]
	 */
	validation_byte |=
	    ((validation_cfg >>
	      CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;

	if ((validation_cfg >>
	     CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
		validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
	else
		validation_byte |= crc & 0x7F;

	return validation_byte;
}
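
/* Illustrative string-to-compress (hypothetical inputs): cid = 0x12345678,
 * region = 3, type = 1 with all three USE_* bits set gives
 * 0x12300000 | (0x678 << 8) | (3 << 4) | 1 = 0x12367831, which is converted
 * to big-endian before the CRC8 is computed.
 */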

/* Calculate and set validation bytes for the session context */
void qed_calc_session_ctx_validation(void *p_ctx_mem,
				     u16 ctx_size, u8 ctx_type, u32 cid)
{
	u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;

	p_ctx = (u8 * const)p_ctx_mem;
	x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
	t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
	u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];

	memset(p_ctx, 0, ctx_size);

	*x_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 3, cid);
	*t_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 4, cid);
	*u_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 5, cid);
}

/* Calculate and set validation bytes for the task context */
void qed_calc_task_ctx_validation(void *p_ctx_mem,
				  u16 ctx_size, u8 ctx_type, u32 tid)
{
	u8 *p_ctx, *region1_val_ptr;

	p_ctx = (u8 * const)p_ctx_mem;
	region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];

	memset(p_ctx, 0, ctx_size);

	*region1_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 1, tid);
}

/* Memset session context to 0 while preserving validation bytes */
void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
{
	u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
	u8 x_val, t_val, u_val;

	p_ctx = (u8 * const)p_ctx_mem;
	x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
	t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
	u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];

	x_val = *x_val_ptr;
	t_val = *t_val_ptr;
	u_val = *u_val_ptr;

	memset(p_ctx, 0, ctx_size);

	*x_val_ptr = x_val;
	*t_val_ptr = t_val;
	*u_val_ptr = u_val;
}

/* Memset task context to 0 while preserving validation bytes */
void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
{
	u8 *p_ctx, *region1_val_ptr;
	u8 region1_val;

	p_ctx = (u8 * const)p_ctx_mem;
	region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];

	region1_val = *region1_val_ptr;

	memset(p_ctx, 0, ctx_size);

	*region1_val_ptr = region1_val;
}

/* Enable and configure context validation */
void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	u32 ctx_validation;

	/* Enable validation for connection region 3: CCFC_CTX_VALID0[31:24] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24;
	qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation);

	/* Enable validation for connection region 5: CCFC_CTX_VALID1[15:8] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
	qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation);

	/* Enable validation for connection region 1: TCFC_CTX_VALID0[15:8] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
	qed_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
}

static u32 qed_get_rdma_assert_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id)
{
	switch (storm_id) {
	case 0:
		return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		    TSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	case 1:
		return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		    MSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	case 2:
		return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		    USTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	case 3:
		return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		    XSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	case 4:
		return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		    YSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	case 5:
		return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		    PSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);

	default:
		return 0;
	}
}

void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      u8 assert_level[NUM_STORMS])
{
	u8 storm_id;

	for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
		u32 ram_addr = qed_get_rdma_assert_ram_addr(p_hwfn, storm_id);

		qed_wr(p_hwfn, p_ptt, ram_addr, assert_level[storm_id]);
	}
}

#define PHYS_ADDR_DWORDS        DIV_ROUND_UP(sizeof(dma_addr_t), 4)
#define OVERLAY_HDR_SIZE_DWORDS (sizeof(struct fw_overlay_buf_hdr) / 4)

static u32 qed_get_overlay_addr_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id)
{
	switch (storm_id) {
	case 0:
		return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		    TSTORM_OVERLAY_BUF_ADDR_OFFSET;
	case 1:
		return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		    MSTORM_OVERLAY_BUF_ADDR_OFFSET;
	case 2:
		return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		    USTORM_OVERLAY_BUF_ADDR_OFFSET;
	case 3:
		return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		    XSTORM_OVERLAY_BUF_ADDR_OFFSET;
	case 4:
		return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		    YSTORM_OVERLAY_BUF_ADDR_OFFSET;
	case 5:
		return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		    PSTORM_OVERLAY_BUF_ADDR_OFFSET;

	default:
		return 0;
	}
}

struct phys_mem_desc *qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn,
					       const u32 * const
					       fw_overlay_in_buf,
					       u32 buf_size_in_bytes)
{
	u32 buf_size = buf_size_in_bytes / sizeof(u32), buf_offset = 0;
	struct phys_mem_desc *allocated_mem;

	if (!buf_size)
		return NULL;

	allocated_mem = kcalloc(NUM_STORMS, sizeof(struct phys_mem_desc),
				GFP_KERNEL);
	if (!allocated_mem)
		return NULL;

	/* For each Storm, allocate physical memory for its overlays buffer
	 * and copy the Storm's data into it. The descriptors are already
	 * zeroed by kcalloc() above.
	 */
	while (buf_offset < buf_size) {
		struct phys_mem_desc *storm_mem_desc;
		struct fw_overlay_buf_hdr *hdr;
		u32 storm_buf_size;
		u8 storm_id;

		hdr =
		    (struct fw_overlay_buf_hdr *)&fw_overlay_in_buf[buf_offset];
		storm_buf_size = GET_FIELD(hdr->data,
					   FW_OVERLAY_BUF_HDR_BUF_SIZE);
		storm_id = GET_FIELD(hdr->data, FW_OVERLAY_BUF_HDR_STORM_ID);
		if (storm_id >= NUM_STORMS)
			break;
		storm_mem_desc = allocated_mem + storm_id;
		storm_mem_desc->size = storm_buf_size * sizeof(u32);

		/* Allocate physical memory for Storm's overlays buffer */
		storm_mem_desc->virt_addr =
		    dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       storm_mem_desc->size,
				       &storm_mem_desc->phys_addr, GFP_KERNEL);
		if (!storm_mem_desc->virt_addr)
			break;

		/* Skip overlays buffer header */
		buf_offset += OVERLAY_HDR_SIZE_DWORDS;

		/* Copy Storm's overlays buffer to allocated memory */
		memcpy(storm_mem_desc->virt_addr,
		       &fw_overlay_in_buf[buf_offset], storm_mem_desc->size);

		/* Advance to next Storm */
		buf_offset += storm_buf_size;
	}

	/* If memory allocation has failed, free all allocated memory */
	if (buf_offset < buf_size) {
		qed_fw_overlay_mem_free(p_hwfn, &allocated_mem);
		return NULL;
	}

	return allocated_mem;
}

void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt,
			     struct phys_mem_desc *fw_overlay_mem)
{
	u8 storm_id;

	for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
		struct phys_mem_desc *storm_mem_desc =
		    (struct phys_mem_desc *)fw_overlay_mem + storm_id;
		u32 ram_addr, i;

		/* Skip Storms with no FW overlays */
		if (!storm_mem_desc->virt_addr)
			continue;

		/* Calculate overlay RAM GRC address of current PF */
		ram_addr = qed_get_overlay_addr_ram_addr(p_hwfn, storm_id) +
			   sizeof(dma_addr_t) * p_hwfn->rel_pf_id;

		/* Write Storm's overlay physical address to RAM */
		for (i = 0; i < PHYS_ADDR_DWORDS; i++, ram_addr += sizeof(u32))
			qed_wr(p_hwfn, p_ptt, ram_addr,
			       ((u32 *)&storm_mem_desc->phys_addr)[i]);
	}
}

void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
			     struct phys_mem_desc **fw_overlay_mem)
{
	u8 storm_id;

	if (!fw_overlay_mem || !(*fw_overlay_mem))
		return;

	for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
		struct phys_mem_desc *storm_mem_desc =
		    (struct phys_mem_desc *)*fw_overlay_mem + storm_id;

		/* Free Storm's physical memory */
		if (storm_mem_desc->virt_addr)
			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
					  storm_mem_desc->size,
					  storm_mem_desc->virt_addr,
					  storm_mem_desc->phys_addr);
	}

	/* Free allocated virtual memory */
	kfree(*fw_overlay_mem);
	*fw_overlay_mem = NULL;
}