1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019, Intel Corporation. */
3 
4 #include "ice_common.h"
5 #include "ice_flex_pipe.h"
6 #include "ice_flow.h"
7 
8 /* To support tunneling entries by PF, the package will append the PF number to
9  * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
10  */
11 static const struct ice_tunnel_type_scan tnls[] = {
12 	{ TNL_VXLAN,		"TNL_VXLAN_PF" },
13 	{ TNL_GENEVE,		"TNL_GENEVE_PF" },
14 	{ TNL_LAST,		"" }
15 };
16 
17 static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {
18 	/* SWITCH */
19 	{
20 		ICE_SID_XLT0_SW,
21 		ICE_SID_XLT_KEY_BUILDER_SW,
22 		ICE_SID_XLT1_SW,
23 		ICE_SID_XLT2_SW,
24 		ICE_SID_PROFID_TCAM_SW,
25 		ICE_SID_PROFID_REDIR_SW,
26 		ICE_SID_FLD_VEC_SW,
27 		ICE_SID_CDID_KEY_BUILDER_SW,
28 		ICE_SID_CDID_REDIR_SW
29 	},
30 
31 	/* ACL */
32 	{
33 		ICE_SID_XLT0_ACL,
34 		ICE_SID_XLT_KEY_BUILDER_ACL,
35 		ICE_SID_XLT1_ACL,
36 		ICE_SID_XLT2_ACL,
37 		ICE_SID_PROFID_TCAM_ACL,
38 		ICE_SID_PROFID_REDIR_ACL,
39 		ICE_SID_FLD_VEC_ACL,
40 		ICE_SID_CDID_KEY_BUILDER_ACL,
41 		ICE_SID_CDID_REDIR_ACL
42 	},
43 
44 	/* FD */
45 	{
46 		ICE_SID_XLT0_FD,
47 		ICE_SID_XLT_KEY_BUILDER_FD,
48 		ICE_SID_XLT1_FD,
49 		ICE_SID_XLT2_FD,
50 		ICE_SID_PROFID_TCAM_FD,
51 		ICE_SID_PROFID_REDIR_FD,
52 		ICE_SID_FLD_VEC_FD,
53 		ICE_SID_CDID_KEY_BUILDER_FD,
54 		ICE_SID_CDID_REDIR_FD
55 	},
56 
57 	/* RSS */
58 	{
59 		ICE_SID_XLT0_RSS,
60 		ICE_SID_XLT_KEY_BUILDER_RSS,
61 		ICE_SID_XLT1_RSS,
62 		ICE_SID_XLT2_RSS,
63 		ICE_SID_PROFID_TCAM_RSS,
64 		ICE_SID_PROFID_REDIR_RSS,
65 		ICE_SID_FLD_VEC_RSS,
66 		ICE_SID_CDID_KEY_BUILDER_RSS,
67 		ICE_SID_CDID_REDIR_RSS
68 	},
69 
70 	/* PE */
71 	{
72 		ICE_SID_XLT0_PE,
73 		ICE_SID_XLT_KEY_BUILDER_PE,
74 		ICE_SID_XLT1_PE,
75 		ICE_SID_XLT2_PE,
76 		ICE_SID_PROFID_TCAM_PE,
77 		ICE_SID_PROFID_REDIR_PE,
78 		ICE_SID_FLD_VEC_PE,
79 		ICE_SID_CDID_KEY_BUILDER_PE,
80 		ICE_SID_CDID_REDIR_PE
81 	}
82 };
83 
84 /**
85  * ice_sect_id - returns section ID
86  * @blk: block type
87  * @sect: section type
88  *
89  * This helper function returns the proper section ID given a block type and a
90  * section type.
91  */
92 static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)
93 {
94 	return ice_sect_lkup[blk][sect];
95 }
96 
97 /**
98  * ice_pkg_val_buf
99  * @buf: pointer to the ice buffer
100  *
101  * This helper function validates a buffer's header.
102  */
103 static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
104 {
105 	struct ice_buf_hdr *hdr;
106 	u16 section_count;
107 	u16 data_end;
108 
109 	hdr = (struct ice_buf_hdr *)buf->buf;
110 	/* verify data */
111 	section_count = le16_to_cpu(hdr->section_count);
112 	if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
113 		return NULL;
114 
115 	data_end = le16_to_cpu(hdr->data_end);
116 	if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
117 		return NULL;
118 
119 	return hdr;
120 }
121 
122 /**
123  * ice_find_buf_table
124  * @ice_seg: pointer to the ice segment
125  *
126  * Returns the address of the buffer table within the ice segment.
127  */
128 static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
129 {
130 	struct ice_nvm_table *nvms;
131 
132 	nvms = (struct ice_nvm_table *)
133 		(ice_seg->device_table +
134 		 le32_to_cpu(ice_seg->device_table_count));
135 
136 	return (__force struct ice_buf_table *)
137 		(nvms->vers + le32_to_cpu(nvms->table_count));
138 }
139 
140 /**
141  * ice_pkg_enum_buf
142  * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
143  * @state: pointer to the enum state
144  *
145  * This function will enumerate all the buffers in the ice segment. The first
146  * call is made with the ice_seg parameter non-NULL; on subsequent calls,
147  * ice_seg is set to NULL which continues the enumeration. When the function
148  * returns a NULL pointer, then the end of the buffers has been reached, or an
149  * unexpected value has been detected (for example an invalid section count or
150  * an invalid buffer end value).
151  */
152 static struct ice_buf_hdr *
153 ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
154 {
155 	if (ice_seg) {
156 		state->buf_table = ice_find_buf_table(ice_seg);
157 		if (!state->buf_table)
158 			return NULL;
159 
160 		state->buf_idx = 0;
161 		return ice_pkg_val_buf(state->buf_table->buf_array);
162 	}
163 
164 	if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count))
165 		return ice_pkg_val_buf(state->buf_table->buf_array +
166 				       state->buf_idx);
167 	else
168 		return NULL;
169 }
170 
171 /**
172  * ice_pkg_advance_sect
173  * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
174  * @state: pointer to the enum state
175  *
176  * This helper function will advance the section within the ice segment,
177  * also advancing the buffer if needed.
178  */
179 static bool
180 ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
181 {
182 	if (!ice_seg && !state->buf)
183 		return false;
184 
185 	if (!ice_seg && state->buf)
186 		if (++state->sect_idx < le16_to_cpu(state->buf->section_count))
187 			return true;
188 
189 	state->buf = ice_pkg_enum_buf(ice_seg, state);
190 	if (!state->buf)
191 		return false;
192 
193 	/* start of new buffer, reset section index */
194 	state->sect_idx = 0;
195 	return true;
196 }
197 
198 /**
199  * ice_pkg_enum_section
200  * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
201  * @state: pointer to the enum state
202  * @sect_type: section type to enumerate
203  *
204  * This function will enumerate all the sections of a particular type in the
205  * ice segment. The first call is made with the ice_seg parameter non-NULL;
206  * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
207  * When the function returns a NULL pointer, then the end of the matching
208  * sections has been reached.
209  */
210 static void *
211 ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
212 		     u32 sect_type)
213 {
214 	u16 offset, size;
215 
216 	if (ice_seg)
217 		state->type = sect_type;
218 
219 	if (!ice_pkg_advance_sect(ice_seg, state))
220 		return NULL;
221 
222 	/* scan for next matching section */
223 	while (state->buf->section_entry[state->sect_idx].type !=
224 	       cpu_to_le32(state->type))
225 		if (!ice_pkg_advance_sect(NULL, state))
226 			return NULL;
227 
228 	/* validate section */
229 	offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
230 	if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
231 		return NULL;
232 
233 	size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size);
234 	if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
235 		return NULL;
236 
237 	/* make sure the section fits in the buffer */
238 	if (offset + size > ICE_PKG_BUF_SIZE)
239 		return NULL;
240 
241 	state->sect_type =
242 		le32_to_cpu(state->buf->section_entry[state->sect_idx].type);
243 
244 	/* calc pointer to this section */
245 	state->sect = ((u8 *)state->buf) +
246 		le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
247 
248 	return state->sect;
249 }
250 
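/* Usage sketch (illustrative only, not called anywhere in this file): walk
 * every section of a given type. The first call passes the real ice_seg
 * pointer; each following call passes NULL to continue from the saved state:
 *
 *	struct ice_pkg_enum state = { 0 };
 *	void *sect;
 *
 *	sect = ice_pkg_enum_section(ice_seg, &state, ICE_SID_RXPARSER_BOOST_TCAM);
 *	while (sect) {
 *		(inspect the section here)
 *		sect = ice_pkg_enum_section(NULL, &state, 0);
 *	}
 */
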
251 /**
252  * ice_pkg_enum_entry
253  * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
254  * @state: pointer to the enum state
255  * @sect_type: section type to enumerate
256  * @offset: pointer to variable that receives the offset in the table (optional)
 * @handler: function that handles access to the entries in the section
258  *
 * This function will enumerate all the entries of a particular section type in
 * the ice segment. The first call is made with the ice_seg parameter non-NULL;
261  * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
262  * When the function returns a NULL pointer, then the end of the entries has
263  * been reached.
264  *
265  * Since each section may have a different header and entry size, the handler
 * function is needed to determine the number and location of entries in each
267  * section.
268  *
269  * The offset parameter is optional, but should be used for sections that
270  * contain an offset for each section table. For such cases, the section handler
 * function must return the appropriate offset + index to give the absolute
 * offset for each entry. For example, if the base for a section's header
 * indicates a base offset of 10, and the index for the entry is 2, then the
 * section handler function should set the offset to 10 + 2 = 12.
275  */
276 static void *
277 ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
278 		   u32 sect_type, u32 *offset,
279 		   void *(*handler)(u32 sect_type, void *section,
280 				    u32 index, u32 *offset))
281 {
282 	void *entry;
283 
284 	if (ice_seg) {
285 		if (!handler)
286 			return NULL;
287 
288 		if (!ice_pkg_enum_section(ice_seg, state, sect_type))
289 			return NULL;
290 
291 		state->entry_idx = 0;
292 		state->handler = handler;
293 	} else {
294 		state->entry_idx++;
295 	}
296 
297 	if (!state->handler)
298 		return NULL;
299 
300 	/* get entry */
301 	entry = state->handler(state->sect_type, state->sect, state->entry_idx,
302 			       offset);
303 	if (!entry) {
304 		/* end of a section, look for another section of this type */
305 		if (!ice_pkg_enum_section(NULL, state, 0))
306 			return NULL;
307 
308 		state->entry_idx = 0;
309 		entry = state->handler(state->sect_type, state->sect,
310 				       state->entry_idx, offset);
311 	}
312 
313 	return entry;
314 }
315 
316 /**
317  * ice_boost_tcam_handler
318  * @sect_type: section type
319  * @section: pointer to section
320  * @index: index of the boost TCAM entry to be returned
321  * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
322  *
323  * This is a callback function that can be passed to ice_pkg_enum_entry.
324  * Handles enumeration of individual boost TCAM entries.
325  */
326 static void *
327 ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
328 {
329 	struct ice_boost_tcam_section *boost;
330 
331 	if (!section)
332 		return NULL;
333 
334 	if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
335 		return NULL;
336 
337 	if (index > ICE_MAX_BST_TCAMS_IN_BUF)
338 		return NULL;
339 
340 	if (offset)
341 		*offset = 0;
342 
343 	boost = section;
344 	if (index >= le16_to_cpu(boost->count))
345 		return NULL;
346 
347 	return boost->tcam + index;
348 }
349 
350 /**
351  * ice_find_boost_entry
352  * @ice_seg: pointer to the ice segment (non-NULL)
353  * @addr: Boost TCAM address of entry to search for
354  * @entry: returns pointer to the entry
355  *
356  * Finds a particular Boost TCAM entry and returns a pointer to that entry
357  * if it is found. The ice_seg parameter must not be NULL since the first call
358  * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
359  */
360 static enum ice_status
361 ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
362 		     struct ice_boost_tcam_entry **entry)
363 {
364 	struct ice_boost_tcam_entry *tcam;
365 	struct ice_pkg_enum state;
366 
367 	memset(&state, 0, sizeof(state));
368 
369 	if (!ice_seg)
370 		return ICE_ERR_PARAM;
371 
372 	do {
373 		tcam = ice_pkg_enum_entry(ice_seg, &state,
374 					  ICE_SID_RXPARSER_BOOST_TCAM, NULL,
375 					  ice_boost_tcam_handler);
376 		if (tcam && le16_to_cpu(tcam->addr) == addr) {
377 			*entry = tcam;
378 			return 0;
379 		}
380 
381 		ice_seg = NULL;
382 	} while (tcam);
383 
384 	*entry = NULL;
385 	return ICE_ERR_CFG;
386 }
387 
388 /**
389  * ice_label_enum_handler
390  * @sect_type: section type
391  * @section: pointer to section
392  * @index: index of the label entry to be returned
393  * @offset: pointer to receive absolute offset, always zero for label sections
394  *
395  * This is a callback function that can be passed to ice_pkg_enum_entry.
396  * Handles enumeration of individual label entries.
397  */
398 static void *
399 ice_label_enum_handler(u32 __always_unused sect_type, void *section, u32 index,
400 		       u32 *offset)
401 {
402 	struct ice_label_section *labels;
403 
404 	if (!section)
405 		return NULL;
406 
407 	if (index > ICE_MAX_LABELS_IN_BUF)
408 		return NULL;
409 
410 	if (offset)
411 		*offset = 0;
412 
413 	labels = section;
414 	if (index >= le16_to_cpu(labels->count))
415 		return NULL;
416 
417 	return labels->label + index;
418 }
419 
420 /**
421  * ice_enum_labels
422  * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
423  * @type: the section type that will contain the label (0 on subsequent calls)
424  * @state: ice_pkg_enum structure that will hold the state of the enumeration
425  * @value: pointer to a value that will return the label's value if found
426  *
427  * Enumerates a list of labels in the package. The caller will call
428  * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
 * ice_enum_labels(NULL, 0, ...) to continue. When the function returns NULL,
 * the end of the list has been reached.
431  */
432 static char *
433 ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
434 		u16 *value)
435 {
436 	struct ice_label *label;
437 
438 	/* Check for valid label section on first call */
439 	if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
440 		return NULL;
441 
442 	label = ice_pkg_enum_entry(ice_seg, state, type, NULL,
443 				   ice_label_enum_handler);
444 	if (!label)
445 		return NULL;
446 
447 	*value = le16_to_cpu(label->value);
448 	return label->name;
449 }
450 
451 /**
452  * ice_init_pkg_hints
453  * @hw: pointer to the HW structure
454  * @ice_seg: pointer to the segment of the package scan (non-NULL)
455  *
456  * This function will scan the package and save off relevant information
457  * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
458  * since the first call to ice_enum_labels requires a pointer to an actual
459  * ice_seg structure.
460  */
461 static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
462 {
463 	struct ice_pkg_enum state;
464 	char *label_name;
465 	u16 val;
466 	int i;
467 
468 	memset(&hw->tnl, 0, sizeof(hw->tnl));
469 	memset(&state, 0, sizeof(state));
470 
471 	if (!ice_seg)
472 		return;
473 
474 	label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
475 				     &val);
476 
477 	while (label_name && hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
478 		for (i = 0; tnls[i].type != TNL_LAST; i++) {
479 			size_t len = strlen(tnls[i].label_prefix);
480 
481 			/* Look for matching label start, before continuing */
482 			if (strncmp(label_name, tnls[i].label_prefix, len))
483 				continue;
484 
485 			/* Make sure this label matches our PF. Note that the PF
486 			 * character ('0' - '7') will be located where our
487 			 * prefix string's null terminator is located.
488 			 */
489 			if ((label_name[len] - '0') == hw->pf_id) {
490 				hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
491 				hw->tnl.tbl[hw->tnl.count].valid = false;
492 				hw->tnl.tbl[hw->tnl.count].in_use = false;
493 				hw->tnl.tbl[hw->tnl.count].marked = false;
494 				hw->tnl.tbl[hw->tnl.count].boost_addr = val;
495 				hw->tnl.tbl[hw->tnl.count].port = 0;
496 				hw->tnl.count++;
497 				break;
498 			}
499 		}
500 
501 		label_name = ice_enum_labels(NULL, 0, &state, &val);
502 	}
503 
504 	/* Cache the appropriate boost TCAM entry pointers */
505 	for (i = 0; i < hw->tnl.count; i++) {
506 		ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
507 				     &hw->tnl.tbl[i].boost_entry);
508 		if (hw->tnl.tbl[i].boost_entry)
509 			hw->tnl.tbl[i].valid = true;
510 	}
511 }
512 
513 /* Key creation */
514 
515 #define ICE_DC_KEY	0x1	/* don't care */
516 #define ICE_DC_KEYINV	0x1
517 #define ICE_NM_KEY	0x0	/* never match */
518 #define ICE_NM_KEYINV	0x0
519 #define ICE_0_KEY	0x1	/* match 0 */
520 #define ICE_0_KEYINV	0x0
521 #define ICE_1_KEY	0x0	/* match 1 */
522 #define ICE_1_KEYINV	0x1
523 
524 /**
525  * ice_gen_key_word - generate 16-bits of a key/mask word
526  * @val: the value
527  * @valid: valid bits mask (change only the valid bits)
528  * @dont_care: don't care mask
529  * @nvr_mtch: never match mask
 * @key: pointer to where the resulting key portion will be stored
 * @key_inv: pointer to where the resulting key invert portion will be stored
532  *
 * This function generates 16 bits from an 8-bit value, an 8-bit don't care
 * mask and an 8-bit never match mask. The 16 bits of output are divided into
 * 8 bits of key and 8 bits of key invert.
536  *
537  *     '0' =    b01, always match a 0 bit
538  *     '1' =    b10, always match a 1 bit
539  *     '?' =    b11, don't care bit (always matches)
540  *     '~' =    b00, never match bit
541  *
542  * Input:
543  *          val:         b0  1  0  1  0  1
544  *          dont_care:   b0  0  1  1  0  0
545  *          never_mtch:  b0  0  0  0  1  1
546  *          ------------------------------
547  * Result:  key:        b01 10 11 11 00 00
548  */
549 static enum ice_status
550 ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
551 		 u8 *key_inv)
552 {
553 	u8 in_key = *key, in_key_inv = *key_inv;
554 	u8 i;
555 
556 	/* 'dont_care' and 'nvr_mtch' masks cannot overlap */
557 	if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
558 		return ICE_ERR_CFG;
559 
560 	*key = 0;
561 	*key_inv = 0;
562 
563 	/* encode the 8 bits into 8-bit key and 8-bit key invert */
564 	for (i = 0; i < 8; i++) {
565 		*key >>= 1;
566 		*key_inv >>= 1;
567 
568 		if (!(valid & 0x1)) { /* change only valid bits */
569 			*key |= (in_key & 0x1) << 7;
570 			*key_inv |= (in_key_inv & 0x1) << 7;
571 		} else if (dont_care & 0x1) { /* don't care bit */
572 			*key |= ICE_DC_KEY << 7;
573 			*key_inv |= ICE_DC_KEYINV << 7;
574 		} else if (nvr_mtch & 0x1) { /* never match bit */
575 			*key |= ICE_NM_KEY << 7;
576 			*key_inv |= ICE_NM_KEYINV << 7;
577 		} else if (val & 0x01) { /* exact 1 match */
578 			*key |= ICE_1_KEY << 7;
579 			*key_inv |= ICE_1_KEYINV << 7;
580 		} else { /* exact 0 match */
581 			*key |= ICE_0_KEY << 7;
582 			*key_inv |= ICE_0_KEYINV << 7;
583 		}
584 
585 		dont_care >>= 1;
586 		nvr_mtch >>= 1;
587 		valid >>= 1;
588 		val >>= 1;
589 		in_key >>= 1;
590 		in_key_inv >>= 1;
591 	}
592 
593 	return 0;
594 }
595 
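/* Worked example (illustrative only): matching the value 0x2A exactly in all
 * eight bit positions, with no don't care and no never match bits:
 *
 *	u8 key = 0, key_inv = 0;
 *
 *	ice_gen_key_word(0x2A, 0xff, 0, 0, &key, &key_inv);
 *
 * Per the encoding table above ('1' -> key 0/invert 1, '0' -> key 1/invert 0),
 * this leaves key == 0xD5 (~0x2A) and key_inv == 0x2A.
 */
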
596 /**
597  * ice_bits_max_set - determine if the number of bits set is within a maximum
598  * @mask: pointer to the byte array which is the mask
599  * @size: the number of bytes in the mask
600  * @max: the max number of set bits
601  *
 * This function determines if there are at most 'max' bits set in an array.
 * Returns true if the number of bits set is <= max, otherwise returns false.
605  */
606 static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
607 {
608 	u16 count = 0;
609 	u16 i;
610 
611 	/* check each byte */
612 	for (i = 0; i < size; i++) {
613 		/* if 0, go to next byte */
614 		if (!mask[i])
615 			continue;
616 
617 		/* We know there is at least one set bit in this byte because of
618 		 * the above check; if we already have found 'max' number of
619 		 * bits set, then we can return failure now.
620 		 */
621 		if (count == max)
622 			return false;
623 
624 		/* count the bits in this byte, checking threshold */
625 		count += hweight8(mask[i]);
626 		if (count > max)
627 			return false;
628 	}
629 
630 	return true;
631 }
632 
633 /**
634  * ice_set_key - generate a variable sized key with multiples of 16-bits
635  * @key: pointer to where the key will be stored
636  * @size: the size of the complete key in bytes (must be even)
637  * @val: array of 8-bit values that makes up the value portion of the key
638  * @upd: array of 8-bit masks that determine what key portion to update
639  * @dc: array of 8-bit masks that make up the don't care mask
640  * @nm: array of 8-bit masks that make up the never match mask
641  * @off: the offset of the first byte in the key to update
642  * @len: the number of bytes in the key update
643  *
644  * This function generates a key from a value, a don't care mask and a never
645  * match mask.
646  * upd, dc, and nm are optional parameters, and can be NULL:
 *	upd == NULL --> upd mask is all 1's (update all bits)
648  *	dc == NULL --> dc mask is all 0's (no don't care bits)
649  *	nm == NULL --> nm mask is all 0's (no never match bits)
650  */
651 static enum ice_status
652 ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
653 	    u16 len)
654 {
655 	u16 half_size;
656 	u16 i;
657 
658 	/* size must be a multiple of 2 bytes. */
659 	if (size % 2)
660 		return ICE_ERR_CFG;
661 
662 	half_size = size / 2;
663 	if (off + len > half_size)
664 		return ICE_ERR_CFG;
665 
	/* Make sure at most one bit is set in the never match mask. Having more
	 * than one never match mask bit set will cause HW to consume excessive
	 * power; this is a power management efficiency check.
669 	 */
670 #define ICE_NVR_MTCH_BITS_MAX	1
671 	if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
672 		return ICE_ERR_CFG;
673 
674 	for (i = 0; i < len; i++)
675 		if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
676 				     dc ? dc[i] : 0, nm ? nm[i] : 0,
677 				     key + off + i, key + half_size + off + i))
678 			return ICE_ERR_CFG;
679 
680 	return 0;
681 }
682 
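/* Layout sketch (illustrative only): for a 4-byte key (size = 4, so
 * half_size = 2), writing one byte at off = 0 places the generated key byte
 * in key[0] and the matching key-invert byte in key[2]:
 *
 *	u8 key[4] = { 0 };
 *	u8 val = 0x2A;
 *
 *	ice_set_key(key, sizeof(key), &val, NULL, NULL, NULL, 0, 1);
 *
 * With NULL upd/dc/nm masks every bit is an exact match, so key[0] ends up as
 * ~0x2A and key[2] as 0x2A (see ice_gen_key_word() above).
 */
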
683 /**
684  * ice_acquire_global_cfg_lock
685  * @hw: pointer to the HW structure
686  * @access: access type (read or write)
687  *
688  * This function will request ownership of the global config lock for reading
689  * or writing of the package. When attempting to obtain write access, the
690  * caller must check for the following two return values:
691  *
 * 0                  - Means the caller has acquired the global config lock
 *                      and can perform writing of the package.
694  * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
695  *                      package or has found that no update was necessary; in
696  *                      this case, the caller can just skip performing any
697  *                      update of the package.
698  */
699 static enum ice_status
700 ice_acquire_global_cfg_lock(struct ice_hw *hw,
701 			    enum ice_aq_res_access_type access)
702 {
703 	enum ice_status status;
704 
705 	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
706 				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);
707 
708 	if (!status)
709 		mutex_lock(&ice_global_cfg_lock_sw);
710 	else if (status == ICE_ERR_AQ_NO_WORK)
711 		ice_debug(hw, ICE_DBG_PKG,
712 			  "Global config lock: No work to do\n");
713 
714 	return status;
715 }
716 
717 /**
718  * ice_release_global_cfg_lock
719  * @hw: pointer to the HW structure
720  *
721  * This function will release the global config lock.
722  */
723 static void ice_release_global_cfg_lock(struct ice_hw *hw)
724 {
725 	mutex_unlock(&ice_global_cfg_lock_sw);
726 	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
727 }
728 
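/* Calling sketch (illustrative only), matching the contract described above
 * and the real usage in ice_dwnld_cfg_bufs() below. ICE_ERR_AQ_NO_WORK is not
 * treated as a failure; it just means there is nothing left to download:
 *
 *	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
 *	if (status)
 *		return status == ICE_ERR_AQ_NO_WORK ? 0 : status;
 *
 *	(call ice_aq_download_pkg() for each buffer)
 *
 *	ice_release_global_cfg_lock(hw);
 */
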
729 /**
730  * ice_acquire_change_lock
731  * @hw: pointer to the HW structure
732  * @access: access type (read or write)
733  *
734  * This function will request ownership of the change lock.
735  */
736 static enum ice_status
737 ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
738 {
739 	return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
740 			       ICE_CHANGE_LOCK_TIMEOUT);
741 }
742 
743 /**
744  * ice_release_change_lock
745  * @hw: pointer to the HW structure
746  *
747  * This function will release the change lock using the proper Admin Command.
748  */
749 static void ice_release_change_lock(struct ice_hw *hw)
750 {
751 	ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
752 }
753 
754 /**
755  * ice_aq_download_pkg
756  * @hw: pointer to the hardware structure
757  * @pkg_buf: the package buffer to transfer
758  * @buf_size: the size of the package buffer
759  * @last_buf: last buffer indicator
760  * @error_offset: returns error offset
761  * @error_info: returns error information
762  * @cd: pointer to command details structure or NULL
763  *
764  * Download Package (0x0C40)
765  */
766 static enum ice_status
767 ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
768 		    u16 buf_size, bool last_buf, u32 *error_offset,
769 		    u32 *error_info, struct ice_sq_cd *cd)
770 {
771 	struct ice_aqc_download_pkg *cmd;
772 	struct ice_aq_desc desc;
773 	enum ice_status status;
774 
775 	if (error_offset)
776 		*error_offset = 0;
777 	if (error_info)
778 		*error_info = 0;
779 
780 	cmd = &desc.params.download_pkg;
781 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
782 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
783 
784 	if (last_buf)
785 		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
786 
787 	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
788 	if (status == ICE_ERR_AQ_ERROR) {
789 		/* Read error from buffer only when the FW returned an error */
790 		struct ice_aqc_download_pkg_resp *resp;
791 
792 		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
793 		if (error_offset)
794 			*error_offset = le32_to_cpu(resp->error_offset);
795 		if (error_info)
796 			*error_info = le32_to_cpu(resp->error_info);
797 	}
798 
799 	return status;
800 }
801 
802 /**
803  * ice_aq_update_pkg
804  * @hw: pointer to the hardware structure
805  * @pkg_buf: the package cmd buffer
806  * @buf_size: the size of the package cmd buffer
807  * @last_buf: last buffer indicator
808  * @error_offset: returns error offset
809  * @error_info: returns error information
810  * @cd: pointer to command details structure or NULL
811  *
812  * Update Package (0x0C42)
813  */
814 static enum ice_status
815 ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
816 		  bool last_buf, u32 *error_offset, u32 *error_info,
817 		  struct ice_sq_cd *cd)
818 {
819 	struct ice_aqc_download_pkg *cmd;
820 	struct ice_aq_desc desc;
821 	enum ice_status status;
822 
823 	if (error_offset)
824 		*error_offset = 0;
825 	if (error_info)
826 		*error_info = 0;
827 
828 	cmd = &desc.params.download_pkg;
829 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
830 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
831 
832 	if (last_buf)
833 		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
834 
835 	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
836 	if (status == ICE_ERR_AQ_ERROR) {
837 		/* Read error from buffer only when the FW returned an error */
838 		struct ice_aqc_download_pkg_resp *resp;
839 
840 		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
841 		if (error_offset)
842 			*error_offset = le32_to_cpu(resp->error_offset);
843 		if (error_info)
844 			*error_info = le32_to_cpu(resp->error_info);
845 	}
846 
847 	return status;
848 }
849 
850 /**
851  * ice_find_seg_in_pkg
852  * @hw: pointer to the hardware structure
 * @seg_type: the segment type to search for (e.g., SEGMENT_TYPE_CPK)
854  * @pkg_hdr: pointer to the package header to be searched
855  *
856  * This function searches a package file for a particular segment type. On
857  * success it returns a pointer to the segment header, otherwise it will
858  * return NULL.
859  */
860 static struct ice_generic_seg_hdr *
861 ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
862 		    struct ice_pkg_hdr *pkg_hdr)
863 {
864 	u32 i;
865 
866 	ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
867 		  pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
868 		  pkg_hdr->pkg_format_ver.update,
869 		  pkg_hdr->pkg_format_ver.draft);
870 
871 	/* Search all package segments for the requested segment type */
872 	for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) {
873 		struct ice_generic_seg_hdr *seg;
874 
875 		seg = (struct ice_generic_seg_hdr *)
876 			((u8 *)pkg_hdr + le32_to_cpu(pkg_hdr->seg_offset[i]));
877 
878 		if (le32_to_cpu(seg->seg_type) == seg_type)
879 			return seg;
880 	}
881 
882 	return NULL;
883 }
884 
885 /**
886  * ice_update_pkg
887  * @hw: pointer to the hardware structure
888  * @bufs: pointer to an array of buffers
889  * @count: the number of buffers in the array
890  *
891  * Obtains change lock and updates package.
892  */
893 static enum ice_status
894 ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
895 {
896 	enum ice_status status;
897 	u32 offset, info, i;
898 
899 	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
900 	if (status)
901 		return status;
902 
903 	for (i = 0; i < count; i++) {
904 		struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
905 		bool last = ((i + 1) == count);
906 
907 		status = ice_aq_update_pkg(hw, bh, le16_to_cpu(bh->data_end),
908 					   last, &offset, &info, NULL);
909 
910 		if (status) {
911 			ice_debug(hw, ICE_DBG_PKG,
912 				  "Update pkg failed: err %d off %d inf %d\n",
913 				  status, offset, info);
914 			break;
915 		}
916 	}
917 
918 	ice_release_change_lock(hw);
919 
920 	return status;
921 }
922 
923 /**
924  * ice_dwnld_cfg_bufs
925  * @hw: pointer to the hardware structure
926  * @bufs: pointer to an array of buffers
927  * @count: the number of buffers in the array
928  *
929  * Obtains global config lock and downloads the package configuration buffers
930  * to the firmware. Metadata buffers are skipped, and the first metadata buffer
931  * found indicates that the rest of the buffers are all metadata buffers.
932  */
933 static enum ice_status
934 ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
935 {
936 	enum ice_status status;
937 	struct ice_buf_hdr *bh;
938 	u32 offset, info, i;
939 
940 	if (!bufs || !count)
941 		return ICE_ERR_PARAM;
942 
943 	/* If the first buffer's first section has its metadata bit set
944 	 * then there are no buffers to be downloaded, and the operation is
945 	 * considered a success.
946 	 */
947 	bh = (struct ice_buf_hdr *)bufs;
948 	if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
949 		return 0;
950 
951 	/* reset pkg_dwnld_status in case this function is called in the
952 	 * reset/rebuild flow
953 	 */
954 	hw->pkg_dwnld_status = ICE_AQ_RC_OK;
955 
956 	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
957 	if (status) {
958 		if (status == ICE_ERR_AQ_NO_WORK)
959 			hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST;
960 		else
961 			hw->pkg_dwnld_status = hw->adminq.sq_last_status;
962 		return status;
963 	}
964 
965 	for (i = 0; i < count; i++) {
966 		bool last = ((i + 1) == count);
967 
968 		if (!last) {
969 			/* check next buffer for metadata flag */
970 			bh = (struct ice_buf_hdr *)(bufs + i + 1);
971 
972 			/* A set metadata flag in the next buffer will signal
973 			 * that the current buffer will be the last buffer
974 			 * downloaded
975 			 */
976 			if (le16_to_cpu(bh->section_count))
977 				if (le32_to_cpu(bh->section_entry[0].type) &
978 				    ICE_METADATA_BUF)
979 					last = true;
980 		}
981 
982 		bh = (struct ice_buf_hdr *)(bufs + i);
983 
984 		status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
985 					     &offset, &info, NULL);
986 
987 		/* Save AQ status from download package */
988 		hw->pkg_dwnld_status = hw->adminq.sq_last_status;
989 		if (status) {
990 			ice_debug(hw, ICE_DBG_PKG,
991 				  "Pkg download failed: err %d off %d inf %d\n",
992 				  status, offset, info);
993 
994 			break;
995 		}
996 
997 		if (last)
998 			break;
999 	}
1000 
1001 	ice_release_global_cfg_lock(hw);
1002 
1003 	return status;
1004 }
1005 
1006 /**
1007  * ice_aq_get_pkg_info_list
1008  * @hw: pointer to the hardware structure
1009  * @pkg_info: the buffer which will receive the information list
1010  * @buf_size: the size of the pkg_info information buffer
1011  * @cd: pointer to command details structure or NULL
1012  *
1013  * Get Package Info List (0x0C43)
1014  */
1015 static enum ice_status
1016 ice_aq_get_pkg_info_list(struct ice_hw *hw,
1017 			 struct ice_aqc_get_pkg_info_resp *pkg_info,
1018 			 u16 buf_size, struct ice_sq_cd *cd)
1019 {
1020 	struct ice_aq_desc desc;
1021 
1022 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
1023 
1024 	return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
1025 }
1026 
1027 /**
1028  * ice_download_pkg
1029  * @hw: pointer to the hardware structure
1030  * @ice_seg: pointer to the segment of the package to be downloaded
1031  *
1032  * Handles the download of a complete package.
1033  */
1034 static enum ice_status
1035 ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
1036 {
1037 	struct ice_buf_table *ice_buf_tbl;
1038 
1039 	ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
1040 		  ice_seg->hdr.seg_format_ver.major,
1041 		  ice_seg->hdr.seg_format_ver.minor,
1042 		  ice_seg->hdr.seg_format_ver.update,
1043 		  ice_seg->hdr.seg_format_ver.draft);
1044 
1045 	ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
1046 		  le32_to_cpu(ice_seg->hdr.seg_type),
1047 		  le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);
1048 
1049 	ice_buf_tbl = ice_find_buf_table(ice_seg);
1050 
1051 	ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
1052 		  le32_to_cpu(ice_buf_tbl->buf_count));
1053 
1054 	return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
1055 				  le32_to_cpu(ice_buf_tbl->buf_count));
1056 }
1057 
1058 /**
1059  * ice_init_pkg_info
1060  * @hw: pointer to the hardware structure
1061  * @pkg_hdr: pointer to the driver's package hdr
1062  *
1063  * Saves off the package details into the HW structure.
1064  */
1065 static enum ice_status
1066 ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
1067 {
1068 	struct ice_global_metadata_seg *meta_seg;
1069 	struct ice_generic_seg_hdr *seg_hdr;
1070 
1071 	if (!pkg_hdr)
1072 		return ICE_ERR_PARAM;
1073 
1074 	meta_seg = (struct ice_global_metadata_seg *)
1075 		   ice_find_seg_in_pkg(hw, SEGMENT_TYPE_METADATA, pkg_hdr);
1076 	if (meta_seg) {
1077 		hw->pkg_ver = meta_seg->pkg_ver;
1078 		memcpy(hw->pkg_name, meta_seg->pkg_name, sizeof(hw->pkg_name));
1079 
1080 		ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
1081 			  meta_seg->pkg_ver.major, meta_seg->pkg_ver.minor,
1082 			  meta_seg->pkg_ver.update, meta_seg->pkg_ver.draft,
1083 			  meta_seg->pkg_name);
1084 	} else {
1085 		ice_debug(hw, ICE_DBG_INIT,
1086 			  "Did not find metadata segment in driver package\n");
1087 		return ICE_ERR_CFG;
1088 	}
1089 
1090 	seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
1091 	if (seg_hdr) {
1092 		hw->ice_pkg_ver = seg_hdr->seg_format_ver;
1093 		memcpy(hw->ice_pkg_name, seg_hdr->seg_id,
1094 		       sizeof(hw->ice_pkg_name));
1095 
1096 		ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
1097 			  seg_hdr->seg_format_ver.major,
1098 			  seg_hdr->seg_format_ver.minor,
1099 			  seg_hdr->seg_format_ver.update,
1100 			  seg_hdr->seg_format_ver.draft,
1101 			  seg_hdr->seg_id);
1102 	} else {
1103 		ice_debug(hw, ICE_DBG_INIT,
1104 			  "Did not find ice segment in driver package\n");
1105 		return ICE_ERR_CFG;
1106 	}
1107 
1108 	return 0;
1109 }
1110 
1111 /**
1112  * ice_get_pkg_info
1113  * @hw: pointer to the hardware structure
1114  *
1115  * Store details of the package currently loaded in HW into the HW structure.
1116  */
1117 static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
1118 {
1119 	struct ice_aqc_get_pkg_info_resp *pkg_info;
1120 	enum ice_status status;
1121 	u16 size;
1122 	u32 i;
1123 
1124 	size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
1125 	pkg_info = kzalloc(size, GFP_KERNEL);
1126 	if (!pkg_info)
1127 		return ICE_ERR_NO_MEMORY;
1128 
1129 	status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL);
1130 	if (status)
1131 		goto init_pkg_free_alloc;
1132 
1133 	for (i = 0; i < le32_to_cpu(pkg_info->count); i++) {
1134 #define ICE_PKG_FLAG_COUNT	4
1135 		char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
1136 		u8 place = 0;
1137 
1138 		if (pkg_info->pkg_info[i].is_active) {
1139 			flags[place++] = 'A';
1140 			hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
1141 			hw->active_track_id =
1142 				le32_to_cpu(pkg_info->pkg_info[i].track_id);
1143 			memcpy(hw->active_pkg_name,
1144 			       pkg_info->pkg_info[i].name,
1145 			       sizeof(pkg_info->pkg_info[i].name));
1146 			hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
1147 		}
1148 		if (pkg_info->pkg_info[i].is_active_at_boot)
1149 			flags[place++] = 'B';
1150 		if (pkg_info->pkg_info[i].is_modified)
1151 			flags[place++] = 'M';
1152 		if (pkg_info->pkg_info[i].is_in_nvm)
1153 			flags[place++] = 'N';
1154 
1155 		ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
1156 			  i, pkg_info->pkg_info[i].ver.major,
1157 			  pkg_info->pkg_info[i].ver.minor,
1158 			  pkg_info->pkg_info[i].ver.update,
1159 			  pkg_info->pkg_info[i].ver.draft,
1160 			  pkg_info->pkg_info[i].name, flags);
1161 	}
1162 
1163 init_pkg_free_alloc:
1164 	kfree(pkg_info);
1165 
1166 	return status;
1167 }
1168 
1169 /**
1170  * ice_verify_pkg - verify package
1171  * @pkg: pointer to the package buffer
1172  * @len: size of the package buffer
1173  *
1174  * Verifies various attributes of the package file, including length, format
1175  * version, and the requirement of at least one segment.
1176  */
1177 static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
1178 {
1179 	u32 seg_count;
1180 	u32 i;
1181 
1182 	if (len < struct_size(pkg, seg_offset, 1))
1183 		return ICE_ERR_BUF_TOO_SHORT;
1184 
1185 	if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
1186 	    pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
1187 	    pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
1188 	    pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
1189 		return ICE_ERR_CFG;
1190 
1191 	/* pkg must have at least one segment */
1192 	seg_count = le32_to_cpu(pkg->seg_count);
1193 	if (seg_count < 1)
1194 		return ICE_ERR_CFG;
1195 
1196 	/* make sure segment array fits in package length */
1197 	if (len < struct_size(pkg, seg_offset, seg_count))
1198 		return ICE_ERR_BUF_TOO_SHORT;
1199 
1200 	/* all segments must fit within length */
1201 	for (i = 0; i < seg_count; i++) {
1202 		u32 off = le32_to_cpu(pkg->seg_offset[i]);
1203 		struct ice_generic_seg_hdr *seg;
1204 
1205 		/* segment header must fit */
1206 		if (len < off + sizeof(*seg))
1207 			return ICE_ERR_BUF_TOO_SHORT;
1208 
1209 		seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
1210 
1211 		/* segment body must fit */
1212 		if (len < off + le32_to_cpu(seg->seg_size))
1213 			return ICE_ERR_BUF_TOO_SHORT;
1214 	}
1215 
1216 	return 0;
1217 }
1218 
1219 /**
1220  * ice_free_seg - free package segment pointer
1221  * @hw: pointer to the hardware structure
1222  *
 * Frees the package segment pointer in the proper manner, depending on
 * whether the segment was allocated or just the passed in pointer was stored.
1225  */
1226 void ice_free_seg(struct ice_hw *hw)
1227 {
1228 	if (hw->pkg_copy) {
1229 		devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy);
1230 		hw->pkg_copy = NULL;
1231 		hw->pkg_size = 0;
1232 	}
1233 	hw->seg = NULL;
1234 }
1235 
1236 /**
1237  * ice_init_pkg_regs - initialize additional package registers
1238  * @hw: pointer to the hardware structure
1239  */
1240 static void ice_init_pkg_regs(struct ice_hw *hw)
1241 {
1242 #define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
1243 #define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
1244 #define ICE_SW_BLK_IDX	0
1245 
1246 	/* setup Switch block input mask, which is 48-bits in two parts */
1247 	wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
1248 	wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
1249 }
1250 
1251 /**
1252  * ice_chk_pkg_version - check package version for compatibility with driver
1253  * @pkg_ver: pointer to a version structure to check
1254  *
1255  * Check to make sure that the package about to be downloaded is compatible with
1256  * the driver. To be compatible, the major and minor components of the package
1257  * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
1258  * definitions.
1259  */
1260 static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
1261 {
1262 	if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ ||
1263 	    pkg_ver->minor != ICE_PKG_SUPP_VER_MNR)
1264 		return ICE_ERR_NOT_SUPPORTED;
1265 
1266 	return 0;
1267 }
1268 
1269 /**
1270  * ice_chk_pkg_compat
1271  * @hw: pointer to the hardware structure
1272  * @ospkg: pointer to the package hdr
1273  * @seg: pointer to the package segment hdr
1274  *
1275  * This function checks the package version compatibility with driver and NVM
 * This function checks the package version compatibility with the driver and
 * NVM.
1277 static enum ice_status
1278 ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
1279 		   struct ice_seg **seg)
1280 {
1281 	struct ice_aqc_get_pkg_info_resp *pkg;
1282 	enum ice_status status;
1283 	u16 size;
1284 	u32 i;
1285 
1286 	/* Check package version compatibility */
1287 	status = ice_chk_pkg_version(&hw->pkg_ver);
1288 	if (status) {
1289 		ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
1290 		return status;
1291 	}
1292 
1293 	/* find ICE segment in given package */
1294 	*seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE,
1295 						     ospkg);
1296 	if (!*seg) {
1297 		ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
1298 		return ICE_ERR_CFG;
1299 	}
1300 
1301 	/* Check if FW is compatible with the OS package */
1302 	size = struct_size(pkg, pkg_info, ICE_PKG_CNT);
1303 	pkg = kzalloc(size, GFP_KERNEL);
1304 	if (!pkg)
1305 		return ICE_ERR_NO_MEMORY;
1306 
1307 	status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL);
1308 	if (status)
1309 		goto fw_ddp_compat_free_alloc;
1310 
1311 	for (i = 0; i < le32_to_cpu(pkg->count); i++) {
1312 		/* loop till we find the NVM package */
1313 		if (!pkg->pkg_info[i].is_in_nvm)
1314 			continue;
1315 		if ((*seg)->hdr.seg_format_ver.major !=
1316 			pkg->pkg_info[i].ver.major ||
1317 		    (*seg)->hdr.seg_format_ver.minor >
1318 			pkg->pkg_info[i].ver.minor) {
1319 			status = ICE_ERR_FW_DDP_MISMATCH;
1320 			ice_debug(hw, ICE_DBG_INIT,
1321 				  "OS package is not compatible with NVM.\n");
1322 		}
1323 		/* done processing NVM package so break */
1324 		break;
1325 	}
1326 fw_ddp_compat_free_alloc:
1327 	kfree(pkg);
1328 	return status;
1329 }
1330 
1331 /**
1332  * ice_init_pkg - initialize/download package
1333  * @hw: pointer to the hardware structure
1334  * @buf: pointer to the package buffer
1335  * @len: size of the package buffer
1336  *
1337  * This function initializes a package. The package contains HW tables
1338  * required to do packet processing. First, the function extracts package
1339  * information such as version. Then it finds the ice configuration segment
1340  * within the package; this function then saves a copy of the segment pointer
1341  * within the supplied package buffer. Next, the function will cache any hints
 * from the package, followed by downloading the package itself. Note that if
1343  * a previous PF driver has already downloaded the package successfully, then
1344  * the current driver will not have to download the package again.
1345  *
1346  * The local package contents will be used to query default behavior and to
1347  * update specific sections of the HW's version of the package (e.g. to update
1348  * the parse graph to understand new protocols).
1349  *
1350  * This function stores a pointer to the package buffer memory, and it is
1351  * expected that the supplied buffer will not be freed immediately. If the
1352  * package buffer needs to be freed, such as when read from a file, use
 * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg().
1355  */
1356 enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
1357 {
1358 	struct ice_pkg_hdr *pkg;
1359 	enum ice_status status;
1360 	struct ice_seg *seg;
1361 
1362 	if (!buf || !len)
1363 		return ICE_ERR_PARAM;
1364 
1365 	pkg = (struct ice_pkg_hdr *)buf;
1366 	status = ice_verify_pkg(pkg, len);
1367 	if (status) {
1368 		ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
1369 			  status);
1370 		return status;
1371 	}
1372 
1373 	/* initialize package info */
1374 	status = ice_init_pkg_info(hw, pkg);
1375 	if (status)
1376 		return status;
1377 
1378 	/* before downloading the package, check package version for
1379 	 * compatibility with driver
1380 	 */
1381 	status = ice_chk_pkg_compat(hw, pkg, &seg);
1382 	if (status)
1383 		return status;
1384 
1385 	/* initialize package hints and then download package */
1386 	ice_init_pkg_hints(hw, seg);
1387 	status = ice_download_pkg(hw, seg);
1388 	if (status == ICE_ERR_AQ_NO_WORK) {
1389 		ice_debug(hw, ICE_DBG_INIT,
1390 			  "package previously loaded - no work.\n");
1391 		status = 0;
1392 	}
1393 
1394 	/* Get information on the package currently loaded in HW, then make sure
1395 	 * the driver is compatible with this version.
1396 	 */
1397 	if (!status) {
1398 		status = ice_get_pkg_info(hw);
1399 		if (!status)
1400 			status = ice_chk_pkg_version(&hw->active_pkg_ver);
1401 	}
1402 
1403 	if (!status) {
1404 		hw->seg = seg;
1405 		/* on successful package download update other required
1406 		 * registers to support the package and fill HW tables
1407 		 * with package content.
1408 		 */
1409 		ice_init_pkg_regs(hw);
1410 		ice_fill_blk_tbls(hw);
1411 	} else {
1412 		ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
1413 			  status);
1414 	}
1415 
1416 	return status;
1417 }
1418 
1419 /**
1420  * ice_copy_and_init_pkg - initialize/download a copy of the package
1421  * @hw: pointer to the hardware structure
1422  * @buf: pointer to the package buffer
1423  * @len: size of the package buffer
1424  *
1425  * This function copies the package buffer, and then calls ice_init_pkg() to
1426  * initialize the copied package contents.
1427  *
1428  * The copying is necessary if the package buffer supplied is constant, or if
1429  * the memory may disappear shortly after calling this function.
1430  *
1431  * If the package buffer resides in the data segment and can be modified, the
1432  * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
1433  *
1434  * However, if the package buffer needs to be copied first, such as when being
1435  * read from a file, the caller should use ice_copy_and_init_pkg().
1436  *
1437  * This function will first copy the package buffer, before calling
1438  * ice_init_pkg(). The caller is free to immediately destroy the original
1439  * package buffer, as the new copy will be managed by this function and
1440  * related routines.
1441  */
1442 enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
1443 {
1444 	enum ice_status status;
1445 	u8 *buf_copy;
1446 
1447 	if (!buf || !len)
1448 		return ICE_ERR_PARAM;
1449 
	buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL);
	if (!buf_copy)
		return ICE_ERR_NO_MEMORY;
1451 
1452 	status = ice_init_pkg(hw, buf_copy, len);
1453 	if (status) {
1454 		/* Free the copy, since we failed to initialize the package */
1455 		devm_kfree(ice_hw_to_dev(hw), buf_copy);
1456 	} else {
1457 		/* Track the copied pkg so we can free it later */
1458 		hw->pkg_copy = buf_copy;
1459 		hw->pkg_size = len;
1460 	}
1461 
1462 	return status;
1463 }
1464 
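/* Usage sketch (illustrative only; the firmware file name below is an
 * assumption, not something this file defines): loading a DDP package
 * obtained through request_firmware(), where the firmware buffer is const and
 * is released right after the call - exactly the case this wrapper exists for:
 *
 *	const struct firmware *fw;
 *
 *	if (request_firmware(&fw, "intel/ice/ddp/ice.pkg", dev))
 *		return;
 *	status = ice_copy_and_init_pkg(hw, fw->data, fw->size);
 *	release_firmware(fw);
 */
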
1465 /**
1466  * ice_pkg_buf_alloc
1467  * @hw: pointer to the HW structure
1468  *
1469  * Allocates a package buffer and returns a pointer to the buffer header.
1470  * Note: all package contents must be in Little Endian form.
1471  */
1472 static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
1473 {
1474 	struct ice_buf_build *bld;
1475 	struct ice_buf_hdr *buf;
1476 
1477 	bld = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*bld), GFP_KERNEL);
1478 	if (!bld)
1479 		return NULL;
1480 
1481 	buf = (struct ice_buf_hdr *)bld;
1482 	buf->data_end = cpu_to_le16(offsetof(struct ice_buf_hdr,
1483 					     section_entry));
1484 	return bld;
1485 }
1486 
1487 /**
1488  * ice_pkg_buf_free
1489  * @hw: pointer to the HW structure
1490  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1491  *
1492  * Frees a package buffer
1493  */
1494 static void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
1495 {
1496 	devm_kfree(ice_hw_to_dev(hw), bld);
1497 }
1498 
1499 /**
1500  * ice_pkg_buf_reserve_section
1501  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1502  * @count: the number of sections to reserve
1503  *
1504  * Reserves one or more section table entries in a package buffer. This routine
 * can be called multiple times as long as the calls are made before calling
 * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() has been
 * called, the number of sections that can be allocated can no longer be
 * increased; not using all reserved sections is fine, but this will result in
 * some wasted space in the buffer.
1510  * Note: all package contents must be in Little Endian form.
1511  */
1512 static enum ice_status
1513 ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
1514 {
1515 	struct ice_buf_hdr *buf;
1516 	u16 section_count;
1517 	u16 data_end;
1518 
1519 	if (!bld)
1520 		return ICE_ERR_PARAM;
1521 
1522 	buf = (struct ice_buf_hdr *)&bld->buf;
1523 
1524 	/* already an active section, can't increase table size */
1525 	section_count = le16_to_cpu(buf->section_count);
1526 	if (section_count > 0)
1527 		return ICE_ERR_CFG;
1528 
1529 	if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
1530 		return ICE_ERR_CFG;
1531 	bld->reserved_section_table_entries += count;
1532 
1533 	data_end = le16_to_cpu(buf->data_end) +
1534 		   (count * sizeof(buf->section_entry[0]));
1535 	buf->data_end = cpu_to_le16(data_end);
1536 
1537 	return 0;
1538 }
1539 
1540 /**
1541  * ice_pkg_buf_alloc_section
1542  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1543  * @type: the section type value
1544  * @size: the size of the section to reserve (in bytes)
1545  *
 * Reserves memory in the buffer for a section's content and updates the
 * buffer's status accordingly. This routine returns a pointer to the first
1548  * byte of the section start within the buffer, which is used to fill in the
1549  * section contents.
1550  * Note: all package contents must be in Little Endian form.
1551  */
1552 static void *
1553 ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
1554 {
1555 	struct ice_buf_hdr *buf;
1556 	u16 sect_count;
1557 	u16 data_end;
1558 
1559 	if (!bld || !type || !size)
1560 		return NULL;
1561 
1562 	buf = (struct ice_buf_hdr *)&bld->buf;
1563 
1564 	/* check for enough space left in buffer */
1565 	data_end = le16_to_cpu(buf->data_end);
1566 
1567 	/* section start must align on 4 byte boundary */
1568 	data_end = ALIGN(data_end, 4);
1569 
1570 	if ((data_end + size) > ICE_MAX_S_DATA_END)
1571 		return NULL;
1572 
1573 	/* check for more available section table entries */
1574 	sect_count = le16_to_cpu(buf->section_count);
1575 	if (sect_count < bld->reserved_section_table_entries) {
1576 		void *section_ptr = ((u8 *)buf) + data_end;
1577 
1578 		buf->section_entry[sect_count].offset = cpu_to_le16(data_end);
1579 		buf->section_entry[sect_count].size = cpu_to_le16(size);
1580 		buf->section_entry[sect_count].type = cpu_to_le32(type);
1581 
1582 		data_end += size;
1583 		buf->data_end = cpu_to_le16(data_end);
1584 
1585 		buf->section_count = cpu_to_le16(sect_count + 1);
1586 		return section_ptr;
1587 	}
1588 
1589 	/* no free section table entries */
1590 	return NULL;
1591 }
1592 
1593 /**
1594  * ice_pkg_buf_get_active_sections
1595  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1596  *
1597  * Returns the number of active sections. Before using the package buffer
1598  * in an update package command, the caller should make sure that there is at
1599  * least one active section - otherwise, the buffer is not legal and should
1600  * not be used.
1601  * Note: all package contents must be in Little Endian form.
1602  */
1603 static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
1604 {
1605 	struct ice_buf_hdr *buf;
1606 
1607 	if (!bld)
1608 		return 0;
1609 
1610 	buf = (struct ice_buf_hdr *)&bld->buf;
1611 	return le16_to_cpu(buf->section_count);
1612 }
1613 
1614 /**
1615  * ice_pkg_buf
1616  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1617  *
1618  * Return a pointer to the buffer's header
1619  */
1620 static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
1621 {
1622 	if (!bld)
1623 		return NULL;
1624 
1625 	return &bld->buf;
1626 }
1627 
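/* Build-and-update sketch (illustrative only; the section type, size and fill
 * step are placeholders): the helpers above are meant to be used together in
 * this order, as ice_create_tunnel() below does for the boost TCAM sections:
 *
 *	bld = ice_pkg_buf_alloc(hw);			(allocate the build)
 *	ice_pkg_buf_reserve_section(bld, n);		(reserve n table entries)
 *	sect = ice_pkg_buf_alloc_section(bld, type, size);
 *							(carve out each section)
 *	(fill *sect in Little Endian form)
 *	ice_update_pkg(hw, ice_pkg_buf(bld), 1);	(send it to firmware)
 *	ice_pkg_buf_free(hw, bld);			(always free the build)
 */
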
1628 /**
1629  * ice_tunnel_port_in_use_hlpr - helper function to determine tunnel usage
1630  * @hw: pointer to the HW structure
1631  * @port: port to search for
1632  * @index: optionally returns index
1633  *
1634  * Returns whether a port is already in use as a tunnel, and optionally its
1635  * index
1636  */
1637 static bool ice_tunnel_port_in_use_hlpr(struct ice_hw *hw, u16 port, u16 *index)
1638 {
1639 	u16 i;
1640 
1641 	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
1642 		if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
1643 			if (index)
1644 				*index = i;
1645 			return true;
1646 		}
1647 
1648 	return false;
1649 }
1650 
1651 /**
1652  * ice_tunnel_port_in_use
1653  * @hw: pointer to the HW structure
1654  * @port: port to search for
1655  * @index: optionally returns index
1656  *
1657  * Returns whether a port is already in use as a tunnel, and optionally its
1658  * index
1659  */
1660 bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index)
1661 {
1662 	bool res;
1663 
1664 	mutex_lock(&hw->tnl_lock);
1665 	res = ice_tunnel_port_in_use_hlpr(hw, port, index);
1666 	mutex_unlock(&hw->tnl_lock);
1667 
1668 	return res;
1669 }
1670 
1671 /**
1672  * ice_find_free_tunnel_entry
1673  * @hw: pointer to the HW structure
1674  * @type: tunnel type
1675  * @index: optionally returns index
1676  *
1677  * Returns whether there is a free tunnel entry, and optionally its index
1678  */
1679 static bool
1680 ice_find_free_tunnel_entry(struct ice_hw *hw, enum ice_tunnel_type type,
1681 			   u16 *index)
1682 {
1683 	u16 i;
1684 
1685 	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
1686 		if (hw->tnl.tbl[i].valid && !hw->tnl.tbl[i].in_use &&
1687 		    hw->tnl.tbl[i].type == type) {
1688 			if (index)
1689 				*index = i;
1690 			return true;
1691 		}
1692 
1693 	return false;
1694 }
1695 
1696 /**
1697  * ice_get_open_tunnel_port - retrieve an open tunnel port
1698  * @hw: pointer to the HW structure
1699  * @type: tunnel type (TNL_ALL will return any open port)
1700  * @port: returns open port
1701  */
1702 bool
1703 ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
1704 			 u16 *port)
1705 {
1706 	bool res = false;
1707 	u16 i;
1708 
1709 	mutex_lock(&hw->tnl_lock);
1710 
1711 	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
1712 		if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
1713 		    (type == TNL_ALL || hw->tnl.tbl[i].type == type)) {
1714 			*port = hw->tnl.tbl[i].port;
1715 			res = true;
1716 			break;
1717 		}
1718 
1719 	mutex_unlock(&hw->tnl_lock);
1720 
1721 	return res;
1722 }
1723 
1724 /**
1725  * ice_create_tunnel
1726  * @hw: pointer to the HW structure
1727  * @type: type of tunnel
1728  * @port: port of tunnel to create
1729  *
1730  * Create a tunnel by updating the parse graph in the parser. We do that by
1731  * creating a package buffer with the tunnel info and issuing an update package
1732  * command.
1733  */
1734 enum ice_status
1735 ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port)
1736 {
1737 	struct ice_boost_tcam_section *sect_rx, *sect_tx;
1738 	enum ice_status status = ICE_ERR_MAX_LIMIT;
1739 	struct ice_buf_build *bld;
1740 	u16 index;
1741 
1742 	mutex_lock(&hw->tnl_lock);
1743 
1744 	if (ice_tunnel_port_in_use_hlpr(hw, port, &index)) {
1745 		hw->tnl.tbl[index].ref++;
1746 		status = 0;
1747 		goto ice_create_tunnel_end;
1748 	}
1749 
1750 	if (!ice_find_free_tunnel_entry(hw, type, &index)) {
1751 		status = ICE_ERR_OUT_OF_RANGE;
1752 		goto ice_create_tunnel_end;
1753 	}
1754 
1755 	bld = ice_pkg_buf_alloc(hw);
1756 	if (!bld) {
1757 		status = ICE_ERR_NO_MEMORY;
1758 		goto ice_create_tunnel_end;
1759 	}
1760 
1761 	/* allocate 2 sections, one for Rx parser, one for Tx parser */
1762 	if (ice_pkg_buf_reserve_section(bld, 2))
1763 		goto ice_create_tunnel_err;
1764 
1765 	sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
1766 					    struct_size(sect_rx, tcam, 1));
1767 	if (!sect_rx)
1768 		goto ice_create_tunnel_err;
1769 	sect_rx->count = cpu_to_le16(1);
1770 
1771 	sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
1772 					    struct_size(sect_tx, tcam, 1));
1773 	if (!sect_tx)
1774 		goto ice_create_tunnel_err;
1775 	sect_tx->count = cpu_to_le16(1);
1776 
1777 	/* copy original boost entry to update package buffer */
1778 	memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
1779 	       sizeof(*sect_rx->tcam));
1780 
1781 	/* over-write the never-match dest port key bits with the encoded port
1782 	 * bits
1783 	 */
1784 	ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
1785 		    (u8 *)&port, NULL, NULL, NULL,
1786 		    (u16)offsetof(struct ice_boost_key_value, hv_dst_port_key),
1787 		    sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key));
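
	/* At this point the Rx entry is a copy of the package's "never match"
	 * boost template, except that the destination port key now carries
	 * the caller's UDP port, so the parser should start treating frames
	 * to that port as this tunnel type once the package update is applied.
	 */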
1788 
1789 	/* exact copy of entry to Tx section entry */
1790 	memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam));
1791 
1792 	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
1793 	if (!status) {
1794 		hw->tnl.tbl[index].port = port;
1795 		hw->tnl.tbl[index].in_use = true;
1796 		hw->tnl.tbl[index].ref = 1;
1797 	}
1798 
1799 ice_create_tunnel_err:
1800 	ice_pkg_buf_free(hw, bld);
1801 
1802 ice_create_tunnel_end:
1803 	mutex_unlock(&hw->tnl_lock);
1804 
1805 	return status;
1806 }
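
/* Illustrative usage (not part of this file): the driver's UDP tunnel port
 * notification path ends up calling something like
 *
 *	ice_create_tunnel(&pf->hw, TNL_VXLAN, port);
 *
 * for a newly advertised VXLAN destination port, and balances it later with
 * ice_destroy_tunnel(&pf->hw, port, false).
 */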
1807 
1808 /**
1809  * ice_destroy_tunnel
1810  * @hw: pointer to the HW structure
1811  * @port: port of tunnel to destroy (ignored if the all parameter is true)
1812  * @all: flag that states to destroy all tunnels
1813  *
1814  * Destroys a tunnel or all tunnels by creating an update package buffer
1815  * targeting the specific updates requested and then performing an update
1816  * package.
1817  */
1818 enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
1819 {
1820 	struct ice_boost_tcam_section *sect_rx, *sect_tx;
1821 	enum ice_status status = ICE_ERR_MAX_LIMIT;
1822 	struct ice_buf_build *bld;
1823 	u16 count = 0;
1824 	u16 index;
1825 	u16 size;
1826 	u16 i, j;
1827 
1828 	mutex_lock(&hw->tnl_lock);
1829 
1830 	if (!all && ice_tunnel_port_in_use_hlpr(hw, port, &index))
1831 		if (hw->tnl.tbl[index].ref > 1) {
1832 			hw->tnl.tbl[index].ref--;
1833 			status = 0;
1834 			goto ice_destroy_tunnel_end;
1835 		}
1836 
1837 	/* determine count */
1838 	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
1839 		if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
1840 		    (all || hw->tnl.tbl[i].port == port))
1841 			count++;
1842 
1843 	if (!count) {
1844 		status = ICE_ERR_PARAM;
1845 		goto ice_destroy_tunnel_end;
1846 	}
1847 
1848 	/* size of section - there is at least one entry */
1849 	size = struct_size(sect_rx, tcam, count);
1850 
1851 	bld = ice_pkg_buf_alloc(hw);
1852 	if (!bld) {
1853 		status = ICE_ERR_NO_MEMORY;
1854 		goto ice_destroy_tunnel_end;
1855 	}
1856 
1857 	/* allocate 2 sections, one for Rx parser, one for Tx parser */
1858 	if (ice_pkg_buf_reserve_section(bld, 2))
1859 		goto ice_destroy_tunnel_err;
1860 
1861 	sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
1862 					    size);
1863 	if (!sect_rx)
1864 		goto ice_destroy_tunnel_err;
1865 	sect_rx->count = cpu_to_le16(count);
1866 
1867 	sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
1868 					    size);
1869 	if (!sect_tx)
1870 		goto ice_destroy_tunnel_err;
1871 	sect_tx->count = cpu_to_le16(count);
1872 
1873 	/* copy each matching boost entry to the update package buffer, one copy
1874 	 * to the Rx section, another copy to the Tx section
1875 	 */
1876 	for (i = 0, j = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
1877 		if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
1878 		    (all || hw->tnl.tbl[i].port == port)) {
1879 			memcpy(sect_rx->tcam + j, hw->tnl.tbl[i].boost_entry,
1880 			       sizeof(*sect_rx->tcam));
1881 			memcpy(sect_tx->tcam + j, hw->tnl.tbl[i].boost_entry,
1882 			       sizeof(*sect_tx->tcam));
			j++;
1883 			hw->tnl.tbl[i].marked = true;
1884 		}
1885 
1886 	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
1887 	if (!status)
1888 		for (i = 0; i < hw->tnl.count &&
1889 		     i < ICE_TUNNEL_MAX_ENTRIES; i++)
1890 			if (hw->tnl.tbl[i].marked) {
1891 				hw->tnl.tbl[i].ref = 0;
1892 				hw->tnl.tbl[i].port = 0;
1893 				hw->tnl.tbl[i].in_use = false;
1894 				hw->tnl.tbl[i].marked = false;
1895 			}
1896 
1897 ice_destroy_tunnel_err:
1898 	ice_pkg_buf_free(hw, bld);
1899 
1900 ice_destroy_tunnel_end:
1901 	mutex_unlock(&hw->tnl_lock);
1902 
1903 	return status;
1904 }
1905 
1906 /* PTG Management */
1907 
1908 /**
1909  * ice_ptg_find_ptype - Search for packet type group using packet type (ptype)
1910  * @hw: pointer to the hardware structure
1911  * @blk: HW block
1912  * @ptype: the ptype to search for
1913  * @ptg: pointer to variable that receives the PTG
1914  *
1915  * This function will search the PTGs for a particular ptype, returning the
1916  * PTG ID that contains it through the PTG parameter, with the value of
1917  * ICE_DEFAULT_PTG (0) meaning it is part of the default PTG.
1918  */
1919 static enum ice_status
1920 ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
1921 {
1922 	if (ptype >= ICE_XLT1_CNT || !ptg)
1923 		return ICE_ERR_PARAM;
1924 
1925 	*ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
1926 	return 0;
1927 }
1928 
1929 /**
1930  * ice_ptg_alloc_val - Allocates a new packet type group ID by value
1931  * @hw: pointer to the hardware structure
1932  * @blk: HW block
1933  * @ptg: the PTG to allocate
1934  *
1935  * This function allocates a given packet type group ID specified by the PTG
1936  * parameter.
1937  */
1938 static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
1939 {
1940 	hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true;
1941 }
1942 
1943 /**
1944  * ice_ptg_remove_ptype - Removes ptype from a particular packet type group
1945  * @hw: pointer to the hardware structure
1946  * @blk: HW block
1947  * @ptype: the ptype to remove
1948  * @ptg: the PTG to remove the ptype from
1949  *
1950  * This function will remove the ptype from the specific PTG, and move it to
1951  * the default PTG (ICE_DEFAULT_PTG).
1952  */
1953 static enum ice_status
1954 ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
1955 {
1956 	struct ice_ptg_ptype **ch;
1957 	struct ice_ptg_ptype *p;
1958 
1959 	if (ptype > ICE_XLT1_CNT - 1)
1960 		return ICE_ERR_PARAM;
1961 
1962 	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use)
1963 		return ICE_ERR_DOES_NOT_EXIST;
1964 
1965 	/* Should not happen if .in_use is set, bad config */
1966 	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype)
1967 		return ICE_ERR_CFG;
1968 
1969 	/* find the ptype within this PTG, and bypass the link over it */
1970 	p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
1971 	ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
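	/* The list links entries of the flat ptypes array, so the index of
	 * the current node is recovered by pointer arithmetic below
	 * (p - hw->blk[blk].xlt1.ptypes).
	 */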
1972 	while (p) {
1973 		if (ptype == (p - hw->blk[blk].xlt1.ptypes)) {
1974 			*ch = p->next_ptype;
1975 			break;
1976 		}
1977 
1978 		ch = &p->next_ptype;
1979 		p = p->next_ptype;
1980 	}
1981 
1982 	hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG;
1983 	hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL;
1984 
1985 	return 0;
1986 }
1987 
1988 /**
1989  * ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group
1990  * @hw: pointer to the hardware structure
1991  * @blk: HW block
1992  * @ptype: the ptype to add or move
1993  * @ptg: the PTG to add or move the ptype to
1994  *
1995  * This function will either add or move a ptype to a particular PTG depending
1996  * on whether the ptype is already part of another group. Note that using a
1997  * destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
1998  * default PTG.
1999  */
2000 static enum ice_status
2001 ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
2002 {
2003 	enum ice_status status;
2004 	u8 original_ptg;
2005 
2006 	if (ptype > ICE_XLT1_CNT - 1)
2007 		return ICE_ERR_PARAM;
2008 
2009 	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG)
2010 		return ICE_ERR_DOES_NOT_EXIST;
2011 
2012 	status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
2013 	if (status)
2014 		return status;
2015 
2016 	/* Is ptype already in the correct PTG? */
2017 	if (original_ptg == ptg)
2018 		return 0;
2019 
2020 	/* Remove from original PTG and move back to the default PTG */
2021 	if (original_ptg != ICE_DEFAULT_PTG)
2022 		ice_ptg_remove_ptype(hw, blk, ptype, original_ptg);
2023 
2024 	/* Moving to default PTG? Then we're done with this request */
2025 	if (ptg == ICE_DEFAULT_PTG)
2026 		return 0;
2027 
2028 	/* Add ptype to PTG at beginning of list */
2029 	hw->blk[blk].xlt1.ptypes[ptype].next_ptype =
2030 		hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
2031 	hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype =
2032 		&hw->blk[blk].xlt1.ptypes[ptype];
2033 
2034 	hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg;
2035 	hw->blk[blk].xlt1.t[ptype] = ptg;
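	/* also update the XLT1 shadow table so the software copy of the
	 * ptype -> PTG mapping stays in sync with what will be programmed to
	 * hardware (see ice_prof_bld_xlt1())
	 */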
2036 
2037 	return 0;
2038 }
2039 
2040 /* Block / table size info */
2041 struct ice_blk_size_details {
2042 	u16 xlt1;			/* # XLT1 entries */
2043 	u16 xlt2;			/* # XLT2 entries */
2044 	u16 prof_tcam;			/* # profile ID TCAM entries */
2045 	u16 prof_id;			/* # profile IDs */
2046 	u8 prof_cdid_bits;		/* # CDID one-hot bits used in key */
2047 	u16 prof_redir;			/* # profile redirection entries */
2048 	u16 es;				/* # extraction sequence entries */
2049 	u16 fvw;			/* # field vector words */
2050 	u8 overwrite;			/* overwrite existing entries allowed */
2051 	u8 reverse;			/* reverse FV order */
2052 };
2053 
2054 static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = {
2055 	/*
2056 	 * Table Definitions
2057 	 * XLT1 - Number of entries in XLT1 table
2058 	 * XLT2 - Number of entries in XLT2 table
2059 	 * TCAM - Number of entries in the Profile ID TCAM table
2060 	 * CDID - Control Domain ID of the hardware block
2061 	 * PRED - Number of entries in the Profile Redirection Table
2062 	 * FV   - Number of entries in the Field Vector
2063 	 * FVW  - Width (in WORDs) of the Field Vector
2064 	 * OVR  - Overwrite existing table entries
2065 	 * REV  - Reverse FV
2066 	 */
2067 	/*          XLT1        , XLT2        ,TCAM, PID,CDID,PRED,   FV, FVW */
2068 	/*          Overwrite   , Reverse FV */
2069 	/* SW  */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256,   0,  256, 256,  48,
2070 		    false, false },
2071 	/* ACL */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0,  128, 128,  32,
2072 		    false, false },
2073 	/* FD  */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0,  128, 128,  24,
2074 		    false, true  },
2075 	/* RSS */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0,  128, 128,  24,
2076 		    true,  true  },
2077 	/* PE  */ { ICE_XLT1_CNT, ICE_XLT2_CNT,  64,  32,   0,   32,  32,  24,
2078 		    false, false },
2079 };
2080 
2081 enum ice_sid_all {
2082 	ICE_SID_XLT1_OFF = 0,
2083 	ICE_SID_XLT2_OFF,
2084 	ICE_SID_PR_OFF,
2085 	ICE_SID_PR_REDIR_OFF,
2086 	ICE_SID_ES_OFF,
2087 	ICE_SID_OFF_COUNT,
2088 };
2089 
2090 /* Characteristic handling */
2091 
2092 /**
2093  * ice_match_prop_lst - determine if properties of two lists match
2094  * @list1: first properties list
2095  * @list2: second properties list
2096  *
2097  * Count, cookies and the order must all match for the lists to be considered equivalent.
2098  */
2099 static bool
2100 ice_match_prop_lst(struct list_head *list1, struct list_head *list2)
2101 {
2102 	struct ice_vsig_prof *tmp1;
2103 	struct ice_vsig_prof *tmp2;
2104 	u16 chk_count = 0;
2105 	u16 count = 0;
2106 
2107 	/* compare counts */
2108 	list_for_each_entry(tmp1, list1, list)
2109 		count++;
2110 	list_for_each_entry(tmp2, list2, list)
2111 		chk_count++;
2112 	if (!count || count != chk_count)
2113 		return false;
2114 
2115 	tmp1 = list_first_entry(list1, struct ice_vsig_prof, list);
2116 	tmp2 = list_first_entry(list2, struct ice_vsig_prof, list);
2117 
2118 	/* profile cookies must match, and in the exact same order, to take
2119 	 * priority into account
2120 	 */
2121 	while (count--) {
2122 		if (tmp2->profile_cookie != tmp1->profile_cookie)
2123 			return false;
2124 
2125 		tmp1 = list_next_entry(tmp1, list);
2126 		tmp2 = list_next_entry(tmp2, list);
2127 	}
2128 
2129 	return true;
2130 }
2131 
2132 /* VSIG Management */
2133 
2134 /**
2135  * ice_vsig_find_vsi - find a VSIG that contains a specified VSI
2136  * @hw: pointer to the hardware structure
2137  * @blk: HW block
2138  * @vsi: VSI of interest
2139  * @vsig: pointer to receive the VSI group
2140  *
2141  * This function will look up the VSI entry in the XLT2 list and return
2142  * the VSI group it is associated with.
2143  */
2144 static enum ice_status
2145 ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
2146 {
2147 	if (!vsig || vsi >= ICE_MAX_VSI)
2148 		return ICE_ERR_PARAM;
2149 
2150 	/* As long as there's a default or valid VSIG associated with the input
2151 	 * VSI, the function returns success. Any handling of the VSIG will be
2152 	 * done by the following add, update or remove functions.
2153 	 */
2154 	*vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
2155 
2156 	return 0;
2157 }
2158 
2159 /**
2160  * ice_vsig_alloc_val - allocate a new VSIG by value
2161  * @hw: pointer to the hardware structure
2162  * @blk: HW block
2163  * @vsig: the VSIG to allocate
2164  *
2165  * This function will allocate a given VSIG specified by the VSIG parameter.
2166  */
2167 static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig)
2168 {
2169 	u16 idx = vsig & ICE_VSIG_IDX_M;
2170 
2171 	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) {
2172 		INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
2173 		hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true;
2174 	}
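
	/* The returned handle is not just the index: ICE_VSIG_VALUE() combines
	 * the table index with hw->pf_id, which is why lookups mask the handle
	 * with ICE_VSIG_IDX_M before indexing vsig_tbl.
	 */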
2175 
2176 	return ICE_VSIG_VALUE(idx, hw->pf_id);
2177 }
2178 
2179 /**
2180  * ice_vsig_alloc - Finds a free entry and allocates a new VSIG
2181  * @hw: pointer to the hardware structure
2182  * @blk: HW block
2183  *
2184  * This function will iterate through the VSIG list and mark the first
2185  * unused entry as in use for the new VSIG and return that value.
2186  */
2187 static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
2188 {
2189 	u16 i;
2190 
2191 	for (i = 1; i < ICE_MAX_VSIGS; i++)
2192 		if (!hw->blk[blk].xlt2.vsig_tbl[i].in_use)
2193 			return ice_vsig_alloc_val(hw, blk, i);
2194 
2195 	return ICE_DEFAULT_VSIG;
2196 }
2197 
2198 /**
2199  * ice_find_dup_props_vsig - find VSI group with a specified set of properties
2200  * @hw: pointer to the hardware structure
2201  * @blk: HW block
2202  * @chs: characteristic list
2203  * @vsig: returns the VSIG with the matching profiles, if found
2204  *
2205  * Each VSIG is associated with a characteristic set; i.e. all VSIs under
2206  * a group have the same characteristic set. To check whether there exists a
2207  * VSIG with the same characteristics as the input set, this function will
2208  * iterate through the XLT2 list and return the VSIG that has a
2209  * matching configuration. In order to make sure that priorities are accounted
2210  * for, the list must match exactly, including the order in which the
2211  * characteristics are listed.
2212  */
2213 static enum ice_status
2214 ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
2215 			struct list_head *chs, u16 *vsig)
2216 {
2217 	struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2;
2218 	u16 i;
2219 
2220 	for (i = 0; i < xlt2->count; i++)
2221 		if (xlt2->vsig_tbl[i].in_use &&
2222 		    ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) {
2223 			*vsig = ICE_VSIG_VALUE(i, hw->pf_id);
2224 			return 0;
2225 		}
2226 
2227 	return ICE_ERR_DOES_NOT_EXIST;
2228 }
2229 
2230 /**
2231  * ice_vsig_free - free VSI group
2232  * @hw: pointer to the hardware structure
2233  * @blk: HW block
2234  * @vsig: VSIG to remove
2235  *
2236  * The function will remove all VSIs associated with the input VSIG and move
2237  * them to the DEFAULT_VSIG and mark the VSIG available.
2238  */
2239 static enum ice_status
2240 ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
2241 {
2242 	struct ice_vsig_prof *dtmp, *del;
2243 	struct ice_vsig_vsi *vsi_cur;
2244 	u16 idx;
2245 
2246 	idx = vsig & ICE_VSIG_IDX_M;
2247 	if (idx >= ICE_MAX_VSIGS)
2248 		return ICE_ERR_PARAM;
2249 
2250 	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
2251 		return ICE_ERR_DOES_NOT_EXIST;
2252 
2253 	hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false;
2254 
2255 	vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2256 	/* If the VSIG has at least 1 VSI then iterate through the
2257 	 * list and remove the VSIs before deleting the group.
2258 	 */
2259 	if (vsi_cur) {
2260 		/* remove all vsis associated with this VSIG XLT2 entry */
2261 		do {
2262 			struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
2263 
2264 			vsi_cur->vsig = ICE_DEFAULT_VSIG;
2265 			vsi_cur->changed = 1;
2266 			vsi_cur->next_vsi = NULL;
2267 			vsi_cur = tmp;
2268 		} while (vsi_cur);
2269 
2270 		/* NULL terminate head of VSI list */
2271 		hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL;
2272 	}
2273 
2274 	/* free characteristic list */
2275 	list_for_each_entry_safe(del, dtmp,
2276 				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
2277 				 list) {
2278 		list_del(&del->list);
2279 		devm_kfree(ice_hw_to_dev(hw), del);
2280 	}
2281 
2282 	/* if VSIG characteristic list was cleared for reset
2283 	 * re-initialize the list head
2284 	 */
2285 	INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
2286 
2287 	return 0;
2288 }
2289 
2290 /**
2291  * ice_vsig_remove_vsi - remove VSI from VSIG
2292  * @hw: pointer to the hardware structure
2293  * @blk: HW block
2294  * @vsi: VSI to remove
2295  * @vsig: VSI group to remove from
2296  *
2297  * The function will remove the input VSI from its VSI group and move it
2298  * to the DEFAULT_VSIG.
2299  */
2300 static enum ice_status
2301 ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
2302 {
2303 	struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
2304 	u16 idx;
2305 
2306 	idx = vsig & ICE_VSIG_IDX_M;
2307 
2308 	if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
2309 		return ICE_ERR_PARAM;
2310 
2311 	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
2312 		return ICE_ERR_DOES_NOT_EXIST;
2313 
2314 	/* entry already in default VSIG, don't have to remove */
2315 	if (idx == ICE_DEFAULT_VSIG)
2316 		return 0;
2317 
2318 	vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2319 	if (!(*vsi_head))
2320 		return ICE_ERR_CFG;
2321 
2322 	vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
2323 	vsi_cur = (*vsi_head);
2324 
2325 	/* iterate the VSI list, skip over the entry to be removed */
2326 	while (vsi_cur) {
2327 		if (vsi_tgt == vsi_cur) {
2328 			(*vsi_head) = vsi_cur->next_vsi;
2329 			break;
2330 		}
2331 		vsi_head = &vsi_cur->next_vsi;
2332 		vsi_cur = vsi_cur->next_vsi;
2333 	}
2334 
2335 	/* verify if VSI was removed from group list */
2336 	if (!vsi_cur)
2337 		return ICE_ERR_DOES_NOT_EXIST;
2338 
2339 	vsi_cur->vsig = ICE_DEFAULT_VSIG;
2340 	vsi_cur->changed = 1;
2341 	vsi_cur->next_vsi = NULL;
2342 
2343 	return 0;
2344 }
2345 
2346 /**
2347  * ice_vsig_add_mv_vsi - add or move a VSI to a VSI group
2348  * @hw: pointer to the hardware structure
2349  * @blk: HW block
2350  * @vsi: VSI to move
2351  * @vsig: destination VSI group
2352  *
2353  * This function will move or add the input VSI to the target VSIG.
2354  * The function will find the original VSIG the VSI belongs to and
2355  * move the entry to the DEFAULT_VSIG, update the original VSIG and
2356  * then move the entry to the new VSIG.
2357  */
2358 static enum ice_status
2359 ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
2360 {
2361 	struct ice_vsig_vsi *tmp;
2362 	enum ice_status status;
2363 	u16 orig_vsig, idx;
2364 
2365 	idx = vsig & ICE_VSIG_IDX_M;
2366 
2367 	if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
2368 		return ICE_ERR_PARAM;
2369 
2370 	/* if the VSIG is not in use and is not the default VSIG, then this
2371 	 * VSIG doesn't exist.
2372 	 */
2373 	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use &&
2374 	    vsig != ICE_DEFAULT_VSIG)
2375 		return ICE_ERR_DOES_NOT_EXIST;
2376 
2377 	status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
2378 	if (status)
2379 		return status;
2380 
2381 	/* no update required if vsigs match */
2382 	if (orig_vsig == vsig)
2383 		return 0;
2384 
2385 	if (orig_vsig != ICE_DEFAULT_VSIG) {
2386 		/* remove entry from orig_vsig and add to default VSIG */
2387 		status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig);
2388 		if (status)
2389 			return status;
2390 	}
2391 
2392 	if (idx == ICE_DEFAULT_VSIG)
2393 		return 0;
2394 
2395 	/* Create VSI entry and add VSIG and prop_mask values */
2396 	hw->blk[blk].xlt2.vsis[vsi].vsig = vsig;
2397 	hw->blk[blk].xlt2.vsis[vsi].changed = 1;
2398 
2399 	/* Add new entry to the head of the VSIG list */
2400 	tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2401 	hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi =
2402 		&hw->blk[blk].xlt2.vsis[vsi];
2403 	hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp;
2404 	hw->blk[blk].xlt2.t[vsi] = vsig;
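	/* also update the XLT2 shadow table so the software copy of the
	 * VSI -> VSIG mapping stays in sync with what will be programmed to
	 * hardware (see ice_prof_bld_xlt2())
	 */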
2405 
2406 	return 0;
2407 }
2408 
2409 /**
2410  * ice_find_prof_id - find profile ID for a given field vector
2411  * @hw: pointer to the hardware structure
2412  * @blk: HW block
2413  * @fv: field vector to search for
2414  * @prof_id: receives the profile ID
2415  */
2416 static enum ice_status
2417 ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
2418 		 struct ice_fv_word *fv, u8 *prof_id)
2419 {
2420 	struct ice_es *es = &hw->blk[blk].es;
2421 	u16 off;
2422 	u8 i;
2423 
2424 	/* For FD, we don't want to re-use an existing profile with the same
2425 	 * field vector and mask, as that would cause rule interference.
2426 	 */
2427 	if (blk == ICE_BLK_FD)
2428 		return ICE_ERR_DOES_NOT_EXIST;
2429 
2430 	for (i = 0; i < (u8)es->count; i++) {
2431 		off = i * es->fvw;
2432 
2433 		if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
2434 			continue;
2435 
2436 		*prof_id = i;
2437 		return 0;
2438 	}
2439 
2440 	return ICE_ERR_DOES_NOT_EXIST;
2441 }
2442 
2443 /**
2444  * ice_prof_id_rsrc_type - get profile ID resource type for a block type
2445  * @blk: the block type
2446  * @rsrc_type: pointer to variable to receive the resource type
2447  */
2448 static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type)
2449 {
2450 	switch (blk) {
2451 	case ICE_BLK_FD:
2452 		*rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID;
2453 		break;
2454 	case ICE_BLK_RSS:
2455 		*rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID;
2456 		break;
2457 	default:
2458 		return false;
2459 	}
2460 	return true;
2461 }
2462 
2463 /**
2464  * ice_tcam_ent_rsrc_type - get TCAM entry resource type for a block type
2465  * @blk: the block type
2466  * @rsrc_type: pointer to variable to receive the resource type
2467  */
2468 static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
2469 {
2470 	switch (blk) {
2471 	case ICE_BLK_FD:
2472 		*rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM;
2473 		break;
2474 	case ICE_BLK_RSS:
2475 		*rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM;
2476 		break;
2477 	default:
2478 		return false;
2479 	}
2480 	return true;
2481 }
2482 
2483 /**
2484  * ice_alloc_tcam_ent - allocate hardware TCAM entry
2485  * @hw: pointer to the HW struct
2486  * @blk: the block to allocate the TCAM for
2487  * @tcam_idx: pointer to variable to receive the TCAM entry
2488  *
2489  * This function allocates a new entry in a Profile ID TCAM for a specific
2490  * block.
2491  */
2492 static enum ice_status
2493 ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 *tcam_idx)
2494 {
2495 	u16 res_type;
2496 
2497 	if (!ice_tcam_ent_rsrc_type(blk, &res_type))
2498 		return ICE_ERR_PARAM;
2499 
2500 	return ice_alloc_hw_res(hw, res_type, 1, true, tcam_idx);
2501 }
2502 
2503 /**
2504  * ice_free_tcam_ent - free hardware TCAM entry
2505  * @hw: pointer to the HW struct
2506  * @blk: the block from which to free the TCAM entry
2507  * @tcam_idx: the TCAM entry to free
2508  *
2509  * This function frees an entry in a Profile ID TCAM for a specific block.
2510  */
2511 static enum ice_status
2512 ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
2513 {
2514 	u16 res_type;
2515 
2516 	if (!ice_tcam_ent_rsrc_type(blk, &res_type))
2517 		return ICE_ERR_PARAM;
2518 
2519 	return ice_free_hw_res(hw, res_type, 1, &tcam_idx);
2520 }
2521 
2522 /**
2523  * ice_alloc_prof_id - allocate profile ID
2524  * @hw: pointer to the HW struct
2525  * @blk: the block to allocate the profile ID for
2526  * @prof_id: pointer to variable to receive the profile ID
2527  *
2528  * This function allocates a new profile ID, which also corresponds to a Field
2529  * Vector (Extraction Sequence) entry.
2530  */
2531 static enum ice_status
2532 ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
2533 {
2534 	enum ice_status status;
2535 	u16 res_type;
2536 	u16 get_prof;
2537 
2538 	if (!ice_prof_id_rsrc_type(blk, &res_type))
2539 		return ICE_ERR_PARAM;
2540 
2541 	status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof);
2542 	if (!status)
2543 		*prof_id = (u8)get_prof;
2544 
2545 	return status;
2546 }
2547 
2548 /**
2549  * ice_free_prof_id - free profile ID
2550  * @hw: pointer to the HW struct
2551  * @blk: the block from which to free the profile ID
2552  * @prof_id: the profile ID to free
2553  *
2554  * This function frees a profile ID, which also corresponds to a Field Vector.
2555  */
2556 static enum ice_status
2557 ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
2558 {
2559 	u16 tmp_prof_id = (u16)prof_id;
2560 	u16 res_type;
2561 
2562 	if (!ice_prof_id_rsrc_type(blk, &res_type))
2563 		return ICE_ERR_PARAM;
2564 
2565 	return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id);
2566 }
2567 
2568 /**
2569  * ice_prof_inc_ref - increment reference count for profile
2570  * @hw: pointer to the HW struct
2571  * @blk: the block from which to free the profile ID
2572  * @prof_id: the profile ID for which to increment the reference count
2573  */
2574 static enum ice_status
2575 ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
2576 {
2577 	if (prof_id >= hw->blk[blk].es.count)
2578 		return ICE_ERR_PARAM;
2579 
2580 	hw->blk[blk].es.ref_count[prof_id]++;
2581 
2582 	return 0;
2583 }
2584 
2585 /**
2586  * ice_write_es - write an extraction sequence to hardware
2587  * @hw: pointer to the HW struct
2588  * @blk: the block in which to write the extraction sequence
2589  * @prof_id: the profile ID to write
2590  * @fv: pointer to the extraction sequence to write - NULL to clear extraction
2591  */
2592 static void
2593 ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
2594 	     struct ice_fv_word *fv)
2595 {
2596 	u16 off;
2597 
2598 	off = prof_id * hw->blk[blk].es.fvw;
2599 	if (!fv) {
2600 		memset(&hw->blk[blk].es.t[off], 0,
2601 		       hw->blk[blk].es.fvw * sizeof(*fv));
2602 		hw->blk[blk].es.written[prof_id] = false;
2603 	} else {
2604 		memcpy(&hw->blk[blk].es.t[off], fv,
2605 		       hw->blk[blk].es.fvw * sizeof(*fv));
2606 	}
2607 }
2608 
2609 /**
2610  * ice_prof_dec_ref - decrement reference count for profile
2611  * @hw: pointer to the HW struct
2612  * @blk: the block from which to free the profile ID
2613  * @prof_id: the profile ID for which to decrement the reference count
2614  */
2615 static enum ice_status
2616 ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
2617 {
2618 	if (prof_id >= hw->blk[blk].es.count)
2619 		return ICE_ERR_PARAM;
2620 
2621 	if (hw->blk[blk].es.ref_count[prof_id] > 0) {
2622 		if (!--hw->blk[blk].es.ref_count[prof_id]) {
2623 			ice_write_es(hw, blk, prof_id, NULL);
2624 			return ice_free_prof_id(hw, blk, prof_id);
2625 		}
2626 	}
2627 
2628 	return 0;
2629 }
2630 
2631 /* Block / table section IDs */
2632 static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = {
2633 	/* SWITCH */
2634 	{	ICE_SID_XLT1_SW,
2635 		ICE_SID_XLT2_SW,
2636 		ICE_SID_PROFID_TCAM_SW,
2637 		ICE_SID_PROFID_REDIR_SW,
2638 		ICE_SID_FLD_VEC_SW
2639 	},
2640 
2641 	/* ACL */
2642 	{	ICE_SID_XLT1_ACL,
2643 		ICE_SID_XLT2_ACL,
2644 		ICE_SID_PROFID_TCAM_ACL,
2645 		ICE_SID_PROFID_REDIR_ACL,
2646 		ICE_SID_FLD_VEC_ACL
2647 	},
2648 
2649 	/* FD */
2650 	{	ICE_SID_XLT1_FD,
2651 		ICE_SID_XLT2_FD,
2652 		ICE_SID_PROFID_TCAM_FD,
2653 		ICE_SID_PROFID_REDIR_FD,
2654 		ICE_SID_FLD_VEC_FD
2655 	},
2656 
2657 	/* RSS */
2658 	{	ICE_SID_XLT1_RSS,
2659 		ICE_SID_XLT2_RSS,
2660 		ICE_SID_PROFID_TCAM_RSS,
2661 		ICE_SID_PROFID_REDIR_RSS,
2662 		ICE_SID_FLD_VEC_RSS
2663 	},
2664 
2665 	/* PE */
2666 	{	ICE_SID_XLT1_PE,
2667 		ICE_SID_XLT2_PE,
2668 		ICE_SID_PROFID_TCAM_PE,
2669 		ICE_SID_PROFID_REDIR_PE,
2670 		ICE_SID_FLD_VEC_PE
2671 	}
2672 };
2673 
2674 /**
2675  * ice_init_sw_xlt1_db - init software XLT1 database from HW tables
2676  * @hw: pointer to the hardware structure
2677  * @blk: the HW block to initialize
2678  */
2679 static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk)
2680 {
2681 	u16 pt;
2682 
2683 	for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) {
2684 		u8 ptg;
2685 
2686 		ptg = hw->blk[blk].xlt1.t[pt];
2687 		if (ptg != ICE_DEFAULT_PTG) {
2688 			ice_ptg_alloc_val(hw, blk, ptg);
2689 			ice_ptg_add_mv_ptype(hw, blk, pt, ptg);
2690 		}
2691 	}
2692 }
2693 
2694 /**
2695  * ice_init_sw_xlt2_db - init software XLT2 database from HW tables
2696  * @hw: pointer to the hardware structure
2697  * @blk: the HW block to initialize
2698  */
2699 static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk)
2700 {
2701 	u16 vsi;
2702 
2703 	for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) {
2704 		u16 vsig;
2705 
2706 		vsig = hw->blk[blk].xlt2.t[vsi];
2707 		if (vsig) {
2708 			ice_vsig_alloc_val(hw, blk, vsig);
2709 			ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
2710 			/* no changes at this time, since this has been
2711 			 * initialized from the original package
2712 			 */
2713 			hw->blk[blk].xlt2.vsis[vsi].changed = 0;
2714 		}
2715 	}
2716 }
2717 
2718 /**
2719  * ice_init_sw_db - init software database from HW tables
2720  * @hw: pointer to the hardware structure
2721  */
2722 static void ice_init_sw_db(struct ice_hw *hw)
2723 {
2724 	u16 i;
2725 
2726 	for (i = 0; i < ICE_BLK_COUNT; i++) {
2727 		ice_init_sw_xlt1_db(hw, (enum ice_block)i);
2728 		ice_init_sw_xlt2_db(hw, (enum ice_block)i);
2729 	}
2730 }
2731 
2732 /**
2733  * ice_fill_tbl - Reads content of a single table type into database
2734  * @hw: pointer to the hardware structure
2735  * @block_id: Block ID of the table to copy
2736  * @sid: Section ID of the table to copy
2737  *
2738  * Will attempt to read the entire content of a given table of a single block
2739  * into the driver database. We assume that the buffer will always
2740  * be as large as or larger than the data contained in the package. If
2741  * this condition is not met, there is most likely an error in the package
2742  * contents.
2743  */
2744 static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
2745 {
2746 	u32 dst_len, sect_len, offset = 0;
2747 	struct ice_prof_redir_section *pr;
2748 	struct ice_prof_id_section *pid;
2749 	struct ice_xlt1_section *xlt1;
2750 	struct ice_xlt2_section *xlt2;
2751 	struct ice_sw_fv_section *es;
2752 	struct ice_pkg_enum state;
2753 	u8 *src, *dst;
2754 	void *sect;
2755 
2756 	/* if the HW segment pointer is null then the first iteration of
2757 	 * ice_pkg_enum_section() will fail. In this case the HW tables will
2758 	 * not be filled and we simply return.
2759 	 */
2760 	if (!hw->seg) {
2761 		ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n");
2762 		return;
2763 	}
2764 
2765 	memset(&state, 0, sizeof(state));
2766 
2767 	sect = ice_pkg_enum_section(hw->seg, &state, sid);
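	/* ice_pkg_enum_section() starts a scan of all sections with this SID
	 * when passed hw->seg, and continues the same scan when passed NULL
	 * together with the state kept in 'state' (see the call at the bottom
	 * of the loop below).
	 */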
2768 
2769 	while (sect) {
2770 		switch (sid) {
2771 		case ICE_SID_XLT1_SW:
2772 		case ICE_SID_XLT1_FD:
2773 		case ICE_SID_XLT1_RSS:
2774 		case ICE_SID_XLT1_ACL:
2775 		case ICE_SID_XLT1_PE:
2776 			xlt1 = (struct ice_xlt1_section *)sect;
2777 			src = xlt1->value;
2778 			sect_len = le16_to_cpu(xlt1->count) *
2779 				sizeof(*hw->blk[block_id].xlt1.t);
2780 			dst = hw->blk[block_id].xlt1.t;
2781 			dst_len = hw->blk[block_id].xlt1.count *
2782 				sizeof(*hw->blk[block_id].xlt1.t);
2783 			break;
2784 		case ICE_SID_XLT2_SW:
2785 		case ICE_SID_XLT2_FD:
2786 		case ICE_SID_XLT2_RSS:
2787 		case ICE_SID_XLT2_ACL:
2788 		case ICE_SID_XLT2_PE:
2789 			xlt2 = (struct ice_xlt2_section *)sect;
2790 			src = (__force u8 *)xlt2->value;
2791 			sect_len = le16_to_cpu(xlt2->count) *
2792 				sizeof(*hw->blk[block_id].xlt2.t);
2793 			dst = (u8 *)hw->blk[block_id].xlt2.t;
2794 			dst_len = hw->blk[block_id].xlt2.count *
2795 				sizeof(*hw->blk[block_id].xlt2.t);
2796 			break;
2797 		case ICE_SID_PROFID_TCAM_SW:
2798 		case ICE_SID_PROFID_TCAM_FD:
2799 		case ICE_SID_PROFID_TCAM_RSS:
2800 		case ICE_SID_PROFID_TCAM_ACL:
2801 		case ICE_SID_PROFID_TCAM_PE:
2802 			pid = (struct ice_prof_id_section *)sect;
2803 			src = (u8 *)pid->entry;
2804 			sect_len = le16_to_cpu(pid->count) *
2805 				sizeof(*hw->blk[block_id].prof.t);
2806 			dst = (u8 *)hw->blk[block_id].prof.t;
2807 			dst_len = hw->blk[block_id].prof.count *
2808 				sizeof(*hw->blk[block_id].prof.t);
2809 			break;
2810 		case ICE_SID_PROFID_REDIR_SW:
2811 		case ICE_SID_PROFID_REDIR_FD:
2812 		case ICE_SID_PROFID_REDIR_RSS:
2813 		case ICE_SID_PROFID_REDIR_ACL:
2814 		case ICE_SID_PROFID_REDIR_PE:
2815 			pr = (struct ice_prof_redir_section *)sect;
2816 			src = pr->redir_value;
2817 			sect_len = le16_to_cpu(pr->count) *
2818 				sizeof(*hw->blk[block_id].prof_redir.t);
2819 			dst = hw->blk[block_id].prof_redir.t;
2820 			dst_len = hw->blk[block_id].prof_redir.count *
2821 				sizeof(*hw->blk[block_id].prof_redir.t);
2822 			break;
2823 		case ICE_SID_FLD_VEC_SW:
2824 		case ICE_SID_FLD_VEC_FD:
2825 		case ICE_SID_FLD_VEC_RSS:
2826 		case ICE_SID_FLD_VEC_ACL:
2827 		case ICE_SID_FLD_VEC_PE:
2828 			es = (struct ice_sw_fv_section *)sect;
2829 			src = (u8 *)es->fv;
2830 			sect_len = (u32)(le16_to_cpu(es->count) *
2831 					 hw->blk[block_id].es.fvw) *
2832 				sizeof(*hw->blk[block_id].es.t);
2833 			dst = (u8 *)hw->blk[block_id].es.t;
2834 			dst_len = (u32)(hw->blk[block_id].es.count *
2835 					hw->blk[block_id].es.fvw) *
2836 				sizeof(*hw->blk[block_id].es.t);
2837 			break;
2838 		default:
2839 			return;
2840 		}
2841 
2842 		/* if the section offset exceeds destination length, terminate
2843 		 * table fill.
2844 		 */
2845 		if (offset > dst_len)
2846 			return;
2847 
2848 		/* if the sum of section size and offset exceeds the destination
2849 		 * size then we are out of bounds of the HW table size for that PF.
2850 		 * Change the section length to fill only the remaining table space
2851 		 * of that PF.
2852 		 */
2853 		if ((offset + sect_len) > dst_len)
2854 			sect_len = dst_len - offset;
2855 
2856 		memcpy(dst + offset, src, sect_len);
2857 		offset += sect_len;
2858 		sect = ice_pkg_enum_section(NULL, &state, sid);
2859 	}
2860 }
2861 
2862 /**
2863  * ice_fill_blk_tbls - Read package context for tables
2864  * @hw: pointer to the hardware structure
2865  *
2866  * Reads the current package contents and populates the driver
2867  * database with the data iteratively for all advanced feature
2868  * blocks. Assumes that the HW tables have already been allocated.
2869  */
2870 void ice_fill_blk_tbls(struct ice_hw *hw)
2871 {
2872 	u8 i;
2873 
2874 	for (i = 0; i < ICE_BLK_COUNT; i++) {
2875 		enum ice_block blk_id = (enum ice_block)i;
2876 
2877 		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid);
2878 		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid);
2879 		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid);
2880 		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid);
2881 		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid);
2882 	}
2883 
2884 	ice_init_sw_db(hw);
2885 }
2886 
2887 /**
2888  * ice_free_prof_map - free profile map
2889  * @hw: pointer to the hardware structure
2890  * @blk_idx: HW block index
2891  */
2892 static void ice_free_prof_map(struct ice_hw *hw, u8 blk_idx)
2893 {
2894 	struct ice_es *es = &hw->blk[blk_idx].es;
2895 	struct ice_prof_map *del, *tmp;
2896 
2897 	mutex_lock(&es->prof_map_lock);
2898 	list_for_each_entry_safe(del, tmp, &es->prof_map, list) {
2899 		list_del(&del->list);
2900 		devm_kfree(ice_hw_to_dev(hw), del);
2901 	}
2902 	INIT_LIST_HEAD(&es->prof_map);
2903 	mutex_unlock(&es->prof_map_lock);
2904 }
2905 
2906 /**
2907  * ice_free_flow_profs - free flow profile entries
2908  * @hw: pointer to the hardware structure
2909  * @blk_idx: HW block index
2910  */
2911 static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx)
2912 {
2913 	struct ice_flow_prof *p, *tmp;
2914 
2915 	mutex_lock(&hw->fl_profs_locks[blk_idx]);
2916 	list_for_each_entry_safe(p, tmp, &hw->fl_profs[blk_idx], l_entry) {
2917 		struct ice_flow_entry *e, *t;
2918 
2919 		list_for_each_entry_safe(e, t, &p->entries, l_entry)
2920 			ice_flow_rem_entry(hw, (enum ice_block)blk_idx,
2921 					   ICE_FLOW_ENTRY_HNDL(e));
2922 
2923 		list_del(&p->l_entry);
2924 
2925 		mutex_destroy(&p->entries_lock);
2926 		devm_kfree(ice_hw_to_dev(hw), p);
2927 	}
2928 	mutex_unlock(&hw->fl_profs_locks[blk_idx]);
2929 
2930 	/* if driver is in reset and tables are being cleared
2931 	 * re-initialize the flow profile list heads
2932 	 */
2933 	INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
2934 }
2935 
2936 /**
2937  * ice_free_vsig_tbl - free complete VSIG table entries
2938  * @hw: pointer to the hardware structure
2939  * @blk: the HW block on which to free the VSIG table entries
2940  */
2941 static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk)
2942 {
2943 	u16 i;
2944 
2945 	if (!hw->blk[blk].xlt2.vsig_tbl)
2946 		return;
2947 
2948 	for (i = 1; i < ICE_MAX_VSIGS; i++)
2949 		if (hw->blk[blk].xlt2.vsig_tbl[i].in_use)
2950 			ice_vsig_free(hw, blk, i);
2951 }
2952 
2953 /**
2954  * ice_free_hw_tbls - free hardware table memory
2955  * @hw: pointer to the hardware structure
2956  */
2957 void ice_free_hw_tbls(struct ice_hw *hw)
2958 {
2959 	struct ice_rss_cfg *r, *rt;
2960 	u8 i;
2961 
2962 	for (i = 0; i < ICE_BLK_COUNT; i++) {
2963 		if (hw->blk[i].is_list_init) {
2964 			struct ice_es *es = &hw->blk[i].es;
2965 
2966 			ice_free_prof_map(hw, i);
2967 			mutex_destroy(&es->prof_map_lock);
2968 
2969 			ice_free_flow_profs(hw, i);
2970 			mutex_destroy(&hw->fl_profs_locks[i]);
2971 
2972 			hw->blk[i].is_list_init = false;
2973 		}
2974 		ice_free_vsig_tbl(hw, (enum ice_block)i);
2975 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptypes);
2976 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptg_tbl);
2977 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.t);
2978 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.t);
2979 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsig_tbl);
2980 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsis);
2981 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof.t);
2982 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_redir.t);
2983 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.t);
2984 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.ref_count);
2985 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written);
2986 	}
2987 
2988 	list_for_each_entry_safe(r, rt, &hw->rss_list_head, l_entry) {
2989 		list_del(&r->l_entry);
2990 		devm_kfree(ice_hw_to_dev(hw), r);
2991 	}
2992 	mutex_destroy(&hw->rss_locks);
2993 	memset(hw->blk, 0, sizeof(hw->blk));
2994 }
2995 
2996 /**
2997  * ice_init_flow_profs - init flow profile locks and list heads
2998  * @hw: pointer to the hardware structure
2999  * @blk_idx: HW block index
3000  */
3001 static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx)
3002 {
3003 	mutex_init(&hw->fl_profs_locks[blk_idx]);
3004 	INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
3005 }
3006 
3007 /**
3008  * ice_clear_hw_tbls - clear HW tables and flow profiles
3009  * @hw: pointer to the hardware structure
3010  */
3011 void ice_clear_hw_tbls(struct ice_hw *hw)
3012 {
3013 	u8 i;
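
	/* Unlike ice_free_hw_tbls(), this keeps all of the devm allocations
	 * and only zeroes their contents, so the tables can be repopulated
	 * (e.g. by ice_fill_blk_tbls() after a reset) without reallocating.
	 */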
3014 
3015 	for (i = 0; i < ICE_BLK_COUNT; i++) {
3016 		struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
3017 		struct ice_prof_tcam *prof = &hw->blk[i].prof;
3018 		struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
3019 		struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
3020 		struct ice_es *es = &hw->blk[i].es;
3021 
3022 		if (hw->blk[i].is_list_init) {
3023 			ice_free_prof_map(hw, i);
3024 			ice_free_flow_profs(hw, i);
3025 		}
3026 
3027 		ice_free_vsig_tbl(hw, (enum ice_block)i);
3028 
3029 		memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes));
3030 		memset(xlt1->ptg_tbl, 0,
3031 		       ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl));
3032 		memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t));
3033 
3034 		memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis));
3035 		memset(xlt2->vsig_tbl, 0,
3036 		       xlt2->count * sizeof(*xlt2->vsig_tbl));
3037 		memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t));
3038 
3039 		memset(prof->t, 0, prof->count * sizeof(*prof->t));
3040 		memset(prof_redir->t, 0,
3041 		       prof_redir->count * sizeof(*prof_redir->t));
3042 
3043 		memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw);
3044 		memset(es->ref_count, 0, es->count * sizeof(*es->ref_count));
3045 		memset(es->written, 0, es->count * sizeof(*es->written));
3046 	}
3047 }
3048 
3049 /**
3050  * ice_init_hw_tbls - init hardware table memory
3051  * @hw: pointer to the hardware structure
3052  */
3053 enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
3054 {
3055 	u8 i;
3056 
3057 	mutex_init(&hw->rss_locks);
3058 	INIT_LIST_HEAD(&hw->rss_list_head);
3059 	for (i = 0; i < ICE_BLK_COUNT; i++) {
3060 		struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
3061 		struct ice_prof_tcam *prof = &hw->blk[i].prof;
3062 		struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
3063 		struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
3064 		struct ice_es *es = &hw->blk[i].es;
3065 		u16 j;
3066 
3067 		if (hw->blk[i].is_list_init)
3068 			continue;
3069 
3070 		ice_init_flow_profs(hw, i);
3071 		mutex_init(&es->prof_map_lock);
3072 		INIT_LIST_HEAD(&es->prof_map);
3073 		hw->blk[i].is_list_init = true;
3074 
3075 		hw->blk[i].overwrite = blk_sizes[i].overwrite;
3076 		es->reverse = blk_sizes[i].reverse;
3077 
3078 		xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF];
3079 		xlt1->count = blk_sizes[i].xlt1;
3080 
3081 		xlt1->ptypes = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count,
3082 					    sizeof(*xlt1->ptypes), GFP_KERNEL);
3083 
3084 		if (!xlt1->ptypes)
3085 			goto err;
3086 
3087 		xlt1->ptg_tbl = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_PTGS,
3088 					     sizeof(*xlt1->ptg_tbl),
3089 					     GFP_KERNEL);
3090 
3091 		if (!xlt1->ptg_tbl)
3092 			goto err;
3093 
3094 		xlt1->t = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count,
3095 				       sizeof(*xlt1->t), GFP_KERNEL);
3096 		if (!xlt1->t)
3097 			goto err;
3098 
3099 		xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF];
3100 		xlt2->count = blk_sizes[i].xlt2;
3101 
3102 		xlt2->vsis = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
3103 					  sizeof(*xlt2->vsis), GFP_KERNEL);
3104 
3105 		if (!xlt2->vsis)
3106 			goto err;
3107 
3108 		xlt2->vsig_tbl = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
3109 					      sizeof(*xlt2->vsig_tbl),
3110 					      GFP_KERNEL);
3111 		if (!xlt2->vsig_tbl)
3112 			goto err;
3113 
3114 		for (j = 0; j < xlt2->count; j++)
3115 			INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst);
3116 
3117 		xlt2->t = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
3118 				       sizeof(*xlt2->t), GFP_KERNEL);
3119 		if (!xlt2->t)
3120 			goto err;
3121 
3122 		prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF];
3123 		prof->count = blk_sizes[i].prof_tcam;
3124 		prof->max_prof_id = blk_sizes[i].prof_id;
3125 		prof->cdid_bits = blk_sizes[i].prof_cdid_bits;
3126 		prof->t = devm_kcalloc(ice_hw_to_dev(hw), prof->count,
3127 				       sizeof(*prof->t), GFP_KERNEL);
3128 
3129 		if (!prof->t)
3130 			goto err;
3131 
3132 		prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF];
3133 		prof_redir->count = blk_sizes[i].prof_redir;
3134 		prof_redir->t = devm_kcalloc(ice_hw_to_dev(hw),
3135 					     prof_redir->count,
3136 					     sizeof(*prof_redir->t),
3137 					     GFP_KERNEL);
3138 
3139 		if (!prof_redir->t)
3140 			goto err;
3141 
3142 		es->sid = ice_blk_sids[i][ICE_SID_ES_OFF];
3143 		es->count = blk_sizes[i].es;
3144 		es->fvw = blk_sizes[i].fvw;
3145 		es->t = devm_kcalloc(ice_hw_to_dev(hw),
3146 				     (u32)(es->count * es->fvw),
3147 				     sizeof(*es->t), GFP_KERNEL);
3148 		if (!es->t)
3149 			goto err;
3150 
3151 		es->ref_count = devm_kcalloc(ice_hw_to_dev(hw), es->count,
3152 					     sizeof(*es->ref_count),
3153 					     GFP_KERNEL);
3154 		if (!es->ref_count)
3155 			goto err;
3156 
3157 		es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count,
3158 					   sizeof(*es->written), GFP_KERNEL);
3159 		if (!es->written)
3160 			goto err;
3161 	}
3162 	return 0;
3163 
3164 err:
3165 	ice_free_hw_tbls(hw);
3166 	return ICE_ERR_NO_MEMORY;
3167 }
3168 
3169 /**
3170  * ice_prof_gen_key - generate profile ID key
3171  * @hw: pointer to the HW struct
3172  * @blk: the block in which to write profile ID to
3173  * @ptg: packet type group (PTG) portion of key
3174  * @vsig: VSIG portion of key
3175  * @cdid: CDID portion of key
3176  * @flags: flag portion of key
3177  * @vl_msk: valid mask
3178  * @dc_msk: don't care mask
3179  * @nm_msk: never match mask
3180  * @key: output of profile ID key
3181  */
3182 static enum ice_status
3183 ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
3184 		 u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
3185 		 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ],
3186 		 u8 key[ICE_TCAM_KEY_SZ])
3187 {
3188 	struct ice_prof_id_key inkey;
3189 
3190 	inkey.xlt1 = ptg;
3191 	inkey.xlt2_cdid = cpu_to_le16(vsig);
3192 	inkey.flags = cpu_to_le16(flags);
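
	/* Note: blk_sizes[] currently defines zero CDID bits for every block,
	 * so the non-zero cases below are not exercised; they are presumably
	 * kept for hardware profiles that do use CDID one-hot bits in the key.
	 */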
3193 
3194 	switch (hw->blk[blk].prof.cdid_bits) {
3195 	case 0:
3196 		break;
3197 	case 2:
3198 #define ICE_CD_2_M 0xC000U
3199 #define ICE_CD_2_S 14
3200 		inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_2_M);
3201 		inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_2_S);
3202 		break;
3203 	case 4:
3204 #define ICE_CD_4_M 0xF000U
3205 #define ICE_CD_4_S 12
3206 		inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_4_M);
3207 		inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_4_S);
3208 		break;
3209 	case 8:
3210 #define ICE_CD_8_M 0xFF00U
3211 #define ICE_CD_8_S 8
3212 		inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_8_M);
3213 		inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_8_S);
3214 		break;
3215 	default:
3216 		ice_debug(hw, ICE_DBG_PKG, "Error in profile config\n");
3217 		break;
3218 	}
3219 
3220 	return ice_set_key(key, ICE_TCAM_KEY_SZ, (u8 *)&inkey, vl_msk, dc_msk,
3221 			   nm_msk, 0, ICE_TCAM_KEY_SZ / 2);
3222 }
3223 
3224 /**
3225  * ice_tcam_write_entry - write TCAM entry
3226  * @hw: pointer to the HW struct
3227  * @blk: the block in which to write profile ID to
3228  * @idx: the entry index to write to
3229  * @prof_id: profile ID
3230  * @ptg: packet type group (PTG) portion of key
3231  * @vsig: VSIG portion of key
3232  * @cdid: CDID portion of key
3233  * @flags: flag portion of key
3234  * @vl_msk: valid mask
3235  * @dc_msk: don't care mask
3236  * @nm_msk: never match mask
3237  */
3238 static enum ice_status
3239 ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
3240 		     u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags,
3241 		     u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
3242 		     u8 dc_msk[ICE_TCAM_KEY_VAL_SZ],
3243 		     u8 nm_msk[ICE_TCAM_KEY_VAL_SZ])
3244 {
3246 	enum ice_status status;
3247 
3248 	status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk,
3249 				  dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key);
3250 	if (!status) {
3251 		hw->blk[blk].prof.t[idx].addr = cpu_to_le16(idx);
3252 		hw->blk[blk].prof.t[idx].prof_id = prof_id;
3253 	}
3254 
3255 	return status;
3256 }
3257 
3258 /**
3259  * ice_vsig_get_ref - returns the number of VSIs belonging to a VSIG
3260  * @hw: pointer to the hardware structure
3261  * @blk: HW block
3262  * @vsig: VSIG to query
3263  * @refs: pointer to variable to receive the reference count
3264  */
3265 static enum ice_status
3266 ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
3267 {
3268 	u16 idx = vsig & ICE_VSIG_IDX_M;
3269 	struct ice_vsig_vsi *ptr;
3270 
3271 	*refs = 0;
3272 
3273 	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
3274 		return ICE_ERR_DOES_NOT_EXIST;
3275 
3276 	ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
3277 	while (ptr) {
3278 		(*refs)++;
3279 		ptr = ptr->next_vsi;
3280 	}
3281 
3282 	return 0;
3283 }
3284 
3285 /**
3286  * ice_has_prof_vsig - check to see if VSIG has a specific profile
3287  * @hw: pointer to the hardware structure
3288  * @blk: HW block
3289  * @vsig: VSIG to check against
3290  * @hdl: profile handle
3291  */
3292 static bool
3293 ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl)
3294 {
3295 	u16 idx = vsig & ICE_VSIG_IDX_M;
3296 	struct ice_vsig_prof *ent;
3297 
3298 	list_for_each_entry(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
3299 			    list)
3300 		if (ent->profile_cookie == hdl)
3301 			return true;
3302 
3303 	ice_debug(hw, ICE_DBG_INIT,
3304 		  "Characteristic list for VSI group %d not found.\n",
3305 		  vsig);
3306 	return false;
3307 }
3308 
3309 /**
3310  * ice_prof_bld_es - build profile ID extraction sequence changes
3311  * @hw: pointer to the HW struct
3312  * @blk: hardware block
3313  * @bld: the update package buffer build to add to
3314  * @chgs: the list of changes to make in hardware
3315  */
3316 static enum ice_status
3317 ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
3318 		struct ice_buf_build *bld, struct list_head *chgs)
3319 {
3320 	u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word);
3321 	struct ice_chs_chg *tmp;
3322 
3323 	list_for_each_entry(tmp, chgs, list_entry)
3324 		if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) {
3325 			u16 off = tmp->prof_id * hw->blk[blk].es.fvw;
3326 			struct ice_pkg_es *p;
3327 			u32 id;
3328 
3329 			id = ice_sect_id(blk, ICE_VEC_TBL);
3330 			p = ice_pkg_buf_alloc_section(bld, id,
3331 						      struct_size(p, es, 1) +
3332 						      vec_size -
3333 						      sizeof(p->es[0]));
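			/* the section holds a single extraction-sequence
			 * entry whose es[] payload is one full field vector
			 * (fvw words); the arithmetic above replaces
			 * struct_size()'s single nominal es[] element with
			 * that full vector
			 */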
3334 
3335 			if (!p)
3336 				return ICE_ERR_MAX_LIMIT;
3337 
3338 			p->count = cpu_to_le16(1);
3339 			p->offset = cpu_to_le16(tmp->prof_id);
3340 
3341 			memcpy(p->es, &hw->blk[blk].es.t[off], vec_size);
3342 		}
3343 
3344 	return 0;
3345 }
3346 
3347 /**
3348  * ice_prof_bld_tcam - build profile ID TCAM changes
3349  * @hw: pointer to the HW struct
3350  * @blk: hardware block
3351  * @bld: the update package buffer build to add to
3352  * @chgs: the list of changes to make in hardware
3353  */
3354 static enum ice_status
3355 ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
3356 		  struct ice_buf_build *bld, struct list_head *chgs)
3357 {
3358 	struct ice_chs_chg *tmp;
3359 
3360 	list_for_each_entry(tmp, chgs, list_entry)
3361 		if (tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) {
3362 			struct ice_prof_id_section *p;
3363 			u32 id;
3364 
3365 			id = ice_sect_id(blk, ICE_PROF_TCAM);
3366 			p = ice_pkg_buf_alloc_section(bld, id,
3367 						      struct_size(p, entry, 1));
3368 
3369 			if (!p)
3370 				return ICE_ERR_MAX_LIMIT;
3371 
3372 			p->count = cpu_to_le16(1);
3373 			p->entry[0].addr = cpu_to_le16(tmp->tcam_idx);
3374 			p->entry[0].prof_id = tmp->prof_id;
3375 
3376 			memcpy(p->entry[0].key,
3377 			       &hw->blk[blk].prof.t[tmp->tcam_idx].key,
3378 			       sizeof(hw->blk[blk].prof.t->key));
3379 		}
3380 
3381 	return 0;
3382 }
3383 
3384 /**
3385  * ice_prof_bld_xlt1 - build XLT1 changes
3386  * @blk: hardware block
3387  * @bld: the update package buffer build to add to
3388  * @chgs: the list of changes to make in hardware
3389  */
3390 static enum ice_status
3391 ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
3392 		  struct list_head *chgs)
3393 {
3394 	struct ice_chs_chg *tmp;
3395 
3396 	list_for_each_entry(tmp, chgs, list_entry)
3397 		if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) {
3398 			struct ice_xlt1_section *p;
3399 			u32 id;
3400 
3401 			id = ice_sect_id(blk, ICE_XLT1);
3402 			p = ice_pkg_buf_alloc_section(bld, id,
3403 						      struct_size(p, value, 1));
3404 
3405 			if (!p)
3406 				return ICE_ERR_MAX_LIMIT;
3407 
3408 			p->count = cpu_to_le16(1);
3409 			p->offset = cpu_to_le16(tmp->ptype);
3410 			p->value[0] = tmp->ptg;
3411 		}
3412 
3413 	return 0;
3414 }
3415 
3416 /**
3417  * ice_prof_bld_xlt2 - build XLT2 changes
3418  * @blk: hardware block
3419  * @bld: the update package buffer build to add to
3420  * @chgs: the list of changes to make in hardware
3421  */
3422 static enum ice_status
3423 ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
3424 		  struct list_head *chgs)
3425 {
3426 	struct ice_chs_chg *tmp;
3427 
3428 	list_for_each_entry(tmp, chgs, list_entry) {
3429 		struct ice_xlt2_section *p;
3430 		u32 id;
3431 
3432 		switch (tmp->type) {
3433 		case ICE_VSIG_ADD:
3434 		case ICE_VSI_MOVE:
3435 		case ICE_VSIG_REM:
3436 			id = ice_sect_id(blk, ICE_XLT2);
3437 			p = ice_pkg_buf_alloc_section(bld, id,
3438 						      struct_size(p, value, 1));
3439 
3440 			if (!p)
3441 				return ICE_ERR_MAX_LIMIT;
3442 
3443 			p->count = cpu_to_le16(1);
3444 			p->offset = cpu_to_le16(tmp->vsi);
3445 			p->value[0] = cpu_to_le16(tmp->vsig);
3446 			break;
3447 		default:
3448 			break;
3449 		}
3450 	}
3451 
3452 	return 0;
3453 }
3454 
3455 /**
3456  * ice_upd_prof_hw - update hardware using the change list
3457  * @hw: pointer to the HW struct
3458  * @blk: hardware block
3459  * @chgs: the list of changes to make in hardware
3460  */
3461 static enum ice_status
3462 ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
3463 		struct list_head *chgs)
3464 {
3465 	struct ice_buf_build *b;
3466 	struct ice_chs_chg *tmp;
3467 	enum ice_status status;
3468 	u16 pkg_sects;
3469 	u16 xlt1 = 0;
3470 	u16 xlt2 = 0;
3471 	u16 tcam = 0;
3472 	u16 es = 0;
3473 	u16 sects;
3474 
3475 	/* count number of sections we need */
3476 	list_for_each_entry(tmp, chgs, list_entry) {
3477 		switch (tmp->type) {
3478 		case ICE_PTG_ES_ADD:
3479 			if (tmp->add_ptg)
3480 				xlt1++;
3481 			if (tmp->add_prof)
3482 				es++;
3483 			break;
3484 		case ICE_TCAM_ADD:
3485 			tcam++;
3486 			break;
3487 		case ICE_VSIG_ADD:
3488 		case ICE_VSI_MOVE:
3489 		case ICE_VSIG_REM:
3490 			xlt2++;
3491 			break;
3492 		default:
3493 			break;
3494 		}
3495 	}
3496 	sects = xlt1 + xlt2 + tcam + es;
3497 
3498 	if (!sects)
3499 		return 0;
3500 
3501 	/* Build update package buffer */
3502 	b = ice_pkg_buf_alloc(hw);
3503 	if (!b)
3504 		return ICE_ERR_NO_MEMORY;
3505 
3506 	status = ice_pkg_buf_reserve_section(b, sects);
3507 	if (status)
3508 		goto error_tmp;
3509 
3510 	/* Preserve order of table update: ES, TCAM, PTG, VSIG */
3511 	if (es) {
3512 		status = ice_prof_bld_es(hw, blk, b, chgs);
3513 		if (status)
3514 			goto error_tmp;
3515 	}
3516 
3517 	if (tcam) {
3518 		status = ice_prof_bld_tcam(hw, blk, b, chgs);
3519 		if (status)
3520 			goto error_tmp;
3521 	}
3522 
3523 	if (xlt1) {
3524 		status = ice_prof_bld_xlt1(blk, b, chgs);
3525 		if (status)
3526 			goto error_tmp;
3527 	}
3528 
3529 	if (xlt2) {
3530 		status = ice_prof_bld_xlt2(blk, b, chgs);
3531 		if (status)
3532 			goto error_tmp;
3533 	}
3534 
3535 	/* After the package buffer build, check that the section count in the
3536 	 * buffer is non-zero and matches the number of sections detected for
3537 	 * the package update.
3538 	 */
3539 	pkg_sects = ice_pkg_buf_get_active_sections(b);
3540 	if (!pkg_sects || pkg_sects != sects) {
3541 		status = ICE_ERR_INVAL_SIZE;
3542 		goto error_tmp;
3543 	}
3544 
3545 	/* update package */
3546 	status = ice_update_pkg(hw, ice_pkg_buf(b), 1);
3547 	if (status == ICE_ERR_AQ_ERROR)
3548 		ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n");
3549 
3550 error_tmp:
3551 	ice_pkg_buf_free(hw, b);
3552 	return status;
3553 }
3554 
3555 /**
3556  * ice_update_fd_mask - set Flow Director Field Vector mask for a profile
3557  * @hw: pointer to the HW struct
3558  * @prof_id: profile ID
3559  * @mask_sel: mask select
3560  *
3561  * This function enables any of the masks selected by the mask select parameter
3562  * for the profile specified.
3563  */
3564 static void ice_update_fd_mask(struct ice_hw *hw, u16 prof_id, u32 mask_sel)
3565 {
3566 	wr32(hw, GLQF_FDMASK_SEL(prof_id), mask_sel);
3567 
3568 	ice_debug(hw, ICE_DBG_INIT, "fd mask(%d): %x = %x\n", prof_id,
3569 		  GLQF_FDMASK_SEL(prof_id), mask_sel);
3570 }
3571 
3572 struct ice_fd_src_dst_pair {
3573 	u8 prot_id;
3574 	u8 count;
3575 	u16 off;
3576 };
3577 
3578 static const struct ice_fd_src_dst_pair ice_fd_pairs[] = {
3579 	/* These are defined in pairs */
3580 	{ ICE_PROT_IPV4_OF_OR_S, 2, 12 },
3581 	{ ICE_PROT_IPV4_OF_OR_S, 2, 16 },
3582 
3583 	{ ICE_PROT_IPV4_IL, 2, 12 },
3584 	{ ICE_PROT_IPV4_IL, 2, 16 },
3585 
3586 	{ ICE_PROT_IPV6_OF_OR_S, 8, 8 },
3587 	{ ICE_PROT_IPV6_OF_OR_S, 8, 24 },
3588 
3589 	{ ICE_PROT_IPV6_IL, 8, 8 },
3590 	{ ICE_PROT_IPV6_IL, 8, 24 },
3591 
3592 	{ ICE_PROT_TCP_IL, 1, 0 },
3593 	{ ICE_PROT_TCP_IL, 1, 2 },
3594 
3595 	{ ICE_PROT_UDP_OF, 1, 0 },
3596 	{ ICE_PROT_UDP_OF, 1, 2 },
3597 
3598 	{ ICE_PROT_UDP_IL_OR_S, 1, 0 },
3599 	{ ICE_PROT_UDP_IL_OR_S, 1, 2 },
3600 
3601 	{ ICE_PROT_SCTP_IL, 1, 0 },
3602 	{ ICE_PROT_SCTP_IL, 1, 2 }
3603 };
3604 
3605 #define ICE_FD_SRC_DST_PAIR_COUNT	ARRAY_SIZE(ice_fd_pairs)
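
/* Note (illustrative, inferred from the offsets above): ice_update_fd_swap()
 * below consumes this table two entries at a time, with the even index
 * describing the source field and the odd index the matching destination
 * field. Entries 0 and 1, for example, cover the outer IPv4 source address
 * (offset 12) and destination address (offset 16), each two FV words wide.
 */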
3606 
3607 /**
3608  * ice_update_fd_swap - set register appropriately for an FD FV extraction
3609  * @hw: pointer to the HW struct
3610  * @prof_id: profile ID
3611  * @es: extraction sequence (length of array is determined by the block)
3612  */
3613 static enum ice_status
3614 ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
3615 {
3616 	DECLARE_BITMAP(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
3617 	u8 pair_start[ICE_FD_SRC_DST_PAIR_COUNT] = { 0 };
3618 #define ICE_FD_FV_NOT_FOUND (-2)
3619 	s8 first_free = ICE_FD_FV_NOT_FOUND;
3620 	u8 used[ICE_MAX_FV_WORDS] = { 0 };
3621 	s8 orig_free, si;
3622 	u32 mask_sel = 0;
3623 	u8 i, j, k;
3624 
3625 	bitmap_zero(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
3626 
3627 	/* This code assumes that the Flow Director field vectors are assigned
3628 	 * from the end of the FV indexes working towards the zero index, that
3629 	 * only complete fields will be included and will be consecutive, and
3630 	 * that there are no gaps between valid indexes.
3631 	 */
3632 
3633 	/* Determine swap fields present */
3634 	for (i = 0; i < hw->blk[ICE_BLK_FD].es.fvw; i++) {
3635 		/* Find the first free entry, assuming right to left population.
3636 		 * This is where we can start adding additional pairs if needed.
3637 		 */
3638 		if (first_free == ICE_FD_FV_NOT_FOUND && es[i].prot_id !=
3639 		    ICE_PROT_INVALID)
3640 			first_free = i - 1;
3641 
3642 		for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
3643 			if (es[i].prot_id == ice_fd_pairs[j].prot_id &&
3644 			    es[i].off == ice_fd_pairs[j].off) {
3645 				set_bit(j, pair_list);
3646 				pair_start[j] = i;
3647 			}
3648 	}
3649 
3650 	orig_free = first_free;
3651 
3652 	/* determine missing swap fields that need to be added */
3653 	for (i = 0; i < ICE_FD_SRC_DST_PAIR_COUNT; i += 2) {
3654 		u8 bit1 = test_bit(i + 1, pair_list);
3655 		u8 bit0 = test_bit(i, pair_list);
3656 
3657 		if (bit0 ^ bit1) {
3658 			u8 index;
3659 
3660 			/* add the appropriate 'paired' entry */
3661 			if (!bit0)
3662 				index = i;
3663 			else
3664 				index = i + 1;
3665 
3666 			/* check for room */
3667 			if (first_free + 1 < (s8)ice_fd_pairs[index].count)
3668 				return ICE_ERR_MAX_LIMIT;
3669 
3670 			/* place in extraction sequence */
3671 			for (k = 0; k < ice_fd_pairs[index].count; k++) {
3672 				es[first_free - k].prot_id =
3673 					ice_fd_pairs[index].prot_id;
3674 				es[first_free - k].off =
3675 					ice_fd_pairs[index].off + (k * 2);
3676 
3677 				if (k > first_free)
3678 					return ICE_ERR_OUT_OF_RANGE;
3679 
3680 				/* keep track of non-relevant fields */
3681 				mask_sel |= BIT(first_free - k);
3682 			}
3683 
3684 			pair_start[index] = first_free;
3685 			first_free -= ice_fd_pairs[index].count;
3686 		}
3687 	}
3688 
3689 	/* fill in the swap array */
3690 	si = hw->blk[ICE_BLK_FD].es.fvw - 1;
3691 	while (si >= 0) {
3692 		u8 indexes_used = 1;
3693 
3694 		/* assume flat at this index */
3695 #define ICE_SWAP_VALID	0x80
3696 		used[si] = si | ICE_SWAP_VALID;
3697 
3698 		if (orig_free == ICE_FD_FV_NOT_FOUND || si <= orig_free) {
3699 			si -= indexes_used;
3700 			continue;
3701 		}
3702 
3703 		/* check for a swap location */
3704 		for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
3705 			if (es[si].prot_id == ice_fd_pairs[j].prot_id &&
3706 			    es[si].off == ice_fd_pairs[j].off) {
3707 				u8 idx;
3708 
3709 				/* determine the appropriate matching field */
3710 				idx = j + ((j % 2) ? -1 : 1);
3711 
3712 				indexes_used = ice_fd_pairs[idx].count;
3713 				for (k = 0; k < indexes_used; k++) {
3714 					used[si - k] = (pair_start[idx] - k) |
3715 						ICE_SWAP_VALID;
3716 				}
3717 
3718 				break;
3719 			}
3720 
3721 		si -= indexes_used;
3722 	}
3723 
3724 	/* for each set of 4 swap and 4 inset indexes, write the appropriate
3725 	 * register
3726 	 */
3727 	for (j = 0; j < hw->blk[ICE_BLK_FD].es.fvw / 4; j++) {
3728 		u32 raw_swap = 0;
3729 		u32 raw_in = 0;
3730 
3731 		for (k = 0; k < 4; k++) {
3732 			u8 idx;
3733 
3734 			idx = (j * 4) + k;
3735 			if (used[idx] && !(mask_sel & BIT(idx))) {
3736 				raw_swap |= used[idx] << (k * BITS_PER_BYTE);
3737 #define ICE_INSET_DFLT 0x9f
3738 				raw_in |= ICE_INSET_DFLT << (k * BITS_PER_BYTE);
3739 			}
3740 		}
3741 
3742 		/* write the appropriate swap register set */
3743 		wr32(hw, GLQF_FDSWAP(prof_id, j), raw_swap);
3744 
3745 		ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): %x = %08x\n",
3746 			  prof_id, j, GLQF_FDSWAP(prof_id, j), raw_swap);
3747 
3748 		/* write the appropriate inset register set */
3749 		wr32(hw, GLQF_FDINSET(prof_id, j), raw_in);
3750 
3751 		ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): %x = %08x\n",
3752 			  prof_id, j, GLQF_FDINSET(prof_id, j), raw_in);
3753 	}
3754 
3755 	/* initially clear the mask select for this profile */
3756 	ice_update_fd_mask(hw, prof_id, 0);
3757 
3758 	return 0;
3759 }
3760 
3761 /**
3762  * ice_add_prof - add profile
3763  * @hw: pointer to the HW struct
3764  * @blk: hardware block
3765  * @id: profile tracking ID
3766  * @ptypes: bitmap of ptypes, stored as a byte array (ICE_FLOW_PTYPE_MAX bits)
3767  * @es: extraction sequence (length of array is determined by the block)
3768  *
3769  * This function registers a profile, which matches a set of PTGs with a
3770  * particular extraction sequence. While the hardware profile is allocated
3771  * it will not be written until the first call to ice_add_prof_id_flow that
3772  * specifies the ID value used here.
3773  */
3774 enum ice_status
3775 ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
3776 	     struct ice_fv_word *es)
3777 {
3778 	u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
3779 	DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
3780 	struct ice_prof_map *prof;
3781 	enum ice_status status;
3782 	u8 byte = 0;
3783 	u8 prof_id;
3784 
3785 	bitmap_zero(ptgs_used, ICE_XLT1_CNT);
3786 
3787 	mutex_lock(&hw->blk[blk].es.prof_map_lock);
3788 
3789 	/* search for existing profile */
3790 	status = ice_find_prof_id(hw, blk, es, &prof_id);
3791 	if (status) {
3792 		/* allocate profile ID */
3793 		status = ice_alloc_prof_id(hw, blk, &prof_id);
3794 		if (status)
3795 			goto err_ice_add_prof;
3796 		if (blk == ICE_BLK_FD) {
3797 			/* For the Flow Director block, the extraction sequence
3798 			 * may need to be altered when there are paired fields
3799 			 * that have no match. This is necessary because for
3800 			 * Flow Director, src and dest fields need to be paired
3801 			 * for filter programming and these values are swapped
3802 			 * during Tx.
3803 			 */
3804 			status = ice_update_fd_swap(hw, prof_id, es);
3805 			if (status)
3806 				goto err_ice_add_prof;
3807 		}
3808 
3809 		/* and write new es */
3810 		ice_write_es(hw, blk, prof_id, es);
3811 	}
3812 
3813 	ice_prof_inc_ref(hw, blk, prof_id);
3814 
3815 	/* add profile info */
3816 	prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*prof), GFP_KERNEL);
3817 	if (!prof) {
3818 		status = ICE_ERR_NO_MEMORY;
3819 		goto err_ice_add_prof;
3820 	}
3821 
3822 	prof->profile_cookie = id;
3823 	prof->prof_id = prof_id;
3824 	prof->ptg_cnt = 0;
3825 	prof->context = 0;
3826 
3827 	/* build list of ptgs */
3828 	while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
3829 		u8 bit;
3830 
3831 		if (!ptypes[byte]) {
3832 			bytes--;
3833 			byte++;
3834 			continue;
3835 		}
3836 
3837 		/* Examine 8 bits per byte */
3838 		for_each_set_bit(bit, (unsigned long *)&ptypes[byte],
3839 				 BITS_PER_BYTE) {
3840 			u16 ptype;
3841 			u8 ptg;
3842 			u8 m;
3843 
3844 			ptype = byte * BITS_PER_BYTE + bit;
3845 
3846 			/* The package should place all ptypes in a non-zero
3847 			 * PTG, so the following call should never fail.
3848 			 */
3849 			if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
3850 				continue;
3851 
3852 			/* If PTG is already added, skip and continue */
3853 			if (test_bit(ptg, ptgs_used))
3854 				continue;
3855 
3856 			set_bit(ptg, ptgs_used);
3857 			prof->ptg[prof->ptg_cnt] = ptg;
3858 
3859 			if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
3860 				break;
3861 
3862 			/* if nothing is left in this byte, then exit */
3863 			m = ~(u8)((1 << (bit + 1)) - 1);
3864 			if (!(ptypes[byte] & m))
3865 				break;
3866 		}
3867 
3868 		bytes--;
3869 		byte++;
3870 	}
3871 
3872 	list_add(&prof->list, &hw->blk[blk].es.prof_map);
3873 	status = 0;
3874 
3875 err_ice_add_prof:
3876 	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
3877 	return status;
3878 }
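
/* Minimal usage sketch (hypothetical caller; in the driver the ptype bitmap
 * and extraction sequence are normally assembled by the flow code before this
 * call, and prof_cookie is whatever tracking ID the caller wants to use):
 *
 *	DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX) = { 0 };
 *	struct ice_fv_word es[ICE_MAX_FV_WORDS] = { 0 };
 *	enum ice_status status;
 *
 *	set_bit(some_ptype, ptypes);
 *	... fill es[] with the protocol ID/offset words to extract ...
 *	status = ice_add_prof(hw, ICE_BLK_RSS, prof_cookie, (u8 *)ptypes, es);
 */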
3879 
3880 /**
3881  * ice_search_prof_id - Search for a profile tracking ID
3882  * @hw: pointer to the HW struct
3883  * @blk: hardware block
3884  * @id: profile tracking ID
3885  *
3886  * This will search for a profile tracking ID which was previously added.
3887  * The profile map lock should be held before calling this function.
3888  */
3889 static struct ice_prof_map *
3890 ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
3891 {
3892 	struct ice_prof_map *entry = NULL;
3893 	struct ice_prof_map *map;
3894 
3895 	list_for_each_entry(map, &hw->blk[blk].es.prof_map, list)
3896 		if (map->profile_cookie == id) {
3897 			entry = map;
3898 			break;
3899 		}
3900 
3901 	return entry;
3902 }
3903 
3904 /**
3905  * ice_vsig_prof_id_count - count profiles in a VSIG
3906  * @hw: pointer to the HW struct
3907  * @blk: hardware block
3908  * @vsig: VSIG whose profiles are to be counted
3909  */
3910 static u16
3911 ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig)
3912 {
3913 	u16 idx = vsig & ICE_VSIG_IDX_M, count = 0;
3914 	struct ice_vsig_prof *p;
3915 
3916 	list_for_each_entry(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
3917 			    list)
3918 		count++;
3919 
3920 	return count;
3921 }
3922 
3923 /**
3924  * ice_rel_tcam_idx - release a TCAM index
3925  * @hw: pointer to the HW struct
3926  * @blk: hardware block
3927  * @idx: the index to release
3928  */
3929 static enum ice_status
3930 ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
3931 {
3932 	/* Masks to invoke a never match entry */
3933 	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
3934 	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF };
3935 	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
3936 	enum ice_status status;
3937 
3938 	/* write the TCAM entry */
3939 	status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk,
3940 				      dc_msk, nm_msk);
3941 	if (status)
3942 		return status;
3943 
3944 	/* release the TCAM entry */
3945 	status = ice_free_tcam_ent(hw, blk, idx);
3946 
3947 	return status;
3948 }
3949 
3950 /**
3951  * ice_rem_prof_id - remove one profile from a VSIG
3952  * @hw: pointer to the HW struct
3953  * @blk: hardware block
3954  * @prof: pointer to profile structure to remove
3955  */
3956 static enum ice_status
3957 ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
3958 		struct ice_vsig_prof *prof)
3959 {
3960 	enum ice_status status;
3961 	u16 i;
3962 
3963 	for (i = 0; i < prof->tcam_count; i++)
3964 		if (prof->tcam[i].in_use) {
3965 			prof->tcam[i].in_use = false;
3966 			status = ice_rel_tcam_idx(hw, blk,
3967 						  prof->tcam[i].tcam_idx);
3968 			if (status)
3969 				return ICE_ERR_HW_TABLE;
3970 		}
3971 
3972 	return 0;
3973 }
3974 
3975 /**
3976  * ice_rem_vsig - remove VSIG
3977  * @hw: pointer to the HW struct
3978  * @blk: hardware block
3979  * @vsig: the VSIG to remove
3980  * @chg: the change list
3981  */
3982 static enum ice_status
3983 ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
3984 	     struct list_head *chg)
3985 {
3986 	u16 idx = vsig & ICE_VSIG_IDX_M;
3987 	struct ice_vsig_vsi *vsi_cur;
3988 	struct ice_vsig_prof *d, *t;
3989 	enum ice_status status;
3990 
3991 	/* remove TCAM entries */
3992 	list_for_each_entry_safe(d, t,
3993 				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
3994 				 list) {
3995 		status = ice_rem_prof_id(hw, blk, d);
3996 		if (status)
3997 			return status;
3998 
3999 		list_del(&d->list);
4000 		devm_kfree(ice_hw_to_dev(hw), d);
4001 	}
4002 
4003 	/* Move all VSIs associated with this VSIG to the default VSIG */
4004 	vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
4005 	/* If the VSIG has at least 1 VSI then iterate through the list
4006 	 * and remove the VSIs before deleting the group.
4007 	 */
4008 	if (vsi_cur)
4009 		do {
4010 			struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
4011 			struct ice_chs_chg *p;
4012 
4013 			p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
4014 					 GFP_KERNEL);
4015 			if (!p)
4016 				return ICE_ERR_NO_MEMORY;
4017 
4018 			p->type = ICE_VSIG_REM;
4019 			p->orig_vsig = vsig;
4020 			p->vsig = ICE_DEFAULT_VSIG;
4021 			p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis;
4022 
4023 			list_add(&p->list_entry, chg);
4024 
4025 			vsi_cur = tmp;
4026 		} while (vsi_cur);
4027 
4028 	return ice_vsig_free(hw, blk, vsig);
4029 }
4030 
4031 /**
4032  * ice_rem_prof_id_vsig - remove a specific profile from a VSIG
4033  * @hw: pointer to the HW struct
4034  * @blk: hardware block
4035  * @vsig: VSIG to remove the profile from
4036  * @hdl: profile handle indicating which profile to remove
4037  * @chg: list to receive a record of changes
4038  */
4039 static enum ice_status
4040 ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
4041 		     struct list_head *chg)
4042 {
4043 	u16 idx = vsig & ICE_VSIG_IDX_M;
4044 	struct ice_vsig_prof *p, *t;
4045 	enum ice_status status;
4046 
4047 	list_for_each_entry_safe(p, t,
4048 				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4049 				 list)
4050 		if (p->profile_cookie == hdl) {
4051 			if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
4052 				/* this is the last profile, remove the VSIG */
4053 				return ice_rem_vsig(hw, blk, vsig, chg);
4054 
4055 			status = ice_rem_prof_id(hw, blk, p);
4056 			if (!status) {
4057 				list_del(&p->list);
4058 				devm_kfree(ice_hw_to_dev(hw), p);
4059 			}
4060 			return status;
4061 		}
4062 
4063 	return ICE_ERR_DOES_NOT_EXIST;
4064 }
4065 
4066 /**
4067  * ice_rem_flow_all - remove all flows with a particular profile
4068  * @hw: pointer to the HW struct
4069  * @blk: hardware block
4070  * @id: profile tracking ID
4071  */
4072 static enum ice_status
4073 ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
4074 {
4075 	struct ice_chs_chg *del, *tmp;
4076 	enum ice_status status;
4077 	struct list_head chg;
4078 	u16 i;
4079 
4080 	INIT_LIST_HEAD(&chg);
4081 
4082 	for (i = 1; i < ICE_MAX_VSIGS; i++)
4083 		if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) {
4084 			if (ice_has_prof_vsig(hw, blk, i, id)) {
4085 				status = ice_rem_prof_id_vsig(hw, blk, i, id,
4086 							      &chg);
4087 				if (status)
4088 					goto err_ice_rem_flow_all;
4089 			}
4090 		}
4091 
4092 	status = ice_upd_prof_hw(hw, blk, &chg);
4093 
4094 err_ice_rem_flow_all:
4095 	list_for_each_entry_safe(del, tmp, &chg, list_entry) {
4096 		list_del(&del->list_entry);
4097 		devm_kfree(ice_hw_to_dev(hw), del);
4098 	}
4099 
4100 	return status;
4101 }
4102 
4103 /**
4104  * ice_rem_prof - remove profile
4105  * @hw: pointer to the HW struct
4106  * @blk: hardware block
4107  * @id: profile tracking ID
4108  *
4109  * This will remove the profile specified by the ID parameter, which was
4110  * previously created through ice_add_prof. If any existing entries
4111  * are associated with this profile, they will be removed as well.
4112  */
4113 enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
4114 {
4115 	struct ice_prof_map *pmap;
4116 	enum ice_status status;
4117 
4118 	mutex_lock(&hw->blk[blk].es.prof_map_lock);
4119 
4120 	pmap = ice_search_prof_id(hw, blk, id);
4121 	if (!pmap) {
4122 		status = ICE_ERR_DOES_NOT_EXIST;
4123 		goto err_ice_rem_prof;
4124 	}
4125 
4126 	/* remove all flows with this profile */
4127 	status = ice_rem_flow_all(hw, blk, pmap->profile_cookie);
4128 	if (status)
4129 		goto err_ice_rem_prof;
4130 
4131 	/* dereference profile, and possibly remove */
4132 	ice_prof_dec_ref(hw, blk, pmap->prof_id);
4133 
4134 	list_del(&pmap->list);
4135 	devm_kfree(ice_hw_to_dev(hw), pmap);
4136 
4137 err_ice_rem_prof:
4138 	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
4139 	return status;
4140 }
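
/* Teardown sketch (hypothetical values): a profile registered with
 * ice_add_prof() is released with the same tracking ID, for example
 *
 *	ice_rem_prof(hw, ICE_BLK_RSS, prof_cookie);
 *
 * which also removes any VSIG associations created for that profile.
 */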
4141 
4142 /**
4143  * ice_get_prof - get profile
4144  * @hw: pointer to the HW struct
4145  * @blk: hardware block
4146  * @hdl: profile handle
4147  * @chg: change list
4148  */
4149 static enum ice_status
4150 ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
4151 	     struct list_head *chg)
4152 {
4153 	enum ice_status status = 0;
4154 	struct ice_prof_map *map;
4155 	struct ice_chs_chg *p;
4156 	u16 i;
4157 
4158 	mutex_lock(&hw->blk[blk].es.prof_map_lock);
4159 	/* Get the details on the profile specified by the handle ID */
4160 	map = ice_search_prof_id(hw, blk, hdl);
4161 	if (!map) {
4162 		status = ICE_ERR_DOES_NOT_EXIST;
4163 		goto err_ice_get_prof;
4164 	}
4165 
4166 	for (i = 0; i < map->ptg_cnt; i++)
4167 		if (!hw->blk[blk].es.written[map->prof_id]) {
4168 			/* add ES to change list */
4169 			p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
4170 					 GFP_KERNEL);
4171 			if (!p) {
4172 				status = ICE_ERR_NO_MEMORY;
4173 				goto err_ice_get_prof;
4174 			}
4175 
4176 			p->type = ICE_PTG_ES_ADD;
4177 			p->ptype = 0;
4178 			p->ptg = map->ptg[i];
4179 			p->add_ptg = 0;
4180 
4181 			p->add_prof = 1;
4182 			p->prof_id = map->prof_id;
4183 
4184 			hw->blk[blk].es.written[map->prof_id] = true;
4185 
4186 			list_add(&p->list_entry, chg);
4187 		}
4188 
4189 err_ice_get_prof:
4190 	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
4191 	/* let caller clean up the change list */
4192 	return status;
4193 }
4194 
4195 /**
4196  * ice_get_profs_vsig - get a copy of the list of profiles from a VSIG
4197  * @hw: pointer to the HW struct
4198  * @blk: hardware block
4199  * @vsig: VSIG from which to copy the list
4200  * @lst: output list
4201  *
4202  * This routine makes a copy of the list of profiles in the specified VSIG.
4203  */
4204 static enum ice_status
4205 ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
4206 		   struct list_head *lst)
4207 {
4208 	struct ice_vsig_prof *ent1, *ent2;
4209 	u16 idx = vsig & ICE_VSIG_IDX_M;
4210 
4211 	list_for_each_entry(ent1, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4212 			    list) {
4213 		struct ice_vsig_prof *p;
4214 
4215 		/* copy to the input list */
4216 		p = devm_kmemdup(ice_hw_to_dev(hw), ent1, sizeof(*p),
4217 				 GFP_KERNEL);
4218 		if (!p)
4219 			goto err_ice_get_profs_vsig;
4220 
4221 		list_add_tail(&p->list, lst);
4222 	}
4223 
4224 	return 0;
4225 
4226 err_ice_get_profs_vsig:
4227 	list_for_each_entry_safe(ent1, ent2, lst, list) {
4228 		list_del(&ent1->list);
4229 		devm_kfree(ice_hw_to_dev(hw), ent1);
4230 	}
4231 
4232 	return ICE_ERR_NO_MEMORY;
4233 }
4234 
4235 /**
4236  * ice_add_prof_to_lst - add profile entry to a list
4237  * @hw: pointer to the HW struct
4238  * @blk: hardware block
4239  * @lst: the list to be added to
4240  * @hdl: profile handle of entry to add
4241  */
4242 static enum ice_status
4243 ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
4244 		    struct list_head *lst, u64 hdl)
4245 {
4246 	enum ice_status status = 0;
4247 	struct ice_prof_map *map;
4248 	struct ice_vsig_prof *p;
4249 	u16 i;
4250 
4251 	mutex_lock(&hw->blk[blk].es.prof_map_lock);
4252 	map = ice_search_prof_id(hw, blk, hdl);
4253 	if (!map) {
4254 		status = ICE_ERR_DOES_NOT_EXIST;
4255 		goto err_ice_add_prof_to_lst;
4256 	}
4257 
4258 	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
4259 	if (!p) {
4260 		status = ICE_ERR_NO_MEMORY;
4261 		goto err_ice_add_prof_to_lst;
4262 	}
4263 
4264 	p->profile_cookie = map->profile_cookie;
4265 	p->prof_id = map->prof_id;
4266 	p->tcam_count = map->ptg_cnt;
4267 
4268 	for (i = 0; i < map->ptg_cnt; i++) {
4269 		p->tcam[i].prof_id = map->prof_id;
4270 		p->tcam[i].tcam_idx = ICE_INVALID_TCAM;
4271 		p->tcam[i].ptg = map->ptg[i];
4272 	}
4273 
4274 	list_add(&p->list, lst);
4275 
4276 err_ice_add_prof_to_lst:
4277 	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
4278 	return status;
4279 }
4280 
4281 /**
4282  * ice_move_vsi - move VSI to another VSIG
4283  * @hw: pointer to the HW struct
4284  * @blk: hardware block
4285  * @vsi: the VSI to move
4286  * @vsig: the VSIG to move the VSI to
4287  * @chg: the change list
4288  */
4289 static enum ice_status
4290 ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
4291 	     struct list_head *chg)
4292 {
4293 	enum ice_status status;
4294 	struct ice_chs_chg *p;
4295 	u16 orig_vsig;
4296 
4297 	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
4298 	if (!p)
4299 		return ICE_ERR_NO_MEMORY;
4300 
4301 	status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
4302 	if (!status)
4303 		status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
4304 
4305 	if (status) {
4306 		devm_kfree(ice_hw_to_dev(hw), p);
4307 		return status;
4308 	}
4309 
4310 	p->type = ICE_VSI_MOVE;
4311 	p->vsi = vsi;
4312 	p->orig_vsig = orig_vsig;
4313 	p->vsig = vsig;
4314 
4315 	list_add(&p->list_entry, chg);
4316 
4317 	return 0;
4318 }
4319 
4320 /**
4321  * ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list
4322  * @hw: pointer to the HW struct
4323  * @idx: the index of the TCAM entry to remove
4324  * @chg: the list of change structures to search
4325  */
4326 static void
4327 ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct list_head *chg)
4328 {
4329 	struct ice_chs_chg *pos, *tmp;
4330 
4331 	list_for_each_entry_safe(tmp, pos, chg, list_entry)
4332 		if (tmp->type == ICE_TCAM_ADD && tmp->tcam_idx == idx) {
4333 			list_del(&tmp->list_entry);
4334 			devm_kfree(ice_hw_to_dev(hw), tmp);
4335 		}
4336 }
4337 
4338 /**
4339  * ice_prof_tcam_ena_dis - add enable or disable TCAM change
4340  * @hw: pointer to the HW struct
4341  * @blk: hardware block
4342  * @enable: true to enable, false to disable
4343  * @vsig: the VSIG of the TCAM entry
4344  * @tcam: pointer to the TCAM info structure of the TCAM to enable or disable
4345  * @chg: the change list
4346  *
4347  * This function appends an enable or disable TCAM entry to the change list
4348  */
4349 static enum ice_status
4350 ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
4351 		      u16 vsig, struct ice_tcam_inf *tcam,
4352 		      struct list_head *chg)
4353 {
4354 	enum ice_status status;
4355 	struct ice_chs_chg *p;
4356 
4357 	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
4358 	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
4359 	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
4360 
4361 	/* if disabling, free the TCAM */
4362 	if (!enable) {
4363 		status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx);
4364 
4365 		/* if we have already created a change for this TCAM entry, then
4366 		 * we need to remove that entry, in order to prevent writing to
4367 		 * a TCAM entry we will no longer have ownership of.
4368 		 */
4369 		ice_rem_chg_tcam_ent(hw, tcam->tcam_idx, chg);
4370 		tcam->tcam_idx = 0;
4371 		tcam->in_use = 0;
4372 		return status;
4373 	}
4374 
4375 	/* for re-enabling, reallocate a TCAM */
4376 	status = ice_alloc_tcam_ent(hw, blk, &tcam->tcam_idx);
4377 	if (status)
4378 		return status;
4379 
4380 	/* add TCAM to change list */
4381 	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
4382 	if (!p)
4383 		return ICE_ERR_NO_MEMORY;
4384 
4385 	status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
4386 				      tcam->ptg, vsig, 0, 0, vl_msk, dc_msk,
4387 				      nm_msk);
4388 	if (status)
4389 		goto err_ice_prof_tcam_ena_dis;
4390 
4391 	tcam->in_use = 1;
4392 
4393 	p->type = ICE_TCAM_ADD;
4394 	p->add_tcam_idx = true;
4395 	p->prof_id = tcam->prof_id;
4396 	p->ptg = tcam->ptg;
4397 	p->vsig = 0;
4398 	p->tcam_idx = tcam->tcam_idx;
4399 
4400 	/* log change */
4401 	list_add(&p->list_entry, chg);
4402 
4403 	return 0;
4404 
4405 err_ice_prof_tcam_ena_dis:
4406 	devm_kfree(ice_hw_to_dev(hw), p);
4407 	return status;
4408 }
4409 
4410 /**
4411  * ice_adj_prof_priorities - adjust profile based on priorities
4412  * @hw: pointer to the HW struct
4413  * @blk: hardware block
4414  * @vsig: the VSIG for which to adjust profile priorities
4415  * @chg: the change list
4416  */
4417 static enum ice_status
4418 ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
4419 			struct list_head *chg)
4420 {
4421 	DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
4422 	struct ice_vsig_prof *t;
4423 	enum ice_status status;
4424 	u16 idx;
4425 
4426 	bitmap_zero(ptgs_used, ICE_XLT1_CNT);
4427 	idx = vsig & ICE_VSIG_IDX_M;
4428 
4429 	/* Priority is based on the order in which the profiles are added. The
4430 	 * newest added profile has highest priority and the oldest added
4431 	 * profile has the lowest priority. Since the profile property list for
4432 	 * a VSIG is sorted from newest to oldest, this code traverses the list
4433 	 * in order and enables the first of each PTG that it finds (that is not
4434 	 * already enabled); it also disables any duplicate PTGs that it finds
4435 	 * in the older profiles (that are currently enabled).
4436 	 */
4437 
4438 	list_for_each_entry(t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4439 			    list) {
4440 		u16 i;
4441 
4442 		for (i = 0; i < t->tcam_count; i++) {
4443 			/* Scan the priorities from newest to oldest.
4444 			 * Make sure that the newest profiles take priority.
4445 			 */
4446 			if (test_bit(t->tcam[i].ptg, ptgs_used) &&
4447 			    t->tcam[i].in_use) {
4448 				/* need to mark this PTG as never match, as it
4449 				 * was already in use and is therefore a duplicate
4450 				 * (and lower priority)
4451 				 */
4452 				status = ice_prof_tcam_ena_dis(hw, blk, false,
4453 							       vsig,
4454 							       &t->tcam[i],
4455 							       chg);
4456 				if (status)
4457 					return status;
4458 			} else if (!test_bit(t->tcam[i].ptg, ptgs_used) &&
4459 				   !t->tcam[i].in_use) {
4460 				/* need to enable this PTG, as it is not in use
4461 				 * and not enabled (highest priority)
4462 				 */
4463 				status = ice_prof_tcam_ena_dis(hw, blk, true,
4464 							       vsig,
4465 							       &t->tcam[i],
4466 							       chg);
4467 				if (status)
4468 					return status;
4469 			}
4470 
4471 			/* keep track of used ptgs */
4472 			set_bit(t->tcam[i].ptg, ptgs_used);
4473 		}
4474 	}
4475 
4476 	return 0;
4477 }
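
/* Illustrative example (hypothetical profiles): if an older profile A and a
 * newer profile B in the same VSIG both map PTG 5, the walk above keeps B's
 * TCAM entry (first occurrence, so highest priority) and releases A's,
 * leaving it as a never-match entry until the duplication goes away.
 */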
4478 
4479 /**
4480  * ice_add_prof_id_vsig - add profile to VSIG
4481  * @hw: pointer to the HW struct
4482  * @blk: hardware block
4483  * @vsig: the VSIG to which this profile is to be added
4484  * @hdl: the profile handle indicating the profile to add
4485  * @rev: true to add entries to the end of the list
4486  * @chg: the change list
4487  */
4488 static enum ice_status
4489 ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
4490 		     bool rev, struct list_head *chg)
4491 {
4492 	/* Masks that ignore flags */
4493 	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
4494 	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
4495 	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
4496 	enum ice_status status = 0;
4497 	struct ice_prof_map *map;
4498 	struct ice_vsig_prof *t;
4499 	struct ice_chs_chg *p;
4500 	u16 vsig_idx, i;
4501 
4502 	/* Error, if this VSIG already has this profile */
4503 	if (ice_has_prof_vsig(hw, blk, vsig, hdl))
4504 		return ICE_ERR_ALREADY_EXISTS;
4505 
4506 	/* new VSIG profile structure */
4507 	t = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*t), GFP_KERNEL);
4508 	if (!t)
4509 		return ICE_ERR_NO_MEMORY;
4510 
4511 	mutex_lock(&hw->blk[blk].es.prof_map_lock);
4512 	/* Get the details on the profile specified by the handle ID */
4513 	map = ice_search_prof_id(hw, blk, hdl);
4514 	if (!map) {
4515 		status = ICE_ERR_DOES_NOT_EXIST;
4516 		goto err_ice_add_prof_id_vsig;
4517 	}
4518 
4519 	t->profile_cookie = map->profile_cookie;
4520 	t->prof_id = map->prof_id;
4521 	t->tcam_count = map->ptg_cnt;
4522 
4523 	/* create TCAM entries */
4524 	for (i = 0; i < map->ptg_cnt; i++) {
4525 		u16 tcam_idx;
4526 
4527 		/* add TCAM to change list */
4528 		p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
4529 		if (!p) {
4530 			status = ICE_ERR_NO_MEMORY;
4531 			goto err_ice_add_prof_id_vsig;
4532 		}
4533 
4534 		/* allocate the TCAM entry index */
4535 		status = ice_alloc_tcam_ent(hw, blk, &tcam_idx);
4536 		if (status) {
4537 			devm_kfree(ice_hw_to_dev(hw), p);
4538 			goto err_ice_add_prof_id_vsig;
4539 		}
4540 
4541 		t->tcam[i].ptg = map->ptg[i];
4542 		t->tcam[i].prof_id = map->prof_id;
4543 		t->tcam[i].tcam_idx = tcam_idx;
4544 		t->tcam[i].in_use = true;
4545 
4546 		p->type = ICE_TCAM_ADD;
4547 		p->add_tcam_idx = true;
4548 		p->prof_id = t->tcam[i].prof_id;
4549 		p->ptg = t->tcam[i].ptg;
4550 		p->vsig = vsig;
4551 		p->tcam_idx = t->tcam[i].tcam_idx;
4552 
4553 		/* write the TCAM entry */
4554 		status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx,
4555 					      t->tcam[i].prof_id,
4556 					      t->tcam[i].ptg, vsig, 0, 0,
4557 					      vl_msk, dc_msk, nm_msk);
4558 		if (status) {
4559 			devm_kfree(ice_hw_to_dev(hw), p);
4560 			goto err_ice_add_prof_id_vsig;
4561 		}
4562 
4563 		/* log change */
4564 		list_add(&p->list_entry, chg);
4565 	}
4566 
4567 	/* add profile to VSIG */
4568 	vsig_idx = vsig & ICE_VSIG_IDX_M;
4569 	if (rev)
4570 		list_add_tail(&t->list,
4571 			      &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
4572 	else
4573 		list_add(&t->list,
4574 			 &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
4575 
4576 	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
4577 	return status;
4578 
4579 err_ice_add_prof_id_vsig:
4580 	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
4581 	/* let caller clean up the change list */
4582 	devm_kfree(ice_hw_to_dev(hw), t);
4583 	return status;
4584 }
4585 
4586 /**
4587  * ice_create_prof_id_vsig - add a new VSIG with a single profile
4588  * @hw: pointer to the HW struct
4589  * @blk: hardware block
4590  * @vsi: the initial VSI that will be in VSIG
4591  * @hdl: the profile handle of the profile that will be added to the VSIG
4592  * @chg: the change list
4593  */
4594 static enum ice_status
4595 ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
4596 			struct list_head *chg)
4597 {
4598 	enum ice_status status;
4599 	struct ice_chs_chg *p;
4600 	u16 new_vsig;
4601 
4602 	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
4603 	if (!p)
4604 		return ICE_ERR_NO_MEMORY;
4605 
4606 	new_vsig = ice_vsig_alloc(hw, blk);
4607 	if (!new_vsig) {
4608 		status = ICE_ERR_HW_TABLE;
4609 		goto err_ice_create_prof_id_vsig;
4610 	}
4611 
4612 	status = ice_move_vsi(hw, blk, vsi, new_vsig, chg);
4613 	if (status)
4614 		goto err_ice_create_prof_id_vsig;
4615 
4616 	status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg);
4617 	if (status)
4618 		goto err_ice_create_prof_id_vsig;
4619 
4620 	p->type = ICE_VSIG_ADD;
4621 	p->vsi = vsi;
4622 	p->orig_vsig = ICE_DEFAULT_VSIG;
4623 	p->vsig = new_vsig;
4624 
4625 	list_add(&p->list_entry, chg);
4626 
4627 	return 0;
4628 
4629 err_ice_create_prof_id_vsig:
4630 	/* let caller clean up the change list */
4631 	devm_kfree(ice_hw_to_dev(hw), p);
4632 	return status;
4633 }
4634 
4635 /**
4636  * ice_create_vsig_from_lst - create a new VSIG with a list of profiles
4637  * @hw: pointer to the HW struct
4638  * @blk: hardware block
4639  * @vsi: the initial VSI that will be in VSIG
4640  * @lst: the list of profile that will be added to the VSIG
4641  * @new_vsig: return of new VSIG
4642  * @chg: the change list
4643  */
4644 static enum ice_status
4645 ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
4646 			 struct list_head *lst, u16 *new_vsig,
4647 			 struct list_head *chg)
4648 {
4649 	struct ice_vsig_prof *t;
4650 	enum ice_status status;
4651 	u16 vsig;
4652 
4653 	vsig = ice_vsig_alloc(hw, blk);
4654 	if (!vsig)
4655 		return ICE_ERR_HW_TABLE;
4656 
4657 	status = ice_move_vsi(hw, blk, vsi, vsig, chg);
4658 	if (status)
4659 		return status;
4660 
4661 	list_for_each_entry(t, lst, list) {
4662 		/* Reverse the order here since we are copying the list */
4663 		status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
4664 					      true, chg);
4665 		if (status)
4666 			return status;
4667 	}
4668 
4669 	*new_vsig = vsig;
4670 
4671 	return 0;
4672 }
4673 
4674 /**
4675  * ice_find_prof_vsig - find a VSIG with a specific profile handle
4676  * @hw: pointer to the HW struct
4677  * @blk: hardware block
4678  * @hdl: the profile handle of the profile to search for
4679  * @vsig: returns the VSIG with the matching profile
4680  */
4681 static bool
4682 ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
4683 {
4684 	struct ice_vsig_prof *t;
4685 	enum ice_status status;
4686 	struct list_head lst;
4687 
4688 	INIT_LIST_HEAD(&lst);
4689 
4690 	t = kzalloc(sizeof(*t), GFP_KERNEL);
4691 	if (!t)
4692 		return false;
4693 
4694 	t->profile_cookie = hdl;
4695 	list_add(&t->list, &lst);
4696 
4697 	status = ice_find_dup_props_vsig(hw, blk, &lst, vsig);
4698 
4699 	list_del(&t->list);
4700 	kfree(t);
4701 
4702 	return !status;
4703 }
4704 
4705 /**
4706  * ice_add_prof_id_flow - add profile flow
4707  * @hw: pointer to the HW struct
4708  * @blk: hardware block
4709  * @vsi: the VSI to enable with the profile specified by ID
4710  * @hdl: profile handle
4711  *
4712  * Calling this function will update the hardware tables to enable the
4713  * profile indicated by the ID parameter for the VSIs specified in the VSI
4714  * array. Once successfully called, the flow will be enabled.
4715  */
4716 enum ice_status
4717 ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
4718 {
4719 	struct ice_vsig_prof *tmp1, *del1;
4720 	struct ice_chs_chg *tmp, *del;
4721 	struct list_head union_lst;
4722 	enum ice_status status;
4723 	struct list_head chg;
4724 	u16 vsig;
4725 
4726 	INIT_LIST_HEAD(&union_lst);
4727 	INIT_LIST_HEAD(&chg);
4728 
4729 	/* Get profile */
4730 	status = ice_get_prof(hw, blk, hdl, &chg);
4731 	if (status)
4732 		return status;
4733 
4734 	/* determine if VSI is already part of a VSIG */
4735 	status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
4736 	if (!status && vsig) {
4737 		bool only_vsi;
4738 		u16 or_vsig;
4739 		u16 ref;
4740 
4741 		/* found in VSIG */
4742 		or_vsig = vsig;
4743 
4744 		/* make sure that there is no overlap/conflict between the new
4745 		 * characteristics and the existing ones; we don't support that
4746 		 * scenario
4747 		 */
4748 		if (ice_has_prof_vsig(hw, blk, vsig, hdl)) {
4749 			status = ICE_ERR_ALREADY_EXISTS;
4750 			goto err_ice_add_prof_id_flow;
4751 		}
4752 
4753 		/* last VSI in the VSIG? */
4754 		status = ice_vsig_get_ref(hw, blk, vsig, &ref);
4755 		if (status)
4756 			goto err_ice_add_prof_id_flow;
4757 		only_vsi = (ref == 1);
4758 
4759 		/* create a union of the current profiles and the one being
4760 		 * added
4761 		 */
4762 		status = ice_get_profs_vsig(hw, blk, vsig, &union_lst);
4763 		if (status)
4764 			goto err_ice_add_prof_id_flow;
4765 
4766 		status = ice_add_prof_to_lst(hw, blk, &union_lst, hdl);
4767 		if (status)
4768 			goto err_ice_add_prof_id_flow;
4769 
4770 		/* search for an existing VSIG with an exact characteristic match */
4771 		status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig);
4772 		if (!status) {
4773 			/* move VSI to the VSIG that matches */
4774 			status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
4775 			if (status)
4776 				goto err_ice_add_prof_id_flow;
4777 
4778 			/* VSI has been moved out of or_vsig. If the or_vsig had
4779 			 * only that VSI it is now empty and can be removed.
4780 			 */
4781 			if (only_vsi) {
4782 				status = ice_rem_vsig(hw, blk, or_vsig, &chg);
4783 				if (status)
4784 					goto err_ice_add_prof_id_flow;
4785 			}
4786 		} else if (only_vsi) {
4787 			/* If the original VSIG only contains one VSI, then it
4788 			 * will be the requesting VSI. In this case the VSI is
4789 			 * not sharing entries and we can simply add the new
4790 			 * profile to the VSIG.
4791 			 */
4792 			status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false,
4793 						      &chg);
4794 			if (status)
4795 				goto err_ice_add_prof_id_flow;
4796 
4797 			/* Adjust priorities */
4798 			status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
4799 			if (status)
4800 				goto err_ice_add_prof_id_flow;
4801 		} else {
4802 			/* No match, so we need a new VSIG */
4803 			status = ice_create_vsig_from_lst(hw, blk, vsi,
4804 							  &union_lst, &vsig,
4805 							  &chg);
4806 			if (status)
4807 				goto err_ice_add_prof_id_flow;
4808 
4809 			/* Adjust priorities */
4810 			status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
4811 			if (status)
4812 				goto err_ice_add_prof_id_flow;
4813 		}
4814 	} else {
4815 		/* need to find or add a VSIG */
4816 		/* search for an existing VSIG with an exact characteristic match */
4817 		if (ice_find_prof_vsig(hw, blk, hdl, &vsig)) {
4818 			/* found an exact match */
4819 			/* add or move VSI to the VSIG that matches */
4820 			status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
4821 			if (status)
4822 				goto err_ice_add_prof_id_flow;
4823 		} else {
4824 			/* we did not find an exact match */
4825 			/* we need to add a VSIG */
4826 			status = ice_create_prof_id_vsig(hw, blk, vsi, hdl,
4827 							 &chg);
4828 			if (status)
4829 				goto err_ice_add_prof_id_flow;
4830 		}
4831 	}
4832 
4833 	/* update hardware */
4834 	if (!status)
4835 		status = ice_upd_prof_hw(hw, blk, &chg);
4836 
4837 err_ice_add_prof_id_flow:
4838 	list_for_each_entry_safe(del, tmp, &chg, list_entry) {
4839 		list_del(&del->list_entry);
4840 		devm_kfree(ice_hw_to_dev(hw), del);
4841 	}
4842 
4843 	list_for_each_entry_safe(del1, tmp1, &union_lst, list) {
4844 		list_del(&del1->list);
4845 		devm_kfree(ice_hw_to_dev(hw), del1);
4846 	}
4847 
4848 	return status;
4849 }
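
/* Usage sketch (hypothetical values; the vsi argument is the hardware VSI
 * number): once a profile has been registered with ice_add_prof(), it is
 * enabled on a VSI with
 *
 *	status = ice_add_prof_id_flow(hw, ICE_BLK_RSS, hw_vsi_num, prof_cookie);
 *
 * and later disabled again with ice_rem_prof_id_flow() using the same
 * arguments.
 */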
4850 
4851 /**
4852  * ice_rem_prof_from_list - remove a profile from list
4853  * @hw: pointer to the HW struct
4854  * @lst: list to remove the profile from
4855  * @hdl: the profile handle indicating the profile to remove
4856  */
4857 static enum ice_status
4858 ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl)
4859 {
4860 	struct ice_vsig_prof *ent, *tmp;
4861 
4862 	list_for_each_entry_safe(ent, tmp, lst, list)
4863 		if (ent->profile_cookie == hdl) {
4864 			list_del(&ent->list);
4865 			devm_kfree(ice_hw_to_dev(hw), ent);
4866 			return 0;
4867 		}
4868 
4869 	return ICE_ERR_DOES_NOT_EXIST;
4870 }
4871 
4872 /**
4873  * ice_rem_prof_id_flow - remove flow
4874  * @hw: pointer to the HW struct
4875  * @blk: hardware block
4876  * @vsi: the VSI from which to remove the profile specified by ID
4877  * @hdl: profile tracking handle
4878  *
4879  * Calling this function will update the hardware tables to remove the
4880  * profile indicated by the ID parameter for the VSIs specified in the VSI
4881  * array. Once successfully called, the flow will be disabled.
4882  */
4883 enum ice_status
4884 ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
4885 {
4886 	struct ice_vsig_prof *tmp1, *del1;
4887 	struct ice_chs_chg *tmp, *del;
4888 	struct list_head chg, copy;
4889 	enum ice_status status;
4890 	u16 vsig;
4891 
4892 	INIT_LIST_HEAD(&copy);
4893 	INIT_LIST_HEAD(&chg);
4894 
4895 	/* determine if VSI is already part of a VSIG */
4896 	status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
4897 	if (!status && vsig) {
4898 		bool last_profile;
4899 		bool only_vsi;
4900 		u16 ref;
4901 
4902 		/* found in VSIG */
4903 		last_profile = ice_vsig_prof_id_count(hw, blk, vsig) == 1;
4904 		status = ice_vsig_get_ref(hw, blk, vsig, &ref);
4905 		if (status)
4906 			goto err_ice_rem_prof_id_flow;
4907 		only_vsi = (ref == 1);
4908 
4909 		if (only_vsi) {
4910 			/* If the original VSIG only contains one reference,
4911 			 * which will be the requesting VSI, then the VSI is not
4912 			 * sharing entries and we can simply remove the specific
4913 			 * characteristics from the VSIG.
4914 			 */
4915 
4916 			if (last_profile) {
4917 				/* If there are no profiles left for this VSIG,
4918 				 * then simply remove the VSIG.
4919 				 */
4920 				status = ice_rem_vsig(hw, blk, vsig, &chg);
4921 				if (status)
4922 					goto err_ice_rem_prof_id_flow;
4923 			} else {
4924 				status = ice_rem_prof_id_vsig(hw, blk, vsig,
4925 							      hdl, &chg);
4926 				if (status)
4927 					goto err_ice_rem_prof_id_flow;
4928 
4929 				/* Adjust priorities */
4930 				status = ice_adj_prof_priorities(hw, blk, vsig,
4931 								 &chg);
4932 				if (status)
4933 					goto err_ice_rem_prof_id_flow;
4934 			}
4935 
4936 		} else {
4937 			/* Make a copy of the VSIG's list of Profiles */
4938 			status = ice_get_profs_vsig(hw, blk, vsig, &copy);
4939 			if (status)
4940 				goto err_ice_rem_prof_id_flow;
4941 
4942 			/* Remove specified profile entry from the list */
4943 			status = ice_rem_prof_from_list(hw, &copy, hdl);
4944 			if (status)
4945 				goto err_ice_rem_prof_id_flow;
4946 
4947 			if (list_empty(&copy)) {
4948 				status = ice_move_vsi(hw, blk, vsi,
4949 						      ICE_DEFAULT_VSIG, &chg);
4950 				if (status)
4951 					goto err_ice_rem_prof_id_flow;
4952 
4953 			} else if (!ice_find_dup_props_vsig(hw, blk, &copy,
4954 							    &vsig)) {
4955 				/* Found a VSIG with an exactly matching
4956 				 * profile list; move the VSI to that VSIG.
4957 				 */
4962 				status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
4963 				if (status)
4964 					goto err_ice_rem_prof_id_flow;
4965 			} else {
4966 				/* since no existing VSIG supports this
4967 				 * characteristic pattern, we need to create a
4968 				 * new VSIG and TCAM entries
4969 				 */
4970 				status = ice_create_vsig_from_lst(hw, blk, vsi,
4971 								  &copy, &vsig,
4972 								  &chg);
4973 				if (status)
4974 					goto err_ice_rem_prof_id_flow;
4975 
4976 				/* Adjust priorities */
4977 				status = ice_adj_prof_priorities(hw, blk, vsig,
4978 								 &chg);
4979 				if (status)
4980 					goto err_ice_rem_prof_id_flow;
4981 			}
4982 		}
4983 	} else {
4984 		status = ICE_ERR_DOES_NOT_EXIST;
4985 	}
4986 
4987 	/* update hardware tables */
4988 	if (!status)
4989 		status = ice_upd_prof_hw(hw, blk, &chg);
4990 
4991 err_ice_rem_prof_id_flow:
4992 	list_for_each_entry_safe(del, tmp, &chg, list_entry) {
4993 		list_del(&del->list_entry);
4994 		devm_kfree(ice_hw_to_dev(hw), del);
4995 	}
4996 
4997 	list_for_each_entry_safe(del1, tmp1, &copy, list) {
4998 		list_del(&del1->list);
4999 		devm_kfree(ice_hw_to_dev(hw), del1);
5000 	}
5001 
5002 	return status;
5003 }
5004