1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019, Intel Corporation. */
3 
4 #include "ice_common.h"
5 #include "ice_flex_pipe.h"
6 #include "ice_flow.h"
7 
8 /* To support per-PF tunneling entries, the package appends the PF number to
9  * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
10  */
11 static const struct ice_tunnel_type_scan tnls[] = {
12 	{ TNL_VXLAN,		"TNL_VXLAN_PF" },
13 	{ TNL_GENEVE,		"TNL_GENEVE_PF" },
14 	{ TNL_LAST,		"" }
15 };
16 
17 static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {
18 	/* SWITCH */
19 	{
20 		ICE_SID_XLT0_SW,
21 		ICE_SID_XLT_KEY_BUILDER_SW,
22 		ICE_SID_XLT1_SW,
23 		ICE_SID_XLT2_SW,
24 		ICE_SID_PROFID_TCAM_SW,
25 		ICE_SID_PROFID_REDIR_SW,
26 		ICE_SID_FLD_VEC_SW,
27 		ICE_SID_CDID_KEY_BUILDER_SW,
28 		ICE_SID_CDID_REDIR_SW
29 	},
30 
31 	/* ACL */
32 	{
33 		ICE_SID_XLT0_ACL,
34 		ICE_SID_XLT_KEY_BUILDER_ACL,
35 		ICE_SID_XLT1_ACL,
36 		ICE_SID_XLT2_ACL,
37 		ICE_SID_PROFID_TCAM_ACL,
38 		ICE_SID_PROFID_REDIR_ACL,
39 		ICE_SID_FLD_VEC_ACL,
40 		ICE_SID_CDID_KEY_BUILDER_ACL,
41 		ICE_SID_CDID_REDIR_ACL
42 	},
43 
44 	/* FD */
45 	{
46 		ICE_SID_XLT0_FD,
47 		ICE_SID_XLT_KEY_BUILDER_FD,
48 		ICE_SID_XLT1_FD,
49 		ICE_SID_XLT2_FD,
50 		ICE_SID_PROFID_TCAM_FD,
51 		ICE_SID_PROFID_REDIR_FD,
52 		ICE_SID_FLD_VEC_FD,
53 		ICE_SID_CDID_KEY_BUILDER_FD,
54 		ICE_SID_CDID_REDIR_FD
55 	},
56 
57 	/* RSS */
58 	{
59 		ICE_SID_XLT0_RSS,
60 		ICE_SID_XLT_KEY_BUILDER_RSS,
61 		ICE_SID_XLT1_RSS,
62 		ICE_SID_XLT2_RSS,
63 		ICE_SID_PROFID_TCAM_RSS,
64 		ICE_SID_PROFID_REDIR_RSS,
65 		ICE_SID_FLD_VEC_RSS,
66 		ICE_SID_CDID_KEY_BUILDER_RSS,
67 		ICE_SID_CDID_REDIR_RSS
68 	},
69 
70 	/* PE */
71 	{
72 		ICE_SID_XLT0_PE,
73 		ICE_SID_XLT_KEY_BUILDER_PE,
74 		ICE_SID_XLT1_PE,
75 		ICE_SID_XLT2_PE,
76 		ICE_SID_PROFID_TCAM_PE,
77 		ICE_SID_PROFID_REDIR_PE,
78 		ICE_SID_FLD_VEC_PE,
79 		ICE_SID_CDID_KEY_BUILDER_PE,
80 		ICE_SID_CDID_REDIR_PE
81 	}
82 };
83 
84 /**
85  * ice_sect_id - returns section ID
86  * @blk: block type
87  * @sect: section type
88  *
89  * This helper function returns the proper section ID given a block type and a
90  * section type.
91  */
92 static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)
93 {
94 	return ice_sect_lkup[blk][sect];
95 }
96 
97 /**
98  * ice_pkg_val_buf
99  * @buf: pointer to the ice buffer
100  *
101  * This helper function validates a buffer's header.
102  */
103 static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
104 {
105 	struct ice_buf_hdr *hdr;
106 	u16 section_count;
107 	u16 data_end;
108 
109 	hdr = (struct ice_buf_hdr *)buf->buf;
110 	/* verify data */
111 	section_count = le16_to_cpu(hdr->section_count);
112 	if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
113 		return NULL;
114 
115 	data_end = le16_to_cpu(hdr->data_end);
116 	if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
117 		return NULL;
118 
119 	return hdr;
120 }
121 
122 /**
123  * ice_find_buf_table
124  * @ice_seg: pointer to the ice segment
125  *
126  * Returns the address of the buffer table within the ice segment.
127  */
128 static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
129 {
130 	struct ice_nvm_table *nvms;
131 
132 	nvms = (struct ice_nvm_table *)
133 		(ice_seg->device_table +
134 		 le32_to_cpu(ice_seg->device_table_count));
135 
136 	return (__force struct ice_buf_table *)
137 		(nvms->vers + le32_to_cpu(nvms->table_count));
138 }
139 
140 /**
141  * ice_pkg_enum_buf
142  * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
143  * @state: pointer to the enum state
144  *
145  * This function will enumerate all the buffers in the ice segment. The first
146  * call is made with the ice_seg parameter non-NULL; on subsequent calls,
147  * ice_seg is set to NULL which continues the enumeration. When the function
148  * returns a NULL pointer, then the end of the buffers has been reached, or an
149  * unexpected value has been detected (for example an invalid section count or
150  * an invalid buffer end value).
151  */
152 static struct ice_buf_hdr *
153 ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
154 {
155 	if (ice_seg) {
156 		state->buf_table = ice_find_buf_table(ice_seg);
157 		if (!state->buf_table)
158 			return NULL;
159 
160 		state->buf_idx = 0;
161 		return ice_pkg_val_buf(state->buf_table->buf_array);
162 	}
163 
164 	if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count))
165 		return ice_pkg_val_buf(state->buf_table->buf_array +
166 				       state->buf_idx);
167 	else
168 		return NULL;
169 }
170 
171 /**
172  * ice_pkg_advance_sect
173  * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
174  * @state: pointer to the enum state
175  *
176  * This helper function will advance the section within the ice segment,
177  * also advancing the buffer if needed.
178  */
179 static bool
180 ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
181 {
182 	if (!ice_seg && !state->buf)
183 		return false;
184 
185 	if (!ice_seg && state->buf)
186 		if (++state->sect_idx < le16_to_cpu(state->buf->section_count))
187 			return true;
188 
189 	state->buf = ice_pkg_enum_buf(ice_seg, state);
190 	if (!state->buf)
191 		return false;
192 
193 	/* start of new buffer, reset section index */
194 	state->sect_idx = 0;
195 	return true;
196 }
197 
198 /**
199  * ice_pkg_enum_section
200  * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
201  * @state: pointer to the enum state
202  * @sect_type: section type to enumerate
203  *
204  * This function will enumerate all the sections of a particular type in the
205  * ice segment. The first call is made with the ice_seg parameter non-NULL;
206  * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
207  * When the function returns a NULL pointer, then the end of the matching
208  * sections has been reached.
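 *
 * A typical caller walks the matching sections roughly like this (see
 * ice_find_boost_entry() for the entry-level equivalent):
 *
 *	sect = ice_pkg_enum_section(ice_seg, &state, sect_type);
 *	while (sect) {
 *		... process the section ...
 *		sect = ice_pkg_enum_section(NULL, &state, 0);
 *	}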
209  */
210 static void *
211 ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
212 		     u32 sect_type)
213 {
214 	u16 offset, size;
215 
216 	if (ice_seg)
217 		state->type = sect_type;
218 
219 	if (!ice_pkg_advance_sect(ice_seg, state))
220 		return NULL;
221 
222 	/* scan for next matching section */
223 	while (state->buf->section_entry[state->sect_idx].type !=
224 	       cpu_to_le32(state->type))
225 		if (!ice_pkg_advance_sect(NULL, state))
226 			return NULL;
227 
228 	/* validate section */
229 	offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
230 	if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
231 		return NULL;
232 
233 	size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size);
234 	if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
235 		return NULL;
236 
237 	/* make sure the section fits in the buffer */
238 	if (offset + size > ICE_PKG_BUF_SIZE)
239 		return NULL;
240 
241 	state->sect_type =
242 		le32_to_cpu(state->buf->section_entry[state->sect_idx].type);
243 
244 	/* calc pointer to this section */
245 	state->sect = ((u8 *)state->buf) +
246 		le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
247 
248 	return state->sect;
249 }
250 
251 /**
252  * ice_pkg_enum_entry
253  * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
254  * @state: pointer to the enum state
255  * @sect_type: section type to enumerate
256  * @offset: pointer to variable that receives the offset in the table (optional)
257  * @handler: function that handles access to the entries into the section type
258  *
259  * This function will enumerate all the entries in a particular section type in
260  * the ice segment. The first call is made with the ice_seg parameter non-NULL;
261  * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
262  * When the function returns a NULL pointer, then the end of the entries has
263  * been reached.
264  *
265  * Since each section may have a different header and entry size, the handler
266  * function is needed to determine the number and location of entries in each
267  * section.
268  *
269  * The offset parameter is optional, but should be used for sections whose
270  * entries carry an offset into a larger table. For such cases, the section
271  * handler function must return the appropriate base offset + index to give the
272  * absolute offset for each entry. For example, if the base for a section's
273  * header indicates a base offset of 10, and the index for the entry is 2, then
274  * the section handler function should set the offset to 10 + 2 = 12.
275  */
276 static void *
277 ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
278 		   u32 sect_type, u32 *offset,
279 		   void *(*handler)(u32 sect_type, void *section,
280 				    u32 index, u32 *offset))
281 {
282 	void *entry;
283 
284 	if (ice_seg) {
285 		if (!handler)
286 			return NULL;
287 
288 		if (!ice_pkg_enum_section(ice_seg, state, sect_type))
289 			return NULL;
290 
291 		state->entry_idx = 0;
292 		state->handler = handler;
293 	} else {
294 		state->entry_idx++;
295 	}
296 
297 	if (!state->handler)
298 		return NULL;
299 
300 	/* get entry */
301 	entry = state->handler(state->sect_type, state->sect, state->entry_idx,
302 			       offset);
303 	if (!entry) {
304 		/* end of a section, look for another section of this type */
305 		if (!ice_pkg_enum_section(NULL, state, 0))
306 			return NULL;
307 
308 		state->entry_idx = 0;
309 		entry = state->handler(state->sect_type, state->sect,
310 				       state->entry_idx, offset);
311 	}
312 
313 	return entry;
314 }
315 
316 /**
317  * ice_boost_tcam_handler
318  * @sect_type: section type
319  * @section: pointer to section
320  * @index: index of the boost TCAM entry to be returned
321  * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
322  *
323  * This is a callback function that can be passed to ice_pkg_enum_entry.
324  * Handles enumeration of individual boost TCAM entries.
325  */
326 static void *
327 ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
328 {
329 	struct ice_boost_tcam_section *boost;
330 
331 	if (!section)
332 		return NULL;
333 
334 	if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
335 		return NULL;
336 
337 	/* cppcheck-suppress nullPointer */
338 	if (index > ICE_MAX_BST_TCAMS_IN_BUF)
339 		return NULL;
340 
341 	if (offset)
342 		*offset = 0;
343 
344 	boost = section;
345 	if (index >= le16_to_cpu(boost->count))
346 		return NULL;
347 
348 	return boost->tcam + index;
349 }
350 
351 /**
352  * ice_find_boost_entry
353  * @ice_seg: pointer to the ice segment (non-NULL)
354  * @addr: Boost TCAM address of entry to search for
355  * @entry: returns pointer to the entry
356  *
357  * Finds a particular Boost TCAM entry and returns a pointer to that entry
358  * if it is found. The ice_seg parameter must not be NULL since the first call
359  * to ice_pkg_enum_entry requires a pointer to an actual ice_seg structure.
360  */
361 static enum ice_status
362 ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
363 		     struct ice_boost_tcam_entry **entry)
364 {
365 	struct ice_boost_tcam_entry *tcam;
366 	struct ice_pkg_enum state;
367 
368 	memset(&state, 0, sizeof(state));
369 
370 	if (!ice_seg)
371 		return ICE_ERR_PARAM;
372 
373 	do {
374 		tcam = ice_pkg_enum_entry(ice_seg, &state,
375 					  ICE_SID_RXPARSER_BOOST_TCAM, NULL,
376 					  ice_boost_tcam_handler);
377 		if (tcam && le16_to_cpu(tcam->addr) == addr) {
378 			*entry = tcam;
379 			return 0;
380 		}
381 
382 		ice_seg = NULL;
383 	} while (tcam);
384 
385 	*entry = NULL;
386 	return ICE_ERR_CFG;
387 }
388 
389 /**
390  * ice_label_enum_handler
391  * @sect_type: section type
392  * @section: pointer to section
393  * @index: index of the label entry to be returned
394  * @offset: pointer to receive absolute offset, always zero for label sections
395  *
396  * This is a callback function that can be passed to ice_pkg_enum_entry.
397  * Handles enumeration of individual label entries.
398  */
399 static void *
400 ice_label_enum_handler(u32 __always_unused sect_type, void *section, u32 index,
401 		       u32 *offset)
402 {
403 	struct ice_label_section *labels;
404 
405 	if (!section)
406 		return NULL;
407 
408 	/* cppcheck-suppress nullPointer */
409 	if (index > ICE_MAX_LABELS_IN_BUF)
410 		return NULL;
411 
412 	if (offset)
413 		*offset = 0;
414 
415 	labels = section;
416 	if (index >= le16_to_cpu(labels->count))
417 		return NULL;
418 
419 	return labels->label + index;
420 }
421 
422 /**
423  * ice_enum_labels
424  * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
425  * @type: the section type that will contain the label (0 on subsequent calls)
426  * @state: ice_pkg_enum structure that will hold the state of the enumeration
427  * @value: pointer to a value that will return the label's value if found
428  *
429  * Enumerates a list of labels in the package. The caller will call
430  * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
431  * ice_enum_labels(NULL, 0, ...) to continue. When the function returns NULL,
432  * the end of the list has been reached.
433  */
434 static char *
435 ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
436 		u16 *value)
437 {
438 	struct ice_label *label;
439 
440 	/* Check for valid label section on first call */
441 	if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
442 		return NULL;
443 
444 	label = ice_pkg_enum_entry(ice_seg, state, type, NULL,
445 				   ice_label_enum_handler);
446 	if (!label)
447 		return NULL;
448 
449 	*value = le16_to_cpu(label->value);
450 	return label->name;
451 }
452 
453 /**
454  * ice_init_pkg_hints
455  * @hw: pointer to the HW structure
456  * @ice_seg: pointer to the segment of the package scan (non-NULL)
457  *
458  * This function will scan the package and save off relevant information
459  * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
460  * since the first call to ice_enum_labels requires a pointer to an actual
461  * ice_seg structure.
462  */
463 static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
464 {
465 	struct ice_pkg_enum state;
466 	char *label_name;
467 	u16 val;
468 	int i;
469 
470 	memset(&hw->tnl, 0, sizeof(hw->tnl));
471 	memset(&state, 0, sizeof(state));
472 
473 	if (!ice_seg)
474 		return;
475 
476 	label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
477 				     &val);
478 
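	/* For example, on PF 2 the label "TNL_VXLAN_PF2" matches the TNL_VXLAN
	 * prefix with a trailing '2'; 'val' holds that label's value, which is
	 * saved below as the boost TCAM address of the tunnel entry.
	 */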
479 	while (label_name && hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
480 		for (i = 0; tnls[i].type != TNL_LAST; i++) {
481 			size_t len = strlen(tnls[i].label_prefix);
482 
483 			/* Look for matching label start, before continuing */
484 			if (strncmp(label_name, tnls[i].label_prefix, len))
485 				continue;
486 
487 			/* Make sure this label matches our PF. Note that the PF
488 			 * character ('0' - '7') will be located where our
489 			 * prefix string's null terminator is located.
490 			 */
491 			if ((label_name[len] - '0') == hw->pf_id) {
492 				hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
493 				hw->tnl.tbl[hw->tnl.count].valid = false;
494 				hw->tnl.tbl[hw->tnl.count].boost_addr = val;
495 				hw->tnl.tbl[hw->tnl.count].port = 0;
496 				hw->tnl.count++;
497 				break;
498 			}
499 		}
500 
501 		label_name = ice_enum_labels(NULL, 0, &state, &val);
502 	}
503 
504 	/* Cache the appropriate boost TCAM entry pointers */
505 	for (i = 0; i < hw->tnl.count; i++) {
506 		ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
507 				     &hw->tnl.tbl[i].boost_entry);
508 		if (hw->tnl.tbl[i].boost_entry) {
509 			hw->tnl.tbl[i].valid = true;
510 			if (hw->tnl.tbl[i].type < __TNL_TYPE_CNT)
511 				hw->tnl.valid_count[hw->tnl.tbl[i].type]++;
512 		}
513 	}
514 }
515 
516 /* Key creation */
517 
518 #define ICE_DC_KEY	0x1	/* don't care */
519 #define ICE_DC_KEYINV	0x1
520 #define ICE_NM_KEY	0x0	/* never match */
521 #define ICE_NM_KEYINV	0x0
522 #define ICE_0_KEY	0x1	/* match 0 */
523 #define ICE_0_KEYINV	0x0
524 #define ICE_1_KEY	0x0	/* match 1 */
525 #define ICE_1_KEYINV	0x1
526 
527 /**
528  * ice_gen_key_word - generate 16-bits of a key/mask word
529  * @val: the value
530  * @valid: valid bits mask (change only the valid bits)
531  * @dont_care: don't care mask
532  * @nvr_mtch: never match mask
533  * @key: pointer to where the resulting key portion will be stored
534  * @key_inv: pointer to where the resulting key invert portion will be stored
535  *
536  * This function generates 16 bits from an 8-bit value, an 8-bit don't care mask
537  * and an 8-bit never match mask. The 16 bits of output are divided into 8 bits
538  * of key and 8 bits of key invert.
539  *
540  *     '0' =    b01, always match a 0 bit
541  *     '1' =    b10, always match a 1 bit
542  *     '?' =    b11, don't care bit (always matches)
543  *     '~' =    b00, never match bit
544  *
545  * Input:
546  *          val:         b0  1  0  1  0  1
547  *          dont_care:   b0  0  1  1  0  0
548  *          never_mtch:  b0  0  0  0  1  1
549  *          ------------------------------
550  * Result:  key:        b01 10 11 11 00 00
551  */
552 static enum ice_status
553 ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
554 		 u8 *key_inv)
555 {
556 	u8 in_key = *key, in_key_inv = *key_inv;
557 	u8 i;
558 
559 	/* 'dont_care' and 'nvr_mtch' masks cannot overlap */
560 	if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
561 		return ICE_ERR_CFG;
562 
563 	*key = 0;
564 	*key_inv = 0;
565 
566 	/* encode the 8 bits into 8-bit key and 8-bit key invert */
567 	for (i = 0; i < 8; i++) {
568 		*key >>= 1;
569 		*key_inv >>= 1;
570 
571 		if (!(valid & 0x1)) { /* change only valid bits */
572 			*key |= (in_key & 0x1) << 7;
573 			*key_inv |= (in_key_inv & 0x1) << 7;
574 		} else if (dont_care & 0x1) { /* don't care bit */
575 			*key |= ICE_DC_KEY << 7;
576 			*key_inv |= ICE_DC_KEYINV << 7;
577 		} else if (nvr_mtch & 0x1) { /* never match bit */
578 			*key |= ICE_NM_KEY << 7;
579 			*key_inv |= ICE_NM_KEYINV << 7;
580 		} else if (val & 0x01) { /* exact 1 match */
581 			*key |= ICE_1_KEY << 7;
582 			*key_inv |= ICE_1_KEYINV << 7;
583 		} else { /* exact 0 match */
584 			*key |= ICE_0_KEY << 7;
585 			*key_inv |= ICE_0_KEYINV << 7;
586 		}
587 
588 		dont_care >>= 1;
589 		nvr_mtch >>= 1;
590 		valid >>= 1;
591 		val >>= 1;
592 		in_key >>= 1;
593 		in_key_inv >>= 1;
594 	}
595 
596 	return 0;
597 }
598 
599 /**
600  * ice_bits_max_set - determine if the number of bits set is within a maximum
601  * @mask: pointer to the byte array which is the mask
602  * @size: the number of bytes in the mask
603  * @max: the max number of set bits
604  *
605  * This function determines if there are at most 'max' bits set in an
606  * array. Returns true if the number of bits set is <= max, and false
607  * otherwise.
608  */
609 static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
610 {
611 	u16 count = 0;
612 	u16 i;
613 
614 	/* check each byte */
615 	for (i = 0; i < size; i++) {
616 		/* if 0, go to next byte */
617 		if (!mask[i])
618 			continue;
619 
620 		/* We know there is at least one set bit in this byte because of
621 		 * the above check; if we already have found 'max' number of
622 		 * bits set, then we can return failure now.
623 		 */
624 		if (count == max)
625 			return false;
626 
627 		/* count the bits in this byte, checking threshold */
628 		count += hweight8(mask[i]);
629 		if (count > max)
630 			return false;
631 	}
632 
633 	return true;
634 }
635 
636 /**
637  * ice_set_key - generate a variable sized key with multiples of 16-bits
638  * @key: pointer to where the key will be stored
639  * @size: the size of the complete key in bytes (must be even)
640  * @val: array of 8-bit values that makes up the value portion of the key
641  * @upd: array of 8-bit masks that determine what key portion to update
642  * @dc: array of 8-bit masks that make up the don't care mask
643  * @nm: array of 8-bit masks that make up the never match mask
644  * @off: the offset of the first byte in the key to update
645  * @len: the number of bytes in the key update
646  *
647  * This function generates a key from a value, a don't care mask and a never
648  * match mask.
649  * upd, dc, and nm are optional parameters, and can be NULL:
650  *	upd == NULL --> upd mask is all 1's (update all bits)
651  *	dc == NULL --> dc mask is all 0's (no don't care bits)
652  *	nm == NULL --> nm mask is all 0's (no never match bits)
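 *
 * The key buffer holds two halves: bytes [0, size / 2) receive the key and
 * bytes [size / 2, size) receive the key invert, so 'off' and 'len' are
 * relative to each half rather than to the whole buffer.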
653  */
654 static enum ice_status
655 ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
656 	    u16 len)
657 {
658 	u16 half_size;
659 	u16 i;
660 
661 	/* size must be a multiple of 2 bytes. */
662 	if (size % 2)
663 		return ICE_ERR_CFG;
664 
665 	half_size = size / 2;
666 	if (off + len > half_size)
667 		return ICE_ERR_CFG;
668 
669 	/* Make sure at most one bit is set in the never match mask. Having more
670 	 * than one never match mask bit set will cause HW to consume excessive
671 	 * power; this is a power management efficiency check.
672 	 */
673 #define ICE_NVR_MTCH_BITS_MAX	1
674 	if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
675 		return ICE_ERR_CFG;
676 
677 	for (i = 0; i < len; i++)
678 		if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
679 				     dc ? dc[i] : 0, nm ? nm[i] : 0,
680 				     key + off + i, key + half_size + off + i))
681 			return ICE_ERR_CFG;
682 
683 	return 0;
684 }
685 
686 /**
687  * ice_acquire_global_cfg_lock
688  * @hw: pointer to the HW structure
689  * @access: access type (read or write)
690  *
691  * This function will request ownership of the global config lock for reading
692  * or writing of the package. When attempting to obtain write access, the
693  * caller must check for the following two return values:
694  *
695  * 0 (success)        - Means the caller has acquired the global config lock
696  *                      and can perform writing of the package.
697  * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
698  *                      package or has found that no update was necessary; in
699  *                      this case, the caller can just skip performing any
700  *                      update of the package.
701  */
702 static enum ice_status
703 ice_acquire_global_cfg_lock(struct ice_hw *hw,
704 			    enum ice_aq_res_access_type access)
705 {
706 	enum ice_status status;
707 
708 	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
709 				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);
710 
711 	if (!status)
712 		mutex_lock(&ice_global_cfg_lock_sw);
713 	else if (status == ICE_ERR_AQ_NO_WORK)
714 		ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n");
715 
716 	return status;
717 }
718 
719 /**
720  * ice_release_global_cfg_lock
721  * @hw: pointer to the HW structure
722  *
723  * This function will release the global config lock.
724  */
725 static void ice_release_global_cfg_lock(struct ice_hw *hw)
726 {
727 	mutex_unlock(&ice_global_cfg_lock_sw);
728 	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
729 }
730 
731 /**
732  * ice_acquire_change_lock
733  * @hw: pointer to the HW structure
734  * @access: access type (read or write)
735  *
736  * This function will request ownership of the change lock.
737  */
738 enum ice_status
739 ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
740 {
741 	return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
742 			       ICE_CHANGE_LOCK_TIMEOUT);
743 }
744 
745 /**
746  * ice_release_change_lock
747  * @hw: pointer to the HW structure
748  *
749  * This function will release the change lock using the proper Admin Command.
750  */
751 void ice_release_change_lock(struct ice_hw *hw)
752 {
753 	ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
754 }
755 
756 /**
757  * ice_aq_download_pkg
758  * @hw: pointer to the hardware structure
759  * @pkg_buf: the package buffer to transfer
760  * @buf_size: the size of the package buffer
761  * @last_buf: last buffer indicator
762  * @error_offset: returns error offset
763  * @error_info: returns error information
764  * @cd: pointer to command details structure or NULL
765  *
766  * Download Package (0x0C40)
767  */
768 static enum ice_status
769 ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
770 		    u16 buf_size, bool last_buf, u32 *error_offset,
771 		    u32 *error_info, struct ice_sq_cd *cd)
772 {
773 	struct ice_aqc_download_pkg *cmd;
774 	struct ice_aq_desc desc;
775 	enum ice_status status;
776 
777 	if (error_offset)
778 		*error_offset = 0;
779 	if (error_info)
780 		*error_info = 0;
781 
782 	cmd = &desc.params.download_pkg;
783 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
784 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
785 
786 	if (last_buf)
787 		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
788 
789 	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
790 	if (status == ICE_ERR_AQ_ERROR) {
791 		/* Read error from buffer only when the FW returned an error */
792 		struct ice_aqc_download_pkg_resp *resp;
793 
794 		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
795 		if (error_offset)
796 			*error_offset = le32_to_cpu(resp->error_offset);
797 		if (error_info)
798 			*error_info = le32_to_cpu(resp->error_info);
799 	}
800 
801 	return status;
802 }
803 
804 /**
805  * ice_aq_update_pkg
806  * @hw: pointer to the hardware structure
807  * @pkg_buf: the package cmd buffer
808  * @buf_size: the size of the package cmd buffer
809  * @last_buf: last buffer indicator
810  * @error_offset: returns error offset
811  * @error_info: returns error information
812  * @cd: pointer to command details structure or NULL
813  *
814  * Update Package (0x0C42)
815  */
816 static enum ice_status
817 ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
818 		  bool last_buf, u32 *error_offset, u32 *error_info,
819 		  struct ice_sq_cd *cd)
820 {
821 	struct ice_aqc_download_pkg *cmd;
822 	struct ice_aq_desc desc;
823 	enum ice_status status;
824 
825 	if (error_offset)
826 		*error_offset = 0;
827 	if (error_info)
828 		*error_info = 0;
829 
830 	cmd = &desc.params.download_pkg;
831 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
832 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
833 
834 	if (last_buf)
835 		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
836 
837 	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
838 	if (status == ICE_ERR_AQ_ERROR) {
839 		/* Read error from buffer only when the FW returned an error */
840 		struct ice_aqc_download_pkg_resp *resp;
841 
842 		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
843 		if (error_offset)
844 			*error_offset = le32_to_cpu(resp->error_offset);
845 		if (error_info)
846 			*error_info = le32_to_cpu(resp->error_info);
847 	}
848 
849 	return status;
850 }
851 
852 /**
853  * ice_find_seg_in_pkg
854  * @hw: pointer to the hardware structure
855  * @seg_type: the segment type to search for (e.g., SEGMENT_TYPE_ICE)
856  * @pkg_hdr: pointer to the package header to be searched
857  *
858  * This function searches a package file for a particular segment type. On
859  * success it returns a pointer to the segment header, otherwise it will
860  * return NULL.
861  */
862 static struct ice_generic_seg_hdr *
863 ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
864 		    struct ice_pkg_hdr *pkg_hdr)
865 {
866 	u32 i;
867 
868 	ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
869 		  pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
870 		  pkg_hdr->pkg_format_ver.update,
871 		  pkg_hdr->pkg_format_ver.draft);
872 
873 	/* Search all package segments for the requested segment type */
874 	for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) {
875 		struct ice_generic_seg_hdr *seg;
876 
877 		seg = (struct ice_generic_seg_hdr *)
878 			((u8 *)pkg_hdr + le32_to_cpu(pkg_hdr->seg_offset[i]));
879 
880 		if (le32_to_cpu(seg->seg_type) == seg_type)
881 			return seg;
882 	}
883 
884 	return NULL;
885 }
886 
887 /**
888  * ice_update_pkg
889  * @hw: pointer to the hardware structure
890  * @bufs: pointer to an array of buffers
891  * @count: the number of buffers in the array
892  *
893  * Obtains change lock and updates package.
894  */
895 static enum ice_status
896 ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
897 {
898 	enum ice_status status;
899 	u32 offset, info, i;
900 
901 	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
902 	if (status)
903 		return status;
904 
905 	for (i = 0; i < count; i++) {
906 		struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
907 		bool last = ((i + 1) == count);
908 
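		/* an update transfers only the used portion of the buffer
		 * (bytes up to data_end) rather than the full ICE_PKG_BUF_SIZE
		 * that a download sends
		 */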
909 		status = ice_aq_update_pkg(hw, bh, le16_to_cpu(bh->data_end),
910 					   last, &offset, &info, NULL);
911 
912 		if (status) {
913 			ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n",
914 				  status, offset, info);
915 			break;
916 		}
917 	}
918 
919 	ice_release_change_lock(hw);
920 
921 	return status;
922 }
923 
924 /**
925  * ice_dwnld_cfg_bufs
926  * @hw: pointer to the hardware structure
927  * @bufs: pointer to an array of buffers
928  * @count: the number of buffers in the array
929  *
930  * Obtains global config lock and downloads the package configuration buffers
931  * to the firmware. Metadata buffers are skipped, and the first metadata buffer
932  * found indicates that the rest of the buffers are all metadata buffers.
933  */
934 static enum ice_status
935 ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
936 {
937 	enum ice_status status;
938 	struct ice_buf_hdr *bh;
939 	u32 offset, info, i;
940 
941 	if (!bufs || !count)
942 		return ICE_ERR_PARAM;
943 
944 	/* If the first buffer's first section has its metadata bit set
945 	 * then there are no buffers to be downloaded, and the operation is
946 	 * considered a success.
947 	 */
948 	bh = (struct ice_buf_hdr *)bufs;
949 	if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
950 		return 0;
951 
952 	/* reset pkg_dwnld_status in case this function is called in the
953 	 * reset/rebuild flow
954 	 */
955 	hw->pkg_dwnld_status = ICE_AQ_RC_OK;
956 
957 	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
958 	if (status) {
959 		if (status == ICE_ERR_AQ_NO_WORK)
960 			hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST;
961 		else
962 			hw->pkg_dwnld_status = hw->adminq.sq_last_status;
963 		return status;
964 	}
965 
966 	for (i = 0; i < count; i++) {
967 		bool last = ((i + 1) == count);
968 
969 		if (!last) {
970 			/* check next buffer for metadata flag */
971 			bh = (struct ice_buf_hdr *)(bufs + i + 1);
972 
973 			/* A set metadata flag in the next buffer will signal
974 			 * that the current buffer will be the last buffer
975 			 * downloaded
976 			 */
977 			if (le16_to_cpu(bh->section_count))
978 				if (le32_to_cpu(bh->section_entry[0].type) &
979 				    ICE_METADATA_BUF)
980 					last = true;
981 		}
982 
983 		bh = (struct ice_buf_hdr *)(bufs + i);
984 
985 		status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
986 					     &offset, &info, NULL);
987 
988 		/* Save AQ status from download package */
989 		hw->pkg_dwnld_status = hw->adminq.sq_last_status;
990 		if (status) {
991 			ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
992 				  status, offset, info);
993 
994 			break;
995 		}
996 
997 		if (last)
998 			break;
999 	}
1000 
1001 	ice_release_global_cfg_lock(hw);
1002 
1003 	return status;
1004 }
1005 
1006 /**
1007  * ice_aq_get_pkg_info_list
1008  * @hw: pointer to the hardware structure
1009  * @pkg_info: the buffer which will receive the information list
1010  * @buf_size: the size of the pkg_info information buffer
1011  * @cd: pointer to command details structure or NULL
1012  *
1013  * Get Package Info List (0x0C43)
1014  */
1015 static enum ice_status
1016 ice_aq_get_pkg_info_list(struct ice_hw *hw,
1017 			 struct ice_aqc_get_pkg_info_resp *pkg_info,
1018 			 u16 buf_size, struct ice_sq_cd *cd)
1019 {
1020 	struct ice_aq_desc desc;
1021 
1022 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
1023 
1024 	return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
1025 }
1026 
1027 /**
1028  * ice_download_pkg
1029  * @hw: pointer to the hardware structure
1030  * @ice_seg: pointer to the segment of the package to be downloaded
1031  *
1032  * Handles the download of a complete package.
1033  */
1034 static enum ice_status
1035 ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
1036 {
1037 	struct ice_buf_table *ice_buf_tbl;
1038 
1039 	ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
1040 		  ice_seg->hdr.seg_format_ver.major,
1041 		  ice_seg->hdr.seg_format_ver.minor,
1042 		  ice_seg->hdr.seg_format_ver.update,
1043 		  ice_seg->hdr.seg_format_ver.draft);
1044 
1045 	ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
1046 		  le32_to_cpu(ice_seg->hdr.seg_type),
1047 		  le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);
1048 
1049 	ice_buf_tbl = ice_find_buf_table(ice_seg);
1050 
1051 	ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
1052 		  le32_to_cpu(ice_buf_tbl->buf_count));
1053 
1054 	return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
1055 				  le32_to_cpu(ice_buf_tbl->buf_count));
1056 }
1057 
1058 /**
1059  * ice_init_pkg_info
1060  * @hw: pointer to the hardware structure
1061  * @pkg_hdr: pointer to the driver's package hdr
1062  *
1063  * Saves off the package details into the HW structure.
1064  */
1065 static enum ice_status
1066 ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
1067 {
1068 	struct ice_generic_seg_hdr *seg_hdr;
1069 
1070 	if (!pkg_hdr)
1071 		return ICE_ERR_PARAM;
1072 
1073 	seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
1074 	if (seg_hdr) {
1075 		struct ice_meta_sect *meta;
1076 		struct ice_pkg_enum state;
1077 
1078 		memset(&state, 0, sizeof(state));
1079 
1080 		/* Get package information from the Metadata Section */
1081 		meta = ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state,
1082 					    ICE_SID_METADATA);
1083 		if (!meta) {
1084 			ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n");
1085 			return ICE_ERR_CFG;
1086 		}
1087 
1088 		hw->pkg_ver = meta->ver;
1089 		memcpy(hw->pkg_name, meta->name, sizeof(meta->name));
1090 
1091 		ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
1092 			  meta->ver.major, meta->ver.minor, meta->ver.update,
1093 			  meta->ver.draft, meta->name);
1094 
1095 		hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver;
1096 		memcpy(hw->ice_seg_id, seg_hdr->seg_id,
1097 		       sizeof(hw->ice_seg_id));
1098 
1099 		ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
1100 			  seg_hdr->seg_format_ver.major,
1101 			  seg_hdr->seg_format_ver.minor,
1102 			  seg_hdr->seg_format_ver.update,
1103 			  seg_hdr->seg_format_ver.draft,
1104 			  seg_hdr->seg_id);
1105 	} else {
1106 		ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n");
1107 		return ICE_ERR_CFG;
1108 	}
1109 
1110 	return 0;
1111 }
1112 
1113 /**
1114  * ice_get_pkg_info
1115  * @hw: pointer to the hardware structure
1116  *
1117  * Store details of the package currently loaded in HW into the HW structure.
1118  */
1119 static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
1120 {
1121 	struct ice_aqc_get_pkg_info_resp *pkg_info;
1122 	enum ice_status status;
1123 	u16 size;
1124 	u32 i;
1125 
1126 	size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
1127 	pkg_info = kzalloc(size, GFP_KERNEL);
1128 	if (!pkg_info)
1129 		return ICE_ERR_NO_MEMORY;
1130 
1131 	status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL);
1132 	if (status)
1133 		goto init_pkg_free_alloc;
1134 
1135 	for (i = 0; i < le32_to_cpu(pkg_info->count); i++) {
1136 #define ICE_PKG_FLAG_COUNT	4
1137 		char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
1138 		u8 place = 0;
1139 
1140 		if (pkg_info->pkg_info[i].is_active) {
1141 			flags[place++] = 'A';
1142 			hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
1143 			hw->active_track_id =
1144 				le32_to_cpu(pkg_info->pkg_info[i].track_id);
1145 			memcpy(hw->active_pkg_name,
1146 			       pkg_info->pkg_info[i].name,
1147 			       sizeof(pkg_info->pkg_info[i].name));
1148 			hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
1149 		}
1150 		if (pkg_info->pkg_info[i].is_active_at_boot)
1151 			flags[place++] = 'B';
1152 		if (pkg_info->pkg_info[i].is_modified)
1153 			flags[place++] = 'M';
1154 		if (pkg_info->pkg_info[i].is_in_nvm)
1155 			flags[place++] = 'N';
1156 
1157 		ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
1158 			  i, pkg_info->pkg_info[i].ver.major,
1159 			  pkg_info->pkg_info[i].ver.minor,
1160 			  pkg_info->pkg_info[i].ver.update,
1161 			  pkg_info->pkg_info[i].ver.draft,
1162 			  pkg_info->pkg_info[i].name, flags);
1163 	}
1164 
1165 init_pkg_free_alloc:
1166 	kfree(pkg_info);
1167 
1168 	return status;
1169 }
1170 
1171 /**
1172  * ice_verify_pkg - verify package
1173  * @pkg: pointer to the package buffer
1174  * @len: size of the package buffer
1175  *
1176  * Verifies various attributes of the package file, including length, format
1177  * version, and the requirement of at least one segment.
1178  */
1179 static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
1180 {
1181 	u32 seg_count;
1182 	u32 i;
1183 
1184 	if (len < struct_size(pkg, seg_offset, 1))
1185 		return ICE_ERR_BUF_TOO_SHORT;
1186 
1187 	if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
1188 	    pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
1189 	    pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
1190 	    pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
1191 		return ICE_ERR_CFG;
1192 
1193 	/* pkg must have at least one segment */
1194 	seg_count = le32_to_cpu(pkg->seg_count);
1195 	if (seg_count < 1)
1196 		return ICE_ERR_CFG;
1197 
1198 	/* make sure segment array fits in package length */
1199 	if (len < struct_size(pkg, seg_offset, seg_count))
1200 		return ICE_ERR_BUF_TOO_SHORT;
1201 
1202 	/* all segments must fit within length */
1203 	for (i = 0; i < seg_count; i++) {
1204 		u32 off = le32_to_cpu(pkg->seg_offset[i]);
1205 		struct ice_generic_seg_hdr *seg;
1206 
1207 		/* segment header must fit */
1208 		if (len < off + sizeof(*seg))
1209 			return ICE_ERR_BUF_TOO_SHORT;
1210 
1211 		seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
1212 
1213 		/* segment body must fit */
1214 		if (len < off + le32_to_cpu(seg->seg_size))
1215 			return ICE_ERR_BUF_TOO_SHORT;
1216 	}
1217 
1218 	return 0;
1219 }
1220 
1221 /**
1222  * ice_free_seg - free package segment pointer
1223  * @hw: pointer to the hardware structure
1224  *
1225  * Frees the package segment pointer in the proper manner, depending on whether
1226  * the segment was allocated or just the passed-in pointer was stored.
1227  */
1228 void ice_free_seg(struct ice_hw *hw)
1229 {
1230 	if (hw->pkg_copy) {
1231 		devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy);
1232 		hw->pkg_copy = NULL;
1233 		hw->pkg_size = 0;
1234 	}
1235 	hw->seg = NULL;
1236 }
1237 
1238 /**
1239  * ice_init_pkg_regs - initialize additional package registers
1240  * @hw: pointer to the hardware structure
1241  */
1242 static void ice_init_pkg_regs(struct ice_hw *hw)
1243 {
1244 #define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
1245 #define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
1246 #define ICE_SW_BLK_IDX	0
1247 
1248 	/* setup Switch block input mask, which is 48-bits in two parts */
1249 	wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
1250 	wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
1251 }
1252 
1253 /**
1254  * ice_chk_pkg_version - check package version for compatibility with driver
1255  * @pkg_ver: pointer to a version structure to check
1256  *
1257  * Check to make sure that the package about to be downloaded is compatible with
1258  * the driver. To be compatible, the major and minor components of the package
1259  * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
1260  * definitions.
1261  */
1262 static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
1263 {
1264 	if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ ||
1265 	    pkg_ver->minor != ICE_PKG_SUPP_VER_MNR)
1266 		return ICE_ERR_NOT_SUPPORTED;
1267 
1268 	return 0;
1269 }
1270 
1271 /**
1272  * ice_chk_pkg_compat
1273  * @hw: pointer to the hardware structure
1274  * @ospkg: pointer to the package hdr
1275  * @seg: pointer to the package segment hdr
1276  *
1277  * This function checks the package version compatibility with the driver and NVM.
1278  */
1279 static enum ice_status
1280 ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
1281 		   struct ice_seg **seg)
1282 {
1283 	struct ice_aqc_get_pkg_info_resp *pkg;
1284 	enum ice_status status;
1285 	u16 size;
1286 	u32 i;
1287 
1288 	/* Check package version compatibility */
1289 	status = ice_chk_pkg_version(&hw->pkg_ver);
1290 	if (status) {
1291 		ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
1292 		return status;
1293 	}
1294 
1295 	/* find ICE segment in given package */
1296 	*seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE,
1297 						     ospkg);
1298 	if (!*seg) {
1299 		ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
1300 		return ICE_ERR_CFG;
1301 	}
1302 
1303 	/* Check if FW is compatible with the OS package */
1304 	size = struct_size(pkg, pkg_info, ICE_PKG_CNT);
1305 	pkg = kzalloc(size, GFP_KERNEL);
1306 	if (!pkg)
1307 		return ICE_ERR_NO_MEMORY;
1308 
1309 	status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL);
1310 	if (status)
1311 		goto fw_ddp_compat_free_alloc;
1312 
1313 	for (i = 0; i < le32_to_cpu(pkg->count); i++) {
1314 		/* loop till we find the NVM package */
1315 		if (!pkg->pkg_info[i].is_in_nvm)
1316 			continue;
1317 		if ((*seg)->hdr.seg_format_ver.major !=
1318 			pkg->pkg_info[i].ver.major ||
1319 		    (*seg)->hdr.seg_format_ver.minor >
1320 			pkg->pkg_info[i].ver.minor) {
1321 			status = ICE_ERR_FW_DDP_MISMATCH;
1322 			ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n");
1323 		}
1324 		/* done processing NVM package so break */
1325 		break;
1326 	}
1327 fw_ddp_compat_free_alloc:
1328 	kfree(pkg);
1329 	return status;
1330 }
1331 
1332 /**
1333  * ice_sw_fv_handler
1334  * @sect_type: section type
1335  * @section: pointer to section
1336  * @index: index of the field vector entry to be returned
1337  * @offset: ptr to variable that receives the offset in the field vector table
1338  *
1339  * This is a callback function that can be passed to ice_pkg_enum_entry.
1340  * This function treats the given section as of type ice_sw_fv_section and
1341  * enumerates the offset field. "offset" is an index into the field vector table.
1342  */
1343 static void *
1344 ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
1345 {
1346 	struct ice_sw_fv_section *fv_section = section;
1347 
1348 	if (!section || sect_type != ICE_SID_FLD_VEC_SW)
1349 		return NULL;
1350 	if (index >= le16_to_cpu(fv_section->count))
1351 		return NULL;
1352 	if (offset)
1353 		/* "index" passed in to this function is relative to a given
1354 		 * 4k block. To get to the true index into the field vector
1355 		 * table, add the relative index to the base_offset
1356 		 * field of this section
1357 		 */
1358 		*offset = le16_to_cpu(fv_section->base_offset) + index;
1359 	return fv_section->fv + index;
1360 }
1361 
1362 /**
1363  * ice_get_prof_index_max - get the max profile index of the profiles in use
1364  * @hw: pointer to the HW struct
1365  *
1366  * Calling this function will get the max profile index of the profiles in use
1367  * and store the index number in struct ice_switch_info *switch_info
1368  * in HW for later use.
1369  */
1370 static enum ice_status ice_get_prof_index_max(struct ice_hw *hw)
1371 {
1372 	u16 prof_index = 0, j, max_prof_index = 0;
1373 	struct ice_pkg_enum state;
1374 	struct ice_seg *ice_seg;
1375 	bool flag = false;
1376 	struct ice_fv *fv;
1377 	u32 offset;
1378 
1379 	memset(&state, 0, sizeof(state));
1380 
1381 	if (!hw->seg)
1382 		return ICE_ERR_PARAM;
1383 
1384 	ice_seg = hw->seg;
1385 
1386 	do {
1387 		fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1388 					&offset, ice_sw_fv_handler);
1389 		if (!fv)
1390 			break;
1391 		ice_seg = NULL;
1392 
1393 		/* in a profile that is not in use, the prot_id is set to 0xff
1394 		 * and the off is set to 0x1ff for all the field vector words.
1395 		 */
1396 		for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
1397 			if (fv->ew[j].prot_id != ICE_PROT_INVALID ||
1398 			    fv->ew[j].off != ICE_FV_OFFSET_INVAL)
1399 				flag = true;
1400 		if (flag && prof_index > max_prof_index)
1401 			max_prof_index = prof_index;
1402 
1403 		prof_index++;
1404 		flag = false;
1405 	} while (fv);
1406 
1407 	hw->switch_info->max_used_prof_index = max_prof_index;
1408 
1409 	return 0;
1410 }
1411 
1412 /**
1413  * ice_init_pkg - initialize/download package
1414  * @hw: pointer to the hardware structure
1415  * @buf: pointer to the package buffer
1416  * @len: size of the package buffer
1417  *
1418  * This function initializes a package. The package contains HW tables
1419  * required to do packet processing. First, the function extracts package
1420  * information such as version. Then it finds the ice configuration segment
1421  * within the package; this function then saves a copy of the segment pointer
1422  * within the supplied package buffer. Next, the function will cache any hints
1423  * from the package, followed by downloading the package itself. Note that if
1424  * a previous PF driver has already downloaded the package successfully, then
1425  * the current driver will not have to download the package again.
1426  *
1427  * The local package contents will be used to query default behavior and to
1428  * update specific sections of the HW's version of the package (e.g. to update
1429  * the parse graph to understand new protocols).
1430  *
1431  * This function stores a pointer to the package buffer memory, and it is
1432  * expected that the supplied buffer will not be freed immediately. If the
1433  * package buffer needs to be freed, such as when read from a file, use
1434  * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
1435  * case.
1436  */
1437 enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
1438 {
1439 	struct ice_pkg_hdr *pkg;
1440 	enum ice_status status;
1441 	struct ice_seg *seg;
1442 
1443 	if (!buf || !len)
1444 		return ICE_ERR_PARAM;
1445 
1446 	pkg = (struct ice_pkg_hdr *)buf;
1447 	status = ice_verify_pkg(pkg, len);
1448 	if (status) {
1449 		ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
1450 			  status);
1451 		return status;
1452 	}
1453 
1454 	/* initialize package info */
1455 	status = ice_init_pkg_info(hw, pkg);
1456 	if (status)
1457 		return status;
1458 
1459 	/* before downloading the package, check package version for
1460 	 * compatibility with driver
1461 	 */
1462 	status = ice_chk_pkg_compat(hw, pkg, &seg);
1463 	if (status)
1464 		return status;
1465 
1466 	/* initialize package hints and then download package */
1467 	ice_init_pkg_hints(hw, seg);
1468 	status = ice_download_pkg(hw, seg);
1469 	if (status == ICE_ERR_AQ_NO_WORK) {
1470 		ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n");
1471 		status = 0;
1472 	}
1473 
1474 	/* Get information on the package currently loaded in HW, then make sure
1475 	 * the driver is compatible with this version.
1476 	 */
1477 	if (!status) {
1478 		status = ice_get_pkg_info(hw);
1479 		if (!status)
1480 			status = ice_chk_pkg_version(&hw->active_pkg_ver);
1481 	}
1482 
1483 	if (!status) {
1484 		hw->seg = seg;
1485 		/* on successful package download, update other required
1486 		 * registers to support the package and fill HW tables
1487 		 * with package content.
1488 		 */
1489 		ice_init_pkg_regs(hw);
1490 		ice_fill_blk_tbls(hw);
1491 		ice_get_prof_index_max(hw);
1492 	} else {
1493 		ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
1494 			  status);
1495 	}
1496 
1497 	return status;
1498 }
1499 
1500 /**
1501  * ice_copy_and_init_pkg - initialize/download a copy of the package
1502  * @hw: pointer to the hardware structure
1503  * @buf: pointer to the package buffer
1504  * @len: size of the package buffer
1505  *
1506  * This function copies the package buffer, and then calls ice_init_pkg() to
1507  * initialize the copied package contents.
1508  *
1509  * The copying is necessary if the package buffer supplied is constant, or if
1510  * the memory may disappear shortly after calling this function.
1511  *
1512  * If the package buffer resides in the data segment and can be modified, the
1513  * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
1514  *
1515  * However, if the package buffer needs to be copied first, such as when being
1516  * read from a file, the caller should use ice_copy_and_init_pkg().
1517  *
1518  * This function will first copy the package buffer, before calling
1519  * ice_init_pkg(). The caller is free to immediately destroy the original
1520  * package buffer, as the new copy will be managed by this function and
1521  * related routines.
1522  */
1523 enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
1524 {
1525 	enum ice_status status;
1526 	u8 *buf_copy;
1527 
1528 	if (!buf || !len)
1529 		return ICE_ERR_PARAM;
1530 
1531 	buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL);
1532 
1533 	status = ice_init_pkg(hw, buf_copy, len);
1534 	if (status) {
1535 		/* Free the copy, since we failed to initialize the package */
1536 		devm_kfree(ice_hw_to_dev(hw), buf_copy);
1537 	} else {
1538 		/* Track the copied pkg so we can free it later */
1539 		hw->pkg_copy = buf_copy;
1540 		hw->pkg_size = len;
1541 	}
1542 
1543 	return status;
1544 }
1545 
1546 /**
1547  * ice_pkg_buf_alloc
1548  * @hw: pointer to the HW structure
1549  *
1550  * Allocates a package buffer and returns a pointer to the buffer header.
1551  * Note: all package contents must be in Little Endian form.
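 *
 * A typical build flow is: allocate the buffer here, reserve section table
 * entries with ice_pkg_buf_reserve_section(), fill each section via
 * ice_pkg_buf_alloc_section(), and release the buffer with ice_pkg_buf_free()
 * when no longer needed.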
1552  */
1553 static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
1554 {
1555 	struct ice_buf_build *bld;
1556 	struct ice_buf_hdr *buf;
1557 
1558 	bld = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*bld), GFP_KERNEL);
1559 	if (!bld)
1560 		return NULL;
1561 
1562 	buf = (struct ice_buf_hdr *)bld;
1563 	buf->data_end = cpu_to_le16(offsetof(struct ice_buf_hdr,
1564 					     section_entry));
1565 	return bld;
1566 }
1567 
1568 /**
1569  * ice_get_sw_prof_type - determine switch profile type
1570  * @hw: pointer to the HW structure
1571  * @fv: pointer to the switch field vector
1572  */
1573 static enum ice_prof_type
1574 ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv)
1575 {
1576 	u16 i;
1577 
1578 	for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
1579 		/* UDP tunnel will have UDP_OF protocol ID and VNI offset */
1580 		if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
1581 		    fv->ew[i].off == ICE_VNI_OFFSET)
1582 			return ICE_PROF_TUN_UDP;
1583 
1584 		/* GRE tunnel will have GRE protocol */
1585 		if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF)
1586 			return ICE_PROF_TUN_GRE;
1587 	}
1588 
1589 	return ICE_PROF_NON_TUN;
1590 }
1591 
1592 /**
1593  * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
1594  * @hw: pointer to hardware structure
1595  * @req_profs: type of profiles requested
1596  * @bm: pointer to memory for returning the bitmap of field vectors
1597  */
1598 void
1599 ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
1600 		     unsigned long *bm)
1601 {
1602 	struct ice_pkg_enum state;
1603 	struct ice_seg *ice_seg;
1604 	struct ice_fv *fv;
1605 
1606 	if (req_profs == ICE_PROF_ALL) {
1607 		bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES);
1608 		return;
1609 	}
1610 
1611 	memset(&state, 0, sizeof(state));
1612 	bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
1613 	ice_seg = hw->seg;
1614 	do {
1615 		enum ice_prof_type prof_type;
1616 		u32 offset;
1617 
1618 		fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1619 					&offset, ice_sw_fv_handler);
1620 		ice_seg = NULL;
1621 
1622 		if (fv) {
1623 			/* Determine field vector type */
1624 			prof_type = ice_get_sw_prof_type(hw, fv);
1625 
1626 			if (req_profs & prof_type)
1627 				set_bit((u16)offset, bm);
1628 		}
1629 	} while (fv);
1630 }
1631 
1632 /**
1633  * ice_get_sw_fv_list
1634  * @hw: pointer to the HW structure
1635  * @prot_ids: field vector to search for with a given protocol ID
1636  * @ids_cnt: lookup/protocol count
1637  * @bm: bitmap of field vectors to consider
1638  * @fv_list: Head of a list
1639  *
1640  * Finds all the field vector entries from switch block that contain
1641  * a given protocol ID and returns a list of structures of type
1642  * "ice_sw_fv_list_entry". Every structure in the list has a field vector
1643  * definition and profile ID information
1644  * NOTE: The caller of the function is responsible for freeing the memory
1645  * allocated for every list entry.
1646  */
1647 enum ice_status
1648 ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
1649 		   unsigned long *bm, struct list_head *fv_list)
1650 {
1651 	struct ice_sw_fv_list_entry *fvl;
1652 	struct ice_sw_fv_list_entry *tmp;
1653 	struct ice_pkg_enum state;
1654 	struct ice_seg *ice_seg;
1655 	struct ice_fv *fv;
1656 	u32 offset;
1657 
1658 	memset(&state, 0, sizeof(state));
1659 
1660 	if (!ids_cnt || !hw->seg)
1661 		return ICE_ERR_PARAM;
1662 
1663 	ice_seg = hw->seg;
1664 	do {
1665 		u16 i;
1666 
1667 		fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1668 					&offset, ice_sw_fv_handler);
1669 		if (!fv)
1670 			break;
1671 		ice_seg = NULL;
1672 
1673 		/* If field vector is not in the bitmap list, then skip this
1674 		 * profile.
1675 		 */
1676 		if (!test_bit((u16)offset, bm))
1677 			continue;
1678 
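		/* add this profile to the returned list only if every
		 * requested protocol ID is found in its field vector
		 */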
1679 		for (i = 0; i < ids_cnt; i++) {
1680 			int j;
1681 
1682 			/* This code assumes that if a switch field vector line
1683 			 * has a matching protocol, then this line will contain
1684 			 * the entries necessary to represent every field in
1685 			 * that protocol header.
1686 			 */
1687 			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
1688 				if (fv->ew[j].prot_id == prot_ids[i])
1689 					break;
1690 			if (j >= hw->blk[ICE_BLK_SW].es.fvw)
1691 				break;
1692 			if (i + 1 == ids_cnt) {
1693 				fvl = devm_kzalloc(ice_hw_to_dev(hw),
1694 						   sizeof(*fvl), GFP_KERNEL);
1695 				if (!fvl)
1696 					goto err;
1697 				fvl->fv_ptr = fv;
1698 				fvl->profile_id = offset;
1699 				list_add(&fvl->list_entry, fv_list);
1700 				break;
1701 			}
1702 		}
1703 	} while (fv);
1704 	if (list_empty(fv_list))
1705 		return ICE_ERR_CFG;
1706 	return 0;
1707 
1708 err:
1709 	list_for_each_entry_safe(fvl, tmp, fv_list, list_entry) {
1710 		list_del(&fvl->list_entry);
1711 		devm_kfree(ice_hw_to_dev(hw), fvl);
1712 	}
1713 
1714 	return ICE_ERR_NO_MEMORY;
1715 }
1716 
1717 /**
1718  * ice_init_prof_result_bm - Initialize the profile result index bitmap
1719  * @hw: pointer to hardware structure
1720  */
1721 void ice_init_prof_result_bm(struct ice_hw *hw)
1722 {
1723 	struct ice_pkg_enum state;
1724 	struct ice_seg *ice_seg;
1725 	struct ice_fv *fv;
1726 
1727 	memset(&state, 0, sizeof(state));
1728 
1729 	if (!hw->seg)
1730 		return;
1731 
1732 	ice_seg = hw->seg;
1733 	do {
1734 		u32 off;
1735 		u16 i;
1736 
1737 		fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1738 					&off, ice_sw_fv_handler);
1739 		ice_seg = NULL;
1740 		if (!fv)
1741 			break;
1742 
1743 		bitmap_zero(hw->switch_info->prof_res_bm[off],
1744 			    ICE_MAX_FV_WORDS);
1745 
1746 		/* Determine empty field vector indices, these can be
1747 		 * used for recipe results. Skip index 0, since it is
1748 		 * always used for Switch ID.
1749 		 */
1750 		for (i = 1; i < ICE_MAX_FV_WORDS; i++)
1751 			if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
1752 			    fv->ew[i].off == ICE_FV_OFFSET_INVAL)
1753 				set_bit(i, hw->switch_info->prof_res_bm[off]);
1754 	} while (fv);
1755 }
1756 
1757 /**
1758  * ice_pkg_buf_free
1759  * @hw: pointer to the HW structure
1760  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1761  *
1762  * Frees a package buffer
1763  */
1764 static void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
1765 {
1766 	devm_kfree(ice_hw_to_dev(hw), bld);
1767 }
1768 
1769 /**
1770  * ice_pkg_buf_reserve_section
1771  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1772  * @count: the number of sections to reserve
1773  *
1774  * Reserves one or more section table entries in a package buffer. This routine
 * can be called multiple times as long as all calls are made before
 * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() has been
 * called, the number of reserved sections can no longer be increased; not
 * using all reserved sections is fine, but this will result in some wasted
 * space in the buffer.
1780  * Note: all package contents must be in Little Endian form.
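 *
 * An illustrative call sequence (mirroring ice_create_tunnel() below):
 *	bld = ice_pkg_buf_alloc(hw);
 *	ice_pkg_buf_reserve_section(bld, 2);
 *	sect = ice_pkg_buf_alloc_section(bld, sid, size);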
1781  */
1782 static enum ice_status
1783 ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
1784 {
1785 	struct ice_buf_hdr *buf;
1786 	u16 section_count;
1787 	u16 data_end;
1788 
1789 	if (!bld)
1790 		return ICE_ERR_PARAM;
1791 
1792 	buf = (struct ice_buf_hdr *)&bld->buf;
1793 
1794 	/* already an active section, can't increase table size */
1795 	section_count = le16_to_cpu(buf->section_count);
1796 	if (section_count > 0)
1797 		return ICE_ERR_CFG;
1798 
1799 	if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
1800 		return ICE_ERR_CFG;
1801 	bld->reserved_section_table_entries += count;
1802 
1803 	data_end = le16_to_cpu(buf->data_end) +
1804 		flex_array_size(buf, section_entry, count);
1805 	buf->data_end = cpu_to_le16(data_end);
1806 
1807 	return 0;
1808 }
1809 
1810 /**
1811  * ice_pkg_buf_alloc_section
1812  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1813  * @type: the section type value
1814  * @size: the size of the section to reserve (in bytes)
1815  *
1816  * Reserves memory in the buffer for a section's content and updates the
 * buffer's status accordingly. This routine returns a pointer to the first
1818  * byte of the section start within the buffer, which is used to fill in the
1819  * section contents.
1820  * Note: all package contents must be in Little Endian form.
1821  */
1822 static void *
1823 ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
1824 {
1825 	struct ice_buf_hdr *buf;
1826 	u16 sect_count;
1827 	u16 data_end;
1828 
1829 	if (!bld || !type || !size)
1830 		return NULL;
1831 
1832 	buf = (struct ice_buf_hdr *)&bld->buf;
1833 
1834 	/* check for enough space left in buffer */
1835 	data_end = le16_to_cpu(buf->data_end);
1836 
1837 	/* section start must align on 4 byte boundary */
1838 	data_end = ALIGN(data_end, 4);
1839 
1840 	if ((data_end + size) > ICE_MAX_S_DATA_END)
1841 		return NULL;
1842 
1843 	/* check for more available section table entries */
1844 	sect_count = le16_to_cpu(buf->section_count);
1845 	if (sect_count < bld->reserved_section_table_entries) {
1846 		void *section_ptr = ((u8 *)buf) + data_end;
1847 
1848 		buf->section_entry[sect_count].offset = cpu_to_le16(data_end);
1849 		buf->section_entry[sect_count].size = cpu_to_le16(size);
1850 		buf->section_entry[sect_count].type = cpu_to_le32(type);
1851 
1852 		data_end += size;
1853 		buf->data_end = cpu_to_le16(data_end);
1854 
1855 		buf->section_count = cpu_to_le16(sect_count + 1);
1856 		return section_ptr;
1857 	}
1858 
1859 	/* no free section table entries */
1860 	return NULL;
1861 }
1862 
1863 /**
1864  * ice_pkg_buf_get_active_sections
1865  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1866  *
1867  * Returns the number of active sections. Before using the package buffer
1868  * in an update package command, the caller should make sure that there is at
1869  * least one active section - otherwise, the buffer is not legal and should
1870  * not be used.
1871  * Note: all package contents must be in Little Endian form.
1872  */
1873 static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
1874 {
1875 	struct ice_buf_hdr *buf;
1876 
1877 	if (!bld)
1878 		return 0;
1879 
1880 	buf = (struct ice_buf_hdr *)&bld->buf;
1881 	return le16_to_cpu(buf->section_count);
1882 }
1883 
1884 /**
1885  * ice_pkg_buf
1886  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1887  *
1888  * Return a pointer to the buffer's header
1889  */
1890 static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
1891 {
1892 	if (!bld)
1893 		return NULL;
1894 
1895 	return &bld->buf;
1896 }
1897 
1898 /**
1899  * ice_get_open_tunnel_port - retrieve an open tunnel port
1900  * @hw: pointer to the HW structure
1901  * @port: returns open port
1902  * @type: type of tunnel, can be TNL_LAST if it doesn't matter
1903  */
1904 bool
1905 ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port,
1906 			 enum ice_tunnel_type type)
1907 {
1908 	bool res = false;
1909 	u16 i;
1910 
1911 	mutex_lock(&hw->tnl_lock);
1912 
1913 	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
1914 		if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].port &&
1915 		    (type == TNL_LAST || type == hw->tnl.tbl[i].type)) {
1916 			*port = hw->tnl.tbl[i].port;
1917 			res = true;
1918 			break;
1919 		}
1920 
1921 	mutex_unlock(&hw->tnl_lock);
1922 
1923 	return res;
1924 }
1925 
1926 /**
1927  * ice_tunnel_idx_to_entry - convert linear index to the sparse one
1928  * @hw: pointer to the HW structure
1929  * @type: type of tunnel
1930  * @idx: linear index
1931  *
 * The stack assumes we have two linear tables with indexes [0, count_valid),
 * but really the port table may be sparse and the types are mixed, so convert
 * the stack index into the device index.
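 *
 * For example (hypothetical table contents): if device entries 0..2 hold
 * types { VXLAN, GENEVE, VXLAN }, then stack index 1 for type VXLAN maps to
 * device entry 2.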
1935  */
1936 static u16 ice_tunnel_idx_to_entry(struct ice_hw *hw, enum ice_tunnel_type type,
1937 				   u16 idx)
1938 {
1939 	u16 i;
1940 
1941 	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
1942 		if (hw->tnl.tbl[i].valid &&
1943 		    hw->tnl.tbl[i].type == type &&
1944 		    idx-- == 0)
1945 			return i;
1946 
1947 	WARN_ON_ONCE(1);
1948 	return 0;
1949 }
1950 
1951 /**
1952  * ice_create_tunnel
1953  * @hw: pointer to the HW structure
1954  * @index: device table entry
1955  * @type: type of tunnel
1956  * @port: port of tunnel to create
1957  *
1958  * Create a tunnel by updating the parse graph in the parser. We do that by
1959  * creating a package buffer with the tunnel info and issuing an update package
1960  * command.
1961  */
1962 static enum ice_status
1963 ice_create_tunnel(struct ice_hw *hw, u16 index,
1964 		  enum ice_tunnel_type type, u16 port)
1965 {
1966 	struct ice_boost_tcam_section *sect_rx, *sect_tx;
1967 	enum ice_status status = ICE_ERR_MAX_LIMIT;
1968 	struct ice_buf_build *bld;
1969 
1970 	mutex_lock(&hw->tnl_lock);
1971 
1972 	bld = ice_pkg_buf_alloc(hw);
1973 	if (!bld) {
1974 		status = ICE_ERR_NO_MEMORY;
1975 		goto ice_create_tunnel_end;
1976 	}
1977 
1978 	/* allocate 2 sections, one for Rx parser, one for Tx parser */
1979 	if (ice_pkg_buf_reserve_section(bld, 2))
1980 		goto ice_create_tunnel_err;
1981 
1982 	sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
1983 					    struct_size(sect_rx, tcam, 1));
1984 	if (!sect_rx)
1985 		goto ice_create_tunnel_err;
1986 	sect_rx->count = cpu_to_le16(1);
1987 
1988 	sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
1989 					    struct_size(sect_tx, tcam, 1));
1990 	if (!sect_tx)
1991 		goto ice_create_tunnel_err;
1992 	sect_tx->count = cpu_to_le16(1);
1993 
1994 	/* copy original boost entry to update package buffer */
1995 	memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
1996 	       sizeof(*sect_rx->tcam));
1997 
1998 	/* over-write the never-match dest port key bits with the encoded port
1999 	 * bits
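	 * (only the hv_dst_port_key field of the key is rewritten, as selected
	 * by the offset/size pair passed to ice_set_key() below)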
2000 	 */
2001 	ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
2002 		    (u8 *)&port, NULL, NULL, NULL,
2003 		    (u16)offsetof(struct ice_boost_key_value, hv_dst_port_key),
2004 		    sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key));
2005 
2006 	/* exact copy of entry to Tx section entry */
2007 	memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam));
2008 
2009 	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
2010 	if (!status)
2011 		hw->tnl.tbl[index].port = port;
2012 
2013 ice_create_tunnel_err:
2014 	ice_pkg_buf_free(hw, bld);
2015 
2016 ice_create_tunnel_end:
2017 	mutex_unlock(&hw->tnl_lock);
2018 
2019 	return status;
2020 }
2021 
2022 /**
2023  * ice_destroy_tunnel
2024  * @hw: pointer to the HW structure
2025  * @index: device table entry
2026  * @type: type of tunnel
 * @port: port of tunnel to destroy
 *
 * Destroys a tunnel by creating an update package buffer targeting the
 * specific update requested and then issuing an update package command.
2032  */
2033 static enum ice_status
2034 ice_destroy_tunnel(struct ice_hw *hw, u16 index, enum ice_tunnel_type type,
2035 		   u16 port)
2036 {
2037 	struct ice_boost_tcam_section *sect_rx, *sect_tx;
2038 	enum ice_status status = ICE_ERR_MAX_LIMIT;
2039 	struct ice_buf_build *bld;
2040 
2041 	mutex_lock(&hw->tnl_lock);
2042 
2043 	if (WARN_ON(!hw->tnl.tbl[index].valid ||
2044 		    hw->tnl.tbl[index].type != type ||
2045 		    hw->tnl.tbl[index].port != port)) {
2046 		status = ICE_ERR_OUT_OF_RANGE;
2047 		goto ice_destroy_tunnel_end;
2048 	}
2049 
2050 	bld = ice_pkg_buf_alloc(hw);
2051 	if (!bld) {
2052 		status = ICE_ERR_NO_MEMORY;
2053 		goto ice_destroy_tunnel_end;
2054 	}
2055 
2056 	/* allocate 2 sections, one for Rx parser, one for Tx parser */
2057 	if (ice_pkg_buf_reserve_section(bld, 2))
2058 		goto ice_destroy_tunnel_err;
2059 
2060 	sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
2061 					    struct_size(sect_rx, tcam, 1));
2062 	if (!sect_rx)
2063 		goto ice_destroy_tunnel_err;
2064 	sect_rx->count = cpu_to_le16(1);
2065 
2066 	sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
2067 					    struct_size(sect_tx, tcam, 1));
2068 	if (!sect_tx)
2069 		goto ice_destroy_tunnel_err;
2070 	sect_tx->count = cpu_to_le16(1);
2071 
2072 	/* copy original boost entry to update package buffer, one copy to Rx
2073 	 * section, another copy to the Tx section
2074 	 */
2075 	memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
2076 	       sizeof(*sect_rx->tcam));
2077 	memcpy(sect_tx->tcam, hw->tnl.tbl[index].boost_entry,
2078 	       sizeof(*sect_tx->tcam));
2079 
2080 	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
2081 	if (!status)
2082 		hw->tnl.tbl[index].port = 0;
2083 
2084 ice_destroy_tunnel_err:
2085 	ice_pkg_buf_free(hw, bld);
2086 
2087 ice_destroy_tunnel_end:
2088 	mutex_unlock(&hw->tnl_lock);
2089 
2090 	return status;
2091 }
2092 
2093 int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
2094 			    unsigned int idx, struct udp_tunnel_info *ti)
2095 {
2096 	struct ice_netdev_priv *np = netdev_priv(netdev);
2097 	struct ice_vsi *vsi = np->vsi;
2098 	struct ice_pf *pf = vsi->back;
2099 	enum ice_tunnel_type tnl_type;
2100 	enum ice_status status;
2101 	u16 index;
2102 
2103 	tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
2104 	index = ice_tunnel_idx_to_entry(&pf->hw, tnl_type, idx);
2105 
2106 	status = ice_create_tunnel(&pf->hw, index, tnl_type, ntohs(ti->port));
2107 	if (status) {
2108 		netdev_err(netdev, "Error adding UDP tunnel - %s\n",
2109 			   ice_stat_str(status));
2110 		return -EIO;
2111 	}
2112 
2113 	udp_tunnel_nic_set_port_priv(netdev, table, idx, index);
2114 	return 0;
2115 }
2116 
2117 int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
2118 			      unsigned int idx, struct udp_tunnel_info *ti)
2119 {
2120 	struct ice_netdev_priv *np = netdev_priv(netdev);
2121 	struct ice_vsi *vsi = np->vsi;
2122 	struct ice_pf *pf = vsi->back;
2123 	enum ice_tunnel_type tnl_type;
2124 	enum ice_status status;
2125 
2126 	tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
2127 
2128 	status = ice_destroy_tunnel(&pf->hw, ti->hw_priv, tnl_type,
2129 				    ntohs(ti->port));
2130 	if (status) {
2131 		netdev_err(netdev, "Error removing UDP tunnel - %s\n",
2132 			   ice_stat_str(status));
2133 		return -EIO;
2134 	}
2135 
2136 	return 0;
2137 }
2138 
2139 /**
2140  * ice_find_prot_off - find prot ID and offset pair, based on prof and FV index
2141  * @hw: pointer to the hardware structure
2142  * @blk: hardware block
2143  * @prof: profile ID
2144  * @fv_idx: field vector word index
2145  * @prot: variable to receive the protocol ID
2146  * @off: variable to receive the protocol offset
2147  */
2148 enum ice_status
2149 ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
2150 		  u8 *prot, u16 *off)
2151 {
2152 	struct ice_fv_word *fv_ext;
2153 
2154 	if (prof >= hw->blk[blk].es.count)
2155 		return ICE_ERR_PARAM;
2156 
2157 	if (fv_idx >= hw->blk[blk].es.fvw)
2158 		return ICE_ERR_PARAM;
2159 
2160 	fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw);
2161 
2162 	*prot = fv_ext[fv_idx].prot_id;
2163 	*off = fv_ext[fv_idx].off;
2164 
2165 	return 0;
2166 }
2167 
2168 /* PTG Management */
2169 
2170 /**
2171  * ice_ptg_find_ptype - Search for packet type group using packet type (ptype)
2172  * @hw: pointer to the hardware structure
2173  * @blk: HW block
2174  * @ptype: the ptype to search for
2175  * @ptg: pointer to variable that receives the PTG
2176  *
2177  * This function will search the PTGs for a particular ptype, returning the
2178  * PTG ID that contains it through the PTG parameter, with the value of
 * ICE_DEFAULT_PTG (0) meaning it is part of the default PTG.
2180  */
2181 static enum ice_status
2182 ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
2183 {
2184 	if (ptype >= ICE_XLT1_CNT || !ptg)
2185 		return ICE_ERR_PARAM;
2186 
2187 	*ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
2188 	return 0;
2189 }
2190 
2191 /**
2192  * ice_ptg_alloc_val - Allocates a new packet type group ID by value
2193  * @hw: pointer to the hardware structure
2194  * @blk: HW block
2195  * @ptg: the PTG to allocate
2196  *
2197  * This function allocates a given packet type group ID specified by the PTG
2198  * parameter.
2199  */
2200 static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
2201 {
2202 	hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true;
2203 }
2204 
2205 /**
2206  * ice_ptg_remove_ptype - Removes ptype from a particular packet type group
2207  * @hw: pointer to the hardware structure
2208  * @blk: HW block
2209  * @ptype: the ptype to remove
2210  * @ptg: the PTG to remove the ptype from
2211  *
2212  * This function will remove the ptype from the specific PTG, and move it to
2213  * the default PTG (ICE_DEFAULT_PTG).
2214  */
2215 static enum ice_status
2216 ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
2217 {
2218 	struct ice_ptg_ptype **ch;
2219 	struct ice_ptg_ptype *p;
2220 
2221 	if (ptype > ICE_XLT1_CNT - 1)
2222 		return ICE_ERR_PARAM;
2223 
2224 	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use)
2225 		return ICE_ERR_DOES_NOT_EXIST;
2226 
2227 	/* Should not happen if .in_use is set, bad config */
2228 	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype)
2229 		return ICE_ERR_CFG;
2230 
2231 	/* find the ptype within this PTG, and bypass the link over it */
2232 	p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
2233 	ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
2234 	while (p) {
2235 		if (ptype == (p - hw->blk[blk].xlt1.ptypes)) {
2236 			*ch = p->next_ptype;
2237 			break;
2238 		}
2239 
2240 		ch = &p->next_ptype;
2241 		p = p->next_ptype;
2242 	}
2243 
2244 	hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG;
2245 	hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL;
2246 
2247 	return 0;
2248 }
2249 
2250 /**
2251  * ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group
2252  * @hw: pointer to the hardware structure
2253  * @blk: HW block
2254  * @ptype: the ptype to add or move
2255  * @ptg: the PTG to add or move the ptype to
2256  *
 * This function will either add or move a ptype to a particular PTG depending
 * on whether the ptype is already part of another group. Note that using a
 * destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
 * default PTG.
2261  */
2262 static enum ice_status
2263 ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
2264 {
2265 	enum ice_status status;
2266 	u8 original_ptg;
2267 
2268 	if (ptype > ICE_XLT1_CNT - 1)
2269 		return ICE_ERR_PARAM;
2270 
2271 	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG)
2272 		return ICE_ERR_DOES_NOT_EXIST;
2273 
2274 	status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
2275 	if (status)
2276 		return status;
2277 
2278 	/* Is ptype already in the correct PTG? */
2279 	if (original_ptg == ptg)
2280 		return 0;
2281 
2282 	/* Remove from original PTG and move back to the default PTG */
2283 	if (original_ptg != ICE_DEFAULT_PTG)
2284 		ice_ptg_remove_ptype(hw, blk, ptype, original_ptg);
2285 
2286 	/* Moving to default PTG? Then we're done with this request */
2287 	if (ptg == ICE_DEFAULT_PTG)
2288 		return 0;
2289 
2290 	/* Add ptype to PTG at beginning of list */
2291 	hw->blk[blk].xlt1.ptypes[ptype].next_ptype =
2292 		hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
2293 	hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype =
2294 		&hw->blk[blk].xlt1.ptypes[ptype];
2295 
2296 	hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg;
2297 	hw->blk[blk].xlt1.t[ptype] = ptg;
2298 
2299 	return 0;
2300 }
2301 
2302 /* Block / table size info */
2303 struct ice_blk_size_details {
2304 	u16 xlt1;			/* # XLT1 entries */
2305 	u16 xlt2;			/* # XLT2 entries */
2306 	u16 prof_tcam;			/* # profile ID TCAM entries */
2307 	u16 prof_id;			/* # profile IDs */
2308 	u8 prof_cdid_bits;		/* # CDID one-hot bits used in key */
2309 	u16 prof_redir;			/* # profile redirection entries */
2310 	u16 es;				/* # extraction sequence entries */
2311 	u16 fvw;			/* # field vector words */
2312 	u8 overwrite;			/* overwrite existing entries allowed */
2313 	u8 reverse;			/* reverse FV order */
2314 };
2315 
2316 static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = {
2317 	/**
2318 	 * Table Definitions
2319 	 * XLT1 - Number of entries in XLT1 table
2320 	 * XLT2 - Number of entries in XLT2 table
	 * TCAM - Number of entries in the Profile ID TCAM table
2322 	 * CDID - Control Domain ID of the hardware block
2323 	 * PRED - Number of entries in the Profile Redirection Table
2324 	 * FV   - Number of entries in the Field Vector
2325 	 * FVW  - Width (in WORDs) of the Field Vector
2326 	 * OVR  - Overwrite existing table entries
2327 	 * REV  - Reverse FV
2328 	 */
2329 	/*          XLT1        , XLT2        ,TCAM, PID,CDID,PRED,   FV, FVW */
2330 	/*          Overwrite   , Reverse FV */
2331 	/* SW  */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256,   0,  256, 256,  48,
2332 		    false, false },
2333 	/* ACL */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0,  128, 128,  32,
2334 		    false, false },
2335 	/* FD  */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0,  128, 128,  24,
2336 		    false, true  },
2337 	/* RSS */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0,  128, 128,  24,
2338 		    true,  true  },
2339 	/* PE  */ { ICE_XLT1_CNT, ICE_XLT2_CNT,  64,  32,   0,   32,  32,  24,
2340 		    false, false },
2341 };
2342 
2343 enum ice_sid_all {
2344 	ICE_SID_XLT1_OFF = 0,
2345 	ICE_SID_XLT2_OFF,
2346 	ICE_SID_PR_OFF,
2347 	ICE_SID_PR_REDIR_OFF,
2348 	ICE_SID_ES_OFF,
2349 	ICE_SID_OFF_COUNT,
2350 };
2351 
2352 /* Characteristic handling */
2353 
2354 /**
2355  * ice_match_prop_lst - determine if properties of two lists match
2356  * @list1: first properties list
2357  * @list2: second properties list
2358  *
 * The count, the cookies, and their order must all match for the two lists
 * to be considered equivalent.
2360  */
2361 static bool
2362 ice_match_prop_lst(struct list_head *list1, struct list_head *list2)
2363 {
2364 	struct ice_vsig_prof *tmp1;
2365 	struct ice_vsig_prof *tmp2;
2366 	u16 chk_count = 0;
2367 	u16 count = 0;
2368 
2369 	/* compare counts */
2370 	list_for_each_entry(tmp1, list1, list)
2371 		count++;
2372 	list_for_each_entry(tmp2, list2, list)
2373 		chk_count++;
2374 	/* cppcheck-suppress knownConditionTrueFalse */
2375 	if (!count || count != chk_count)
2376 		return false;
2377 
2378 	tmp1 = list_first_entry(list1, struct ice_vsig_prof, list);
2379 	tmp2 = list_first_entry(list2, struct ice_vsig_prof, list);
2380 
2381 	/* profile cookies must compare, and in the exact same order to take
2382 	 * into account priority
2383 	 */
2384 	while (count--) {
2385 		if (tmp2->profile_cookie != tmp1->profile_cookie)
2386 			return false;
2387 
2388 		tmp1 = list_next_entry(tmp1, list);
2389 		tmp2 = list_next_entry(tmp2, list);
2390 	}
2391 
2392 	return true;
2393 }
2394 
2395 /* VSIG Management */
2396 
2397 /**
2398  * ice_vsig_find_vsi - find a VSIG that contains a specified VSI
2399  * @hw: pointer to the hardware structure
2400  * @blk: HW block
2401  * @vsi: VSI of interest
2402  * @vsig: pointer to receive the VSI group
2403  *
 * This function will look up the VSI entry in the XLT2 list and return
 * the VSI group it is associated with.
2406  */
2407 static enum ice_status
2408 ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
2409 {
2410 	if (!vsig || vsi >= ICE_MAX_VSI)
2411 		return ICE_ERR_PARAM;
2412 
2413 	/* As long as there's a default or valid VSIG associated with the input
	 * VSI, the function returns success. Any handling of the VSIG will be
2415 	 * done by the following add, update or remove functions.
2416 	 */
2417 	*vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
2418 
2419 	return 0;
2420 }
2421 
2422 /**
2423  * ice_vsig_alloc_val - allocate a new VSIG by value
2424  * @hw: pointer to the hardware structure
2425  * @blk: HW block
2426  * @vsig: the VSIG to allocate
2427  *
2428  * This function will allocate a given VSIG specified by the VSIG parameter.
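 * The returned value combines the VSIG table index with this PF's ID (see
 * ICE_VSIG_VALUE()).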
2429  */
2430 static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig)
2431 {
2432 	u16 idx = vsig & ICE_VSIG_IDX_M;
2433 
2434 	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) {
2435 		INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
2436 		hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true;
2437 	}
2438 
2439 	return ICE_VSIG_VALUE(idx, hw->pf_id);
2440 }
2441 
2442 /**
2443  * ice_vsig_alloc - Finds a free entry and allocates a new VSIG
2444  * @hw: pointer to the hardware structure
2445  * @blk: HW block
2446  *
 * This function will iterate through the VSIG list, mark the first
 * unused entry as in use for the new VSIG, and return that value.
2449  */
2450 static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
2451 {
2452 	u16 i;
2453 
2454 	for (i = 1; i < ICE_MAX_VSIGS; i++)
2455 		if (!hw->blk[blk].xlt2.vsig_tbl[i].in_use)
2456 			return ice_vsig_alloc_val(hw, blk, i);
2457 
2458 	return ICE_DEFAULT_VSIG;
2459 }
2460 
2461 /**
2462  * ice_find_dup_props_vsig - find VSI group with a specified set of properties
2463  * @hw: pointer to the hardware structure
2464  * @blk: HW block
2465  * @chs: characteristic list
2466  * @vsig: returns the VSIG with the matching profiles, if found
2467  *
2468  * Each VSIG is associated with a characteristic set; i.e. all VSIs under
2469  * a group have the same characteristic set. To check if there exists a VSIG
 * which has the same characteristics as the input characteristics, this
2471  * function will iterate through the XLT2 list and return the VSIG that has a
2472  * matching configuration. In order to make sure that priorities are accounted
2473  * for, the list must match exactly, including the order in which the
2474  * characteristics are listed.
2475  */
2476 static enum ice_status
2477 ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
2478 			struct list_head *chs, u16 *vsig)
2479 {
2480 	struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2;
2481 	u16 i;
2482 
2483 	for (i = 0; i < xlt2->count; i++)
2484 		if (xlt2->vsig_tbl[i].in_use &&
2485 		    ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) {
2486 			*vsig = ICE_VSIG_VALUE(i, hw->pf_id);
2487 			return 0;
2488 		}
2489 
2490 	return ICE_ERR_DOES_NOT_EXIST;
2491 }
2492 
2493 /**
2494  * ice_vsig_free - free VSI group
2495  * @hw: pointer to the hardware structure
2496  * @blk: HW block
2497  * @vsig: VSIG to remove
2498  *
 * The function will remove all VSIs associated with the input VSIG, move
 * them to the DEFAULT_VSIG, and mark the VSIG as available.
2501  */
2502 static enum ice_status
2503 ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
2504 {
2505 	struct ice_vsig_prof *dtmp, *del;
2506 	struct ice_vsig_vsi *vsi_cur;
2507 	u16 idx;
2508 
2509 	idx = vsig & ICE_VSIG_IDX_M;
2510 	if (idx >= ICE_MAX_VSIGS)
2511 		return ICE_ERR_PARAM;
2512 
2513 	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
2514 		return ICE_ERR_DOES_NOT_EXIST;
2515 
2516 	hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false;
2517 
2518 	vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2519 	/* If the VSIG has at least 1 VSI then iterate through the
2520 	 * list and remove the VSIs before deleting the group.
2521 	 */
2522 	if (vsi_cur) {
2523 		/* remove all vsis associated with this VSIG XLT2 entry */
2524 		do {
2525 			struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
2526 
2527 			vsi_cur->vsig = ICE_DEFAULT_VSIG;
2528 			vsi_cur->changed = 1;
2529 			vsi_cur->next_vsi = NULL;
2530 			vsi_cur = tmp;
2531 		} while (vsi_cur);
2532 
2533 		/* NULL terminate head of VSI list */
2534 		hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL;
2535 	}
2536 
2537 	/* free characteristic list */
2538 	list_for_each_entry_safe(del, dtmp,
2539 				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
2540 				 list) {
2541 		list_del(&del->list);
2542 		devm_kfree(ice_hw_to_dev(hw), del);
2543 	}
2544 
	/* if the VSIG characteristic list was cleared for reset,
	 * re-initialize the list head
	 */
2548 	INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
2549 
2550 	return 0;
2551 }
2552 
2553 /**
2554  * ice_vsig_remove_vsi - remove VSI from VSIG
2555  * @hw: pointer to the hardware structure
2556  * @blk: HW block
2557  * @vsi: VSI to remove
2558  * @vsig: VSI group to remove from
2559  *
2560  * The function will remove the input VSI from its VSI group and move it
2561  * to the DEFAULT_VSIG.
2562  */
2563 static enum ice_status
2564 ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
2565 {
2566 	struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
2567 	u16 idx;
2568 
2569 	idx = vsig & ICE_VSIG_IDX_M;
2570 
2571 	if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
2572 		return ICE_ERR_PARAM;
2573 
2574 	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
2575 		return ICE_ERR_DOES_NOT_EXIST;
2576 
2577 	/* entry already in default VSIG, don't have to remove */
2578 	if (idx == ICE_DEFAULT_VSIG)
2579 		return 0;
2580 
2581 	vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2582 	if (!(*vsi_head))
2583 		return ICE_ERR_CFG;
2584 
2585 	vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
2586 	vsi_cur = (*vsi_head);
2587 
2588 	/* iterate the VSI list, skip over the entry to be removed */
2589 	while (vsi_cur) {
2590 		if (vsi_tgt == vsi_cur) {
2591 			(*vsi_head) = vsi_cur->next_vsi;
2592 			break;
2593 		}
2594 		vsi_head = &vsi_cur->next_vsi;
2595 		vsi_cur = vsi_cur->next_vsi;
2596 	}
2597 
2598 	/* verify if VSI was removed from group list */
2599 	if (!vsi_cur)
2600 		return ICE_ERR_DOES_NOT_EXIST;
2601 
2602 	vsi_cur->vsig = ICE_DEFAULT_VSIG;
2603 	vsi_cur->changed = 1;
2604 	vsi_cur->next_vsi = NULL;
2605 
2606 	return 0;
2607 }
2608 
2609 /**
2610  * ice_vsig_add_mv_vsi - add or move a VSI to a VSI group
2611  * @hw: pointer to the hardware structure
2612  * @blk: HW block
2613  * @vsi: VSI to move
2614  * @vsig: destination VSI group
2615  *
2616  * This function will move or add the input VSI to the target VSIG.
2617  * The function will find the original VSIG the VSI belongs to and
2618  * move the entry to the DEFAULT_VSIG, update the original VSIG and
 * then move the entry to the new VSIG.
2620  */
2621 static enum ice_status
2622 ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
2623 {
2624 	struct ice_vsig_vsi *tmp;
2625 	enum ice_status status;
2626 	u16 orig_vsig, idx;
2627 
2628 	idx = vsig & ICE_VSIG_IDX_M;
2629 
2630 	if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
2631 		return ICE_ERR_PARAM;
2632 
	/* if the VSIG is not in use and is not the default type, this VSIG
	 * doesn't exist.
2635 	 */
2636 	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use &&
2637 	    vsig != ICE_DEFAULT_VSIG)
2638 		return ICE_ERR_DOES_NOT_EXIST;
2639 
2640 	status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
2641 	if (status)
2642 		return status;
2643 
2644 	/* no update required if vsigs match */
2645 	if (orig_vsig == vsig)
2646 		return 0;
2647 
2648 	if (orig_vsig != ICE_DEFAULT_VSIG) {
2649 		/* remove entry from orig_vsig and add to default VSIG */
2650 		status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig);
2651 		if (status)
2652 			return status;
2653 	}
2654 
2655 	if (idx == ICE_DEFAULT_VSIG)
2656 		return 0;
2657 
2658 	/* Create VSI entry and add VSIG and prop_mask values */
2659 	hw->blk[blk].xlt2.vsis[vsi].vsig = vsig;
2660 	hw->blk[blk].xlt2.vsis[vsi].changed = 1;
2661 
2662 	/* Add new entry to the head of the VSIG list */
2663 	tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2664 	hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi =
2665 		&hw->blk[blk].xlt2.vsis[vsi];
2666 	hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp;
2667 	hw->blk[blk].xlt2.t[vsi] = vsig;
2668 
2669 	return 0;
2670 }
2671 
2672 /**
2673  * ice_prof_has_mask_idx - determine if profile index masking is identical
2674  * @hw: pointer to the hardware structure
2675  * @blk: HW block
2676  * @prof: profile to check
2677  * @idx: profile index to check
2678  * @mask: mask to match
2679  */
2680 static bool
2681 ice_prof_has_mask_idx(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 idx,
2682 		      u16 mask)
2683 {
2684 	bool expect_no_mask = false;
2685 	bool found = false;
2686 	bool match = false;
2687 	u16 i;
2688 
2689 	/* If mask is 0x0000 or 0xffff, then there is no masking */
2690 	if (mask == 0 || mask == 0xffff)
2691 		expect_no_mask = true;
2692 
2693 	/* Scan the enabled masks on this profile, for the specified idx */
2694 	for (i = hw->blk[blk].masks.first; i < hw->blk[blk].masks.first +
2695 	     hw->blk[blk].masks.count; i++)
2696 		if (hw->blk[blk].es.mask_ena[prof] & BIT(i))
2697 			if (hw->blk[blk].masks.masks[i].in_use &&
2698 			    hw->blk[blk].masks.masks[i].idx == idx) {
2699 				found = true;
2700 				if (hw->blk[blk].masks.masks[i].mask == mask)
2701 					match = true;
2702 				break;
2703 			}
2704 
2705 	if (expect_no_mask) {
2706 		if (found)
2707 			return false;
2708 	} else {
2709 		if (!match)
2710 			return false;
2711 	}
2712 
2713 	return true;
2714 }
2715 
2716 /**
2717  * ice_prof_has_mask - determine if profile masking is identical
2718  * @hw: pointer to the hardware structure
2719  * @blk: HW block
2720  * @prof: profile to check
2721  * @masks: masks to match
2722  */
2723 static bool
2724 ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks)
2725 {
2726 	u16 i;
2727 
2728 	/* es->mask_ena[prof] will have the mask */
2729 	for (i = 0; i < hw->blk[blk].es.fvw; i++)
2730 		if (!ice_prof_has_mask_idx(hw, blk, prof, i, masks[i]))
2731 			return false;
2732 
2733 	return true;
2734 }
2735 
2736 /**
2737  * ice_find_prof_id_with_mask - find profile ID for a given field vector
2738  * @hw: pointer to the hardware structure
2739  * @blk: HW block
2740  * @fv: field vector to search for
2741  * @masks: masks for FV
2742  * @prof_id: receives the profile ID
2743  */
2744 static enum ice_status
2745 ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
2746 			   struct ice_fv_word *fv, u16 *masks, u8 *prof_id)
2747 {
2748 	struct ice_es *es = &hw->blk[blk].es;
2749 	u8 i;
2750 
	/* For FD, we don't want to re-use an existing profile with the same
	 * field vector and mask, since doing so would cause rule interference.
2753 	 */
2754 	if (blk == ICE_BLK_FD)
2755 		return ICE_ERR_DOES_NOT_EXIST;
2756 
2757 	for (i = 0; i < (u8)es->count; i++) {
2758 		u16 off = i * es->fvw;
2759 
2760 		if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
2761 			continue;
2762 
2763 		/* check if masks settings are the same for this profile */
2764 		if (masks && !ice_prof_has_mask(hw, blk, i, masks))
2765 			continue;
2766 
2767 		*prof_id = i;
2768 		return 0;
2769 	}
2770 
2771 	return ICE_ERR_DOES_NOT_EXIST;
2772 }
2773 
2774 /**
2775  * ice_prof_id_rsrc_type - get profile ID resource type for a block type
2776  * @blk: the block type
2777  * @rsrc_type: pointer to variable to receive the resource type
2778  */
2779 static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type)
2780 {
2781 	switch (blk) {
2782 	case ICE_BLK_FD:
2783 		*rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID;
2784 		break;
2785 	case ICE_BLK_RSS:
2786 		*rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID;
2787 		break;
2788 	default:
2789 		return false;
2790 	}
2791 	return true;
2792 }
2793 
2794 /**
2795  * ice_tcam_ent_rsrc_type - get TCAM entry resource type for a block type
2796  * @blk: the block type
2797  * @rsrc_type: pointer to variable to receive the resource type
2798  */
2799 static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
2800 {
2801 	switch (blk) {
2802 	case ICE_BLK_FD:
2803 		*rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM;
2804 		break;
2805 	case ICE_BLK_RSS:
2806 		*rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM;
2807 		break;
2808 	default:
2809 		return false;
2810 	}
2811 	return true;
2812 }
2813 
2814 /**
2815  * ice_alloc_tcam_ent - allocate hardware TCAM entry
2816  * @hw: pointer to the HW struct
2817  * @blk: the block to allocate the TCAM for
2818  * @btm: true to allocate from bottom of table, false to allocate from top
2819  * @tcam_idx: pointer to variable to receive the TCAM entry
2820  *
2821  * This function allocates a new entry in a Profile ID TCAM for a specific
2822  * block.
2823  */
2824 static enum ice_status
2825 ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm,
2826 		   u16 *tcam_idx)
2827 {
2828 	u16 res_type;
2829 
2830 	if (!ice_tcam_ent_rsrc_type(blk, &res_type))
2831 		return ICE_ERR_PARAM;
2832 
2833 	return ice_alloc_hw_res(hw, res_type, 1, btm, tcam_idx);
2834 }
2835 
2836 /**
2837  * ice_free_tcam_ent - free hardware TCAM entry
2838  * @hw: pointer to the HW struct
2839  * @blk: the block from which to free the TCAM entry
2840  * @tcam_idx: the TCAM entry to free
2841  *
2842  * This function frees an entry in a Profile ID TCAM for a specific block.
2843  */
2844 static enum ice_status
2845 ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
2846 {
2847 	u16 res_type;
2848 
2849 	if (!ice_tcam_ent_rsrc_type(blk, &res_type))
2850 		return ICE_ERR_PARAM;
2851 
2852 	return ice_free_hw_res(hw, res_type, 1, &tcam_idx);
2853 }
2854 
2855 /**
2856  * ice_alloc_prof_id - allocate profile ID
2857  * @hw: pointer to the HW struct
2858  * @blk: the block to allocate the profile ID for
2859  * @prof_id: pointer to variable to receive the profile ID
2860  *
2861  * This function allocates a new profile ID, which also corresponds to a Field
2862  * Vector (Extraction Sequence) entry.
2863  */
2864 static enum ice_status
2865 ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
2866 {
2867 	enum ice_status status;
2868 	u16 res_type;
2869 	u16 get_prof;
2870 
2871 	if (!ice_prof_id_rsrc_type(blk, &res_type))
2872 		return ICE_ERR_PARAM;
2873 
2874 	status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof);
2875 	if (!status)
2876 		*prof_id = (u8)get_prof;
2877 
2878 	return status;
2879 }
2880 
2881 /**
2882  * ice_free_prof_id - free profile ID
2883  * @hw: pointer to the HW struct
2884  * @blk: the block from which to free the profile ID
2885  * @prof_id: the profile ID to free
2886  *
2887  * This function frees a profile ID, which also corresponds to a Field Vector.
2888  */
2889 static enum ice_status
2890 ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
2891 {
2892 	u16 tmp_prof_id = (u16)prof_id;
2893 	u16 res_type;
2894 
2895 	if (!ice_prof_id_rsrc_type(blk, &res_type))
2896 		return ICE_ERR_PARAM;
2897 
2898 	return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id);
2899 }
2900 
2901 /**
2902  * ice_prof_inc_ref - increment reference count for profile
2903  * @hw: pointer to the HW struct
2904  * @blk: the block from which to free the profile ID
2905  * @prof_id: the profile ID for which to increment the reference count
2906  */
2907 static enum ice_status
2908 ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
2909 {
	if (prof_id >= hw->blk[blk].es.count)
2911 		return ICE_ERR_PARAM;
2912 
2913 	hw->blk[blk].es.ref_count[prof_id]++;
2914 
2915 	return 0;
2916 }
2917 
2918 /**
2919  * ice_write_prof_mask_reg - write profile mask register
2920  * @hw: pointer to the HW struct
2921  * @blk: hardware block
2922  * @mask_idx: mask index
2923  * @idx: index of the FV which will use the mask
2924  * @mask: the 16-bit mask
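 *
 * The FV word index and the 16-bit mask are packed into a single 32-bit
 * register value; the bit positions of the fields differ between the RSS
 * and FD blocks.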
2925  */
2926 static void
2927 ice_write_prof_mask_reg(struct ice_hw *hw, enum ice_block blk, u16 mask_idx,
2928 			u16 idx, u16 mask)
2929 {
2930 	u32 offset;
2931 	u32 val;
2932 
2933 	switch (blk) {
2934 	case ICE_BLK_RSS:
2935 		offset = GLQF_HMASK(mask_idx);
2936 		val = (idx << GLQF_HMASK_MSK_INDEX_S) & GLQF_HMASK_MSK_INDEX_M;
2937 		val |= (mask << GLQF_HMASK_MASK_S) & GLQF_HMASK_MASK_M;
2938 		break;
2939 	case ICE_BLK_FD:
2940 		offset = GLQF_FDMASK(mask_idx);
2941 		val = (idx << GLQF_FDMASK_MSK_INDEX_S) & GLQF_FDMASK_MSK_INDEX_M;
2942 		val |= (mask << GLQF_FDMASK_MASK_S) & GLQF_FDMASK_MASK_M;
2943 		break;
2944 	default:
2945 		ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
2946 			  blk);
2947 		return;
2948 	}
2949 
2950 	wr32(hw, offset, val);
2951 	ice_debug(hw, ICE_DBG_PKG, "write mask, blk %d (%d): %x = %x\n",
2952 		  blk, idx, offset, val);
2953 }
2954 
2955 /**
2956  * ice_write_prof_mask_enable_res - write profile mask enable register
2957  * @hw: pointer to the HW struct
2958  * @blk: hardware block
2959  * @prof_id: profile ID
2960  * @enable_mask: enable mask
2961  */
2962 static void
2963 ice_write_prof_mask_enable_res(struct ice_hw *hw, enum ice_block blk,
2964 			       u16 prof_id, u32 enable_mask)
2965 {
2966 	u32 offset;
2967 
2968 	switch (blk) {
2969 	case ICE_BLK_RSS:
2970 		offset = GLQF_HMASK_SEL(prof_id);
2971 		break;
2972 	case ICE_BLK_FD:
2973 		offset = GLQF_FDMASK_SEL(prof_id);
2974 		break;
2975 	default:
2976 		ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
2977 			  blk);
2978 		return;
2979 	}
2980 
2981 	wr32(hw, offset, enable_mask);
2982 	ice_debug(hw, ICE_DBG_PKG, "write mask enable, blk %d (%d): %x = %x\n",
2983 		  blk, prof_id, offset, enable_mask);
2984 }
2985 
2986 /**
 * ice_init_prof_masks - initialize profile masks
2988  * @hw: pointer to the HW struct
2989  * @blk: hardware block
2990  */
2991 static void ice_init_prof_masks(struct ice_hw *hw, enum ice_block blk)
2992 {
2993 	u16 per_pf;
2994 	u16 i;
2995 
2996 	mutex_init(&hw->blk[blk].masks.lock);
2997 
2998 	per_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs;
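	/* each PF owns an equal, contiguous slice of the global mask
	 * registers; for example, with 32 total mask registers and 8 PFs,
	 * PF 2 would own mask indices 8 through 11
	 */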
2999 
3000 	hw->blk[blk].masks.count = per_pf;
3001 	hw->blk[blk].masks.first = hw->pf_id * per_pf;
3002 
3003 	memset(hw->blk[blk].masks.masks, 0, sizeof(hw->blk[blk].masks.masks));
3004 
3005 	for (i = hw->blk[blk].masks.first;
3006 	     i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
3007 		ice_write_prof_mask_reg(hw, blk, i, 0, 0);
3008 }
3009 
3010 /**
3011  * ice_init_all_prof_masks - initialize all prof masks
3012  * @hw: pointer to the HW struct
3013  */
3014 static void ice_init_all_prof_masks(struct ice_hw *hw)
3015 {
3016 	ice_init_prof_masks(hw, ICE_BLK_RSS);
3017 	ice_init_prof_masks(hw, ICE_BLK_FD);
3018 }
3019 
3020 /**
3021  * ice_alloc_prof_mask - allocate profile mask
3022  * @hw: pointer to the HW struct
3023  * @blk: hardware block
3024  * @idx: index of FV which will use the mask
3025  * @mask: the 16-bit mask
3026  * @mask_idx: variable to receive the mask index
3027  */
3028 static enum ice_status
3029 ice_alloc_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 idx, u16 mask,
3030 		    u16 *mask_idx)
3031 {
3032 	bool found_unused = false, found_copy = false;
3033 	enum ice_status status = ICE_ERR_MAX_LIMIT;
3034 	u16 unused_idx = 0, copy_idx = 0;
3035 	u16 i;
3036 
3037 	if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
3038 		return ICE_ERR_PARAM;
3039 
3040 	mutex_lock(&hw->blk[blk].masks.lock);
3041 
3042 	for (i = hw->blk[blk].masks.first;
3043 	     i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
3044 		if (hw->blk[blk].masks.masks[i].in_use) {
3045 			/* if mask is in use and it exactly duplicates the
			 * desired mask and index, then it can be reused
3047 			 */
3048 			if (hw->blk[blk].masks.masks[i].mask == mask &&
3049 			    hw->blk[blk].masks.masks[i].idx == idx) {
3050 				found_copy = true;
3051 				copy_idx = i;
3052 				break;
3053 			}
3054 		} else {
3055 			/* save off unused index, but keep searching in case
3056 			 * there is an exact match later on
3057 			 */
3058 			if (!found_unused) {
3059 				found_unused = true;
3060 				unused_idx = i;
3061 			}
3062 		}
3063 
3064 	if (found_copy)
3065 		i = copy_idx;
3066 	else if (found_unused)
3067 		i = unused_idx;
3068 	else
3069 		goto err_ice_alloc_prof_mask;
3070 
3071 	/* update mask for a new entry */
3072 	if (found_unused) {
3073 		hw->blk[blk].masks.masks[i].in_use = true;
3074 		hw->blk[blk].masks.masks[i].mask = mask;
3075 		hw->blk[blk].masks.masks[i].idx = idx;
3076 		hw->blk[blk].masks.masks[i].ref = 0;
3077 		ice_write_prof_mask_reg(hw, blk, i, idx, mask);
3078 	}
3079 
3080 	hw->blk[blk].masks.masks[i].ref++;
3081 	*mask_idx = i;
3082 	status = 0;
3083 
3084 err_ice_alloc_prof_mask:
3085 	mutex_unlock(&hw->blk[blk].masks.lock);
3086 
3087 	return status;
3088 }
3089 
3090 /**
3091  * ice_free_prof_mask - free profile mask
3092  * @hw: pointer to the HW struct
3093  * @blk: hardware block
3094  * @mask_idx: index of mask
3095  */
3096 static enum ice_status
3097 ice_free_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 mask_idx)
3098 {
3099 	if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
3100 		return ICE_ERR_PARAM;
3101 
3102 	if (!(mask_idx >= hw->blk[blk].masks.first &&
3103 	      mask_idx < hw->blk[blk].masks.first + hw->blk[blk].masks.count))
3104 		return ICE_ERR_DOES_NOT_EXIST;
3105 
3106 	mutex_lock(&hw->blk[blk].masks.lock);
3107 
3108 	if (!hw->blk[blk].masks.masks[mask_idx].in_use)
3109 		goto exit_ice_free_prof_mask;
3110 
3111 	if (hw->blk[blk].masks.masks[mask_idx].ref > 1) {
3112 		hw->blk[blk].masks.masks[mask_idx].ref--;
3113 		goto exit_ice_free_prof_mask;
3114 	}
3115 
3116 	/* remove mask */
3117 	hw->blk[blk].masks.masks[mask_idx].in_use = false;
3118 	hw->blk[blk].masks.masks[mask_idx].mask = 0;
3119 	hw->blk[blk].masks.masks[mask_idx].idx = 0;
3120 
3121 	/* update mask as unused entry */
3122 	ice_debug(hw, ICE_DBG_PKG, "Free mask, blk %d, mask %d\n", blk,
3123 		  mask_idx);
3124 	ice_write_prof_mask_reg(hw, blk, mask_idx, 0, 0);
3125 
3126 exit_ice_free_prof_mask:
3127 	mutex_unlock(&hw->blk[blk].masks.lock);
3128 
3129 	return 0;
3130 }
3131 
3132 /**
3133  * ice_free_prof_masks - free all profile masks for a profile
3134  * @hw: pointer to the HW struct
3135  * @blk: hardware block
3136  * @prof_id: profile ID
3137  */
3138 static enum ice_status
3139 ice_free_prof_masks(struct ice_hw *hw, enum ice_block blk, u16 prof_id)
3140 {
3141 	u32 mask_bm;
3142 	u16 i;
3143 
3144 	if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
3145 		return ICE_ERR_PARAM;
3146 
3147 	mask_bm = hw->blk[blk].es.mask_ena[prof_id];
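	/* walk all 32 bits of the enable bitmap and free every mask register
	 * this profile had enabled
	 */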
3148 	for (i = 0; i < BITS_PER_BYTE * sizeof(mask_bm); i++)
3149 		if (mask_bm & BIT(i))
3150 			ice_free_prof_mask(hw, blk, i);
3151 
3152 	return 0;
3153 }
3154 
3155 /**
 * ice_shutdown_prof_masks - clear profile masks and release the mask lock
3157  * @hw: pointer to the HW struct
3158  * @blk: hardware block
3159  *
3160  * This should be called before unloading the driver
3161  */
3162 static void ice_shutdown_prof_masks(struct ice_hw *hw, enum ice_block blk)
3163 {
3164 	u16 i;
3165 
3166 	mutex_lock(&hw->blk[blk].masks.lock);
3167 
3168 	for (i = hw->blk[blk].masks.first;
3169 	     i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++) {
3170 		ice_write_prof_mask_reg(hw, blk, i, 0, 0);
3171 
3172 		hw->blk[blk].masks.masks[i].in_use = false;
3173 		hw->blk[blk].masks.masks[i].idx = 0;
3174 		hw->blk[blk].masks.masks[i].mask = 0;
3175 	}
3176 
3177 	mutex_unlock(&hw->blk[blk].masks.lock);
3178 	mutex_destroy(&hw->blk[blk].masks.lock);
3179 }
3180 
3181 /**
 * ice_shutdown_all_prof_masks - clear all profile masks and release the locks
3183  * @hw: pointer to the HW struct
3184  *
3185  * This should be called before unloading the driver
3186  */
3187 static void ice_shutdown_all_prof_masks(struct ice_hw *hw)
3188 {
3189 	ice_shutdown_prof_masks(hw, ICE_BLK_RSS);
3190 	ice_shutdown_prof_masks(hw, ICE_BLK_FD);
3191 }
3192 
3193 /**
3194  * ice_update_prof_masking - set registers according to masking
3195  * @hw: pointer to the HW struct
3196  * @blk: hardware block
3197  * @prof_id: profile ID
3198  * @masks: masks
3199  */
3200 static enum ice_status
3201 ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id,
3202 			u16 *masks)
3203 {
3204 	bool err = false;
3205 	u32 ena_mask = 0;
3206 	u16 idx;
3207 	u16 i;
3208 
3209 	/* Only support FD and RSS masking, otherwise nothing to be done */
3210 	if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
3211 		return 0;
3212 
3213 	for (i = 0; i < hw->blk[blk].es.fvw; i++)
3214 		if (masks[i] && masks[i] != 0xFFFF) {
3215 			if (!ice_alloc_prof_mask(hw, blk, i, masks[i], &idx)) {
3216 				ena_mask |= BIT(idx);
3217 			} else {
3218 				/* not enough bitmaps */
3219 				err = true;
3220 				break;
3221 			}
3222 		}
3223 
3224 	if (err) {
3225 		/* free any bitmaps we have allocated */
3226 		for (i = 0; i < BITS_PER_BYTE * sizeof(ena_mask); i++)
3227 			if (ena_mask & BIT(i))
3228 				ice_free_prof_mask(hw, blk, i);
3229 
3230 		return ICE_ERR_OUT_OF_RANGE;
3231 	}
3232 
3233 	/* enable the masks for this profile */
3234 	ice_write_prof_mask_enable_res(hw, blk, prof_id, ena_mask);
3235 
3236 	/* store enabled masks with profile so that they can be freed later */
3237 	hw->blk[blk].es.mask_ena[prof_id] = ena_mask;
3238 
3239 	return 0;
3240 }
3241 
3242 /**
3243  * ice_write_es - write an extraction sequence to hardware
3244  * @hw: pointer to the HW struct
3245  * @blk: the block in which to write the extraction sequence
3246  * @prof_id: the profile ID to write
3247  * @fv: pointer to the extraction sequence to write - NULL to clear extraction
3248  */
3249 static void
3250 ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
3251 	     struct ice_fv_word *fv)
3252 {
3253 	u16 off;
3254 
3255 	off = prof_id * hw->blk[blk].es.fvw;
3256 	if (!fv) {
3257 		memset(&hw->blk[blk].es.t[off], 0,
3258 		       hw->blk[blk].es.fvw * sizeof(*fv));
3259 		hw->blk[blk].es.written[prof_id] = false;
3260 	} else {
3261 		memcpy(&hw->blk[blk].es.t[off], fv,
3262 		       hw->blk[blk].es.fvw * sizeof(*fv));
3263 	}
3264 }
3265 
3266 /**
3267  * ice_prof_dec_ref - decrement reference count for profile
3268  * @hw: pointer to the HW struct
3269  * @blk: the block from which to free the profile ID
3270  * @prof_id: the profile ID for which to decrement the reference count
3271  */
3272 static enum ice_status
3273 ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
3274 {
	if (prof_id >= hw->blk[blk].es.count)
3276 		return ICE_ERR_PARAM;
3277 
3278 	if (hw->blk[blk].es.ref_count[prof_id] > 0) {
3279 		if (!--hw->blk[blk].es.ref_count[prof_id]) {
3280 			ice_write_es(hw, blk, prof_id, NULL);
3281 			ice_free_prof_masks(hw, blk, prof_id);
3282 			return ice_free_prof_id(hw, blk, prof_id);
3283 		}
3284 	}
3285 
3286 	return 0;
3287 }
3288 
3289 /* Block / table section IDs */
3290 static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = {
3291 	/* SWITCH */
3292 	{	ICE_SID_XLT1_SW,
3293 		ICE_SID_XLT2_SW,
3294 		ICE_SID_PROFID_TCAM_SW,
3295 		ICE_SID_PROFID_REDIR_SW,
3296 		ICE_SID_FLD_VEC_SW
3297 	},
3298 
3299 	/* ACL */
3300 	{	ICE_SID_XLT1_ACL,
3301 		ICE_SID_XLT2_ACL,
3302 		ICE_SID_PROFID_TCAM_ACL,
3303 		ICE_SID_PROFID_REDIR_ACL,
3304 		ICE_SID_FLD_VEC_ACL
3305 	},
3306 
3307 	/* FD */
3308 	{	ICE_SID_XLT1_FD,
3309 		ICE_SID_XLT2_FD,
3310 		ICE_SID_PROFID_TCAM_FD,
3311 		ICE_SID_PROFID_REDIR_FD,
3312 		ICE_SID_FLD_VEC_FD
3313 	},
3314 
3315 	/* RSS */
3316 	{	ICE_SID_XLT1_RSS,
3317 		ICE_SID_XLT2_RSS,
3318 		ICE_SID_PROFID_TCAM_RSS,
3319 		ICE_SID_PROFID_REDIR_RSS,
3320 		ICE_SID_FLD_VEC_RSS
3321 	},
3322 
3323 	/* PE */
3324 	{	ICE_SID_XLT1_PE,
3325 		ICE_SID_XLT2_PE,
3326 		ICE_SID_PROFID_TCAM_PE,
3327 		ICE_SID_PROFID_REDIR_PE,
3328 		ICE_SID_FLD_VEC_PE
3329 	}
3330 };
3331 
3332 /**
3333  * ice_init_sw_xlt1_db - init software XLT1 database from HW tables
3334  * @hw: pointer to the hardware structure
3335  * @blk: the HW block to initialize
3336  */
3337 static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk)
3338 {
3339 	u16 pt;
3340 
3341 	for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) {
3342 		u8 ptg;
3343 
3344 		ptg = hw->blk[blk].xlt1.t[pt];
3345 		if (ptg != ICE_DEFAULT_PTG) {
3346 			ice_ptg_alloc_val(hw, blk, ptg);
3347 			ice_ptg_add_mv_ptype(hw, blk, pt, ptg);
3348 		}
3349 	}
3350 }
3351 
3352 /**
3353  * ice_init_sw_xlt2_db - init software XLT2 database from HW tables
3354  * @hw: pointer to the hardware structure
3355  * @blk: the HW block to initialize
3356  */
3357 static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk)
3358 {
3359 	u16 vsi;
3360 
3361 	for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) {
3362 		u16 vsig;
3363 
3364 		vsig = hw->blk[blk].xlt2.t[vsi];
3365 		if (vsig) {
3366 			ice_vsig_alloc_val(hw, blk, vsig);
3367 			ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
3368 			/* no changes at this time, since this has been
3369 			 * initialized from the original package
3370 			 */
3371 			hw->blk[blk].xlt2.vsis[vsi].changed = 0;
3372 		}
3373 	}
3374 }
3375 
3376 /**
3377  * ice_init_sw_db - init software database from HW tables
3378  * @hw: pointer to the hardware structure
3379  */
3380 static void ice_init_sw_db(struct ice_hw *hw)
3381 {
3382 	u16 i;
3383 
3384 	for (i = 0; i < ICE_BLK_COUNT; i++) {
3385 		ice_init_sw_xlt1_db(hw, (enum ice_block)i);
3386 		ice_init_sw_xlt2_db(hw, (enum ice_block)i);
3387 	}
3388 }
3389 
3390 /**
3391  * ice_fill_tbl - Reads content of a single table type into database
3392  * @hw: pointer to the hardware structure
3393  * @block_id: Block ID of the table to copy
3394  * @sid: Section ID of the table to copy
3395  *
3396  * Will attempt to read the entire content of a given table of a single block
 * into the driver database. We assume that the buffer will always be at
 * least as large as the data contained in the package. If
3399  * this condition is not met, there is most likely an error in the package
3400  * contents.
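 *
 * Sections with a matching SID are copied back-to-back: each section's
 * contents are appended at the running offset into the destination table
 * until no more matching sections remain.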
3401  */
3402 static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
3403 {
3404 	u32 dst_len, sect_len, offset = 0;
3405 	struct ice_prof_redir_section *pr;
3406 	struct ice_prof_id_section *pid;
3407 	struct ice_xlt1_section *xlt1;
3408 	struct ice_xlt2_section *xlt2;
3409 	struct ice_sw_fv_section *es;
3410 	struct ice_pkg_enum state;
3411 	u8 *src, *dst;
3412 	void *sect;
3413 
	/* if the HW segment pointer is NULL then the first iteration of
	 * ice_pkg_enum_section() will fail. In this case the HW tables will
	 * not be filled, so simply return.
3417 	 */
3418 	if (!hw->seg) {
3419 		ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n");
3420 		return;
3421 	}
3422 
3423 	memset(&state, 0, sizeof(state));
3424 
3425 	sect = ice_pkg_enum_section(hw->seg, &state, sid);
3426 
3427 	while (sect) {
3428 		switch (sid) {
3429 		case ICE_SID_XLT1_SW:
3430 		case ICE_SID_XLT1_FD:
3431 		case ICE_SID_XLT1_RSS:
3432 		case ICE_SID_XLT1_ACL:
3433 		case ICE_SID_XLT1_PE:
3434 			xlt1 = sect;
3435 			src = xlt1->value;
3436 			sect_len = le16_to_cpu(xlt1->count) *
3437 				sizeof(*hw->blk[block_id].xlt1.t);
3438 			dst = hw->blk[block_id].xlt1.t;
3439 			dst_len = hw->blk[block_id].xlt1.count *
3440 				sizeof(*hw->blk[block_id].xlt1.t);
3441 			break;
3442 		case ICE_SID_XLT2_SW:
3443 		case ICE_SID_XLT2_FD:
3444 		case ICE_SID_XLT2_RSS:
3445 		case ICE_SID_XLT2_ACL:
3446 		case ICE_SID_XLT2_PE:
3447 			xlt2 = sect;
3448 			src = (__force u8 *)xlt2->value;
3449 			sect_len = le16_to_cpu(xlt2->count) *
3450 				sizeof(*hw->blk[block_id].xlt2.t);
3451 			dst = (u8 *)hw->blk[block_id].xlt2.t;
3452 			dst_len = hw->blk[block_id].xlt2.count *
3453 				sizeof(*hw->blk[block_id].xlt2.t);
3454 			break;
3455 		case ICE_SID_PROFID_TCAM_SW:
3456 		case ICE_SID_PROFID_TCAM_FD:
3457 		case ICE_SID_PROFID_TCAM_RSS:
3458 		case ICE_SID_PROFID_TCAM_ACL:
3459 		case ICE_SID_PROFID_TCAM_PE:
3460 			pid = sect;
3461 			src = (u8 *)pid->entry;
3462 			sect_len = le16_to_cpu(pid->count) *
3463 				sizeof(*hw->blk[block_id].prof.t);
3464 			dst = (u8 *)hw->blk[block_id].prof.t;
3465 			dst_len = hw->blk[block_id].prof.count *
3466 				sizeof(*hw->blk[block_id].prof.t);
3467 			break;
3468 		case ICE_SID_PROFID_REDIR_SW:
3469 		case ICE_SID_PROFID_REDIR_FD:
3470 		case ICE_SID_PROFID_REDIR_RSS:
3471 		case ICE_SID_PROFID_REDIR_ACL:
3472 		case ICE_SID_PROFID_REDIR_PE:
3473 			pr = sect;
3474 			src = pr->redir_value;
3475 			sect_len = le16_to_cpu(pr->count) *
3476 				sizeof(*hw->blk[block_id].prof_redir.t);
3477 			dst = hw->blk[block_id].prof_redir.t;
3478 			dst_len = hw->blk[block_id].prof_redir.count *
3479 				sizeof(*hw->blk[block_id].prof_redir.t);
3480 			break;
3481 		case ICE_SID_FLD_VEC_SW:
3482 		case ICE_SID_FLD_VEC_FD:
3483 		case ICE_SID_FLD_VEC_RSS:
3484 		case ICE_SID_FLD_VEC_ACL:
3485 		case ICE_SID_FLD_VEC_PE:
3486 			es = sect;
3487 			src = (u8 *)es->fv;
3488 			sect_len = (u32)(le16_to_cpu(es->count) *
3489 					 hw->blk[block_id].es.fvw) *
3490 				sizeof(*hw->blk[block_id].es.t);
3491 			dst = (u8 *)hw->blk[block_id].es.t;
3492 			dst_len = (u32)(hw->blk[block_id].es.count *
3493 					hw->blk[block_id].es.fvw) *
3494 				sizeof(*hw->blk[block_id].es.t);
3495 			break;
3496 		default:
3497 			return;
3498 		}
3499 
3500 		/* if the section offset exceeds destination length, terminate
3501 		 * table fill.
3502 		 */
3503 		if (offset > dst_len)
3504 			return;
3505 
		/* if the sum of section size and offset exceeds the destination
		 * size then we are out of bounds of the HW table size for that
		 * PF. Change the section length to fill only the remaining
		 * table space of that PF.
3510 		 */
3511 		if ((offset + sect_len) > dst_len)
3512 			sect_len = dst_len - offset;
3513 
3514 		memcpy(dst + offset, src, sect_len);
3515 		offset += sect_len;
3516 		sect = ice_pkg_enum_section(NULL, &state, sid);
3517 	}
3518 }
3519 
3520 /**
3521  * ice_fill_blk_tbls - Read package context for tables
3522  * @hw: pointer to the hardware structure
3523  *
3524  * Reads the current package contents and populates the driver
3525  * database with the data iteratively for all advanced feature
3526  * blocks. Assume that the HW tables have been allocated.
3527  */
3528 void ice_fill_blk_tbls(struct ice_hw *hw)
3529 {
3530 	u8 i;
3531 
3532 	for (i = 0; i < ICE_BLK_COUNT; i++) {
3533 		enum ice_block blk_id = (enum ice_block)i;
3534 
3535 		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid);
3536 		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid);
3537 		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid);
3538 		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid);
3539 		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid);
3540 	}
3541 
3542 	ice_init_sw_db(hw);
3543 }
3544 
3545 /**
3546  * ice_free_prof_map - free profile map
3547  * @hw: pointer to the hardware structure
3548  * @blk_idx: HW block index
3549  */
3550 static void ice_free_prof_map(struct ice_hw *hw, u8 blk_idx)
3551 {
3552 	struct ice_es *es = &hw->blk[blk_idx].es;
3553 	struct ice_prof_map *del, *tmp;
3554 
3555 	mutex_lock(&es->prof_map_lock);
3556 	list_for_each_entry_safe(del, tmp, &es->prof_map, list) {
3557 		list_del(&del->list);
3558 		devm_kfree(ice_hw_to_dev(hw), del);
3559 	}
3560 	INIT_LIST_HEAD(&es->prof_map);
3561 	mutex_unlock(&es->prof_map_lock);
3562 }
3563 
3564 /**
3565  * ice_free_flow_profs - free flow profile entries
3566  * @hw: pointer to the hardware structure
3567  * @blk_idx: HW block index
3568  */
3569 static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx)
3570 {
3571 	struct ice_flow_prof *p, *tmp;
3572 
3573 	mutex_lock(&hw->fl_profs_locks[blk_idx]);
3574 	list_for_each_entry_safe(p, tmp, &hw->fl_profs[blk_idx], l_entry) {
3575 		struct ice_flow_entry *e, *t;
3576 
3577 		list_for_each_entry_safe(e, t, &p->entries, l_entry)
3578 			ice_flow_rem_entry(hw, (enum ice_block)blk_idx,
3579 					   ICE_FLOW_ENTRY_HNDL(e));
3580 
3581 		list_del(&p->l_entry);
3582 
3583 		mutex_destroy(&p->entries_lock);
3584 		devm_kfree(ice_hw_to_dev(hw), p);
3585 	}
3586 	mutex_unlock(&hw->fl_profs_locks[blk_idx]);
3587 
3588 	/* if the driver is in reset and tables are being cleared,
3589 	 * re-initialize the flow profile list heads
3590 	 */
3591 	INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
3592 }
3593 
3594 /**
3595  * ice_free_vsig_tbl - free complete VSIG table entries
3596  * @hw: pointer to the hardware structure
3597  * @blk: the HW block on which to free the VSIG table entries
3598  */
3599 static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk)
3600 {
3601 	u16 i;
3602 
3603 	if (!hw->blk[blk].xlt2.vsig_tbl)
3604 		return;
3605 
3606 	for (i = 1; i < ICE_MAX_VSIGS; i++)
3607 		if (hw->blk[blk].xlt2.vsig_tbl[i].in_use)
3608 			ice_vsig_free(hw, blk, i);
3609 }
3610 
3611 /**
3612  * ice_free_hw_tbls - free hardware table memory
3613  * @hw: pointer to the hardware structure
3614  */
3615 void ice_free_hw_tbls(struct ice_hw *hw)
3616 {
3617 	struct ice_rss_cfg *r, *rt;
3618 	u8 i;
3619 
3620 	for (i = 0; i < ICE_BLK_COUNT; i++) {
3621 		if (hw->blk[i].is_list_init) {
3622 			struct ice_es *es = &hw->blk[i].es;
3623 
3624 			ice_free_prof_map(hw, i);
3625 			mutex_destroy(&es->prof_map_lock);
3626 
3627 			ice_free_flow_profs(hw, i);
3628 			mutex_destroy(&hw->fl_profs_locks[i]);
3629 
3630 			hw->blk[i].is_list_init = false;
3631 		}
3632 		ice_free_vsig_tbl(hw, (enum ice_block)i);
3633 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptypes);
3634 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptg_tbl);
3635 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.t);
3636 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.t);
3637 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsig_tbl);
3638 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsis);
3639 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof.t);
3640 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_redir.t);
3641 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.t);
3642 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.ref_count);
3643 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written);
3644 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.mask_ena);
3645 	}
3646 
3647 	list_for_each_entry_safe(r, rt, &hw->rss_list_head, l_entry) {
3648 		list_del(&r->l_entry);
3649 		devm_kfree(ice_hw_to_dev(hw), r);
3650 	}
3651 	mutex_destroy(&hw->rss_locks);
3652 	ice_shutdown_all_prof_masks(hw);
3653 	memset(hw->blk, 0, sizeof(hw->blk));
3654 }
3655 
3656 /**
3657  * ice_init_flow_profs - init flow profile locks and list heads
3658  * @hw: pointer to the hardware structure
3659  * @blk_idx: HW block index
3660  */
3661 static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx)
3662 {
3663 	mutex_init(&hw->fl_profs_locks[blk_idx]);
3664 	INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
3665 }
3666 
3667 /**
3668  * ice_clear_hw_tbls - clear HW tables and flow profiles
3669  * @hw: pointer to the hardware structure
3670  */
3671 void ice_clear_hw_tbls(struct ice_hw *hw)
3672 {
3673 	u8 i;
3674 
3675 	for (i = 0; i < ICE_BLK_COUNT; i++) {
3676 		struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
3677 		struct ice_prof_tcam *prof = &hw->blk[i].prof;
3678 		struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
3679 		struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
3680 		struct ice_es *es = &hw->blk[i].es;
3681 
3682 		if (hw->blk[i].is_list_init) {
3683 			ice_free_prof_map(hw, i);
3684 			ice_free_flow_profs(hw, i);
3685 		}
3686 
3687 		ice_free_vsig_tbl(hw, (enum ice_block)i);
3688 
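		/* zero the shadow table contents; the allocations themselves
		 * are kept
		 */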
3689 		memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes));
3690 		memset(xlt1->ptg_tbl, 0,
3691 		       ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl));
3692 		memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t));
3693 
3694 		memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis));
3695 		memset(xlt2->vsig_tbl, 0,
3696 		       xlt2->count * sizeof(*xlt2->vsig_tbl));
3697 		memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t));
3698 
3699 		memset(prof->t, 0, prof->count * sizeof(*prof->t));
3700 		memset(prof_redir->t, 0,
3701 		       prof_redir->count * sizeof(*prof_redir->t));
3702 
3703 		memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw);
3704 		memset(es->ref_count, 0, es->count * sizeof(*es->ref_count));
3705 		memset(es->written, 0, es->count * sizeof(*es->written));
3706 		memset(es->mask_ena, 0, es->count * sizeof(*es->mask_ena));
3707 	}
3708 }
3709 
3710 /**
3711  * ice_init_hw_tbls - init hardware table memory
3712  * @hw: pointer to the hardware structure
3713  */
3714 enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
3715 {
3716 	u8 i;
3717 
3718 	mutex_init(&hw->rss_locks);
3719 	INIT_LIST_HEAD(&hw->rss_list_head);
3720 	ice_init_all_prof_masks(hw);
3721 	for (i = 0; i < ICE_BLK_COUNT; i++) {
3722 		struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
3723 		struct ice_prof_tcam *prof = &hw->blk[i].prof;
3724 		struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
3725 		struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
3726 		struct ice_es *es = &hw->blk[i].es;
3727 		u16 j;
3728 
3729 		if (hw->blk[i].is_list_init)
3730 			continue;
3731 
3732 		ice_init_flow_profs(hw, i);
3733 		mutex_init(&es->prof_map_lock);
3734 		INIT_LIST_HEAD(&es->prof_map);
3735 		hw->blk[i].is_list_init = true;
3736 
3737 		hw->blk[i].overwrite = blk_sizes[i].overwrite;
3738 		es->reverse = blk_sizes[i].reverse;
3739 
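		/* section IDs and table sizes for this block come from the
		 * static ice_blk_sids and blk_sizes tables
		 */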
3740 		xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF];
3741 		xlt1->count = blk_sizes[i].xlt1;
3742 
3743 		xlt1->ptypes = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count,
3744 					    sizeof(*xlt1->ptypes), GFP_KERNEL);
3745 
3746 		if (!xlt1->ptypes)
3747 			goto err;
3748 
3749 		xlt1->ptg_tbl = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_PTGS,
3750 					     sizeof(*xlt1->ptg_tbl),
3751 					     GFP_KERNEL);
3752 
3753 		if (!xlt1->ptg_tbl)
3754 			goto err;
3755 
3756 		xlt1->t = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count,
3757 				       sizeof(*xlt1->t), GFP_KERNEL);
3758 		if (!xlt1->t)
3759 			goto err;
3760 
3761 		xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF];
3762 		xlt2->count = blk_sizes[i].xlt2;
3763 
3764 		xlt2->vsis = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
3765 					  sizeof(*xlt2->vsis), GFP_KERNEL);
3766 
3767 		if (!xlt2->vsis)
3768 			goto err;
3769 
3770 		xlt2->vsig_tbl = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
3771 					      sizeof(*xlt2->vsig_tbl),
3772 					      GFP_KERNEL);
3773 		if (!xlt2->vsig_tbl)
3774 			goto err;
3775 
3776 		for (j = 0; j < xlt2->count; j++)
3777 			INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst);
3778 
3779 		xlt2->t = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
3780 				       sizeof(*xlt2->t), GFP_KERNEL);
3781 		if (!xlt2->t)
3782 			goto err;
3783 
3784 		prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF];
3785 		prof->count = blk_sizes[i].prof_tcam;
3786 		prof->max_prof_id = blk_sizes[i].prof_id;
3787 		prof->cdid_bits = blk_sizes[i].prof_cdid_bits;
3788 		prof->t = devm_kcalloc(ice_hw_to_dev(hw), prof->count,
3789 				       sizeof(*prof->t), GFP_KERNEL);
3790 
3791 		if (!prof->t)
3792 			goto err;
3793 
3794 		prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF];
3795 		prof_redir->count = blk_sizes[i].prof_redir;
3796 		prof_redir->t = devm_kcalloc(ice_hw_to_dev(hw),
3797 					     prof_redir->count,
3798 					     sizeof(*prof_redir->t),
3799 					     GFP_KERNEL);
3800 
3801 		if (!prof_redir->t)
3802 			goto err;
3803 
3804 		es->sid = ice_blk_sids[i][ICE_SID_ES_OFF];
3805 		es->count = blk_sizes[i].es;
3806 		es->fvw = blk_sizes[i].fvw;
3807 		es->t = devm_kcalloc(ice_hw_to_dev(hw),
3808 				     (u32)(es->count * es->fvw),
3809 				     sizeof(*es->t), GFP_KERNEL);
3810 		if (!es->t)
3811 			goto err;
3812 
3813 		es->ref_count = devm_kcalloc(ice_hw_to_dev(hw), es->count,
3814 					     sizeof(*es->ref_count),
3815 					     GFP_KERNEL);
3816 		if (!es->ref_count)
3817 			goto err;
3818 
3819 		es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count,
3820 					   sizeof(*es->written), GFP_KERNEL);
3821 		if (!es->written)
3822 			goto err;
3823 
3824 		es->mask_ena = devm_kcalloc(ice_hw_to_dev(hw), es->count,
3825 					    sizeof(*es->mask_ena), GFP_KERNEL);
3826 		if (!es->mask_ena)
3827 			goto err;
3828 	}
3829 	return 0;
3830 
3831 err:
3832 	ice_free_hw_tbls(hw);
3833 	return ICE_ERR_NO_MEMORY;
3834 }
3835 
3836 /**
3837  * ice_prof_gen_key - generate profile ID key
3838  * @hw: pointer to the HW struct
3839  * @blk: the block in which to write the profile ID
3840  * @ptg: packet type group (PTG) portion of key
3841  * @vsig: VSIG portion of key
3842  * @cdid: CDID portion of key
3843  * @flags: flag portion of key
3844  * @vl_msk: valid mask
3845  * @dc_msk: don't care mask
3846  * @nm_msk: never match mask
3847  * @key: output of profile ID key
3848  */
3849 static enum ice_status
3850 ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
3851 		 u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
3852 		 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ],
3853 		 u8 key[ICE_TCAM_KEY_SZ])
3854 {
3855 	struct ice_prof_id_key inkey;
3856 
3857 	inkey.xlt1 = ptg;
3858 	inkey.xlt2_cdid = cpu_to_le16(vsig);
3859 	inkey.flags = cpu_to_le16(flags);
3860 
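	/* the CDID is encoded as a one-hot value in the upper bits of the
	 * xlt2_cdid field; the mask and shift depend on how many CDID bits
	 * the block's profile TCAM uses
	 */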
3861 	switch (hw->blk[blk].prof.cdid_bits) {
3862 	case 0:
3863 		break;
3864 	case 2:
3865 #define ICE_CD_2_M 0xC000U
3866 #define ICE_CD_2_S 14
3867 		inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_2_M);
3868 		inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_2_S);
3869 		break;
3870 	case 4:
3871 #define ICE_CD_4_M 0xF000U
3872 #define ICE_CD_4_S 12
3873 		inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_4_M);
3874 		inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_4_S);
3875 		break;
3876 	case 8:
3877 #define ICE_CD_8_M 0xFF00U
3878 #define ICE_CD_8_S 8
3879 		inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_8_M);
3880 		inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_8_S);
3881 		break;
3882 	default:
3883 		ice_debug(hw, ICE_DBG_PKG, "Error in profile config\n");
3884 		break;
3885 	}
3886 
3887 	return ice_set_key(key, ICE_TCAM_KEY_SZ, (u8 *)&inkey, vl_msk, dc_msk,
3888 			   nm_msk, 0, ICE_TCAM_KEY_SZ / 2);
3889 }
3890 
3891 /**
3892  * ice_tcam_write_entry - write TCAM entry
3893  * @hw: pointer to the HW struct
3894  * @blk: the block in which to write the profile ID
3895  * @idx: the entry index to write to
3896  * @prof_id: profile ID
3897  * @ptg: packet type group (PTG) portion of key
3898  * @vsig: VSIG portion of key
3899  * @cdid: CDID portion of key
3900  * @flags: flag portion of key
3901  * @vl_msk: valid mask
3902  * @dc_msk: don't care mask
3903  * @nm_msk: never match mask
3904  */
3905 static enum ice_status
3906 ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
3907 		     u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags,
3908 		     u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
3909 		     u8 dc_msk[ICE_TCAM_KEY_VAL_SZ],
3910 		     u8 nm_msk[ICE_TCAM_KEY_VAL_SZ])
3911 {
3912 	struct ice_prof_tcam_entry;
3913 	enum ice_status status;
3914 
3915 	status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk,
3916 				  dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key);
3917 	if (!status) {
3918 		hw->blk[blk].prof.t[idx].addr = cpu_to_le16(idx);
3919 		hw->blk[blk].prof.t[idx].prof_id = prof_id;
3920 	}
3921 
3922 	return status;
3923 }
3924 
3925 /**
3926  * ice_vsig_get_ref - returns the number of VSIs that belong to a VSIG
3927  * @hw: pointer to the hardware structure
3928  * @blk: HW block
3929  * @vsig: VSIG to query
3930  * @refs: pointer to variable to receive the reference count
3931  */
3932 static enum ice_status
3933 ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
3934 {
3935 	u16 idx = vsig & ICE_VSIG_IDX_M;
3936 	struct ice_vsig_vsi *ptr;
3937 
3938 	*refs = 0;
3939 
3940 	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
3941 		return ICE_ERR_DOES_NOT_EXIST;
3942 
3943 	ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
3944 	while (ptr) {
3945 		(*refs)++;
3946 		ptr = ptr->next_vsi;
3947 	}
3948 
3949 	return 0;
3950 }
3951 
3952 /**
3953  * ice_has_prof_vsig - check to see if VSIG has a specific profile
3954  * @hw: pointer to the hardware structure
3955  * @blk: HW block
3956  * @vsig: VSIG to check against
3957  * @hdl: profile handle
3958  */
3959 static bool
3960 ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl)
3961 {
3962 	u16 idx = vsig & ICE_VSIG_IDX_M;
3963 	struct ice_vsig_prof *ent;
3964 
3965 	list_for_each_entry(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
3966 			    list)
3967 		if (ent->profile_cookie == hdl)
3968 			return true;
3969 
3970 	ice_debug(hw, ICE_DBG_INIT, "Characteristic list for VSI group %d not found.\n",
3971 		  vsig);
3972 	return false;
3973 }
3974 
3975 /**
3976  * ice_prof_bld_es - build profile ID extraction sequence changes
3977  * @hw: pointer to the HW struct
3978  * @blk: hardware block
3979  * @bld: the update package buffer build to add to
3980  * @chgs: the list of changes to make in hardware
3981  */
3982 static enum ice_status
3983 ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
3984 		struct ice_buf_build *bld, struct list_head *chgs)
3985 {
3986 	u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word);
3987 	struct ice_chs_chg *tmp;
3988 
3989 	list_for_each_entry(tmp, chgs, list_entry)
3990 		if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) {
3991 			u16 off = tmp->prof_id * hw->blk[blk].es.fvw;
3992 			struct ice_pkg_es *p;
3993 			u32 id;
3994 
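			/* one-entry ES section; the entry's es[] array must
			 * hold a full field vector, hence the vec_size
			 * adjustment to struct_size() below
			 */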
3995 			id = ice_sect_id(blk, ICE_VEC_TBL);
3996 			p = ice_pkg_buf_alloc_section(bld, id,
3997 						      struct_size(p, es, 1) +
3998 						      vec_size -
3999 						      sizeof(p->es[0]));
4000 
4001 			if (!p)
4002 				return ICE_ERR_MAX_LIMIT;
4003 
4004 			p->count = cpu_to_le16(1);
4005 			p->offset = cpu_to_le16(tmp->prof_id);
4006 
4007 			memcpy(p->es, &hw->blk[blk].es.t[off], vec_size);
4008 		}
4009 
4010 	return 0;
4011 }
4012 
4013 /**
4014  * ice_prof_bld_tcam - build profile ID TCAM changes
4015  * @hw: pointer to the HW struct
4016  * @blk: hardware block
4017  * @bld: the update package buffer build to add to
4018  * @chgs: the list of changes to make in hardware
4019  */
4020 static enum ice_status
4021 ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
4022 		  struct ice_buf_build *bld, struct list_head *chgs)
4023 {
4024 	struct ice_chs_chg *tmp;
4025 
4026 	list_for_each_entry(tmp, chgs, list_entry)
4027 		if (tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) {
4028 			struct ice_prof_id_section *p;
4029 			u32 id;
4030 
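			/* one-entry profile TCAM section, keyed from the
			 * driver's shadow copy of the TCAM entry
			 */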
4031 			id = ice_sect_id(blk, ICE_PROF_TCAM);
4032 			p = ice_pkg_buf_alloc_section(bld, id,
4033 						      struct_size(p, entry, 1));
4034 
4035 			if (!p)
4036 				return ICE_ERR_MAX_LIMIT;
4037 
4038 			p->count = cpu_to_le16(1);
4039 			p->entry[0].addr = cpu_to_le16(tmp->tcam_idx);
4040 			p->entry[0].prof_id = tmp->prof_id;
4041 
4042 			memcpy(p->entry[0].key,
4043 			       &hw->blk[blk].prof.t[tmp->tcam_idx].key,
4044 			       sizeof(hw->blk[blk].prof.t->key));
4045 		}
4046 
4047 	return 0;
4048 }
4049 
4050 /**
4051  * ice_prof_bld_xlt1 - build XLT1 changes
4052  * @blk: hardware block
4053  * @bld: the update package buffer build to add to
4054  * @chgs: the list of changes to make in hardware
4055  */
4056 static enum ice_status
4057 ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
4058 		  struct list_head *chgs)
4059 {
4060 	struct ice_chs_chg *tmp;
4061 
4062 	list_for_each_entry(tmp, chgs, list_entry)
4063 		if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) {
4064 			struct ice_xlt1_section *p;
4065 			u32 id;
4066 
4067 			id = ice_sect_id(blk, ICE_XLT1);
4068 			p = ice_pkg_buf_alloc_section(bld, id,
4069 						      struct_size(p, value, 1));
4070 
4071 			if (!p)
4072 				return ICE_ERR_MAX_LIMIT;
4073 
4074 			p->count = cpu_to_le16(1);
4075 			p->offset = cpu_to_le16(tmp->ptype);
4076 			p->value[0] = tmp->ptg;
4077 		}
4078 
4079 	return 0;
4080 }
4081 
4082 /**
4083  * ice_prof_bld_xlt2 - build XLT2 changes
4084  * @blk: hardware block
4085  * @bld: the update package buffer build to add to
4086  * @chgs: the list of changes to make in hardware
4087  */
4088 static enum ice_status
4089 ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
4090 		  struct list_head *chgs)
4091 {
4092 	struct ice_chs_chg *tmp;
4093 
4094 	list_for_each_entry(tmp, chgs, list_entry) {
4095 		struct ice_xlt2_section *p;
4096 		u32 id;
4097 
4098 		switch (tmp->type) {
4099 		case ICE_VSIG_ADD:
4100 		case ICE_VSI_MOVE:
4101 		case ICE_VSIG_REM:
4102 			id = ice_sect_id(blk, ICE_XLT2);
4103 			p = ice_pkg_buf_alloc_section(bld, id,
4104 						      struct_size(p, value, 1));
4105 
4106 			if (!p)
4107 				return ICE_ERR_MAX_LIMIT;
4108 
4109 			p->count = cpu_to_le16(1);
4110 			p->offset = cpu_to_le16(tmp->vsi);
4111 			p->value[0] = cpu_to_le16(tmp->vsig);
4112 			break;
4113 		default:
4114 			break;
4115 		}
4116 	}
4117 
4118 	return 0;
4119 }
4120 
4121 /**
4122  * ice_upd_prof_hw - update hardware using the change list
4123  * @hw: pointer to the HW struct
4124  * @blk: hardware block
4125  * @chgs: the list of changes to make in hardware
4126  */
4127 static enum ice_status
4128 ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
4129 		struct list_head *chgs)
4130 {
4131 	struct ice_buf_build *b;
4132 	struct ice_chs_chg *tmp;
4133 	enum ice_status status;
4134 	u16 pkg_sects;
4135 	u16 xlt1 = 0;
4136 	u16 xlt2 = 0;
4137 	u16 tcam = 0;
4138 	u16 es = 0;
4139 	u16 sects;
4140 
4141 	/* count number of sections we need */
4142 	list_for_each_entry(tmp, chgs, list_entry) {
4143 		switch (tmp->type) {
4144 		case ICE_PTG_ES_ADD:
4145 			if (tmp->add_ptg)
4146 				xlt1++;
4147 			if (tmp->add_prof)
4148 				es++;
4149 			break;
4150 		case ICE_TCAM_ADD:
4151 			tcam++;
4152 			break;
4153 		case ICE_VSIG_ADD:
4154 		case ICE_VSI_MOVE:
4155 		case ICE_VSIG_REM:
4156 			xlt2++;
4157 			break;
4158 		default:
4159 			break;
4160 		}
4161 	}
4162 	sects = xlt1 + xlt2 + tcam + es;
4163 
4164 	if (!sects)
4165 		return 0;
4166 
4167 	/* Build update package buffer */
4168 	b = ice_pkg_buf_alloc(hw);
4169 	if (!b)
4170 		return ICE_ERR_NO_MEMORY;
4171 
4172 	status = ice_pkg_buf_reserve_section(b, sects);
4173 	if (status)
4174 		goto error_tmp;
4175 
4176 	/* Preserve order of table update: ES, TCAM, PTG, VSIG */
4177 	if (es) {
4178 		status = ice_prof_bld_es(hw, blk, b, chgs);
4179 		if (status)
4180 			goto error_tmp;
4181 	}
4182 
4183 	if (tcam) {
4184 		status = ice_prof_bld_tcam(hw, blk, b, chgs);
4185 		if (status)
4186 			goto error_tmp;
4187 	}
4188 
4189 	if (xlt1) {
4190 		status = ice_prof_bld_xlt1(blk, b, chgs);
4191 		if (status)
4192 			goto error_tmp;
4193 	}
4194 
4195 	if (xlt2) {
4196 		status = ice_prof_bld_xlt2(blk, b, chgs);
4197 		if (status)
4198 			goto error_tmp;
4199 	}
4200 
4201 	/* After the package buffer build, check that the section count in the
4202 	 * buffer is non-zero and matches the number of sections detected for
4203 	 * the package update.
4204 	 */
4205 	pkg_sects = ice_pkg_buf_get_active_sections(b);
4206 	if (!pkg_sects || pkg_sects != sects) {
4207 		status = ICE_ERR_INVAL_SIZE;
4208 		goto error_tmp;
4209 	}
4210 
4211 	/* update package */
4212 	status = ice_update_pkg(hw, ice_pkg_buf(b), 1);
4213 	if (status == ICE_ERR_AQ_ERROR)
4214 		ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n");
4215 
4216 error_tmp:
4217 	ice_pkg_buf_free(hw, b);
4218 	return status;
4219 }
4220 
4221 /**
4222  * ice_update_fd_mask - set Flow Director Field Vector mask for a profile
4223  * @hw: pointer to the HW struct
4224  * @prof_id: profile ID
4225  * @mask_sel: mask select
4226  *
4227  * This function enables any of the masks selected by the mask select
4228  * parameter for the specified profile.
4229  */
4230 static void ice_update_fd_mask(struct ice_hw *hw, u16 prof_id, u32 mask_sel)
4231 {
4232 	wr32(hw, GLQF_FDMASK_SEL(prof_id), mask_sel);
4233 
4234 	ice_debug(hw, ICE_DBG_INIT, "fd mask(%d): %x = %x\n", prof_id,
4235 		  GLQF_FDMASK_SEL(prof_id), mask_sel);
4236 }
4237 
4238 struct ice_fd_src_dst_pair {
4239 	u8 prot_id;
4240 	u8 count;
4241 	u16 off;
4242 };
4243 
4244 static const struct ice_fd_src_dst_pair ice_fd_pairs[] = {
4245 	/* These are defined in pairs */
4246 	{ ICE_PROT_IPV4_OF_OR_S, 2, 12 },
4247 	{ ICE_PROT_IPV4_OF_OR_S, 2, 16 },
4248 
4249 	{ ICE_PROT_IPV4_IL, 2, 12 },
4250 	{ ICE_PROT_IPV4_IL, 2, 16 },
4251 
4252 	{ ICE_PROT_IPV6_OF_OR_S, 8, 8 },
4253 	{ ICE_PROT_IPV6_OF_OR_S, 8, 24 },
4254 
4255 	{ ICE_PROT_IPV6_IL, 8, 8 },
4256 	{ ICE_PROT_IPV6_IL, 8, 24 },
4257 
4258 	{ ICE_PROT_TCP_IL, 1, 0 },
4259 	{ ICE_PROT_TCP_IL, 1, 2 },
4260 
4261 	{ ICE_PROT_UDP_OF, 1, 0 },
4262 	{ ICE_PROT_UDP_OF, 1, 2 },
4263 
4264 	{ ICE_PROT_UDP_IL_OR_S, 1, 0 },
4265 	{ ICE_PROT_UDP_IL_OR_S, 1, 2 },
4266 
4267 	{ ICE_PROT_SCTP_IL, 1, 0 },
4268 	{ ICE_PROT_SCTP_IL, 1, 2 }
4269 };
4270 
4271 #define ICE_FD_SRC_DST_PAIR_COUNT	ARRAY_SIZE(ice_fd_pairs)
4272 
4273 /**
4274  * ice_update_fd_swap - set register appropriately for a FD FV extraction
4275  * @hw: pointer to the HW struct
4276  * @prof_id: profile ID
4277  * @es: extraction sequence (length of array is determined by the block)
4278  */
4279 static enum ice_status
4280 ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
4281 {
4282 	DECLARE_BITMAP(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
4283 	u8 pair_start[ICE_FD_SRC_DST_PAIR_COUNT] = { 0 };
4284 #define ICE_FD_FV_NOT_FOUND (-2)
4285 	s8 first_free = ICE_FD_FV_NOT_FOUND;
4286 	u8 used[ICE_MAX_FV_WORDS] = { 0 };
4287 	s8 orig_free, si;
4288 	u32 mask_sel = 0;
4289 	u8 i, j, k;
4290 
4291 	bitmap_zero(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
4292 
4293 	/* This code assumes that the Flow Director field vectors are assigned
4294 	 * from the end of the FV indexes working towards the zero index, that
4295 	 * only complete fields will be included and will be consecutive, and
4296 	 * that there are no gaps between valid indexes.
4297 	 */
4298 
4299 	/* Determine swap fields present */
4300 	for (i = 0; i < hw->blk[ICE_BLK_FD].es.fvw; i++) {
4301 		/* Find the first free entry, assuming right to left population.
4302 		 * This is where we can start adding additional pairs if needed.
4303 		 */
4304 		if (first_free == ICE_FD_FV_NOT_FOUND && es[i].prot_id !=
4305 		    ICE_PROT_INVALID)
4306 			first_free = i - 1;
4307 
4308 		for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
4309 			if (es[i].prot_id == ice_fd_pairs[j].prot_id &&
4310 			    es[i].off == ice_fd_pairs[j].off) {
4311 				set_bit(j, pair_list);
4312 				pair_start[j] = i;
4313 			}
4314 	}
4315 
4316 	orig_free = first_free;
4317 
4318 	/* determine missing swap fields that need to be added */
4319 	for (i = 0; i < ICE_FD_SRC_DST_PAIR_COUNT; i += 2) {
4320 		u8 bit1 = test_bit(i + 1, pair_list);
4321 		u8 bit0 = test_bit(i, pair_list);
4322 
4323 		if (bit0 ^ bit1) {
4324 			u8 index;
4325 
4326 			/* add the appropriate 'paired' entry */
4327 			if (!bit0)
4328 				index = i;
4329 			else
4330 				index = i + 1;
4331 
4332 			/* check for room */
4333 			if (first_free + 1 < (s8)ice_fd_pairs[index].count)
4334 				return ICE_ERR_MAX_LIMIT;
4335 
4336 			/* place in extraction sequence */
4337 			for (k = 0; k < ice_fd_pairs[index].count; k++) {
4338 				es[first_free - k].prot_id =
4339 					ice_fd_pairs[index].prot_id;
4340 				es[first_free - k].off =
4341 					ice_fd_pairs[index].off + (k * 2);
4342 
4343 				if (k > first_free)
4344 					return ICE_ERR_OUT_OF_RANGE;
4345 
4346 				/* keep track of non-relevant fields */
4347 				mask_sel |= BIT(first_free - k);
4348 			}
4349 
4350 			pair_start[index] = first_free;
4351 			first_free -= ice_fd_pairs[index].count;
4352 		}
4353 	}
4354 
4355 	/* fill in the swap array */
4356 	si = hw->blk[ICE_BLK_FD].es.fvw - 1;
4357 	while (si >= 0) {
4358 		u8 indexes_used = 1;
4359 
4360 		/* assume flat at this index */
4361 #define ICE_SWAP_VALID	0x80
4362 		used[si] = si | ICE_SWAP_VALID;
4363 
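		/* indexes at or below the original free mark have no FV entry
		 * to pair (the FV is populated from the end), so keep the
		 * identity mapping for them
		 */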
4364 		if (orig_free == ICE_FD_FV_NOT_FOUND || si <= orig_free) {
4365 			si -= indexes_used;
4366 			continue;
4367 		}
4368 
4369 		/* check for a swap location */
4370 		for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
4371 			if (es[si].prot_id == ice_fd_pairs[j].prot_id &&
4372 			    es[si].off == ice_fd_pairs[j].off) {
4373 				u8 idx;
4374 
4375 				/* determine the appropriate matching field */
4376 				idx = j + ((j % 2) ? -1 : 1);
4377 
4378 				indexes_used = ice_fd_pairs[idx].count;
4379 				for (k = 0; k < indexes_used; k++) {
4380 					used[si - k] = (pair_start[idx] - k) |
4381 						ICE_SWAP_VALID;
4382 				}
4383 
4384 				break;
4385 			}
4386 
4387 		si -= indexes_used;
4388 	}
4389 
4390 	/* for each set of 4 swap and 4 inset indexes, write the appropriate
4391 	 * register
4392 	 */
4393 	for (j = 0; j < hw->blk[ICE_BLK_FD].es.fvw / 4; j++) {
4394 		u32 raw_swap = 0;
4395 		u32 raw_in = 0;
4396 
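		/* pack four one-byte swap indexes per 32-bit register; bytes
		 * added only as pairing filler (tracked in mask_sel) are left
		 * as zero
		 */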
4397 		for (k = 0; k < 4; k++) {
4398 			u8 idx;
4399 
4400 			idx = (j * 4) + k;
4401 			if (used[idx] && !(mask_sel & BIT(idx))) {
4402 				raw_swap |= used[idx] << (k * BITS_PER_BYTE);
4403 #define ICE_INSET_DFLT 0x9f
4404 				raw_in |= ICE_INSET_DFLT << (k * BITS_PER_BYTE);
4405 			}
4406 		}
4407 
4408 		/* write the appropriate swap register set */
4409 		wr32(hw, GLQF_FDSWAP(prof_id, j), raw_swap);
4410 
4411 		ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): %x = %08x\n",
4412 			  prof_id, j, GLQF_FDSWAP(prof_id, j), raw_swap);
4413 
4414 		/* write the appropriate inset register set */
4415 		wr32(hw, GLQF_FDINSET(prof_id, j), raw_in);
4416 
4417 		ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): %x = %08x\n",
4418 			  prof_id, j, GLQF_FDINSET(prof_id, j), raw_in);
4419 	}
4420 
4421 	/* initially clear the mask select for this profile */
4422 	ice_update_fd_mask(hw, prof_id, 0);
4423 
4424 	return 0;
4425 }
4426 
4427 /* The entries here need to match the order of enum ice_ptype_attrib */
4428 static const struct ice_ptype_attrib_info ice_ptype_attributes[] = {
4429 	{ ICE_GTP_PDU_EH,	ICE_GTP_PDU_FLAG_MASK },
4430 	{ ICE_GTP_SESSION,	ICE_GTP_FLAGS_MASK },
4431 	{ ICE_GTP_DOWNLINK,	ICE_GTP_FLAGS_MASK },
4432 	{ ICE_GTP_UPLINK,	ICE_GTP_FLAGS_MASK },
4433 };
4434 
4435 /**
4436  * ice_get_ptype_attrib_info - get PTYPE attribute information
4437  * @type: attribute type
4438  * @info: pointer to variable to receive the attribute information
4439  */
4440 static void
4441 ice_get_ptype_attrib_info(enum ice_ptype_attrib_type type,
4442 			  struct ice_ptype_attrib_info *info)
4443 {
4444 	*info = ice_ptype_attributes[type];
4445 }
4446 
4447 /**
4448  * ice_add_prof_attrib - add any PTG with attributes to profile
4449  * @prof: pointer to the profile to which PTG entries will be added
4450  * @ptg: PTG to be added
4451  * @ptype: PTYPE that needs to be looked up
4452  * @attr: array of attributes that will be considered
4453  * @attr_cnt: number of elements in the attribute array
4454  */
4455 static enum ice_status
4456 ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
4457 		    const struct ice_ptype_attributes *attr, u16 attr_cnt)
4458 {
4459 	bool found = false;
4460 	u16 i;
4461 
4462 	for (i = 0; i < attr_cnt; i++)
4463 		if (attr[i].ptype == ptype) {
4464 			found = true;
4465 
4466 			prof->ptg[prof->ptg_cnt] = ptg;
4467 			ice_get_ptype_attrib_info(attr[i].attrib,
4468 						  &prof->attr[prof->ptg_cnt]);
4469 
4470 			if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
4471 				return ICE_ERR_MAX_LIMIT;
4472 		}
4473 
4474 	if (!found)
4475 		return ICE_ERR_DOES_NOT_EXIST;
4476 
4477 	return 0;
4478 }
4479 
4480 /**
4481  * ice_add_prof - add profile
4482  * @hw: pointer to the HW struct
4483  * @blk: hardware block
4484  * @id: profile tracking ID
4485  * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
4486  * @attr: array of attributes
4487  * @attr_cnt: number of elements in attr array
4488  * @es: extraction sequence (length of array is determined by the block)
4489  * @masks: mask for extraction sequence
4490  *
4491  * This function registers a profile, which matches a set of PTYPES with a
4492  * particular extraction sequence. While the hardware profile is allocated,
4493  * it will not be written until the first call to ice_add_flow that specifies
4494  * the ID value used here.
4495  */
4496 enum ice_status
4497 ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
4498 	     const struct ice_ptype_attributes *attr, u16 attr_cnt,
4499 	     struct ice_fv_word *es, u16 *masks)
4500 {
4501 	u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
4502 	DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
4503 	struct ice_prof_map *prof;
4504 	enum ice_status status;
4505 	u8 byte = 0;
4506 	u8 prof_id;
4507 
4508 	bitmap_zero(ptgs_used, ICE_XLT1_CNT);
4509 
4510 	mutex_lock(&hw->blk[blk].es.prof_map_lock);
4511 
4512 	/* search for existing profile */
4513 	status = ice_find_prof_id_with_mask(hw, blk, es, masks, &prof_id);
4514 	if (status) {
4515 		/* allocate profile ID */
4516 		status = ice_alloc_prof_id(hw, blk, &prof_id);
4517 		if (status)
4518 			goto err_ice_add_prof;
4519 		if (blk == ICE_BLK_FD) {
4520 			/* For Flow Director block, the extraction sequence may
4521 			 * need to be altered in the case where there are paired
4522 			 * fields that have no match. This is necessary because
4523 			 * for Flow Director, src and dest fields need to be paired
4524 			 * for filter programming and these values are swapped
4525 			 * during Tx.
4526 			 */
4527 			status = ice_update_fd_swap(hw, prof_id, es);
4528 			if (status)
4529 				goto err_ice_add_prof;
4530 		}
4531 		status = ice_update_prof_masking(hw, blk, prof_id, masks);
4532 		if (status)
4533 			goto err_ice_add_prof;
4534 
4535 		/* and write new es */
4536 		ice_write_es(hw, blk, prof_id, es);
4537 	}
4538 
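	/* take a reference on the profile, whether found or newly allocated */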
4539 	ice_prof_inc_ref(hw, blk, prof_id);
4540 
4541 	/* add profile info */
4542 	prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*prof), GFP_KERNEL);
4543 	if (!prof) {
4544 		status = ICE_ERR_NO_MEMORY;
4545 		goto err_ice_add_prof;
4546 	}
4547 
4548 	prof->profile_cookie = id;
4549 	prof->prof_id = prof_id;
4550 	prof->ptg_cnt = 0;
4551 	prof->context = 0;
4552 
4553 	/* build list of ptgs */
4554 	while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
4555 		u8 bit;
4556 
4557 		if (!ptypes[byte]) {
4558 			bytes--;
4559 			byte++;
4560 			continue;
4561 		}
4562 
4563 		/* Examine 8 bits per byte */
4564 		for_each_set_bit(bit, (unsigned long *)&ptypes[byte],
4565 				 BITS_PER_BYTE) {
4566 			u16 ptype;
4567 			u8 ptg;
4568 
4569 			ptype = byte * BITS_PER_BYTE + bit;
4570 
4571 			/* The package should place all ptypes in a non-zero
4572 			 * PTG, so the following call should never fail.
4573 			 */
4574 			if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
4575 				continue;
4576 
4577 			/* If PTG is already added, skip and continue */
4578 			if (test_bit(ptg, ptgs_used))
4579 				continue;
4580 
4581 			set_bit(ptg, ptgs_used);
4582 			/* Check to see if there are any attributes for
4583 			 * this PTYPE, and add them if found.
4584 			 */
4585 			status = ice_add_prof_attrib(prof, ptg, ptype,
4586 						     attr, attr_cnt);
4587 			if (status == ICE_ERR_MAX_LIMIT)
4588 				break;
4589 			if (status) {
4590 				/* This is simply a PTYPE/PTG with no
4591 				 * attributes
4592 				 */
4593 				prof->ptg[prof->ptg_cnt] = ptg;
4594 				prof->attr[prof->ptg_cnt].flags = 0;
4595 				prof->attr[prof->ptg_cnt].mask = 0;
4596 
4597 				if (++prof->ptg_cnt >=
4598 				    ICE_MAX_PTG_PER_PROFILE)
4599 					break;
4600 			}
4601 		}
4602 
4603 		bytes--;
4604 		byte++;
4605 	}
4606 
4607 	list_add(&prof->list, &hw->blk[blk].es.prof_map);
4608 	status = 0;
4609 
4610 err_ice_add_prof:
4611 	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
4612 	return status;
4613 }
4614 
4615 /**
4616  * ice_search_prof_id - Search for a profile tracking ID
4617  * @hw: pointer to the HW struct
4618  * @blk: hardware block
4619  * @id: profile tracking ID
4620  *
4621  * This will search for a profile tracking ID which was previously added.
4622  * The profile map lock should be held before calling this function.
4623  */
4624 static struct ice_prof_map *
4625 ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
4626 {
4627 	struct ice_prof_map *entry = NULL;
4628 	struct ice_prof_map *map;
4629 
4630 	list_for_each_entry(map, &hw->blk[blk].es.prof_map, list)
4631 		if (map->profile_cookie == id) {
4632 			entry = map;
4633 			break;
4634 		}
4635 
4636 	return entry;
4637 }
4638 
4639 /**
4640  * ice_vsig_prof_id_count - count profiles in a VSIG
4641  * @hw: pointer to the HW struct
4642  * @blk: hardware block
4643  * @vsig: VSIG whose profiles are to be counted
4644  */
4645 static u16
4646 ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig)
4647 {
4648 	u16 idx = vsig & ICE_VSIG_IDX_M, count = 0;
4649 	struct ice_vsig_prof *p;
4650 
4651 	list_for_each_entry(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4652 			    list)
4653 		count++;
4654 
4655 	return count;
4656 }
4657 
4658 /**
4659  * ice_rel_tcam_idx - release a TCAM index
4660  * @hw: pointer to the HW struct
4661  * @blk: hardware block
4662  * @idx: the index to release
4663  */
4664 static enum ice_status
4665 ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
4666 {
4667 	/* Masks to invoke a never match entry */
4668 	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
4669 	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF };
4670 	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
4671 	enum ice_status status;
4672 
4673 	/* write the TCAM entry */
4674 	status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk,
4675 				      dc_msk, nm_msk);
4676 	if (status)
4677 		return status;
4678 
4679 	/* release the TCAM entry */
4680 	status = ice_free_tcam_ent(hw, blk, idx);
4681 
4682 	return status;
4683 }
4684 
4685 /**
4686  * ice_rem_prof_id - remove one profile from a VSIG
4687  * @hw: pointer to the HW struct
4688  * @blk: hardware block
4689  * @prof: pointer to profile structure to remove
4690  */
4691 static enum ice_status
4692 ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
4693 		struct ice_vsig_prof *prof)
4694 {
4695 	enum ice_status status;
4696 	u16 i;
4697 
4698 	for (i = 0; i < prof->tcam_count; i++)
4699 		if (prof->tcam[i].in_use) {
4700 			prof->tcam[i].in_use = false;
4701 			status = ice_rel_tcam_idx(hw, blk,
4702 						  prof->tcam[i].tcam_idx);
4703 			if (status)
4704 				return ICE_ERR_HW_TABLE;
4705 		}
4706 
4707 	return 0;
4708 }
4709 
4710 /**
4711  * ice_rem_vsig - remove VSIG
4712  * @hw: pointer to the HW struct
4713  * @blk: hardware block
4714  * @vsig: the VSIG to remove
4715  * @chg: the change list
4716  */
4717 static enum ice_status
4718 ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
4719 	     struct list_head *chg)
4720 {
4721 	u16 idx = vsig & ICE_VSIG_IDX_M;
4722 	struct ice_vsig_vsi *vsi_cur;
4723 	struct ice_vsig_prof *d, *t;
4724 	enum ice_status status;
4725 
4726 	/* remove TCAM entries */
4727 	list_for_each_entry_safe(d, t,
4728 				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4729 				 list) {
4730 		status = ice_rem_prof_id(hw, blk, d);
4731 		if (status)
4732 			return status;
4733 
4734 		list_del(&d->list);
4735 		devm_kfree(ice_hw_to_dev(hw), d);
4736 	}
4737 
4738 	/* Move all VSIS associated with this VSIG to the default VSIG */
4739 	vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
4740 	/* If the VSIG has at least 1 VSI then iterate through the list
4741 	 * and remove the VSIs before deleting the group.
4742 	 */
4743 	if (vsi_cur)
4744 		do {
4745 			struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
4746 			struct ice_chs_chg *p;
4747 
4748 			p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
4749 					 GFP_KERNEL);
4750 			if (!p)
4751 				return ICE_ERR_NO_MEMORY;
4752 
4753 			p->type = ICE_VSIG_REM;
4754 			p->orig_vsig = vsig;
4755 			p->vsig = ICE_DEFAULT_VSIG;
4756 			p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis;
4757 
4758 			list_add(&p->list_entry, chg);
4759 
4760 			vsi_cur = tmp;
4761 		} while (vsi_cur);
4762 
4763 	return ice_vsig_free(hw, blk, vsig);
4764 }
4765 
4766 /**
4767  * ice_rem_prof_id_vsig - remove a specific profile from a VSIG
4768  * @hw: pointer to the HW struct
4769  * @blk: hardware block
4770  * @vsig: VSIG to remove the profile from
4771  * @hdl: profile handle indicating which profile to remove
4772  * @chg: list to receive a record of changes
4773  */
4774 static enum ice_status
4775 ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
4776 		     struct list_head *chg)
4777 {
4778 	u16 idx = vsig & ICE_VSIG_IDX_M;
4779 	struct ice_vsig_prof *p, *t;
4780 	enum ice_status status;
4781 
4782 	list_for_each_entry_safe(p, t,
4783 				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4784 				 list)
4785 		if (p->profile_cookie == hdl) {
4786 			if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
4787 				/* this is the last profile, remove the VSIG */
4788 				return ice_rem_vsig(hw, blk, vsig, chg);
4789 
4790 			status = ice_rem_prof_id(hw, blk, p);
4791 			if (!status) {
4792 				list_del(&p->list);
4793 				devm_kfree(ice_hw_to_dev(hw), p);
4794 			}
4795 			return status;
4796 		}
4797 
4798 	return ICE_ERR_DOES_NOT_EXIST;
4799 }
4800 
4801 /**
4802  * ice_rem_flow_all - remove all flows with a particular profile
4803  * @hw: pointer to the HW struct
4804  * @blk: hardware block
4805  * @id: profile tracking ID
4806  */
4807 static enum ice_status
4808 ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
4809 {
4810 	struct ice_chs_chg *del, *tmp;
4811 	enum ice_status status;
4812 	struct list_head chg;
4813 	u16 i;
4814 
4815 	INIT_LIST_HEAD(&chg);
4816 
4817 	for (i = 1; i < ICE_MAX_VSIGS; i++)
4818 		if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) {
4819 			if (ice_has_prof_vsig(hw, blk, i, id)) {
4820 				status = ice_rem_prof_id_vsig(hw, blk, i, id,
4821 							      &chg);
4822 				if (status)
4823 					goto err_ice_rem_flow_all;
4824 			}
4825 		}
4826 
4827 	status = ice_upd_prof_hw(hw, blk, &chg);
4828 
4829 err_ice_rem_flow_all:
4830 	list_for_each_entry_safe(del, tmp, &chg, list_entry) {
4831 		list_del(&del->list_entry);
4832 		devm_kfree(ice_hw_to_dev(hw), del);
4833 	}
4834 
4835 	return status;
4836 }
4837 
4838 /**
4839  * ice_rem_prof - remove profile
4840  * @hw: pointer to the HW struct
4841  * @blk: hardware block
4842  * @id: profile tracking ID
4843  *
4844  * This will remove the profile specified by the ID parameter, which was
4845  * previously created through ice_add_prof. If any existing entries
4846  * are associated with this profile, they will be removed as well.
4847  */
4848 enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
4849 {
4850 	struct ice_prof_map *pmap;
4851 	enum ice_status status;
4852 
4853 	mutex_lock(&hw->blk[blk].es.prof_map_lock);
4854 
4855 	pmap = ice_search_prof_id(hw, blk, id);
4856 	if (!pmap) {
4857 		status = ICE_ERR_DOES_NOT_EXIST;
4858 		goto err_ice_rem_prof;
4859 	}
4860 
4861 	/* remove all flows with this profile */
4862 	status = ice_rem_flow_all(hw, blk, pmap->profile_cookie);
4863 	if (status)
4864 		goto err_ice_rem_prof;
4865 
4866 	/* dereference profile, and possibly remove */
4867 	ice_prof_dec_ref(hw, blk, pmap->prof_id);
4868 
4869 	list_del(&pmap->list);
4870 	devm_kfree(ice_hw_to_dev(hw), pmap);
4871 
4872 err_ice_rem_prof:
4873 	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
4874 	return status;
4875 }
4876 
4877 /**
4878  * ice_get_prof - get profile
4879  * @hw: pointer to the HW struct
4880  * @blk: hardware block
4881  * @hdl: profile handle
4882  * @chg: change list
4883  */
4884 static enum ice_status
4885 ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
4886 	     struct list_head *chg)
4887 {
4888 	enum ice_status status = 0;
4889 	struct ice_prof_map *map;
4890 	struct ice_chs_chg *p;
4891 	u16 i;
4892 
4893 	mutex_lock(&hw->blk[blk].es.prof_map_lock);
4894 	/* Get the details on the profile specified by the handle ID */
4895 	map = ice_search_prof_id(hw, blk, hdl);
4896 	if (!map) {
4897 		status = ICE_ERR_DOES_NOT_EXIST;
4898 		goto err_ice_get_prof;
4899 	}
4900 
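	/* queue the profile's extraction sequence once; the written[] flag
	 * prevents adding the same ES change again for this profile ID
	 */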
4901 	for (i = 0; i < map->ptg_cnt; i++)
4902 		if (!hw->blk[blk].es.written[map->prof_id]) {
4903 			/* add ES to change list */
4904 			p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
4905 					 GFP_KERNEL);
4906 			if (!p) {
4907 				status = ICE_ERR_NO_MEMORY;
4908 				goto err_ice_get_prof;
4909 			}
4910 
4911 			p->type = ICE_PTG_ES_ADD;
4912 			p->ptype = 0;
4913 			p->ptg = map->ptg[i];
4914 			p->add_ptg = 0;
4915 
4916 			p->add_prof = 1;
4917 			p->prof_id = map->prof_id;
4918 
4919 			hw->blk[blk].es.written[map->prof_id] = true;
4920 
4921 			list_add(&p->list_entry, chg);
4922 		}
4923 
4924 err_ice_get_prof:
4925 	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
4926 	/* let caller clean up the change list */
4927 	return status;
4928 }
4929 
4930 /**
4931  * ice_get_profs_vsig - get a copy of the list of profiles from a VSIG
4932  * @hw: pointer to the HW struct
4933  * @blk: hardware block
4934  * @vsig: VSIG from which to copy the list
4935  * @lst: output list
4936  *
4937  * This routine makes a copy of the list of profiles in the specified VSIG.
4938  */
4939 static enum ice_status
4940 ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
4941 		   struct list_head *lst)
4942 {
4943 	struct ice_vsig_prof *ent1, *ent2;
4944 	u16 idx = vsig & ICE_VSIG_IDX_M;
4945 
4946 	list_for_each_entry(ent1, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4947 			    list) {
4948 		struct ice_vsig_prof *p;
4949 
4950 		/* copy to the input list */
4951 		p = devm_kmemdup(ice_hw_to_dev(hw), ent1, sizeof(*p),
4952 				 GFP_KERNEL);
4953 		if (!p)
4954 			goto err_ice_get_profs_vsig;
4955 
4956 		list_add_tail(&p->list, lst);
4957 	}
4958 
4959 	return 0;
4960 
4961 err_ice_get_profs_vsig:
4962 	list_for_each_entry_safe(ent1, ent2, lst, list) {
4963 		list_del(&ent1->list);
4964 		devm_kfree(ice_hw_to_dev(hw), ent1);
4965 	}
4966 
4967 	return ICE_ERR_NO_MEMORY;
4968 }
4969 
4970 /**
4971  * ice_add_prof_to_lst - add profile entry to a list
4972  * @hw: pointer to the HW struct
4973  * @blk: hardware block
4974  * @lst: the list to be added to
4975  * @hdl: profile handle of entry to add
4976  */
4977 static enum ice_status
4978 ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
4979 		    struct list_head *lst, u64 hdl)
4980 {
4981 	enum ice_status status = 0;
4982 	struct ice_prof_map *map;
4983 	struct ice_vsig_prof *p;
4984 	u16 i;
4985 
4986 	mutex_lock(&hw->blk[blk].es.prof_map_lock);
4987 	map = ice_search_prof_id(hw, blk, hdl);
4988 	if (!map) {
4989 		status = ICE_ERR_DOES_NOT_EXIST;
4990 		goto err_ice_add_prof_to_lst;
4991 	}
4992 
4993 	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
4994 	if (!p) {
4995 		status = ICE_ERR_NO_MEMORY;
4996 		goto err_ice_add_prof_to_lst;
4997 	}
4998 
4999 	p->profile_cookie = map->profile_cookie;
5000 	p->prof_id = map->prof_id;
5001 	p->tcam_count = map->ptg_cnt;
5002 
5003 	for (i = 0; i < map->ptg_cnt; i++) {
5004 		p->tcam[i].prof_id = map->prof_id;
5005 		p->tcam[i].tcam_idx = ICE_INVALID_TCAM;
5006 		p->tcam[i].ptg = map->ptg[i];
5007 	}
5008 
5009 	list_add(&p->list, lst);
5010 
5011 err_ice_add_prof_to_lst:
5012 	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
5013 	return status;
5014 }
5015 
5016 /**
5017  * ice_move_vsi - move VSI to another VSIG
5018  * @hw: pointer to the HW struct
5019  * @blk: hardware block
5020  * @vsi: the VSI to move
5021  * @vsig: the VSIG to move the VSI to
5022  * @chg: the change list
5023  */
5024 static enum ice_status
5025 ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
5026 	     struct list_head *chg)
5027 {
5028 	enum ice_status status;
5029 	struct ice_chs_chg *p;
5030 	u16 orig_vsig;
5031 
5032 	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
5033 	if (!p)
5034 		return ICE_ERR_NO_MEMORY;
5035 
5036 	status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
5037 	if (!status)
5038 		status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
5039 
5040 	if (status) {
5041 		devm_kfree(ice_hw_to_dev(hw), p);
5042 		return status;
5043 	}
5044 
5045 	p->type = ICE_VSI_MOVE;
5046 	p->vsi = vsi;
5047 	p->orig_vsig = orig_vsig;
5048 	p->vsig = vsig;
5049 
5050 	list_add(&p->list_entry, chg);
5051 
5052 	return 0;
5053 }
5054 
5055 /**
5056  * ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list
5057  * @hw: pointer to the HW struct
5058  * @idx: the index of the TCAM entry to remove
5059  * @chg: the list of change structures to search
5060  */
5061 static void
5062 ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct list_head *chg)
5063 {
5064 	struct ice_chs_chg *pos, *tmp;
5065 
5066 	list_for_each_entry_safe(tmp, pos, chg, list_entry)
5067 		if (tmp->type == ICE_TCAM_ADD && tmp->tcam_idx == idx) {
5068 			list_del(&tmp->list_entry);
5069 			devm_kfree(ice_hw_to_dev(hw), tmp);
5070 		}
5071 }
5072 
5073 /**
5074  * ice_prof_tcam_ena_dis - add enable or disable TCAM change
5075  * @hw: pointer to the HW struct
5076  * @blk: hardware block
5077  * @enable: true to enable, false to disable
5078  * @vsig: the VSIG of the TCAM entry
5079  * @tcam: pointer to the TCAM info structure of the TCAM to enable or disable
5080  * @chg: the change list
5081  *
5082  * This function appends an enable or disable TCAM change to the change list
5083  */
5084 static enum ice_status
5085 ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
5086 		      u16 vsig, struct ice_tcam_inf *tcam,
5087 		      struct list_head *chg)
5088 {
5089 	enum ice_status status;
5090 	struct ice_chs_chg *p;
5091 
5092 	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
5093 	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
5094 	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
5095 
5096 	/* if disabling, free the TCAM */
5097 	if (!enable) {
5098 		status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx);
5099 
5100 		/* if we have already created a change for this TCAM entry, then
5101 		 * we need to remove that entry, in order to prevent writing to
5102 		 * a TCAM entry we will no longer own.
5103 		 */
5104 		ice_rem_chg_tcam_ent(hw, tcam->tcam_idx, chg);
5105 		tcam->tcam_idx = 0;
5106 		tcam->in_use = 0;
5107 		return status;
5108 	}
5109 
5110 	/* for re-enabling, reallocate a TCAM */
5111 	/* for entries with empty attribute masks, allocate entry from
5112 	 * the bottom of the TCAM table; otherwise, allocate from the
5113 	 * top of the table in order to give it higher priority
5114 	 */
5115 	status = ice_alloc_tcam_ent(hw, blk, tcam->attr.mask == 0,
5116 				    &tcam->tcam_idx);
5117 	if (status)
5118 		return status;
5119 
5120 	/* add TCAM to change list */
5121 	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
5122 	if (!p)
5123 		return ICE_ERR_NO_MEMORY;
5124 
5125 	status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
5126 				      tcam->ptg, vsig, 0, tcam->attr.flags,
5127 				      vl_msk, dc_msk, nm_msk);
5128 	if (status)
5129 		goto err_ice_prof_tcam_ena_dis;
5130 
5131 	tcam->in_use = 1;
5132 
5133 	p->type = ICE_TCAM_ADD;
5134 	p->add_tcam_idx = true;
5135 	p->prof_id = tcam->prof_id;
5136 	p->ptg = tcam->ptg;
5137 	p->vsig = 0;
5138 	p->tcam_idx = tcam->tcam_idx;
5139 
5140 	/* log change */
5141 	list_add(&p->list_entry, chg);
5142 
5143 	return 0;
5144 
5145 err_ice_prof_tcam_ena_dis:
5146 	devm_kfree(ice_hw_to_dev(hw), p);
5147 	return status;
5148 }
5149 
5150 /**
5151  * ice_adj_prof_priorities - adjust profile based on priorities
5152  * @hw: pointer to the HW struct
5153  * @blk: hardware block
5154  * @vsig: the VSIG for which to adjust profile priorities
5155  * @chg: the change list
5156  */
5157 static enum ice_status
5158 ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
5159 			struct list_head *chg)
5160 {
5161 	DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
5162 	struct ice_vsig_prof *t;
5163 	enum ice_status status;
5164 	u16 idx;
5165 
5166 	bitmap_zero(ptgs_used, ICE_XLT1_CNT);
5167 	idx = vsig & ICE_VSIG_IDX_M;
5168 
5169 	/* Priority is based on the order in which the profiles are added. The
5170 	 * newest added profile has highest priority and the oldest added
5171 	 * profile has the lowest priority. Since the profile property list for
5172 	 * a VSIG is sorted from newest to oldest, this code traverses the list
5173 	 * in order and enables the first of each PTG that it finds (that is not
5174 	 * already enabled); it also disables any duplicate PTGs that it finds
5175 	 * in the older profiles (that are currently enabled).
5176 	 */
5177 
5178 	list_for_each_entry(t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
5179 			    list) {
5180 		u16 i;
5181 
5182 		for (i = 0; i < t->tcam_count; i++) {
5183 			/* Scan the priorities from newest to oldest.
5184 			 * Make sure that the newest profiles take priority.
5185 			 */
5186 			if (test_bit(t->tcam[i].ptg, ptgs_used) &&
5187 			    t->tcam[i].in_use) {
5188 				/* need to mark this PTG as never match, as it
5189 				 * was already in use and therefore duplicate
5190 				 * (and lower priority)
5191 				 */
5192 				status = ice_prof_tcam_ena_dis(hw, blk, false,
5193 							       vsig,
5194 							       &t->tcam[i],
5195 							       chg);
5196 				if (status)
5197 					return status;
5198 			} else if (!test_bit(t->tcam[i].ptg, ptgs_used) &&
5199 				   !t->tcam[i].in_use) {
5200 				/* need to enable this PTG, as it is not in use
5201 				 * and not enabled (highest priority)
5202 				 */
5203 				status = ice_prof_tcam_ena_dis(hw, blk, true,
5204 							       vsig,
5205 							       &t->tcam[i],
5206 							       chg);
5207 				if (status)
5208 					return status;
5209 			}
5210 
5211 			/* keep track of used ptgs */
5212 			set_bit(t->tcam[i].ptg, ptgs_used);
5213 		}
5214 	}
5215 
5216 	return 0;
5217 }
5218 
5219 /**
5220  * ice_add_prof_id_vsig - add profile to VSIG
5221  * @hw: pointer to the HW struct
5222  * @blk: hardware block
5223  * @vsig: the VSIG to which this profile is to be added
5224  * @hdl: the profile handle indicating the profile to add
5225  * @rev: true to add entries to the end of the list
5226  * @chg: the change list
5227  */
5228 static enum ice_status
5229 ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
5230 		     bool rev, struct list_head *chg)
5231 {
5232 	/* Masks that ignore flags */
5233 	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
5234 	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
5235 	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
5236 	enum ice_status status = 0;
5237 	struct ice_prof_map *map;
5238 	struct ice_vsig_prof *t;
5239 	struct ice_chs_chg *p;
5240 	u16 vsig_idx, i;
5241 
5242 	/* Error, if this VSIG already has this profile */
5243 	if (ice_has_prof_vsig(hw, blk, vsig, hdl))
5244 		return ICE_ERR_ALREADY_EXISTS;
5245 
5246 	/* new VSIG profile structure */
5247 	t = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*t), GFP_KERNEL);
5248 	if (!t)
5249 		return ICE_ERR_NO_MEMORY;
5250 
5251 	mutex_lock(&hw->blk[blk].es.prof_map_lock);
5252 	/* Get the details on the profile specified by the handle ID */
5253 	map = ice_search_prof_id(hw, blk, hdl);
5254 	if (!map) {
5255 		status = ICE_ERR_DOES_NOT_EXIST;
5256 		goto err_ice_add_prof_id_vsig;
5257 	}
5258 
5259 	t->profile_cookie = map->profile_cookie;
5260 	t->prof_id = map->prof_id;
5261 	t->tcam_count = map->ptg_cnt;
5262 
5263 	/* create TCAM entries */
5264 	for (i = 0; i < map->ptg_cnt; i++) {
5265 		u16 tcam_idx;
5266 
5267 		/* add TCAM to change list */
5268 		p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
5269 		if (!p) {
5270 			status = ICE_ERR_NO_MEMORY;
5271 			goto err_ice_add_prof_id_vsig;
5272 		}
5273 
5274 		/* allocate the TCAM entry index */
5275 		/* for entries with empty attribute masks, allocate entry from
5276 		 * the bottom of the TCAM table; otherwise, allocate from the
5277 		 * top of the table in order to give it higher priority
5278 		 */
5279 		status = ice_alloc_tcam_ent(hw, blk, map->attr[i].mask == 0,
5280 					    &tcam_idx);
5281 		if (status) {
5282 			devm_kfree(ice_hw_to_dev(hw), p);
5283 			goto err_ice_add_prof_id_vsig;
5284 		}
5285 
5286 		t->tcam[i].ptg = map->ptg[i];
5287 		t->tcam[i].prof_id = map->prof_id;
5288 		t->tcam[i].tcam_idx = tcam_idx;
5289 		t->tcam[i].attr = map->attr[i];
5290 		t->tcam[i].in_use = true;
5291 
5292 		p->type = ICE_TCAM_ADD;
5293 		p->add_tcam_idx = true;
5294 		p->prof_id = t->tcam[i].prof_id;
5295 		p->ptg = t->tcam[i].ptg;
5296 		p->vsig = vsig;
5297 		p->tcam_idx = t->tcam[i].tcam_idx;
5298 
5299 		/* write the TCAM entry */
5300 		status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx,
5301 					      t->tcam[i].prof_id,
5302 					      t->tcam[i].ptg, vsig, 0, 0,
5303 					      vl_msk, dc_msk, nm_msk);
5304 		if (status) {
5305 			devm_kfree(ice_hw_to_dev(hw), p);
5306 			goto err_ice_add_prof_id_vsig;
5307 		}
5308 
5309 		/* log change */
5310 		list_add(&p->list_entry, chg);
5311 	}
5312 
5313 	/* add profile to VSIG */
5314 	vsig_idx = vsig & ICE_VSIG_IDX_M;
5315 	if (rev)
5316 		list_add_tail(&t->list,
5317 			      &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
5318 	else
5319 		list_add(&t->list,
5320 			 &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
5321 
5322 	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
5323 	return status;
5324 
5325 err_ice_add_prof_id_vsig:
5326 	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
5327 	/* let caller clean up the change list */
5328 	devm_kfree(ice_hw_to_dev(hw), t);
5329 	return status;
5330 }
5331 
5332 /**
5333  * ice_create_prof_id_vsig - add a new VSIG with a single profile
5334  * @hw: pointer to the HW struct
5335  * @blk: hardware block
5336  * @vsi: the initial VSI that will be in VSIG
5337  * @hdl: the profile handle of the profile that will be added to the VSIG
5338  * @chg: the change list
5339  */
5340 static enum ice_status
5341 ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
5342 			struct list_head *chg)
5343 {
5344 	enum ice_status status;
5345 	struct ice_chs_chg *p;
5346 	u16 new_vsig;
5347 
5348 	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
5349 	if (!p)
5350 		return ICE_ERR_NO_MEMORY;
5351 
5352 	new_vsig = ice_vsig_alloc(hw, blk);
5353 	if (!new_vsig) {
5354 		status = ICE_ERR_HW_TABLE;
5355 		goto err_ice_create_prof_id_vsig;
5356 	}
5357 
5358 	status = ice_move_vsi(hw, blk, vsi, new_vsig, chg);
5359 	if (status)
5360 		goto err_ice_create_prof_id_vsig;
5361 
5362 	status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg);
5363 	if (status)
5364 		goto err_ice_create_prof_id_vsig;
5365 
5366 	p->type = ICE_VSIG_ADD;
5367 	p->vsi = vsi;
5368 	p->orig_vsig = ICE_DEFAULT_VSIG;
5369 	p->vsig = new_vsig;
5370 
5371 	list_add(&p->list_entry, chg);
5372 
5373 	return 0;
5374 
5375 err_ice_create_prof_id_vsig:
5376 	/* let caller clean up the change list */
5377 	devm_kfree(ice_hw_to_dev(hw), p);
5378 	return status;
5379 }
5380 
5381 /**
5382  * ice_create_vsig_from_lst - create a new VSIG with a list of profiles
5383  * @hw: pointer to the HW struct
5384  * @blk: hardware block
5385  * @vsi: the initial VSI that will be in VSIG
5386  * @lst: the list of profiles that will be added to the VSIG
5387  * @new_vsig: return of new VSIG
5388  * @chg: the change list
5389  */
5390 static enum ice_status
5391 ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
5392 			 struct list_head *lst, u16 *new_vsig,
5393 			 struct list_head *chg)
5394 {
5395 	struct ice_vsig_prof *t;
5396 	enum ice_status status;
5397 	u16 vsig;
5398 
5399 	vsig = ice_vsig_alloc(hw, blk);
5400 	if (!vsig)
5401 		return ICE_ERR_HW_TABLE;
5402 
5403 	status = ice_move_vsi(hw, blk, vsi, vsig, chg);
5404 	if (status)
5405 		return status;
5406 
5407 	list_for_each_entry(t, lst, list) {
5408 		/* Reverse the order here since we are copying the list */
5409 		status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
5410 					      true, chg);
5411 		if (status)
5412 			return status;
5413 	}
5414 
5415 	*new_vsig = vsig;
5416 
5417 	return 0;
5418 }
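
/* Note on the rev flag used above: list_for_each_entry() walks @lst from head
 * to tail. If each entry were inserted into the destination VSIG's list with
 * list_add() (head insertion), the copy would come out in reverse order;
 * passing rev == true makes ice_add_prof_id_vsig() use list_add_tail()
 * instead, so the copied list keeps the same profile order as the source.
 * For example, a source list A -> B -> C is reproduced as A -> B -> C rather
 * than C -> B -> A.
 */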
5419 
5420 /**
5421  * ice_find_prof_vsig - find a VSIG with a specific profile handle
5422  * @hw: pointer to the HW struct
5423  * @blk: hardware block
5424  * @hdl: the profile handle of the profile to search for
5425  * @vsig: returns the VSIG with the matching profile
5426  */
5427 static bool
5428 ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
5429 {
5430 	struct ice_vsig_prof *t;
5431 	enum ice_status status;
5432 	struct list_head lst;
5433 
5434 	INIT_LIST_HEAD(&lst);
5435 
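	/* build a temporary one-entry profile list so the generic
	 * ice_find_dup_props_vsig() search can be reused for the
	 * single-profile case
	 */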
5436 	t = kzalloc(sizeof(*t), GFP_KERNEL);
5437 	if (!t)
5438 		return false;
5439 
5440 	t->profile_cookie = hdl;
5441 	list_add(&t->list, &lst);
5442 
5443 	status = ice_find_dup_props_vsig(hw, blk, &lst, vsig);
5444 
5445 	list_del(&t->list);
5446 	kfree(t);
5447 
5448 	return !status;
5449 }
5450 
5451 /**
5452  * ice_add_prof_id_flow - add profile flow
5453  * @hw: pointer to the HW struct
5454  * @blk: hardware block
 * @vsi: the VSI to enable with the profile specified by @hdl
 * @hdl: profile handle
 *
 * Calling this function will update the hardware tables to enable the
 * profile indicated by @hdl for the VSI specified by @vsi. Once
 * successfully called, the flow will be enabled.
5461  */
5462 enum ice_status
5463 ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
5464 {
5465 	struct ice_vsig_prof *tmp1, *del1;
5466 	struct ice_chs_chg *tmp, *del;
5467 	struct list_head union_lst;
5468 	enum ice_status status;
5469 	struct list_head chg;
5470 	u16 vsig;
5471 
5472 	INIT_LIST_HEAD(&union_lst);
5473 	INIT_LIST_HEAD(&chg);
5474 
5475 	/* Get profile */
5476 	status = ice_get_prof(hw, blk, hdl, &chg);
5477 	if (status)
5478 		return status;
5479 
5480 	/* determine if VSI is already part of a VSIG */
5481 	status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
5482 	if (!status && vsig) {
5483 		bool only_vsi;
5484 		u16 or_vsig;
5485 		u16 ref;
5486 
5487 		/* found in VSIG */
5488 		or_vsig = vsig;
5489 
5490 		/* make sure that there is no overlap/conflict between the new
5491 		 * characteristics and the existing ones; we don't support that
5492 		 * scenario
5493 		 */
5494 		if (ice_has_prof_vsig(hw, blk, vsig, hdl)) {
5495 			status = ICE_ERR_ALREADY_EXISTS;
5496 			goto err_ice_add_prof_id_flow;
5497 		}
5498 
5499 		/* last VSI in the VSIG? */
5500 		status = ice_vsig_get_ref(hw, blk, vsig, &ref);
5501 		if (status)
5502 			goto err_ice_add_prof_id_flow;
5503 		only_vsi = (ref == 1);
5504 
5505 		/* create a union of the current profiles and the one being
5506 		 * added
5507 		 */
5508 		status = ice_get_profs_vsig(hw, blk, vsig, &union_lst);
5509 		if (status)
5510 			goto err_ice_add_prof_id_flow;
5511 
5512 		status = ice_add_prof_to_lst(hw, blk, &union_lst, hdl);
5513 		if (status)
5514 			goto err_ice_add_prof_id_flow;
5515 
		/* search for an existing VSIG with an exact characteristic match */
5517 		status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig);
5518 		if (!status) {
5519 			/* move VSI to the VSIG that matches */
5520 			status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
5521 			if (status)
5522 				goto err_ice_add_prof_id_flow;
5523 
5524 			/* VSI has been moved out of or_vsig. If the or_vsig had
			 * only that VSI, it is now empty and can be removed.
5526 			 */
5527 			if (only_vsi) {
5528 				status = ice_rem_vsig(hw, blk, or_vsig, &chg);
5529 				if (status)
5530 					goto err_ice_add_prof_id_flow;
5531 			}
5532 		} else if (only_vsi) {
5533 			/* If the original VSIG only contains one VSI, then it
5534 			 * will be the requesting VSI. In this case the VSI is
5535 			 * not sharing entries and we can simply add the new
5536 			 * profile to the VSIG.
5537 			 */
5538 			status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false,
5539 						      &chg);
5540 			if (status)
5541 				goto err_ice_add_prof_id_flow;
5542 
5543 			/* Adjust priorities */
5544 			status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
5545 			if (status)
5546 				goto err_ice_add_prof_id_flow;
5547 		} else {
5548 			/* No match, so we need a new VSIG */
5549 			status = ice_create_vsig_from_lst(hw, blk, vsi,
5550 							  &union_lst, &vsig,
5551 							  &chg);
5552 			if (status)
5553 				goto err_ice_add_prof_id_flow;
5554 
5555 			/* Adjust priorities */
5556 			status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
5557 			if (status)
5558 				goto err_ice_add_prof_id_flow;
5559 		}
5560 	} else {
		/* VSI is not yet in a VSIG; search for an existing VSIG with
		 * an exact characteristic match, or add a new one
		 */
5563 		if (ice_find_prof_vsig(hw, blk, hdl, &vsig)) {
			/* found an exact match; move the VSI to that VSIG */
5566 			status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
5567 			if (status)
5568 				goto err_ice_add_prof_id_flow;
5569 		} else {
			/* no exact match found; create a new VSIG */
5572 			status = ice_create_prof_id_vsig(hw, blk, vsi, hdl,
5573 							 &chg);
5574 			if (status)
5575 				goto err_ice_add_prof_id_flow;
5576 		}
5577 	}
5578 
5579 	/* update hardware */
5580 	if (!status)
5581 		status = ice_upd_prof_hw(hw, blk, &chg);
5582 
5583 err_ice_add_prof_id_flow:
5584 	list_for_each_entry_safe(del, tmp, &chg, list_entry) {
5585 		list_del(&del->list_entry);
5586 		devm_kfree(ice_hw_to_dev(hw), del);
5587 	}
5588 
5589 	list_for_each_entry_safe(del1, tmp1, &union_lst, list) {
5590 		list_del(&del1->list);
5591 		devm_kfree(ice_hw_to_dev(hw), del1);
5592 	}
5593 
5594 	return status;
5595 }
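
/* Illustrative only (not part of the driver): a minimal sketch of how a
 * caller might enable a previously registered profile for one VSI. It
 * assumes @hdl is the tracking handle the profile was registered under
 * (e.g. via ice_add_prof()) and that @vsi is the hardware VSI number
 * (e.g. from ice_get_hw_vsi_num()).
 *
 *	enum ice_status status;
 *
 *	status = ice_add_prof_id_flow(hw, ICE_BLK_RSS, vsi, hdl);
 *	if (status)
 *		ice_debug(hw, ICE_DBG_FLOW, "enable profile %llu failed: %d\n",
 *			  hdl, status);
 */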
5596 
5597 /**
5598  * ice_rem_prof_from_list - remove a profile from list
5599  * @hw: pointer to the HW struct
5600  * @lst: list to remove the profile from
5601  * @hdl: the profile handle indicating the profile to remove
5602  */
5603 static enum ice_status
5604 ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl)
5605 {
5606 	struct ice_vsig_prof *ent, *tmp;
5607 
5608 	list_for_each_entry_safe(ent, tmp, lst, list)
5609 		if (ent->profile_cookie == hdl) {
5610 			list_del(&ent->list);
5611 			devm_kfree(ice_hw_to_dev(hw), ent);
5612 			return 0;
5613 		}
5614 
5615 	return ICE_ERR_DOES_NOT_EXIST;
5616 }
5617 
5618 /**
5619  * ice_rem_prof_id_flow - remove flow
5620  * @hw: pointer to the HW struct
5621  * @blk: hardware block
 * @vsi: the VSI from which to remove the profile specified by @hdl
 * @hdl: profile tracking handle
 *
 * Calling this function will update the hardware tables to remove the
 * profile indicated by @hdl for the VSI specified by @vsi. Once
 * successfully called, the flow will be disabled.
5628  */
5629 enum ice_status
5630 ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
5631 {
5632 	struct ice_vsig_prof *tmp1, *del1;
5633 	struct ice_chs_chg *tmp, *del;
5634 	struct list_head chg, copy;
5635 	enum ice_status status;
5636 	u16 vsig;
5637 
5638 	INIT_LIST_HEAD(&copy);
5639 	INIT_LIST_HEAD(&chg);
5640 
5641 	/* determine if VSI is already part of a VSIG */
5642 	status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
5643 	if (!status && vsig) {
5644 		bool last_profile;
5645 		bool only_vsi;
5646 		u16 ref;
5647 
5648 		/* found in VSIG */
5649 		last_profile = ice_vsig_prof_id_count(hw, blk, vsig) == 1;
5650 		status = ice_vsig_get_ref(hw, blk, vsig, &ref);
5651 		if (status)
5652 			goto err_ice_rem_prof_id_flow;
5653 		only_vsi = (ref == 1);
5654 
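		/* Four outcomes are possible from here: the VSI is alone in
		 * the VSIG and this is its last profile, so the whole VSIG is
		 * removed; the VSI is alone but other profiles remain, so
		 * only this profile is removed from the VSIG; the VSIG is
		 * shared and the reduced profile list matches an existing
		 * VSIG, so the VSI is moved there (or to the default VSIG if
		 * the list becomes empty); or no match exists and a new VSIG
		 * is created from the reduced list.
		 */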
5655 		if (only_vsi) {
5656 			/* If the original VSIG only contains one reference,
5657 			 * which will be the requesting VSI, then the VSI is not
5658 			 * sharing entries and we can simply remove the specific
5659 			 * characteristics from the VSIG.
5660 			 */
5661 
5662 			if (last_profile) {
5663 				/* If there are no profiles left for this VSIG,
5664 				 * then simply remove the VSIG.
5665 				 */
5666 				status = ice_rem_vsig(hw, blk, vsig, &chg);
5667 				if (status)
5668 					goto err_ice_rem_prof_id_flow;
5669 			} else {
5670 				status = ice_rem_prof_id_vsig(hw, blk, vsig,
5671 							      hdl, &chg);
5672 				if (status)
5673 					goto err_ice_rem_prof_id_flow;
5674 
5675 				/* Adjust priorities */
5676 				status = ice_adj_prof_priorities(hw, blk, vsig,
5677 								 &chg);
5678 				if (status)
5679 					goto err_ice_rem_prof_id_flow;
5680 			}
5681 
5682 		} else {
			/* Make a copy of the VSIG's list of profiles */
5684 			status = ice_get_profs_vsig(hw, blk, vsig, &copy);
5685 			if (status)
5686 				goto err_ice_rem_prof_id_flow;
5687 
5688 			/* Remove specified profile entry from the list */
5689 			status = ice_rem_prof_from_list(hw, &copy, hdl);
5690 			if (status)
5691 				goto err_ice_rem_prof_id_flow;
5692 
5693 			if (list_empty(&copy)) {
5694 				status = ice_move_vsi(hw, blk, vsi,
5695 						      ICE_DEFAULT_VSIG, &chg);
5696 				if (status)
5697 					goto err_ice_rem_prof_id_flow;
5698 
5699 			} else if (!ice_find_dup_props_vsig(hw, blk, &copy,
5700 							    &vsig)) {
				/* found a VSIG with a matching profile list;
				 * move the VSI to that VSIG
				 */
5708 				status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
5709 				if (status)
5710 					goto err_ice_rem_prof_id_flow;
5711 			} else {
5712 				/* since no existing VSIG supports this
5713 				 * characteristic pattern, we need to create a
5714 				 * new VSIG and TCAM entries
5715 				 */
5716 				status = ice_create_vsig_from_lst(hw, blk, vsi,
5717 								  &copy, &vsig,
5718 								  &chg);
5719 				if (status)
5720 					goto err_ice_rem_prof_id_flow;
5721 
5722 				/* Adjust priorities */
5723 				status = ice_adj_prof_priorities(hw, blk, vsig,
5724 								 &chg);
5725 				if (status)
5726 					goto err_ice_rem_prof_id_flow;
5727 			}
5728 		}
5729 	} else {
5730 		status = ICE_ERR_DOES_NOT_EXIST;
5731 	}
5732 
5733 	/* update hardware tables */
5734 	if (!status)
5735 		status = ice_upd_prof_hw(hw, blk, &chg);
5736 
5737 err_ice_rem_prof_id_flow:
5738 	list_for_each_entry_safe(del, tmp, &chg, list_entry) {
5739 		list_del(&del->list_entry);
5740 		devm_kfree(ice_hw_to_dev(hw), del);
5741 	}
5742 
5743 	list_for_each_entry_safe(del1, tmp1, &copy, list) {
5744 		list_del(&del1->list);
5745 		devm_kfree(ice_hw_to_dev(hw), del1);
5746 	}
5747 
5748 	return status;
5749 }
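
/* Illustrative only (not part of the driver): the teardown counterpart of the
 * sketch following ice_add_prof_id_flow() above. Once every VSI has been
 * disassociated from the profile, the profile itself can be freed with
 * ice_rem_prof(), assuming the same @hdl tracking handle.
 *
 *	status = ice_rem_prof_id_flow(hw, ICE_BLK_RSS, vsi, hdl);
 *	if (!status)
 *		status = ice_rem_prof(hw, ICE_BLK_RSS, hdl);
 */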
5750