1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019, Intel Corporation. */
3 
4 #include "ice_common.h"
5 #include "ice_flex_pipe.h"
6 #include "ice_flow.h"
7 
8 /* To support tunneling entries by PF, the package will append the PF number to
9  * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
10  */
11 static const struct ice_tunnel_type_scan tnls[] = {
12 	{ TNL_VXLAN,		"TNL_VXLAN_PF" },
13 	{ TNL_GENEVE,		"TNL_GENEVE_PF" },
14 	{ TNL_LAST,		"" }
15 };
16 
17 static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {
18 	/* SWITCH */
19 	{
20 		ICE_SID_XLT0_SW,
21 		ICE_SID_XLT_KEY_BUILDER_SW,
22 		ICE_SID_XLT1_SW,
23 		ICE_SID_XLT2_SW,
24 		ICE_SID_PROFID_TCAM_SW,
25 		ICE_SID_PROFID_REDIR_SW,
26 		ICE_SID_FLD_VEC_SW,
27 		ICE_SID_CDID_KEY_BUILDER_SW,
28 		ICE_SID_CDID_REDIR_SW
29 	},
30 
31 	/* ACL */
32 	{
33 		ICE_SID_XLT0_ACL,
34 		ICE_SID_XLT_KEY_BUILDER_ACL,
35 		ICE_SID_XLT1_ACL,
36 		ICE_SID_XLT2_ACL,
37 		ICE_SID_PROFID_TCAM_ACL,
38 		ICE_SID_PROFID_REDIR_ACL,
39 		ICE_SID_FLD_VEC_ACL,
40 		ICE_SID_CDID_KEY_BUILDER_ACL,
41 		ICE_SID_CDID_REDIR_ACL
42 	},
43 
44 	/* FD */
45 	{
46 		ICE_SID_XLT0_FD,
47 		ICE_SID_XLT_KEY_BUILDER_FD,
48 		ICE_SID_XLT1_FD,
49 		ICE_SID_XLT2_FD,
50 		ICE_SID_PROFID_TCAM_FD,
51 		ICE_SID_PROFID_REDIR_FD,
52 		ICE_SID_FLD_VEC_FD,
53 		ICE_SID_CDID_KEY_BUILDER_FD,
54 		ICE_SID_CDID_REDIR_FD
55 	},
56 
57 	/* RSS */
58 	{
59 		ICE_SID_XLT0_RSS,
60 		ICE_SID_XLT_KEY_BUILDER_RSS,
61 		ICE_SID_XLT1_RSS,
62 		ICE_SID_XLT2_RSS,
63 		ICE_SID_PROFID_TCAM_RSS,
64 		ICE_SID_PROFID_REDIR_RSS,
65 		ICE_SID_FLD_VEC_RSS,
66 		ICE_SID_CDID_KEY_BUILDER_RSS,
67 		ICE_SID_CDID_REDIR_RSS
68 	},
69 
70 	/* PE */
71 	{
72 		ICE_SID_XLT0_PE,
73 		ICE_SID_XLT_KEY_BUILDER_PE,
74 		ICE_SID_XLT1_PE,
75 		ICE_SID_XLT2_PE,
76 		ICE_SID_PROFID_TCAM_PE,
77 		ICE_SID_PROFID_REDIR_PE,
78 		ICE_SID_FLD_VEC_PE,
79 		ICE_SID_CDID_KEY_BUILDER_PE,
80 		ICE_SID_CDID_REDIR_PE
81 	}
82 };
83 
84 /**
85  * ice_sect_id - returns section ID
86  * @blk: block type
87  * @sect: section type
88  *
89  * This helper function returns the proper section ID given a block type and a
90  * section type.
91  */
92 static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)
93 {
94 	return ice_sect_lkup[blk][sect];
95 }
96 
97 /**
98  * ice_pkg_val_buf
99  * @buf: pointer to the ice buffer
100  *
101  * This helper function validates a buffer's header.
102  */
103 static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
104 {
105 	struct ice_buf_hdr *hdr;
106 	u16 section_count;
107 	u16 data_end;
108 
109 	hdr = (struct ice_buf_hdr *)buf->buf;
110 	/* verify data */
111 	section_count = le16_to_cpu(hdr->section_count);
112 	if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
113 		return NULL;
114 
115 	data_end = le16_to_cpu(hdr->data_end);
116 	if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
117 		return NULL;
118 
119 	return hdr;
120 }
121 
122 /**
123  * ice_find_buf_table
124  * @ice_seg: pointer to the ice segment
125  *
126  * Returns the address of the buffer table within the ice segment.
127  */
128 static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
129 {
130 	struct ice_nvm_table *nvms;
131 
132 	nvms = (struct ice_nvm_table *)
133 		(ice_seg->device_table +
134 		 le32_to_cpu(ice_seg->device_table_count));
135 
136 	return (__force struct ice_buf_table *)
137 		(nvms->vers + le32_to_cpu(nvms->table_count));
138 }
139 
140 /**
141  * ice_pkg_enum_buf
142  * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
143  * @state: pointer to the enum state
144  *
145  * This function will enumerate all the buffers in the ice segment. The first
146  * call is made with the ice_seg parameter non-NULL; on subsequent calls,
147  * ice_seg is set to NULL which continues the enumeration. When the function
148  * returns a NULL pointer, then the end of the buffers has been reached, or an
149  * unexpected value has been detected (for example an invalid section count or
150  * an invalid buffer end value).
151  */
152 static struct ice_buf_hdr *
153 ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
154 {
155 	if (ice_seg) {
156 		state->buf_table = ice_find_buf_table(ice_seg);
157 		if (!state->buf_table)
158 			return NULL;
159 
160 		state->buf_idx = 0;
161 		return ice_pkg_val_buf(state->buf_table->buf_array);
162 	}
163 
164 	if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count))
165 		return ice_pkg_val_buf(state->buf_table->buf_array +
166 				       state->buf_idx);
167 	else
168 		return NULL;
169 }
170 
171 /**
172  * ice_pkg_advance_sect
173  * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
174  * @state: pointer to the enum state
175  *
176  * This helper function will advance the section within the ice segment,
177  * also advancing the buffer if needed.
178  */
179 static bool
180 ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
181 {
182 	if (!ice_seg && !state->buf)
183 		return false;
184 
185 	if (!ice_seg && state->buf)
186 		if (++state->sect_idx < le16_to_cpu(state->buf->section_count))
187 			return true;
188 
189 	state->buf = ice_pkg_enum_buf(ice_seg, state);
190 	if (!state->buf)
191 		return false;
192 
193 	/* start of new buffer, reset section index */
194 	state->sect_idx = 0;
195 	return true;
196 }
197 
198 /**
199  * ice_pkg_enum_section
200  * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
201  * @state: pointer to the enum state
202  * @sect_type: section type to enumerate
203  *
204  * This function will enumerate all the sections of a particular type in the
205  * ice segment. The first call is made with the ice_seg parameter non-NULL;
206  * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
207  * When the function returns a NULL pointer, then the end of the matching
208  * sections has been reached.
209  */
210 static void *
211 ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
212 		     u32 sect_type)
213 {
214 	u16 offset, size;
215 
216 	if (ice_seg)
217 		state->type = sect_type;
218 
219 	if (!ice_pkg_advance_sect(ice_seg, state))
220 		return NULL;
221 
222 	/* scan for next matching section */
223 	while (state->buf->section_entry[state->sect_idx].type !=
224 	       cpu_to_le32(state->type))
225 		if (!ice_pkg_advance_sect(NULL, state))
226 			return NULL;
227 
228 	/* validate section */
229 	offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
230 	if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
231 		return NULL;
232 
233 	size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size);
234 	if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
235 		return NULL;
236 
237 	/* make sure the section fits in the buffer */
238 	if (offset + size > ICE_PKG_BUF_SIZE)
239 		return NULL;
240 
241 	state->sect_type =
242 		le32_to_cpu(state->buf->section_entry[state->sect_idx].type);
243 
244 	/* calc pointer to this section */
245 	state->sect = ((u8 *)state->buf) +
246 		le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
247 
248 	return state->sect;
249 }
250 
251 /**
252  * ice_pkg_enum_entry
253  * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
254  * @state: pointer to the enum state
255  * @sect_type: section type to enumerate
256  * @offset: pointer to variable that receives the offset in the table (optional)
257  * @handler: function that handles access to the entries into the section type
258  *
259  * This function will enumerate all the entries in particular section type in
260  * the ice segment. The first call is made with the ice_seg parameter non-NULL;
261  * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
262  * When the function returns a NULL pointer, then the end of the entries has
263  * been reached.
264  *
265  * Since each section may have a different header and entry size, the handler
266  * function is needed to determine the number and location entries in each
267  * section.
268  *
269  * The offset parameter is optional, but should be used for sections that
270  * contain an offset for each section table. For such cases, the section handler
271  * function must return the appropriate offset + index to give the absolution
272  * offset for each entry. For example, if the base for a section's header
273  * indicates a base offset of 10, and the index for the entry is 2, then
274  * section handler function should set the offset to 10 + 2 = 12.
275  */
276 static void *
277 ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
278 		   u32 sect_type, u32 *offset,
279 		   void *(*handler)(u32 sect_type, void *section,
280 				    u32 index, u32 *offset))
281 {
282 	void *entry;
283 
284 	if (ice_seg) {
285 		if (!handler)
286 			return NULL;
287 
288 		if (!ice_pkg_enum_section(ice_seg, state, sect_type))
289 			return NULL;
290 
291 		state->entry_idx = 0;
292 		state->handler = handler;
293 	} else {
294 		state->entry_idx++;
295 	}
296 
297 	if (!state->handler)
298 		return NULL;
299 
300 	/* get entry */
301 	entry = state->handler(state->sect_type, state->sect, state->entry_idx,
302 			       offset);
303 	if (!entry) {
304 		/* end of a section, look for another section of this type */
305 		if (!ice_pkg_enum_section(NULL, state, 0))
306 			return NULL;
307 
308 		state->entry_idx = 0;
309 		entry = state->handler(state->sect_type, state->sect,
310 				       state->entry_idx, offset);
311 	}
312 
313 	return entry;
314 }
315 
316 /**
317  * ice_boost_tcam_handler
318  * @sect_type: section type
319  * @section: pointer to section
320  * @index: index of the boost TCAM entry to be returned
321  * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
322  *
323  * This is a callback function that can be passed to ice_pkg_enum_entry.
324  * Handles enumeration of individual boost TCAM entries.
325  */
326 static void *
327 ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
328 {
329 	struct ice_boost_tcam_section *boost;
330 
331 	if (!section)
332 		return NULL;
333 
334 	if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
335 		return NULL;
336 
337 	/* cppcheck-suppress nullPointer */
338 	if (index > ICE_MAX_BST_TCAMS_IN_BUF)
339 		return NULL;
340 
341 	if (offset)
342 		*offset = 0;
343 
344 	boost = section;
345 	if (index >= le16_to_cpu(boost->count))
346 		return NULL;
347 
348 	return boost->tcam + index;
349 }
350 
351 /**
352  * ice_find_boost_entry
353  * @ice_seg: pointer to the ice segment (non-NULL)
354  * @addr: Boost TCAM address of entry to search for
355  * @entry: returns pointer to the entry
356  *
357  * Finds a particular Boost TCAM entry and returns a pointer to that entry
358  * if it is found. The ice_seg parameter must not be NULL since the first call
359  * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
360  */
361 static enum ice_status
362 ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
363 		     struct ice_boost_tcam_entry **entry)
364 {
365 	struct ice_boost_tcam_entry *tcam;
366 	struct ice_pkg_enum state;
367 
368 	memset(&state, 0, sizeof(state));
369 
370 	if (!ice_seg)
371 		return ICE_ERR_PARAM;
372 
373 	do {
374 		tcam = ice_pkg_enum_entry(ice_seg, &state,
375 					  ICE_SID_RXPARSER_BOOST_TCAM, NULL,
376 					  ice_boost_tcam_handler);
377 		if (tcam && le16_to_cpu(tcam->addr) == addr) {
378 			*entry = tcam;
379 			return 0;
380 		}
381 
382 		ice_seg = NULL;
383 	} while (tcam);
384 
385 	*entry = NULL;
386 	return ICE_ERR_CFG;
387 }
388 
389 /**
390  * ice_label_enum_handler
391  * @sect_type: section type
392  * @section: pointer to section
393  * @index: index of the label entry to be returned
394  * @offset: pointer to receive absolute offset, always zero for label sections
395  *
396  * This is a callback function that can be passed to ice_pkg_enum_entry.
397  * Handles enumeration of individual label entries.
398  */
399 static void *
400 ice_label_enum_handler(u32 __always_unused sect_type, void *section, u32 index,
401 		       u32 *offset)
402 {
403 	struct ice_label_section *labels;
404 
405 	if (!section)
406 		return NULL;
407 
408 	/* cppcheck-suppress nullPointer */
409 	if (index > ICE_MAX_LABELS_IN_BUF)
410 		return NULL;
411 
412 	if (offset)
413 		*offset = 0;
414 
415 	labels = section;
416 	if (index >= le16_to_cpu(labels->count))
417 		return NULL;
418 
419 	return labels->label + index;
420 }
421 
422 /**
423  * ice_enum_labels
424  * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
425  * @type: the section type that will contain the label (0 on subsequent calls)
426  * @state: ice_pkg_enum structure that will hold the state of the enumeration
427  * @value: pointer to a value that will return the label's value if found
428  *
429  * Enumerates a list of labels in the package. The caller will call
430  * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
431  * ice_enum_labels(NULL, 0, ...) to continue. When the function returns a NULL
432  * the end of the list has been reached.
433  */
434 static char *
435 ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
436 		u16 *value)
437 {
438 	struct ice_label *label;
439 
440 	/* Check for valid label section on first call */
441 	if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
442 		return NULL;
443 
444 	label = ice_pkg_enum_entry(ice_seg, state, type, NULL,
445 				   ice_label_enum_handler);
446 	if (!label)
447 		return NULL;
448 
449 	*value = le16_to_cpu(label->value);
450 	return label->name;
451 }
452 
453 /**
454  * ice_init_pkg_hints
455  * @hw: pointer to the HW structure
456  * @ice_seg: pointer to the segment of the package scan (non-NULL)
457  *
458  * This function will scan the package and save off relevant information
459  * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
460  * since the first call to ice_enum_labels requires a pointer to an actual
461  * ice_seg structure.
462  */
463 static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
464 {
465 	struct ice_pkg_enum state;
466 	char *label_name;
467 	u16 val;
468 	int i;
469 
470 	memset(&hw->tnl, 0, sizeof(hw->tnl));
471 	memset(&state, 0, sizeof(state));
472 
473 	if (!ice_seg)
474 		return;
475 
476 	label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
477 				     &val);
478 
479 	while (label_name && hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
480 		for (i = 0; tnls[i].type != TNL_LAST; i++) {
481 			size_t len = strlen(tnls[i].label_prefix);
482 
483 			/* Look for matching label start, before continuing */
484 			if (strncmp(label_name, tnls[i].label_prefix, len))
485 				continue;
486 
487 			/* Make sure this label matches our PF. Note that the PF
488 			 * character ('0' - '7') will be located where our
489 			 * prefix string's null terminator is located.
490 			 */
491 			if ((label_name[len] - '0') == hw->pf_id) {
492 				hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
493 				hw->tnl.tbl[hw->tnl.count].valid = false;
494 				hw->tnl.tbl[hw->tnl.count].boost_addr = val;
495 				hw->tnl.tbl[hw->tnl.count].port = 0;
496 				hw->tnl.count++;
497 				break;
498 			}
499 		}
500 
501 		label_name = ice_enum_labels(NULL, 0, &state, &val);
502 	}
503 
504 	/* Cache the appropriate boost TCAM entry pointers */
505 	for (i = 0; i < hw->tnl.count; i++) {
506 		ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
507 				     &hw->tnl.tbl[i].boost_entry);
508 		if (hw->tnl.tbl[i].boost_entry) {
509 			hw->tnl.tbl[i].valid = true;
510 			if (hw->tnl.tbl[i].type < __TNL_TYPE_CNT)
511 				hw->tnl.valid_count[hw->tnl.tbl[i].type]++;
512 		}
513 	}
514 }
515 
516 /* Key creation */
517 
518 #define ICE_DC_KEY	0x1	/* don't care */
519 #define ICE_DC_KEYINV	0x1
520 #define ICE_NM_KEY	0x0	/* never match */
521 #define ICE_NM_KEYINV	0x0
522 #define ICE_0_KEY	0x1	/* match 0 */
523 #define ICE_0_KEYINV	0x0
524 #define ICE_1_KEY	0x0	/* match 1 */
525 #define ICE_1_KEYINV	0x1
526 
527 /**
528  * ice_gen_key_word - generate 16-bits of a key/mask word
529  * @val: the value
530  * @valid: valid bits mask (change only the valid bits)
531  * @dont_care: don't care mask
532  * @nvr_mtch: never match mask
533  * @key: pointer to an array of where the resulting key portion
534  * @key_inv: pointer to an array of where the resulting key invert portion
535  *
536  * This function generates 16-bits from a 8-bit value, an 8-bit don't care mask
537  * and an 8-bit never match mask. The 16-bits of output are divided into 8 bits
538  * of key and 8 bits of key invert.
539  *
540  *     '0' =    b01, always match a 0 bit
541  *     '1' =    b10, always match a 1 bit
542  *     '?' =    b11, don't care bit (always matches)
543  *     '~' =    b00, never match bit
544  *
545  * Input:
546  *          val:         b0  1  0  1  0  1
547  *          dont_care:   b0  0  1  1  0  0
548  *          never_mtch:  b0  0  0  0  1  1
549  *          ------------------------------
550  * Result:  key:        b01 10 11 11 00 00
551  */
552 static enum ice_status
553 ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
554 		 u8 *key_inv)
555 {
556 	u8 in_key = *key, in_key_inv = *key_inv;
557 	u8 i;
558 
559 	/* 'dont_care' and 'nvr_mtch' masks cannot overlap */
560 	if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
561 		return ICE_ERR_CFG;
562 
563 	*key = 0;
564 	*key_inv = 0;
565 
566 	/* encode the 8 bits into 8-bit key and 8-bit key invert */
567 	for (i = 0; i < 8; i++) {
568 		*key >>= 1;
569 		*key_inv >>= 1;
570 
571 		if (!(valid & 0x1)) { /* change only valid bits */
572 			*key |= (in_key & 0x1) << 7;
573 			*key_inv |= (in_key_inv & 0x1) << 7;
574 		} else if (dont_care & 0x1) { /* don't care bit */
575 			*key |= ICE_DC_KEY << 7;
576 			*key_inv |= ICE_DC_KEYINV << 7;
577 		} else if (nvr_mtch & 0x1) { /* never match bit */
578 			*key |= ICE_NM_KEY << 7;
579 			*key_inv |= ICE_NM_KEYINV << 7;
580 		} else if (val & 0x01) { /* exact 1 match */
581 			*key |= ICE_1_KEY << 7;
582 			*key_inv |= ICE_1_KEYINV << 7;
583 		} else { /* exact 0 match */
584 			*key |= ICE_0_KEY << 7;
585 			*key_inv |= ICE_0_KEYINV << 7;
586 		}
587 
588 		dont_care >>= 1;
589 		nvr_mtch >>= 1;
590 		valid >>= 1;
591 		val >>= 1;
592 		in_key >>= 1;
593 		in_key_inv >>= 1;
594 	}
595 
596 	return 0;
597 }
598 
599 /**
600  * ice_bits_max_set - determine if the number of bits set is within a maximum
601  * @mask: pointer to the byte array which is the mask
602  * @size: the number of bytes in the mask
603  * @max: the max number of set bits
604  *
605  * This function determines if there are at most 'max' number of bits set in an
606  * array. Returns true if the number for bits set is <= max or will return false
607  * otherwise.
608  */
609 static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
610 {
611 	u16 count = 0;
612 	u16 i;
613 
614 	/* check each byte */
615 	for (i = 0; i < size; i++) {
616 		/* if 0, go to next byte */
617 		if (!mask[i])
618 			continue;
619 
620 		/* We know there is at least one set bit in this byte because of
621 		 * the above check; if we already have found 'max' number of
622 		 * bits set, then we can return failure now.
623 		 */
624 		if (count == max)
625 			return false;
626 
627 		/* count the bits in this byte, checking threshold */
628 		count += hweight8(mask[i]);
629 		if (count > max)
630 			return false;
631 	}
632 
633 	return true;
634 }
635 
636 /**
637  * ice_set_key - generate a variable sized key with multiples of 16-bits
638  * @key: pointer to where the key will be stored
639  * @size: the size of the complete key in bytes (must be even)
640  * @val: array of 8-bit values that makes up the value portion of the key
641  * @upd: array of 8-bit masks that determine what key portion to update
642  * @dc: array of 8-bit masks that make up the don't care mask
643  * @nm: array of 8-bit masks that make up the never match mask
644  * @off: the offset of the first byte in the key to update
645  * @len: the number of bytes in the key update
646  *
647  * This function generates a key from a value, a don't care mask and a never
648  * match mask.
649  * upd, dc, and nm are optional parameters, and can be NULL:
650  *	upd == NULL --> upd mask is all 1's (update all bits)
651  *	dc == NULL --> dc mask is all 0's (no don't care bits)
652  *	nm == NULL --> nm mask is all 0's (no never match bits)
653  */
654 static enum ice_status
655 ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
656 	    u16 len)
657 {
658 	u16 half_size;
659 	u16 i;
660 
661 	/* size must be a multiple of 2 bytes. */
662 	if (size % 2)
663 		return ICE_ERR_CFG;
664 
665 	half_size = size / 2;
666 	if (off + len > half_size)
667 		return ICE_ERR_CFG;
668 
669 	/* Make sure at most one bit is set in the never match mask. Having more
670 	 * than one never match mask bit set will cause HW to consume excessive
671 	 * power otherwise; this is a power management efficiency check.
672 	 */
673 #define ICE_NVR_MTCH_BITS_MAX	1
674 	if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
675 		return ICE_ERR_CFG;
676 
677 	for (i = 0; i < len; i++)
678 		if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
679 				     dc ? dc[i] : 0, nm ? nm[i] : 0,
680 				     key + off + i, key + half_size + off + i))
681 			return ICE_ERR_CFG;
682 
683 	return 0;
684 }
685 
686 /**
687  * ice_acquire_global_cfg_lock
688  * @hw: pointer to the HW structure
689  * @access: access type (read or write)
690  *
691  * This function will request ownership of the global config lock for reading
692  * or writing of the package. When attempting to obtain write access, the
693  * caller must check for the following two return values:
694  *
695  * ICE_SUCCESS        - Means the caller has acquired the global config lock
696  *                      and can perform writing of the package.
697  * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
698  *                      package or has found that no update was necessary; in
699  *                      this case, the caller can just skip performing any
700  *                      update of the package.
701  */
702 static enum ice_status
703 ice_acquire_global_cfg_lock(struct ice_hw *hw,
704 			    enum ice_aq_res_access_type access)
705 {
706 	enum ice_status status;
707 
708 	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
709 				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);
710 
711 	if (!status)
712 		mutex_lock(&ice_global_cfg_lock_sw);
713 	else if (status == ICE_ERR_AQ_NO_WORK)
714 		ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n");
715 
716 	return status;
717 }
718 
719 /**
720  * ice_release_global_cfg_lock
721  * @hw: pointer to the HW structure
722  *
723  * This function will release the global config lock.
724  */
725 static void ice_release_global_cfg_lock(struct ice_hw *hw)
726 {
727 	mutex_unlock(&ice_global_cfg_lock_sw);
728 	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
729 }
730 
731 /**
732  * ice_acquire_change_lock
733  * @hw: pointer to the HW structure
734  * @access: access type (read or write)
735  *
736  * This function will request ownership of the change lock.
737  */
738 enum ice_status
739 ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
740 {
741 	return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
742 			       ICE_CHANGE_LOCK_TIMEOUT);
743 }
744 
745 /**
746  * ice_release_change_lock
747  * @hw: pointer to the HW structure
748  *
749  * This function will release the change lock using the proper Admin Command.
750  */
751 void ice_release_change_lock(struct ice_hw *hw)
752 {
753 	ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
754 }
755 
756 /**
757  * ice_aq_download_pkg
758  * @hw: pointer to the hardware structure
759  * @pkg_buf: the package buffer to transfer
760  * @buf_size: the size of the package buffer
761  * @last_buf: last buffer indicator
762  * @error_offset: returns error offset
763  * @error_info: returns error information
764  * @cd: pointer to command details structure or NULL
765  *
766  * Download Package (0x0C40)
767  */
768 static enum ice_status
769 ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
770 		    u16 buf_size, bool last_buf, u32 *error_offset,
771 		    u32 *error_info, struct ice_sq_cd *cd)
772 {
773 	struct ice_aqc_download_pkg *cmd;
774 	struct ice_aq_desc desc;
775 	enum ice_status status;
776 
777 	if (error_offset)
778 		*error_offset = 0;
779 	if (error_info)
780 		*error_info = 0;
781 
782 	cmd = &desc.params.download_pkg;
783 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
784 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
785 
786 	if (last_buf)
787 		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
788 
789 	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
790 	if (status == ICE_ERR_AQ_ERROR) {
791 		/* Read error from buffer only when the FW returned an error */
792 		struct ice_aqc_download_pkg_resp *resp;
793 
794 		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
795 		if (error_offset)
796 			*error_offset = le32_to_cpu(resp->error_offset);
797 		if (error_info)
798 			*error_info = le32_to_cpu(resp->error_info);
799 	}
800 
801 	return status;
802 }
803 
804 /**
805  * ice_aq_update_pkg
806  * @hw: pointer to the hardware structure
807  * @pkg_buf: the package cmd buffer
808  * @buf_size: the size of the package cmd buffer
809  * @last_buf: last buffer indicator
810  * @error_offset: returns error offset
811  * @error_info: returns error information
812  * @cd: pointer to command details structure or NULL
813  *
814  * Update Package (0x0C42)
815  */
816 static enum ice_status
817 ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
818 		  bool last_buf, u32 *error_offset, u32 *error_info,
819 		  struct ice_sq_cd *cd)
820 {
821 	struct ice_aqc_download_pkg *cmd;
822 	struct ice_aq_desc desc;
823 	enum ice_status status;
824 
825 	if (error_offset)
826 		*error_offset = 0;
827 	if (error_info)
828 		*error_info = 0;
829 
830 	cmd = &desc.params.download_pkg;
831 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
832 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
833 
834 	if (last_buf)
835 		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
836 
837 	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
838 	if (status == ICE_ERR_AQ_ERROR) {
839 		/* Read error from buffer only when the FW returned an error */
840 		struct ice_aqc_download_pkg_resp *resp;
841 
842 		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
843 		if (error_offset)
844 			*error_offset = le32_to_cpu(resp->error_offset);
845 		if (error_info)
846 			*error_info = le32_to_cpu(resp->error_info);
847 	}
848 
849 	return status;
850 }
851 
852 /**
853  * ice_find_seg_in_pkg
854  * @hw: pointer to the hardware structure
855  * @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK)
856  * @pkg_hdr: pointer to the package header to be searched
857  *
858  * This function searches a package file for a particular segment type. On
859  * success it returns a pointer to the segment header, otherwise it will
860  * return NULL.
861  */
862 static struct ice_generic_seg_hdr *
863 ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
864 		    struct ice_pkg_hdr *pkg_hdr)
865 {
866 	u32 i;
867 
868 	ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
869 		  pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
870 		  pkg_hdr->pkg_format_ver.update,
871 		  pkg_hdr->pkg_format_ver.draft);
872 
873 	/* Search all package segments for the requested segment type */
874 	for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) {
875 		struct ice_generic_seg_hdr *seg;
876 
877 		seg = (struct ice_generic_seg_hdr *)
878 			((u8 *)pkg_hdr + le32_to_cpu(pkg_hdr->seg_offset[i]));
879 
880 		if (le32_to_cpu(seg->seg_type) == seg_type)
881 			return seg;
882 	}
883 
884 	return NULL;
885 }
886 
887 /**
888  * ice_update_pkg
889  * @hw: pointer to the hardware structure
890  * @bufs: pointer to an array of buffers
891  * @count: the number of buffers in the array
892  *
893  * Obtains change lock and updates package.
894  */
895 static enum ice_status
896 ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
897 {
898 	enum ice_status status;
899 	u32 offset, info, i;
900 
901 	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
902 	if (status)
903 		return status;
904 
905 	for (i = 0; i < count; i++) {
906 		struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
907 		bool last = ((i + 1) == count);
908 
909 		status = ice_aq_update_pkg(hw, bh, le16_to_cpu(bh->data_end),
910 					   last, &offset, &info, NULL);
911 
912 		if (status) {
913 			ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n",
914 				  status, offset, info);
915 			break;
916 		}
917 	}
918 
919 	ice_release_change_lock(hw);
920 
921 	return status;
922 }
923 
924 /**
925  * ice_dwnld_cfg_bufs
926  * @hw: pointer to the hardware structure
927  * @bufs: pointer to an array of buffers
928  * @count: the number of buffers in the array
929  *
930  * Obtains global config lock and downloads the package configuration buffers
931  * to the firmware. Metadata buffers are skipped, and the first metadata buffer
932  * found indicates that the rest of the buffers are all metadata buffers.
933  */
934 static enum ice_status
935 ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
936 {
937 	enum ice_status status;
938 	struct ice_buf_hdr *bh;
939 	u32 offset, info, i;
940 
941 	if (!bufs || !count)
942 		return ICE_ERR_PARAM;
943 
944 	/* If the first buffer's first section has its metadata bit set
945 	 * then there are no buffers to be downloaded, and the operation is
946 	 * considered a success.
947 	 */
948 	bh = (struct ice_buf_hdr *)bufs;
949 	if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
950 		return 0;
951 
952 	/* reset pkg_dwnld_status in case this function is called in the
953 	 * reset/rebuild flow
954 	 */
955 	hw->pkg_dwnld_status = ICE_AQ_RC_OK;
956 
957 	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
958 	if (status) {
959 		if (status == ICE_ERR_AQ_NO_WORK)
960 			hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST;
961 		else
962 			hw->pkg_dwnld_status = hw->adminq.sq_last_status;
963 		return status;
964 	}
965 
966 	for (i = 0; i < count; i++) {
967 		bool last = ((i + 1) == count);
968 
969 		if (!last) {
970 			/* check next buffer for metadata flag */
971 			bh = (struct ice_buf_hdr *)(bufs + i + 1);
972 
973 			/* A set metadata flag in the next buffer will signal
974 			 * that the current buffer will be the last buffer
975 			 * downloaded
976 			 */
977 			if (le16_to_cpu(bh->section_count))
978 				if (le32_to_cpu(bh->section_entry[0].type) &
979 				    ICE_METADATA_BUF)
980 					last = true;
981 		}
982 
983 		bh = (struct ice_buf_hdr *)(bufs + i);
984 
985 		status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
986 					     &offset, &info, NULL);
987 
988 		/* Save AQ status from download package */
989 		hw->pkg_dwnld_status = hw->adminq.sq_last_status;
990 		if (status) {
991 			ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
992 				  status, offset, info);
993 
994 			break;
995 		}
996 
997 		if (last)
998 			break;
999 	}
1000 
1001 	ice_release_global_cfg_lock(hw);
1002 
1003 	return status;
1004 }
1005 
1006 /**
1007  * ice_aq_get_pkg_info_list
1008  * @hw: pointer to the hardware structure
1009  * @pkg_info: the buffer which will receive the information list
1010  * @buf_size: the size of the pkg_info information buffer
1011  * @cd: pointer to command details structure or NULL
1012  *
1013  * Get Package Info List (0x0C43)
1014  */
1015 static enum ice_status
1016 ice_aq_get_pkg_info_list(struct ice_hw *hw,
1017 			 struct ice_aqc_get_pkg_info_resp *pkg_info,
1018 			 u16 buf_size, struct ice_sq_cd *cd)
1019 {
1020 	struct ice_aq_desc desc;
1021 
1022 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
1023 
1024 	return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
1025 }
1026 
1027 /**
1028  * ice_download_pkg
1029  * @hw: pointer to the hardware structure
1030  * @ice_seg: pointer to the segment of the package to be downloaded
1031  *
1032  * Handles the download of a complete package.
1033  */
1034 static enum ice_status
1035 ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
1036 {
1037 	struct ice_buf_table *ice_buf_tbl;
1038 
1039 	ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
1040 		  ice_seg->hdr.seg_format_ver.major,
1041 		  ice_seg->hdr.seg_format_ver.minor,
1042 		  ice_seg->hdr.seg_format_ver.update,
1043 		  ice_seg->hdr.seg_format_ver.draft);
1044 
1045 	ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
1046 		  le32_to_cpu(ice_seg->hdr.seg_type),
1047 		  le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);
1048 
1049 	ice_buf_tbl = ice_find_buf_table(ice_seg);
1050 
1051 	ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
1052 		  le32_to_cpu(ice_buf_tbl->buf_count));
1053 
1054 	return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
1055 				  le32_to_cpu(ice_buf_tbl->buf_count));
1056 }
1057 
1058 /**
1059  * ice_init_pkg_info
1060  * @hw: pointer to the hardware structure
1061  * @pkg_hdr: pointer to the driver's package hdr
1062  *
1063  * Saves off the package details into the HW structure.
1064  */
1065 static enum ice_status
1066 ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
1067 {
1068 	struct ice_generic_seg_hdr *seg_hdr;
1069 
1070 	if (!pkg_hdr)
1071 		return ICE_ERR_PARAM;
1072 
1073 	seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
1074 	if (seg_hdr) {
1075 		struct ice_meta_sect *meta;
1076 		struct ice_pkg_enum state;
1077 
1078 		memset(&state, 0, sizeof(state));
1079 
1080 		/* Get package information from the Metadata Section */
1081 		meta = ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state,
1082 					    ICE_SID_METADATA);
1083 		if (!meta) {
1084 			ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n");
1085 			return ICE_ERR_CFG;
1086 		}
1087 
1088 		hw->pkg_ver = meta->ver;
1089 		memcpy(hw->pkg_name, meta->name, sizeof(meta->name));
1090 
1091 		ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
1092 			  meta->ver.major, meta->ver.minor, meta->ver.update,
1093 			  meta->ver.draft, meta->name);
1094 
1095 		hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver;
1096 		memcpy(hw->ice_seg_id, seg_hdr->seg_id,
1097 		       sizeof(hw->ice_seg_id));
1098 
1099 		ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
1100 			  seg_hdr->seg_format_ver.major,
1101 			  seg_hdr->seg_format_ver.minor,
1102 			  seg_hdr->seg_format_ver.update,
1103 			  seg_hdr->seg_format_ver.draft,
1104 			  seg_hdr->seg_id);
1105 	} else {
1106 		ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n");
1107 		return ICE_ERR_CFG;
1108 	}
1109 
1110 	return 0;
1111 }
1112 
1113 /**
1114  * ice_get_pkg_info
1115  * @hw: pointer to the hardware structure
1116  *
1117  * Store details of the package currently loaded in HW into the HW structure.
1118  */
1119 static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
1120 {
1121 	struct ice_aqc_get_pkg_info_resp *pkg_info;
1122 	enum ice_status status;
1123 	u16 size;
1124 	u32 i;
1125 
1126 	size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
1127 	pkg_info = kzalloc(size, GFP_KERNEL);
1128 	if (!pkg_info)
1129 		return ICE_ERR_NO_MEMORY;
1130 
1131 	status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL);
1132 	if (status)
1133 		goto init_pkg_free_alloc;
1134 
1135 	for (i = 0; i < le32_to_cpu(pkg_info->count); i++) {
1136 #define ICE_PKG_FLAG_COUNT	4
1137 		char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
1138 		u8 place = 0;
1139 
1140 		if (pkg_info->pkg_info[i].is_active) {
1141 			flags[place++] = 'A';
1142 			hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
1143 			hw->active_track_id =
1144 				le32_to_cpu(pkg_info->pkg_info[i].track_id);
1145 			memcpy(hw->active_pkg_name,
1146 			       pkg_info->pkg_info[i].name,
1147 			       sizeof(pkg_info->pkg_info[i].name));
1148 			hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
1149 		}
1150 		if (pkg_info->pkg_info[i].is_active_at_boot)
1151 			flags[place++] = 'B';
1152 		if (pkg_info->pkg_info[i].is_modified)
1153 			flags[place++] = 'M';
1154 		if (pkg_info->pkg_info[i].is_in_nvm)
1155 			flags[place++] = 'N';
1156 
1157 		ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
1158 			  i, pkg_info->pkg_info[i].ver.major,
1159 			  pkg_info->pkg_info[i].ver.minor,
1160 			  pkg_info->pkg_info[i].ver.update,
1161 			  pkg_info->pkg_info[i].ver.draft,
1162 			  pkg_info->pkg_info[i].name, flags);
1163 	}
1164 
1165 init_pkg_free_alloc:
1166 	kfree(pkg_info);
1167 
1168 	return status;
1169 }
1170 
1171 /**
1172  * ice_verify_pkg - verify package
1173  * @pkg: pointer to the package buffer
1174  * @len: size of the package buffer
1175  *
1176  * Verifies various attributes of the package file, including length, format
1177  * version, and the requirement of at least one segment.
1178  */
1179 static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
1180 {
1181 	u32 seg_count;
1182 	u32 i;
1183 
1184 	if (len < struct_size(pkg, seg_offset, 1))
1185 		return ICE_ERR_BUF_TOO_SHORT;
1186 
1187 	if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
1188 	    pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
1189 	    pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
1190 	    pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
1191 		return ICE_ERR_CFG;
1192 
1193 	/* pkg must have at least one segment */
1194 	seg_count = le32_to_cpu(pkg->seg_count);
1195 	if (seg_count < 1)
1196 		return ICE_ERR_CFG;
1197 
1198 	/* make sure segment array fits in package length */
1199 	if (len < struct_size(pkg, seg_offset, seg_count))
1200 		return ICE_ERR_BUF_TOO_SHORT;
1201 
1202 	/* all segments must fit within length */
1203 	for (i = 0; i < seg_count; i++) {
1204 		u32 off = le32_to_cpu(pkg->seg_offset[i]);
1205 		struct ice_generic_seg_hdr *seg;
1206 
1207 		/* segment header must fit */
1208 		if (len < off + sizeof(*seg))
1209 			return ICE_ERR_BUF_TOO_SHORT;
1210 
1211 		seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
1212 
1213 		/* segment body must fit */
1214 		if (len < off + le32_to_cpu(seg->seg_size))
1215 			return ICE_ERR_BUF_TOO_SHORT;
1216 	}
1217 
1218 	return 0;
1219 }
1220 
1221 /**
1222  * ice_free_seg - free package segment pointer
1223  * @hw: pointer to the hardware structure
1224  *
1225  * Frees the package segment pointer in the proper manner, depending on if the
1226  * segment was allocated or just the passed in pointer was stored.
1227  */
1228 void ice_free_seg(struct ice_hw *hw)
1229 {
1230 	if (hw->pkg_copy) {
1231 		devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy);
1232 		hw->pkg_copy = NULL;
1233 		hw->pkg_size = 0;
1234 	}
1235 	hw->seg = NULL;
1236 }
1237 
1238 /**
1239  * ice_init_pkg_regs - initialize additional package registers
1240  * @hw: pointer to the hardware structure
1241  */
1242 static void ice_init_pkg_regs(struct ice_hw *hw)
1243 {
1244 #define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
1245 #define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
1246 #define ICE_SW_BLK_IDX	0
1247 
1248 	/* setup Switch block input mask, which is 48-bits in two parts */
1249 	wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
1250 	wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
1251 }
1252 
1253 /**
1254  * ice_chk_pkg_version - check package version for compatibility with driver
1255  * @pkg_ver: pointer to a version structure to check
1256  *
1257  * Check to make sure that the package about to be downloaded is compatible with
1258  * the driver. To be compatible, the major and minor components of the package
1259  * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
1260  * definitions.
1261  */
1262 static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
1263 {
1264 	if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ ||
1265 	    pkg_ver->minor != ICE_PKG_SUPP_VER_MNR)
1266 		return ICE_ERR_NOT_SUPPORTED;
1267 
1268 	return 0;
1269 }
1270 
1271 /**
1272  * ice_chk_pkg_compat
1273  * @hw: pointer to the hardware structure
1274  * @ospkg: pointer to the package hdr
1275  * @seg: pointer to the package segment hdr
1276  *
1277  * This function checks the package version compatibility with driver and NVM
1278  */
1279 static enum ice_status
1280 ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
1281 		   struct ice_seg **seg)
1282 {
1283 	struct ice_aqc_get_pkg_info_resp *pkg;
1284 	enum ice_status status;
1285 	u16 size;
1286 	u32 i;
1287 
1288 	/* Check package version compatibility */
1289 	status = ice_chk_pkg_version(&hw->pkg_ver);
1290 	if (status) {
1291 		ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
1292 		return status;
1293 	}
1294 
1295 	/* find ICE segment in given package */
1296 	*seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE,
1297 						     ospkg);
1298 	if (!*seg) {
1299 		ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
1300 		return ICE_ERR_CFG;
1301 	}
1302 
1303 	/* Check if FW is compatible with the OS package */
1304 	size = struct_size(pkg, pkg_info, ICE_PKG_CNT);
1305 	pkg = kzalloc(size, GFP_KERNEL);
1306 	if (!pkg)
1307 		return ICE_ERR_NO_MEMORY;
1308 
1309 	status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL);
1310 	if (status)
1311 		goto fw_ddp_compat_free_alloc;
1312 
1313 	for (i = 0; i < le32_to_cpu(pkg->count); i++) {
1314 		/* loop till we find the NVM package */
1315 		if (!pkg->pkg_info[i].is_in_nvm)
1316 			continue;
1317 		if ((*seg)->hdr.seg_format_ver.major !=
1318 			pkg->pkg_info[i].ver.major ||
1319 		    (*seg)->hdr.seg_format_ver.minor >
1320 			pkg->pkg_info[i].ver.minor) {
1321 			status = ICE_ERR_FW_DDP_MISMATCH;
1322 			ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n");
1323 		}
1324 		/* done processing NVM package so break */
1325 		break;
1326 	}
1327 fw_ddp_compat_free_alloc:
1328 	kfree(pkg);
1329 	return status;
1330 }
1331 
1332 /**
1333  * ice_sw_fv_handler
1334  * @sect_type: section type
1335  * @section: pointer to section
1336  * @index: index of the field vector entry to be returned
1337  * @offset: ptr to variable that receives the offset in the field vector table
1338  *
1339  * This is a callback function that can be passed to ice_pkg_enum_entry.
1340  * This function treats the given section as of type ice_sw_fv_section and
1341  * enumerates offset field. "offset" is an index into the field vector table.
1342  */
1343 static void *
1344 ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
1345 {
1346 	struct ice_sw_fv_section *fv_section = section;
1347 
1348 	if (!section || sect_type != ICE_SID_FLD_VEC_SW)
1349 		return NULL;
1350 	if (index >= le16_to_cpu(fv_section->count))
1351 		return NULL;
1352 	if (offset)
1353 		/* "index" passed in to this function is relative to a given
1354 		 * 4k block. To get to the true index into the field vector
1355 		 * table need to add the relative index to the base_offset
1356 		 * field of this section
1357 		 */
1358 		*offset = le16_to_cpu(fv_section->base_offset) + index;
1359 	return fv_section->fv + index;
1360 }
1361 
1362 /**
1363  * ice_get_prof_index_max - get the max profile index for used profile
1364  * @hw: pointer to the HW struct
1365  *
1366  * Calling this function will get the max profile index for used profile
1367  * and store the index number in struct ice_switch_info *switch_info
1368  * in HW for following use.
1369  */
1370 static enum ice_status ice_get_prof_index_max(struct ice_hw *hw)
1371 {
1372 	u16 prof_index = 0, j, max_prof_index = 0;
1373 	struct ice_pkg_enum state;
1374 	struct ice_seg *ice_seg;
1375 	bool flag = false;
1376 	struct ice_fv *fv;
1377 	u32 offset;
1378 
1379 	memset(&state, 0, sizeof(state));
1380 
1381 	if (!hw->seg)
1382 		return ICE_ERR_PARAM;
1383 
1384 	ice_seg = hw->seg;
1385 
1386 	do {
1387 		fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1388 					&offset, ice_sw_fv_handler);
1389 		if (!fv)
1390 			break;
1391 		ice_seg = NULL;
1392 
1393 		/* in the profile that not be used, the prot_id is set to 0xff
1394 		 * and the off is set to 0x1ff for all the field vectors.
1395 		 */
1396 		for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
1397 			if (fv->ew[j].prot_id != ICE_PROT_INVALID ||
1398 			    fv->ew[j].off != ICE_FV_OFFSET_INVAL)
1399 				flag = true;
1400 		if (flag && prof_index > max_prof_index)
1401 			max_prof_index = prof_index;
1402 
1403 		prof_index++;
1404 		flag = false;
1405 	} while (fv);
1406 
1407 	hw->switch_info->max_used_prof_index = max_prof_index;
1408 
1409 	return 0;
1410 }
1411 
1412 /**
1413  * ice_init_pkg - initialize/download package
1414  * @hw: pointer to the hardware structure
1415  * @buf: pointer to the package buffer
1416  * @len: size of the package buffer
1417  *
1418  * This function initializes a package. The package contains HW tables
1419  * required to do packet processing. First, the function extracts package
1420  * information such as version. Then it finds the ice configuration segment
1421  * within the package; this function then saves a copy of the segment pointer
1422  * within the supplied package buffer. Next, the function will cache any hints
1423  * from the package, followed by downloading the package itself. Note, that if
1424  * a previous PF driver has already downloaded the package successfully, then
1425  * the current driver will not have to download the package again.
1426  *
1427  * The local package contents will be used to query default behavior and to
1428  * update specific sections of the HW's version of the package (e.g. to update
1429  * the parse graph to understand new protocols).
1430  *
1431  * This function stores a pointer to the package buffer memory, and it is
1432  * expected that the supplied buffer will not be freed immediately. If the
1433  * package buffer needs to be freed, such as when read from a file, use
1434  * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
1435  * case.
1436  */
1437 enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
1438 {
1439 	struct ice_pkg_hdr *pkg;
1440 	enum ice_status status;
1441 	struct ice_seg *seg;
1442 
1443 	if (!buf || !len)
1444 		return ICE_ERR_PARAM;
1445 
1446 	pkg = (struct ice_pkg_hdr *)buf;
1447 	status = ice_verify_pkg(pkg, len);
1448 	if (status) {
1449 		ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
1450 			  status);
1451 		return status;
1452 	}
1453 
1454 	/* initialize package info */
1455 	status = ice_init_pkg_info(hw, pkg);
1456 	if (status)
1457 		return status;
1458 
1459 	/* before downloading the package, check package version for
1460 	 * compatibility with driver
1461 	 */
1462 	status = ice_chk_pkg_compat(hw, pkg, &seg);
1463 	if (status)
1464 		return status;
1465 
1466 	/* initialize package hints and then download package */
1467 	ice_init_pkg_hints(hw, seg);
1468 	status = ice_download_pkg(hw, seg);
1469 	if (status == ICE_ERR_AQ_NO_WORK) {
1470 		ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n");
1471 		status = 0;
1472 	}
1473 
1474 	/* Get information on the package currently loaded in HW, then make sure
1475 	 * the driver is compatible with this version.
1476 	 */
1477 	if (!status) {
1478 		status = ice_get_pkg_info(hw);
1479 		if (!status)
1480 			status = ice_chk_pkg_version(&hw->active_pkg_ver);
1481 	}
1482 
1483 	if (!status) {
1484 		hw->seg = seg;
1485 		/* on successful package download update other required
1486 		 * registers to support the package and fill HW tables
1487 		 * with package content.
1488 		 */
1489 		ice_init_pkg_regs(hw);
1490 		ice_fill_blk_tbls(hw);
1491 		ice_get_prof_index_max(hw);
1492 	} else {
1493 		ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
1494 			  status);
1495 	}
1496 
1497 	return status;
1498 }
1499 
1500 /**
1501  * ice_copy_and_init_pkg - initialize/download a copy of the package
1502  * @hw: pointer to the hardware structure
1503  * @buf: pointer to the package buffer
1504  * @len: size of the package buffer
1505  *
1506  * This function copies the package buffer, and then calls ice_init_pkg() to
1507  * initialize the copied package contents.
1508  *
1509  * The copying is necessary if the package buffer supplied is constant, or if
1510  * the memory may disappear shortly after calling this function.
1511  *
1512  * If the package buffer resides in the data segment and can be modified, the
1513  * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
1514  *
1515  * However, if the package buffer needs to be copied first, such as when being
1516  * read from a file, the caller should use ice_copy_and_init_pkg().
1517  *
1518  * This function will first copy the package buffer, before calling
1519  * ice_init_pkg(). The caller is free to immediately destroy the original
1520  * package buffer, as the new copy will be managed by this function and
1521  * related routines.
1522  */
1523 enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
1524 {
1525 	enum ice_status status;
1526 	u8 *buf_copy;
1527 
1528 	if (!buf || !len)
1529 		return ICE_ERR_PARAM;
1530 
1531 	buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL);
1532 
1533 	status = ice_init_pkg(hw, buf_copy, len);
1534 	if (status) {
1535 		/* Free the copy, since we failed to initialize the package */
1536 		devm_kfree(ice_hw_to_dev(hw), buf_copy);
1537 	} else {
1538 		/* Track the copied pkg so we can free it later */
1539 		hw->pkg_copy = buf_copy;
1540 		hw->pkg_size = len;
1541 	}
1542 
1543 	return status;
1544 }
1545 
1546 /**
1547  * ice_pkg_buf_alloc
1548  * @hw: pointer to the HW structure
1549  *
1550  * Allocates a package buffer and returns a pointer to the buffer header.
1551  * Note: all package contents must be in Little Endian form.
1552  */
1553 static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
1554 {
1555 	struct ice_buf_build *bld;
1556 	struct ice_buf_hdr *buf;
1557 
1558 	bld = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*bld), GFP_KERNEL);
1559 	if (!bld)
1560 		return NULL;
1561 
1562 	buf = (struct ice_buf_hdr *)bld;
1563 	buf->data_end = cpu_to_le16(offsetof(struct ice_buf_hdr,
1564 					     section_entry));
1565 	return bld;
1566 }
1567 
1568 /**
1569  * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
1570  * @hw: pointer to hardware structure
1571  * @req_profs: type of profiles requested
1572  * @bm: pointer to memory for returning the bitmap of field vectors
1573  */
1574 void
1575 ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
1576 		     unsigned long *bm)
1577 {
1578 	struct ice_pkg_enum state;
1579 	struct ice_seg *ice_seg;
1580 	struct ice_fv *fv;
1581 
1582 	if (req_profs == ICE_PROF_ALL) {
1583 		bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES);
1584 		return;
1585 	}
1586 
1587 	memset(&state, 0, sizeof(state));
1588 	bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
1589 	ice_seg = hw->seg;
1590 	do {
1591 		u32 offset;
1592 
1593 		fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1594 					&offset, ice_sw_fv_handler);
1595 		ice_seg = NULL;
1596 
1597 		if (fv) {
1598 			if (req_profs & ICE_PROF_NON_TUN)
1599 				set_bit((u16)offset, bm);
1600 		}
1601 	} while (fv);
1602 }
1603 
1604 /**
1605  * ice_get_sw_fv_list
1606  * @hw: pointer to the HW structure
1607  * @prot_ids: field vector to search for with a given protocol ID
1608  * @ids_cnt: lookup/protocol count
1609  * @bm: bitmap of field vectors to consider
1610  * @fv_list: Head of a list
1611  *
1612  * Finds all the field vector entries from switch block that contain
1613  * a given protocol ID and returns a list of structures of type
1614  * "ice_sw_fv_list_entry". Every structure in the list has a field vector
1615  * definition and profile ID information
1616  * NOTE: The caller of the function is responsible for freeing the memory
1617  * allocated for every list entry.
1618  */
1619 enum ice_status
1620 ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
1621 		   unsigned long *bm, struct list_head *fv_list)
1622 {
1623 	struct ice_sw_fv_list_entry *fvl;
1624 	struct ice_sw_fv_list_entry *tmp;
1625 	struct ice_pkg_enum state;
1626 	struct ice_seg *ice_seg;
1627 	struct ice_fv *fv;
1628 	u32 offset;
1629 
1630 	memset(&state, 0, sizeof(state));
1631 
1632 	if (!ids_cnt || !hw->seg)
1633 		return ICE_ERR_PARAM;
1634 
1635 	ice_seg = hw->seg;
1636 	do {
1637 		u16 i;
1638 
1639 		fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1640 					&offset, ice_sw_fv_handler);
1641 		if (!fv)
1642 			break;
1643 		ice_seg = NULL;
1644 
1645 		/* If field vector is not in the bitmap list, then skip this
1646 		 * profile.
1647 		 */
1648 		if (!test_bit((u16)offset, bm))
1649 			continue;
1650 
1651 		for (i = 0; i < ids_cnt; i++) {
1652 			int j;
1653 
1654 			/* This code assumes that if a switch field vector line
1655 			 * has a matching protocol, then this line will contain
1656 			 * the entries necessary to represent every field in
1657 			 * that protocol header.
1658 			 */
1659 			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
1660 				if (fv->ew[j].prot_id == prot_ids[i])
1661 					break;
1662 			if (j >= hw->blk[ICE_BLK_SW].es.fvw)
1663 				break;
1664 			if (i + 1 == ids_cnt) {
1665 				fvl = devm_kzalloc(ice_hw_to_dev(hw),
1666 						   sizeof(*fvl), GFP_KERNEL);
1667 				if (!fvl)
1668 					goto err;
1669 				fvl->fv_ptr = fv;
1670 				fvl->profile_id = offset;
1671 				list_add(&fvl->list_entry, fv_list);
1672 				break;
1673 			}
1674 		}
1675 	} while (fv);
1676 	if (list_empty(fv_list))
1677 		return ICE_ERR_CFG;
1678 	return 0;
1679 
1680 err:
1681 	list_for_each_entry_safe(fvl, tmp, fv_list, list_entry) {
1682 		list_del(&fvl->list_entry);
1683 		devm_kfree(ice_hw_to_dev(hw), fvl);
1684 	}
1685 
1686 	return ICE_ERR_NO_MEMORY;
1687 }
1688 
1689 /**
1690  * ice_init_prof_result_bm - Initialize the profile result index bitmap
1691  * @hw: pointer to hardware structure
1692  */
1693 void ice_init_prof_result_bm(struct ice_hw *hw)
1694 {
1695 	struct ice_pkg_enum state;
1696 	struct ice_seg *ice_seg;
1697 	struct ice_fv *fv;
1698 
1699 	memset(&state, 0, sizeof(state));
1700 
1701 	if (!hw->seg)
1702 		return;
1703 
1704 	ice_seg = hw->seg;
1705 	do {
1706 		u32 off;
1707 		u16 i;
1708 
1709 		fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1710 					&off, ice_sw_fv_handler);
1711 		ice_seg = NULL;
1712 		if (!fv)
1713 			break;
1714 
1715 		bitmap_zero(hw->switch_info->prof_res_bm[off],
1716 			    ICE_MAX_FV_WORDS);
1717 
1718 		/* Determine empty field vector indices, these can be
1719 		 * used for recipe results. Skip index 0, since it is
1720 		 * always used for Switch ID.
1721 		 */
1722 		for (i = 1; i < ICE_MAX_FV_WORDS; i++)
1723 			if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
1724 			    fv->ew[i].off == ICE_FV_OFFSET_INVAL)
1725 				set_bit(i, hw->switch_info->prof_res_bm[off]);
1726 	} while (fv);
1727 }
1728 
1729 /**
1730  * ice_pkg_buf_free
1731  * @hw: pointer to the HW structure
1732  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1733  *
1734  * Frees a package buffer
1735  */
1736 static void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
1737 {
1738 	devm_kfree(ice_hw_to_dev(hw), bld);
1739 }
1740 
1741 /**
1742  * ice_pkg_buf_reserve_section
1743  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1744  * @count: the number of sections to reserve
1745  *
1746  * Reserves one or more section table entries in a package buffer. This routine
1747  * can be called multiple times as long as they are made before calling
1748  * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section()
1749  * is called once, the number of sections that can be allocated will not be able
1750  * to be increased; not using all reserved sections is fine, but this will
1751  * result in some wasted space in the buffer.
1752  * Note: all package contents must be in Little Endian form.
1753  */
1754 static enum ice_status
1755 ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
1756 {
1757 	struct ice_buf_hdr *buf;
1758 	u16 section_count;
1759 	u16 data_end;
1760 
1761 	if (!bld)
1762 		return ICE_ERR_PARAM;
1763 
1764 	buf = (struct ice_buf_hdr *)&bld->buf;
1765 
1766 	/* already an active section, can't increase table size */
1767 	section_count = le16_to_cpu(buf->section_count);
1768 	if (section_count > 0)
1769 		return ICE_ERR_CFG;
1770 
1771 	if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
1772 		return ICE_ERR_CFG;
1773 	bld->reserved_section_table_entries += count;
1774 
1775 	data_end = le16_to_cpu(buf->data_end) +
1776 		flex_array_size(buf, section_entry, count);
1777 	buf->data_end = cpu_to_le16(data_end);
1778 
1779 	return 0;
1780 }
1781 
1782 /**
1783  * ice_pkg_buf_alloc_section
1784  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1785  * @type: the section type value
1786  * @size: the size of the section to reserve (in bytes)
1787  *
1788  * Reserves memory in the buffer for a section's content and updates the
1789  * buffers' status accordingly. This routine returns a pointer to the first
1790  * byte of the section start within the buffer, which is used to fill in the
1791  * section contents.
1792  * Note: all package contents must be in Little Endian form.
1793  */
1794 static void *
1795 ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
1796 {
1797 	struct ice_buf_hdr *buf;
1798 	u16 sect_count;
1799 	u16 data_end;
1800 
1801 	if (!bld || !type || !size)
1802 		return NULL;
1803 
1804 	buf = (struct ice_buf_hdr *)&bld->buf;
1805 
1806 	/* check for enough space left in buffer */
1807 	data_end = le16_to_cpu(buf->data_end);
1808 
1809 	/* section start must align on 4 byte boundary */
1810 	data_end = ALIGN(data_end, 4);
1811 
1812 	if ((data_end + size) > ICE_MAX_S_DATA_END)
1813 		return NULL;
1814 
1815 	/* check for more available section table entries */
1816 	sect_count = le16_to_cpu(buf->section_count);
1817 	if (sect_count < bld->reserved_section_table_entries) {
1818 		void *section_ptr = ((u8 *)buf) + data_end;
1819 
1820 		buf->section_entry[sect_count].offset = cpu_to_le16(data_end);
1821 		buf->section_entry[sect_count].size = cpu_to_le16(size);
1822 		buf->section_entry[sect_count].type = cpu_to_le32(type);
1823 
1824 		data_end += size;
1825 		buf->data_end = cpu_to_le16(data_end);
1826 
1827 		buf->section_count = cpu_to_le16(sect_count + 1);
1828 		return section_ptr;
1829 	}
1830 
1831 	/* no free section table entries */
1832 	return NULL;
1833 }
1834 
1835 /**
1836  * ice_pkg_buf_get_active_sections
1837  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1838  *
1839  * Returns the number of active sections. Before using the package buffer
1840  * in an update package command, the caller should make sure that there is at
1841  * least one active section - otherwise, the buffer is not legal and should
1842  * not be used.
1843  * Note: all package contents must be in Little Endian form.
1844  */
1845 static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
1846 {
1847 	struct ice_buf_hdr *buf;
1848 
1849 	if (!bld)
1850 		return 0;
1851 
1852 	buf = (struct ice_buf_hdr *)&bld->buf;
1853 	return le16_to_cpu(buf->section_count);
1854 }
1855 
1856 /**
1857  * ice_pkg_buf
1858  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1859  *
1860  * Return a pointer to the buffer's header
1861  */
1862 static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
1863 {
1864 	if (!bld)
1865 		return NULL;
1866 
1867 	return &bld->buf;
1868 }
1869 
1870 /**
1871  * ice_get_open_tunnel_port - retrieve an open tunnel port
1872  * @hw: pointer to the HW structure
1873  * @port: returns open port
1874  */
1875 bool
1876 ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port)
1877 {
1878 	bool res = false;
1879 	u16 i;
1880 
1881 	mutex_lock(&hw->tnl_lock);
1882 
1883 	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
1884 		if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].port) {
1885 			*port = hw->tnl.tbl[i].port;
1886 			res = true;
1887 			break;
1888 		}
1889 
1890 	mutex_unlock(&hw->tnl_lock);
1891 
1892 	return res;
1893 }
1894 
1895 /**
1896  * ice_tunnel_idx_to_entry - convert linear index to the sparse one
1897  * @hw: pointer to the HW structure
1898  * @type: type of tunnel
1899  * @idx: linear index
1900  *
1901  * Stack assumes we have 2 linear tables with indexes [0, count_valid),
1902  * but really the port table may be sprase, and types are mixed, so convert
1903  * the stack index into the device index.
1904  */
1905 static u16 ice_tunnel_idx_to_entry(struct ice_hw *hw, enum ice_tunnel_type type,
1906 				   u16 idx)
1907 {
1908 	u16 i;
1909 
1910 	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
1911 		if (hw->tnl.tbl[i].valid &&
1912 		    hw->tnl.tbl[i].type == type &&
1913 		    idx--)
1914 			return i;
1915 
1916 	WARN_ON_ONCE(1);
1917 	return 0;
1918 }
1919 
1920 /**
1921  * ice_create_tunnel
1922  * @hw: pointer to the HW structure
1923  * @index: device table entry
1924  * @type: type of tunnel
1925  * @port: port of tunnel to create
1926  *
1927  * Create a tunnel by updating the parse graph in the parser. We do that by
1928  * creating a package buffer with the tunnel info and issuing an update package
1929  * command.
1930  */
1931 static enum ice_status
1932 ice_create_tunnel(struct ice_hw *hw, u16 index,
1933 		  enum ice_tunnel_type type, u16 port)
1934 {
1935 	struct ice_boost_tcam_section *sect_rx, *sect_tx;
1936 	enum ice_status status = ICE_ERR_MAX_LIMIT;
1937 	struct ice_buf_build *bld;
1938 
1939 	mutex_lock(&hw->tnl_lock);
1940 
1941 	bld = ice_pkg_buf_alloc(hw);
1942 	if (!bld) {
1943 		status = ICE_ERR_NO_MEMORY;
1944 		goto ice_create_tunnel_end;
1945 	}
1946 
1947 	/* allocate 2 sections, one for Rx parser, one for Tx parser */
1948 	if (ice_pkg_buf_reserve_section(bld, 2))
1949 		goto ice_create_tunnel_err;
1950 
1951 	sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
1952 					    struct_size(sect_rx, tcam, 1));
1953 	if (!sect_rx)
1954 		goto ice_create_tunnel_err;
1955 	sect_rx->count = cpu_to_le16(1);
1956 
1957 	sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
1958 					    struct_size(sect_tx, tcam, 1));
1959 	if (!sect_tx)
1960 		goto ice_create_tunnel_err;
1961 	sect_tx->count = cpu_to_le16(1);
1962 
1963 	/* copy original boost entry to update package buffer */
1964 	memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
1965 	       sizeof(*sect_rx->tcam));
1966 
1967 	/* over-write the never-match dest port key bits with the encoded port
1968 	 * bits
1969 	 */
1970 	ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
1971 		    (u8 *)&port, NULL, NULL, NULL,
1972 		    (u16)offsetof(struct ice_boost_key_value, hv_dst_port_key),
1973 		    sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key));
1974 
1975 	/* exact copy of entry to Tx section entry */
1976 	memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam));
1977 
1978 	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
1979 	if (!status)
1980 		hw->tnl.tbl[index].port = port;
1981 
1982 ice_create_tunnel_err:
1983 	ice_pkg_buf_free(hw, bld);
1984 
1985 ice_create_tunnel_end:
1986 	mutex_unlock(&hw->tnl_lock);
1987 
1988 	return status;
1989 }
1990 
1991 /**
1992  * ice_destroy_tunnel
1993  * @hw: pointer to the HW structure
1994  * @index: device table entry
1995  * @type: type of tunnel
1996  * @port: port of tunnel to destroy (ignored if the all parameter is true)
1997  *
1998  * Destroys a tunnel or all tunnels by creating an update package buffer
1999  * targeting the specific updates requested and then performing an update
2000  * package.
2001  */
2002 static enum ice_status
2003 ice_destroy_tunnel(struct ice_hw *hw, u16 index, enum ice_tunnel_type type,
2004 		   u16 port)
2005 {
2006 	struct ice_boost_tcam_section *sect_rx, *sect_tx;
2007 	enum ice_status status = ICE_ERR_MAX_LIMIT;
2008 	struct ice_buf_build *bld;
2009 
2010 	mutex_lock(&hw->tnl_lock);
2011 
2012 	if (WARN_ON(!hw->tnl.tbl[index].valid ||
2013 		    hw->tnl.tbl[index].type != type ||
2014 		    hw->tnl.tbl[index].port != port)) {
2015 		status = ICE_ERR_OUT_OF_RANGE;
2016 		goto ice_destroy_tunnel_end;
2017 	}
2018 
2019 	bld = ice_pkg_buf_alloc(hw);
2020 	if (!bld) {
2021 		status = ICE_ERR_NO_MEMORY;
2022 		goto ice_destroy_tunnel_end;
2023 	}
2024 
2025 	/* allocate 2 sections, one for Rx parser, one for Tx parser */
2026 	if (ice_pkg_buf_reserve_section(bld, 2))
2027 		goto ice_destroy_tunnel_err;
2028 
2029 	sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
2030 					    struct_size(sect_rx, tcam, 1));
2031 	if (!sect_rx)
2032 		goto ice_destroy_tunnel_err;
2033 	sect_rx->count = cpu_to_le16(1);
2034 
2035 	sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
2036 					    struct_size(sect_tx, tcam, 1));
2037 	if (!sect_tx)
2038 		goto ice_destroy_tunnel_err;
2039 	sect_tx->count = cpu_to_le16(1);
2040 
2041 	/* copy original boost entry to update package buffer, one copy to Rx
2042 	 * section, another copy to the Tx section
2043 	 */
2044 	memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
2045 	       sizeof(*sect_rx->tcam));
2046 	memcpy(sect_tx->tcam, hw->tnl.tbl[index].boost_entry,
2047 	       sizeof(*sect_tx->tcam));
2048 
2049 	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
2050 	if (!status)
2051 		hw->tnl.tbl[index].port = 0;
2052 
2053 ice_destroy_tunnel_err:
2054 	ice_pkg_buf_free(hw, bld);
2055 
2056 ice_destroy_tunnel_end:
2057 	mutex_unlock(&hw->tnl_lock);
2058 
2059 	return status;
2060 }
2061 
2062 int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
2063 			    unsigned int idx, struct udp_tunnel_info *ti)
2064 {
2065 	struct ice_netdev_priv *np = netdev_priv(netdev);
2066 	struct ice_vsi *vsi = np->vsi;
2067 	struct ice_pf *pf = vsi->back;
2068 	enum ice_tunnel_type tnl_type;
2069 	enum ice_status status;
2070 	u16 index;
2071 
2072 	tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
2073 	index = ice_tunnel_idx_to_entry(&pf->hw, idx, tnl_type);
2074 
2075 	status = ice_create_tunnel(&pf->hw, index, tnl_type, ntohs(ti->port));
2076 	if (status) {
2077 		netdev_err(netdev, "Error adding UDP tunnel - %s\n",
2078 			   ice_stat_str(status));
2079 		return -EIO;
2080 	}
2081 
2082 	udp_tunnel_nic_set_port_priv(netdev, table, idx, index);
2083 	return 0;
2084 }
2085 
2086 int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
2087 			      unsigned int idx, struct udp_tunnel_info *ti)
2088 {
2089 	struct ice_netdev_priv *np = netdev_priv(netdev);
2090 	struct ice_vsi *vsi = np->vsi;
2091 	struct ice_pf *pf = vsi->back;
2092 	enum ice_tunnel_type tnl_type;
2093 	enum ice_status status;
2094 
2095 	tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
2096 
2097 	status = ice_destroy_tunnel(&pf->hw, ti->hw_priv, tnl_type,
2098 				    ntohs(ti->port));
2099 	if (status) {
2100 		netdev_err(netdev, "Error removing UDP tunnel - %s\n",
2101 			   ice_stat_str(status));
2102 		return -EIO;
2103 	}
2104 
2105 	return 0;
2106 }
2107 
2108 /**
2109  * ice_find_prot_off - find prot ID and offset pair, based on prof and FV index
2110  * @hw: pointer to the hardware structure
2111  * @blk: hardware block
2112  * @prof: profile ID
2113  * @fv_idx: field vector word index
2114  * @prot: variable to receive the protocol ID
2115  * @off: variable to receive the protocol offset
2116  */
2117 enum ice_status
2118 ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
2119 		  u8 *prot, u16 *off)
2120 {
2121 	struct ice_fv_word *fv_ext;
2122 
2123 	if (prof >= hw->blk[blk].es.count)
2124 		return ICE_ERR_PARAM;
2125 
2126 	if (fv_idx >= hw->blk[blk].es.fvw)
2127 		return ICE_ERR_PARAM;
2128 
2129 	fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw);
2130 
2131 	*prot = fv_ext[fv_idx].prot_id;
2132 	*off = fv_ext[fv_idx].off;
2133 
2134 	return 0;
2135 }
2136 
2137 /* PTG Management */
2138 
2139 /**
2140  * ice_ptg_find_ptype - Search for packet type group using packet type (ptype)
2141  * @hw: pointer to the hardware structure
2142  * @blk: HW block
2143  * @ptype: the ptype to search for
2144  * @ptg: pointer to variable that receives the PTG
2145  *
2146  * This function will search the PTGs for a particular ptype, returning the
2147  * PTG ID that contains it through the PTG parameter, with the value of
2148  * ICE_DEFAULT_PTG (0) meaning it is part the default PTG.
2149  */
2150 static enum ice_status
2151 ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
2152 {
2153 	if (ptype >= ICE_XLT1_CNT || !ptg)
2154 		return ICE_ERR_PARAM;
2155 
2156 	*ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
2157 	return 0;
2158 }
2159 
2160 /**
2161  * ice_ptg_alloc_val - Allocates a new packet type group ID by value
2162  * @hw: pointer to the hardware structure
2163  * @blk: HW block
2164  * @ptg: the PTG to allocate
2165  *
2166  * This function allocates a given packet type group ID specified by the PTG
2167  * parameter.
2168  */
2169 static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
2170 {
2171 	hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true;
2172 }
2173 
2174 /**
2175  * ice_ptg_remove_ptype - Removes ptype from a particular packet type group
2176  * @hw: pointer to the hardware structure
2177  * @blk: HW block
2178  * @ptype: the ptype to remove
2179  * @ptg: the PTG to remove the ptype from
2180  *
2181  * This function will remove the ptype from the specific PTG, and move it to
2182  * the default PTG (ICE_DEFAULT_PTG).
2183  */
2184 static enum ice_status
2185 ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
2186 {
2187 	struct ice_ptg_ptype **ch;
2188 	struct ice_ptg_ptype *p;
2189 
2190 	if (ptype > ICE_XLT1_CNT - 1)
2191 		return ICE_ERR_PARAM;
2192 
2193 	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use)
2194 		return ICE_ERR_DOES_NOT_EXIST;
2195 
2196 	/* Should not happen if .in_use is set, bad config */
2197 	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype)
2198 		return ICE_ERR_CFG;
2199 
2200 	/* find the ptype within this PTG, and bypass the link over it */
2201 	p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
2202 	ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
2203 	while (p) {
2204 		if (ptype == (p - hw->blk[blk].xlt1.ptypes)) {
2205 			*ch = p->next_ptype;
2206 			break;
2207 		}
2208 
2209 		ch = &p->next_ptype;
2210 		p = p->next_ptype;
2211 	}
2212 
2213 	hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG;
2214 	hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL;
2215 
2216 	return 0;
2217 }
2218 
2219 /**
2220  * ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group
2221  * @hw: pointer to the hardware structure
2222  * @blk: HW block
2223  * @ptype: the ptype to add or move
2224  * @ptg: the PTG to add or move the ptype to
2225  *
2226  * This function will either add or move a ptype to a particular PTG depending
2227  * on if the ptype is already part of another group. Note that using a
2228  * a destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
2229  * default PTG.
2230  */
2231 static enum ice_status
2232 ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
2233 {
2234 	enum ice_status status;
2235 	u8 original_ptg;
2236 
2237 	if (ptype > ICE_XLT1_CNT - 1)
2238 		return ICE_ERR_PARAM;
2239 
2240 	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG)
2241 		return ICE_ERR_DOES_NOT_EXIST;
2242 
2243 	status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
2244 	if (status)
2245 		return status;
2246 
2247 	/* Is ptype already in the correct PTG? */
2248 	if (original_ptg == ptg)
2249 		return 0;
2250 
2251 	/* Remove from original PTG and move back to the default PTG */
2252 	if (original_ptg != ICE_DEFAULT_PTG)
2253 		ice_ptg_remove_ptype(hw, blk, ptype, original_ptg);
2254 
2255 	/* Moving to default PTG? Then we're done with this request */
2256 	if (ptg == ICE_DEFAULT_PTG)
2257 		return 0;
2258 
2259 	/* Add ptype to PTG at beginning of list */
2260 	hw->blk[blk].xlt1.ptypes[ptype].next_ptype =
2261 		hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
2262 	hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype =
2263 		&hw->blk[blk].xlt1.ptypes[ptype];
2264 
2265 	hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg;
2266 	hw->blk[blk].xlt1.t[ptype] = ptg;
2267 
2268 	return 0;
2269 }
2270 
2271 /* Block / table size info */
2272 struct ice_blk_size_details {
2273 	u16 xlt1;			/* # XLT1 entries */
2274 	u16 xlt2;			/* # XLT2 entries */
2275 	u16 prof_tcam;			/* # profile ID TCAM entries */
2276 	u16 prof_id;			/* # profile IDs */
2277 	u8 prof_cdid_bits;		/* # CDID one-hot bits used in key */
2278 	u16 prof_redir;			/* # profile redirection entries */
2279 	u16 es;				/* # extraction sequence entries */
2280 	u16 fvw;			/* # field vector words */
2281 	u8 overwrite;			/* overwrite existing entries allowed */
2282 	u8 reverse;			/* reverse FV order */
2283 };
2284 
2285 static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = {
2286 	/**
2287 	 * Table Definitions
2288 	 * XLT1 - Number of entries in XLT1 table
2289 	 * XLT2 - Number of entries in XLT2 table
2290 	 * TCAM - Number of entries Profile ID TCAM table
2291 	 * CDID - Control Domain ID of the hardware block
2292 	 * PRED - Number of entries in the Profile Redirection Table
2293 	 * FV   - Number of entries in the Field Vector
2294 	 * FVW  - Width (in WORDs) of the Field Vector
2295 	 * OVR  - Overwrite existing table entries
2296 	 * REV  - Reverse FV
2297 	 */
2298 	/*          XLT1        , XLT2        ,TCAM, PID,CDID,PRED,   FV, FVW */
2299 	/*          Overwrite   , Reverse FV */
2300 	/* SW  */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256,   0,  256, 256,  48,
2301 		    false, false },
2302 	/* ACL */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0,  128, 128,  32,
2303 		    false, false },
2304 	/* FD  */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0,  128, 128,  24,
2305 		    false, true  },
2306 	/* RSS */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0,  128, 128,  24,
2307 		    true,  true  },
2308 	/* PE  */ { ICE_XLT1_CNT, ICE_XLT2_CNT,  64,  32,   0,   32,  32,  24,
2309 		    false, false },
2310 };
2311 
2312 enum ice_sid_all {
2313 	ICE_SID_XLT1_OFF = 0,
2314 	ICE_SID_XLT2_OFF,
2315 	ICE_SID_PR_OFF,
2316 	ICE_SID_PR_REDIR_OFF,
2317 	ICE_SID_ES_OFF,
2318 	ICE_SID_OFF_COUNT,
2319 };
2320 
2321 /* Characteristic handling */
2322 
2323 /**
2324  * ice_match_prop_lst - determine if properties of two lists match
2325  * @list1: first properties list
2326  * @list2: second properties list
2327  *
2328  * Count, cookies and the order must match in order to be considered equivalent.
2329  */
2330 static bool
2331 ice_match_prop_lst(struct list_head *list1, struct list_head *list2)
2332 {
2333 	struct ice_vsig_prof *tmp1;
2334 	struct ice_vsig_prof *tmp2;
2335 	u16 chk_count = 0;
2336 	u16 count = 0;
2337 
2338 	/* compare counts */
2339 	list_for_each_entry(tmp1, list1, list)
2340 		count++;
2341 	list_for_each_entry(tmp2, list2, list)
2342 		chk_count++;
2343 	/* cppcheck-suppress knownConditionTrueFalse */
2344 	if (!count || count != chk_count)
2345 		return false;
2346 
2347 	tmp1 = list_first_entry(list1, struct ice_vsig_prof, list);
2348 	tmp2 = list_first_entry(list2, struct ice_vsig_prof, list);
2349 
2350 	/* profile cookies must compare, and in the exact same order to take
2351 	 * into account priority
2352 	 */
2353 	while (count--) {
2354 		if (tmp2->profile_cookie != tmp1->profile_cookie)
2355 			return false;
2356 
2357 		tmp1 = list_next_entry(tmp1, list);
2358 		tmp2 = list_next_entry(tmp2, list);
2359 	}
2360 
2361 	return true;
2362 }
2363 
2364 /* VSIG Management */
2365 
2366 /**
2367  * ice_vsig_find_vsi - find a VSIG that contains a specified VSI
2368  * @hw: pointer to the hardware structure
2369  * @blk: HW block
2370  * @vsi: VSI of interest
2371  * @vsig: pointer to receive the VSI group
2372  *
2373  * This function will lookup the VSI entry in the XLT2 list and return
2374  * the VSI group its associated with.
2375  */
2376 static enum ice_status
2377 ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
2378 {
2379 	if (!vsig || vsi >= ICE_MAX_VSI)
2380 		return ICE_ERR_PARAM;
2381 
2382 	/* As long as there's a default or valid VSIG associated with the input
2383 	 * VSI, the functions returns a success. Any handling of VSIG will be
2384 	 * done by the following add, update or remove functions.
2385 	 */
2386 	*vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
2387 
2388 	return 0;
2389 }
2390 
2391 /**
2392  * ice_vsig_alloc_val - allocate a new VSIG by value
2393  * @hw: pointer to the hardware structure
2394  * @blk: HW block
2395  * @vsig: the VSIG to allocate
2396  *
2397  * This function will allocate a given VSIG specified by the VSIG parameter.
2398  */
2399 static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig)
2400 {
2401 	u16 idx = vsig & ICE_VSIG_IDX_M;
2402 
2403 	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) {
2404 		INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
2405 		hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true;
2406 	}
2407 
2408 	return ICE_VSIG_VALUE(idx, hw->pf_id);
2409 }
2410 
2411 /**
2412  * ice_vsig_alloc - Finds a free entry and allocates a new VSIG
2413  * @hw: pointer to the hardware structure
2414  * @blk: HW block
2415  *
2416  * This function will iterate through the VSIG list and mark the first
2417  * unused entry for the new VSIG entry as used and return that value.
2418  */
2419 static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
2420 {
2421 	u16 i;
2422 
2423 	for (i = 1; i < ICE_MAX_VSIGS; i++)
2424 		if (!hw->blk[blk].xlt2.vsig_tbl[i].in_use)
2425 			return ice_vsig_alloc_val(hw, blk, i);
2426 
2427 	return ICE_DEFAULT_VSIG;
2428 }
2429 
2430 /**
2431  * ice_find_dup_props_vsig - find VSI group with a specified set of properties
2432  * @hw: pointer to the hardware structure
2433  * @blk: HW block
2434  * @chs: characteristic list
2435  * @vsig: returns the VSIG with the matching profiles, if found
2436  *
2437  * Each VSIG is associated with a characteristic set; i.e. all VSIs under
2438  * a group have the same characteristic set. To check if there exists a VSIG
2439  * which has the same characteristics as the input characteristics; this
2440  * function will iterate through the XLT2 list and return the VSIG that has a
2441  * matching configuration. In order to make sure that priorities are accounted
2442  * for, the list must match exactly, including the order in which the
2443  * characteristics are listed.
2444  */
2445 static enum ice_status
2446 ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
2447 			struct list_head *chs, u16 *vsig)
2448 {
2449 	struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2;
2450 	u16 i;
2451 
2452 	for (i = 0; i < xlt2->count; i++)
2453 		if (xlt2->vsig_tbl[i].in_use &&
2454 		    ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) {
2455 			*vsig = ICE_VSIG_VALUE(i, hw->pf_id);
2456 			return 0;
2457 		}
2458 
2459 	return ICE_ERR_DOES_NOT_EXIST;
2460 }
2461 
2462 /**
2463  * ice_vsig_free - free VSI group
2464  * @hw: pointer to the hardware structure
2465  * @blk: HW block
2466  * @vsig: VSIG to remove
2467  *
2468  * The function will remove all VSIs associated with the input VSIG and move
2469  * them to the DEFAULT_VSIG and mark the VSIG available.
2470  */
2471 static enum ice_status
2472 ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
2473 {
2474 	struct ice_vsig_prof *dtmp, *del;
2475 	struct ice_vsig_vsi *vsi_cur;
2476 	u16 idx;
2477 
2478 	idx = vsig & ICE_VSIG_IDX_M;
2479 	if (idx >= ICE_MAX_VSIGS)
2480 		return ICE_ERR_PARAM;
2481 
2482 	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
2483 		return ICE_ERR_DOES_NOT_EXIST;
2484 
2485 	hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false;
2486 
2487 	vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2488 	/* If the VSIG has at least 1 VSI then iterate through the
2489 	 * list and remove the VSIs before deleting the group.
2490 	 */
2491 	if (vsi_cur) {
2492 		/* remove all vsis associated with this VSIG XLT2 entry */
2493 		do {
2494 			struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
2495 
2496 			vsi_cur->vsig = ICE_DEFAULT_VSIG;
2497 			vsi_cur->changed = 1;
2498 			vsi_cur->next_vsi = NULL;
2499 			vsi_cur = tmp;
2500 		} while (vsi_cur);
2501 
2502 		/* NULL terminate head of VSI list */
2503 		hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL;
2504 	}
2505 
2506 	/* free characteristic list */
2507 	list_for_each_entry_safe(del, dtmp,
2508 				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
2509 				 list) {
2510 		list_del(&del->list);
2511 		devm_kfree(ice_hw_to_dev(hw), del);
2512 	}
2513 
2514 	/* if VSIG characteristic list was cleared for reset
2515 	 * re-initialize the list head
2516 	 */
2517 	INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
2518 
2519 	return 0;
2520 }
2521 
2522 /**
2523  * ice_vsig_remove_vsi - remove VSI from VSIG
2524  * @hw: pointer to the hardware structure
2525  * @blk: HW block
2526  * @vsi: VSI to remove
2527  * @vsig: VSI group to remove from
2528  *
2529  * The function will remove the input VSI from its VSI group and move it
2530  * to the DEFAULT_VSIG.
2531  */
2532 static enum ice_status
2533 ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
2534 {
2535 	struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
2536 	u16 idx;
2537 
2538 	idx = vsig & ICE_VSIG_IDX_M;
2539 
2540 	if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
2541 		return ICE_ERR_PARAM;
2542 
2543 	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
2544 		return ICE_ERR_DOES_NOT_EXIST;
2545 
2546 	/* entry already in default VSIG, don't have to remove */
2547 	if (idx == ICE_DEFAULT_VSIG)
2548 		return 0;
2549 
2550 	vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2551 	if (!(*vsi_head))
2552 		return ICE_ERR_CFG;
2553 
2554 	vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
2555 	vsi_cur = (*vsi_head);
2556 
2557 	/* iterate the VSI list, skip over the entry to be removed */
2558 	while (vsi_cur) {
2559 		if (vsi_tgt == vsi_cur) {
2560 			(*vsi_head) = vsi_cur->next_vsi;
2561 			break;
2562 		}
2563 		vsi_head = &vsi_cur->next_vsi;
2564 		vsi_cur = vsi_cur->next_vsi;
2565 	}
2566 
2567 	/* verify if VSI was removed from group list */
2568 	if (!vsi_cur)
2569 		return ICE_ERR_DOES_NOT_EXIST;
2570 
2571 	vsi_cur->vsig = ICE_DEFAULT_VSIG;
2572 	vsi_cur->changed = 1;
2573 	vsi_cur->next_vsi = NULL;
2574 
2575 	return 0;
2576 }
2577 
2578 /**
2579  * ice_vsig_add_mv_vsi - add or move a VSI to a VSI group
2580  * @hw: pointer to the hardware structure
2581  * @blk: HW block
2582  * @vsi: VSI to move
2583  * @vsig: destination VSI group
2584  *
2585  * This function will move or add the input VSI to the target VSIG.
2586  * The function will find the original VSIG the VSI belongs to and
2587  * move the entry to the DEFAULT_VSIG, update the original VSIG and
2588  * then move entry to the new VSIG.
2589  */
2590 static enum ice_status
2591 ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
2592 {
2593 	struct ice_vsig_vsi *tmp;
2594 	enum ice_status status;
2595 	u16 orig_vsig, idx;
2596 
2597 	idx = vsig & ICE_VSIG_IDX_M;
2598 
2599 	if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
2600 		return ICE_ERR_PARAM;
2601 
2602 	/* if VSIG not in use and VSIG is not default type this VSIG
2603 	 * doesn't exist.
2604 	 */
2605 	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use &&
2606 	    vsig != ICE_DEFAULT_VSIG)
2607 		return ICE_ERR_DOES_NOT_EXIST;
2608 
2609 	status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
2610 	if (status)
2611 		return status;
2612 
2613 	/* no update required if vsigs match */
2614 	if (orig_vsig == vsig)
2615 		return 0;
2616 
2617 	if (orig_vsig != ICE_DEFAULT_VSIG) {
2618 		/* remove entry from orig_vsig and add to default VSIG */
2619 		status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig);
2620 		if (status)
2621 			return status;
2622 	}
2623 
2624 	if (idx == ICE_DEFAULT_VSIG)
2625 		return 0;
2626 
2627 	/* Create VSI entry and add VSIG and prop_mask values */
2628 	hw->blk[blk].xlt2.vsis[vsi].vsig = vsig;
2629 	hw->blk[blk].xlt2.vsis[vsi].changed = 1;
2630 
2631 	/* Add new entry to the head of the VSIG list */
2632 	tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2633 	hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi =
2634 		&hw->blk[blk].xlt2.vsis[vsi];
2635 	hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp;
2636 	hw->blk[blk].xlt2.t[vsi] = vsig;
2637 
2638 	return 0;
2639 }
2640 
2641 /**
2642  * ice_prof_has_mask_idx - determine if profile index masking is identical
2643  * @hw: pointer to the hardware structure
2644  * @blk: HW block
2645  * @prof: profile to check
2646  * @idx: profile index to check
2647  * @mask: mask to match
2648  */
2649 static bool
2650 ice_prof_has_mask_idx(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 idx,
2651 		      u16 mask)
2652 {
2653 	bool expect_no_mask = false;
2654 	bool found = false;
2655 	bool match = false;
2656 	u16 i;
2657 
2658 	/* If mask is 0x0000 or 0xffff, then there is no masking */
2659 	if (mask == 0 || mask == 0xffff)
2660 		expect_no_mask = true;
2661 
2662 	/* Scan the enabled masks on this profile, for the specified idx */
2663 	for (i = hw->blk[blk].masks.first; i < hw->blk[blk].masks.first +
2664 	     hw->blk[blk].masks.count; i++)
2665 		if (hw->blk[blk].es.mask_ena[prof] & BIT(i))
2666 			if (hw->blk[blk].masks.masks[i].in_use &&
2667 			    hw->blk[blk].masks.masks[i].idx == idx) {
2668 				found = true;
2669 				if (hw->blk[blk].masks.masks[i].mask == mask)
2670 					match = true;
2671 				break;
2672 			}
2673 
2674 	if (expect_no_mask) {
2675 		if (found)
2676 			return false;
2677 	} else {
2678 		if (!match)
2679 			return false;
2680 	}
2681 
2682 	return true;
2683 }
2684 
2685 /**
2686  * ice_prof_has_mask - determine if profile masking is identical
2687  * @hw: pointer to the hardware structure
2688  * @blk: HW block
2689  * @prof: profile to check
2690  * @masks: masks to match
2691  */
2692 static bool
2693 ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks)
2694 {
2695 	u16 i;
2696 
2697 	/* es->mask_ena[prof] will have the mask */
2698 	for (i = 0; i < hw->blk[blk].es.fvw; i++)
2699 		if (!ice_prof_has_mask_idx(hw, blk, prof, i, masks[i]))
2700 			return false;
2701 
2702 	return true;
2703 }
2704 
2705 /**
2706  * ice_find_prof_id_with_mask - find profile ID for a given field vector
2707  * @hw: pointer to the hardware structure
2708  * @blk: HW block
2709  * @fv: field vector to search for
2710  * @masks: masks for FV
2711  * @prof_id: receives the profile ID
2712  */
2713 static enum ice_status
2714 ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
2715 			   struct ice_fv_word *fv, u16 *masks, u8 *prof_id)
2716 {
2717 	struct ice_es *es = &hw->blk[blk].es;
2718 	u8 i;
2719 
2720 	/* For FD, we don't want to re-use a existed profile with the same
2721 	 * field vector and mask. This will cause rule interference.
2722 	 */
2723 	if (blk == ICE_BLK_FD)
2724 		return ICE_ERR_DOES_NOT_EXIST;
2725 
2726 	for (i = 0; i < (u8)es->count; i++) {
2727 		u16 off = i * es->fvw;
2728 
2729 		if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
2730 			continue;
2731 
2732 		/* check if masks settings are the same for this profile */
2733 		if (masks && !ice_prof_has_mask(hw, blk, i, masks))
2734 			continue;
2735 
2736 		*prof_id = i;
2737 		return 0;
2738 	}
2739 
2740 	return ICE_ERR_DOES_NOT_EXIST;
2741 }
2742 
2743 /**
2744  * ice_prof_id_rsrc_type - get profile ID resource type for a block type
2745  * @blk: the block type
2746  * @rsrc_type: pointer to variable to receive the resource type
2747  */
2748 static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type)
2749 {
2750 	switch (blk) {
2751 	case ICE_BLK_FD:
2752 		*rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID;
2753 		break;
2754 	case ICE_BLK_RSS:
2755 		*rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID;
2756 		break;
2757 	default:
2758 		return false;
2759 	}
2760 	return true;
2761 }
2762 
2763 /**
2764  * ice_tcam_ent_rsrc_type - get TCAM entry resource type for a block type
2765  * @blk: the block type
2766  * @rsrc_type: pointer to variable to receive the resource type
2767  */
2768 static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
2769 {
2770 	switch (blk) {
2771 	case ICE_BLK_FD:
2772 		*rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM;
2773 		break;
2774 	case ICE_BLK_RSS:
2775 		*rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM;
2776 		break;
2777 	default:
2778 		return false;
2779 	}
2780 	return true;
2781 }
2782 
2783 /**
2784  * ice_alloc_tcam_ent - allocate hardware TCAM entry
2785  * @hw: pointer to the HW struct
2786  * @blk: the block to allocate the TCAM for
2787  * @btm: true to allocate from bottom of table, false to allocate from top
2788  * @tcam_idx: pointer to variable to receive the TCAM entry
2789  *
2790  * This function allocates a new entry in a Profile ID TCAM for a specific
2791  * block.
2792  */
2793 static enum ice_status
2794 ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm,
2795 		   u16 *tcam_idx)
2796 {
2797 	u16 res_type;
2798 
2799 	if (!ice_tcam_ent_rsrc_type(blk, &res_type))
2800 		return ICE_ERR_PARAM;
2801 
2802 	return ice_alloc_hw_res(hw, res_type, 1, btm, tcam_idx);
2803 }
2804 
2805 /**
2806  * ice_free_tcam_ent - free hardware TCAM entry
2807  * @hw: pointer to the HW struct
2808  * @blk: the block from which to free the TCAM entry
2809  * @tcam_idx: the TCAM entry to free
2810  *
2811  * This function frees an entry in a Profile ID TCAM for a specific block.
2812  */
2813 static enum ice_status
2814 ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
2815 {
2816 	u16 res_type;
2817 
2818 	if (!ice_tcam_ent_rsrc_type(blk, &res_type))
2819 		return ICE_ERR_PARAM;
2820 
2821 	return ice_free_hw_res(hw, res_type, 1, &tcam_idx);
2822 }
2823 
2824 /**
2825  * ice_alloc_prof_id - allocate profile ID
2826  * @hw: pointer to the HW struct
2827  * @blk: the block to allocate the profile ID for
2828  * @prof_id: pointer to variable to receive the profile ID
2829  *
2830  * This function allocates a new profile ID, which also corresponds to a Field
2831  * Vector (Extraction Sequence) entry.
2832  */
2833 static enum ice_status
2834 ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
2835 {
2836 	enum ice_status status;
2837 	u16 res_type;
2838 	u16 get_prof;
2839 
2840 	if (!ice_prof_id_rsrc_type(blk, &res_type))
2841 		return ICE_ERR_PARAM;
2842 
2843 	status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof);
2844 	if (!status)
2845 		*prof_id = (u8)get_prof;
2846 
2847 	return status;
2848 }
2849 
2850 /**
2851  * ice_free_prof_id - free profile ID
2852  * @hw: pointer to the HW struct
2853  * @blk: the block from which to free the profile ID
2854  * @prof_id: the profile ID to free
2855  *
2856  * This function frees a profile ID, which also corresponds to a Field Vector.
2857  */
2858 static enum ice_status
2859 ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
2860 {
2861 	u16 tmp_prof_id = (u16)prof_id;
2862 	u16 res_type;
2863 
2864 	if (!ice_prof_id_rsrc_type(blk, &res_type))
2865 		return ICE_ERR_PARAM;
2866 
2867 	return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id);
2868 }
2869 
2870 /**
2871  * ice_prof_inc_ref - increment reference count for profile
2872  * @hw: pointer to the HW struct
2873  * @blk: the block from which to free the profile ID
2874  * @prof_id: the profile ID for which to increment the reference count
2875  */
2876 static enum ice_status
2877 ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
2878 {
2879 	if (prof_id > hw->blk[blk].es.count)
2880 		return ICE_ERR_PARAM;
2881 
2882 	hw->blk[blk].es.ref_count[prof_id]++;
2883 
2884 	return 0;
2885 }
2886 
2887 /**
2888  * ice_write_prof_mask_reg - write profile mask register
2889  * @hw: pointer to the HW struct
2890  * @blk: hardware block
2891  * @mask_idx: mask index
2892  * @idx: index of the FV which will use the mask
2893  * @mask: the 16-bit mask
2894  */
2895 static void
2896 ice_write_prof_mask_reg(struct ice_hw *hw, enum ice_block blk, u16 mask_idx,
2897 			u16 idx, u16 mask)
2898 {
2899 	u32 offset;
2900 	u32 val;
2901 
2902 	switch (blk) {
2903 	case ICE_BLK_RSS:
2904 		offset = GLQF_HMASK(mask_idx);
2905 		val = (idx << GLQF_HMASK_MSK_INDEX_S) & GLQF_HMASK_MSK_INDEX_M;
2906 		val |= (mask << GLQF_HMASK_MASK_S) & GLQF_HMASK_MASK_M;
2907 		break;
2908 	case ICE_BLK_FD:
2909 		offset = GLQF_FDMASK(mask_idx);
2910 		val = (idx << GLQF_FDMASK_MSK_INDEX_S) & GLQF_FDMASK_MSK_INDEX_M;
2911 		val |= (mask << GLQF_FDMASK_MASK_S) & GLQF_FDMASK_MASK_M;
2912 		break;
2913 	default:
2914 		ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
2915 			  blk);
2916 		return;
2917 	}
2918 
2919 	wr32(hw, offset, val);
2920 	ice_debug(hw, ICE_DBG_PKG, "write mask, blk %d (%d): %x = %x\n",
2921 		  blk, idx, offset, val);
2922 }
2923 
2924 /**
2925  * ice_write_prof_mask_enable_res - write profile mask enable register
2926  * @hw: pointer to the HW struct
2927  * @blk: hardware block
2928  * @prof_id: profile ID
2929  * @enable_mask: enable mask
2930  */
2931 static void
2932 ice_write_prof_mask_enable_res(struct ice_hw *hw, enum ice_block blk,
2933 			       u16 prof_id, u32 enable_mask)
2934 {
2935 	u32 offset;
2936 
2937 	switch (blk) {
2938 	case ICE_BLK_RSS:
2939 		offset = GLQF_HMASK_SEL(prof_id);
2940 		break;
2941 	case ICE_BLK_FD:
2942 		offset = GLQF_FDMASK_SEL(prof_id);
2943 		break;
2944 	default:
2945 		ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
2946 			  blk);
2947 		return;
2948 	}
2949 
2950 	wr32(hw, offset, enable_mask);
2951 	ice_debug(hw, ICE_DBG_PKG, "write mask enable, blk %d (%d): %x = %x\n",
2952 		  blk, prof_id, offset, enable_mask);
2953 }
2954 
2955 /**
2956  * ice_init_prof_masks - initial prof masks
2957  * @hw: pointer to the HW struct
2958  * @blk: hardware block
2959  */
2960 static void ice_init_prof_masks(struct ice_hw *hw, enum ice_block blk)
2961 {
2962 	u16 per_pf;
2963 	u16 i;
2964 
2965 	mutex_init(&hw->blk[blk].masks.lock);
2966 
2967 	per_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs;
2968 
2969 	hw->blk[blk].masks.count = per_pf;
2970 	hw->blk[blk].masks.first = hw->pf_id * per_pf;
2971 
2972 	memset(hw->blk[blk].masks.masks, 0, sizeof(hw->blk[blk].masks.masks));
2973 
2974 	for (i = hw->blk[blk].masks.first;
2975 	     i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
2976 		ice_write_prof_mask_reg(hw, blk, i, 0, 0);
2977 }
2978 
2979 /**
2980  * ice_init_all_prof_masks - initialize all prof masks
2981  * @hw: pointer to the HW struct
2982  */
2983 static void ice_init_all_prof_masks(struct ice_hw *hw)
2984 {
2985 	ice_init_prof_masks(hw, ICE_BLK_RSS);
2986 	ice_init_prof_masks(hw, ICE_BLK_FD);
2987 }
2988 
2989 /**
2990  * ice_alloc_prof_mask - allocate profile mask
2991  * @hw: pointer to the HW struct
2992  * @blk: hardware block
2993  * @idx: index of FV which will use the mask
2994  * @mask: the 16-bit mask
2995  * @mask_idx: variable to receive the mask index
2996  */
2997 static enum ice_status
2998 ice_alloc_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 idx, u16 mask,
2999 		    u16 *mask_idx)
3000 {
3001 	bool found_unused = false, found_copy = false;
3002 	enum ice_status status = ICE_ERR_MAX_LIMIT;
3003 	u16 unused_idx = 0, copy_idx = 0;
3004 	u16 i;
3005 
3006 	if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
3007 		return ICE_ERR_PARAM;
3008 
3009 	mutex_lock(&hw->blk[blk].masks.lock);
3010 
3011 	for (i = hw->blk[blk].masks.first;
3012 	     i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
3013 		if (hw->blk[blk].masks.masks[i].in_use) {
3014 			/* if mask is in use and it exactly duplicates the
3015 			 * desired mask and index, then in can be reused
3016 			 */
3017 			if (hw->blk[blk].masks.masks[i].mask == mask &&
3018 			    hw->blk[blk].masks.masks[i].idx == idx) {
3019 				found_copy = true;
3020 				copy_idx = i;
3021 				break;
3022 			}
3023 		} else {
3024 			/* save off unused index, but keep searching in case
3025 			 * there is an exact match later on
3026 			 */
3027 			if (!found_unused) {
3028 				found_unused = true;
3029 				unused_idx = i;
3030 			}
3031 		}
3032 
3033 	if (found_copy)
3034 		i = copy_idx;
3035 	else if (found_unused)
3036 		i = unused_idx;
3037 	else
3038 		goto err_ice_alloc_prof_mask;
3039 
3040 	/* update mask for a new entry */
3041 	if (found_unused) {
3042 		hw->blk[blk].masks.masks[i].in_use = true;
3043 		hw->blk[blk].masks.masks[i].mask = mask;
3044 		hw->blk[blk].masks.masks[i].idx = idx;
3045 		hw->blk[blk].masks.masks[i].ref = 0;
3046 		ice_write_prof_mask_reg(hw, blk, i, idx, mask);
3047 	}
3048 
3049 	hw->blk[blk].masks.masks[i].ref++;
3050 	*mask_idx = i;
3051 	status = 0;
3052 
3053 err_ice_alloc_prof_mask:
3054 	mutex_unlock(&hw->blk[blk].masks.lock);
3055 
3056 	return status;
3057 }
3058 
3059 /**
3060  * ice_free_prof_mask - free profile mask
3061  * @hw: pointer to the HW struct
3062  * @blk: hardware block
3063  * @mask_idx: index of mask
3064  */
3065 static enum ice_status
3066 ice_free_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 mask_idx)
3067 {
3068 	if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
3069 		return ICE_ERR_PARAM;
3070 
3071 	if (!(mask_idx >= hw->blk[blk].masks.first &&
3072 	      mask_idx < hw->blk[blk].masks.first + hw->blk[blk].masks.count))
3073 		return ICE_ERR_DOES_NOT_EXIST;
3074 
3075 	mutex_lock(&hw->blk[blk].masks.lock);
3076 
3077 	if (!hw->blk[blk].masks.masks[mask_idx].in_use)
3078 		goto exit_ice_free_prof_mask;
3079 
3080 	if (hw->blk[blk].masks.masks[mask_idx].ref > 1) {
3081 		hw->blk[blk].masks.masks[mask_idx].ref--;
3082 		goto exit_ice_free_prof_mask;
3083 	}
3084 
3085 	/* remove mask */
3086 	hw->blk[blk].masks.masks[mask_idx].in_use = false;
3087 	hw->blk[blk].masks.masks[mask_idx].mask = 0;
3088 	hw->blk[blk].masks.masks[mask_idx].idx = 0;
3089 
3090 	/* update mask as unused entry */
3091 	ice_debug(hw, ICE_DBG_PKG, "Free mask, blk %d, mask %d\n", blk,
3092 		  mask_idx);
3093 	ice_write_prof_mask_reg(hw, blk, mask_idx, 0, 0);
3094 
3095 exit_ice_free_prof_mask:
3096 	mutex_unlock(&hw->blk[blk].masks.lock);
3097 
3098 	return 0;
3099 }
3100 
3101 /**
3102  * ice_free_prof_masks - free all profile masks for a profile
3103  * @hw: pointer to the HW struct
3104  * @blk: hardware block
3105  * @prof_id: profile ID
3106  */
3107 static enum ice_status
3108 ice_free_prof_masks(struct ice_hw *hw, enum ice_block blk, u16 prof_id)
3109 {
3110 	u32 mask_bm;
3111 	u16 i;
3112 
3113 	if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
3114 		return ICE_ERR_PARAM;
3115 
3116 	mask_bm = hw->blk[blk].es.mask_ena[prof_id];
3117 	for (i = 0; i < BITS_PER_BYTE * sizeof(mask_bm); i++)
3118 		if (mask_bm & BIT(i))
3119 			ice_free_prof_mask(hw, blk, i);
3120 
3121 	return 0;
3122 }
3123 
3124 /**
3125  * ice_shutdown_prof_masks - releases lock for masking
3126  * @hw: pointer to the HW struct
3127  * @blk: hardware block
3128  *
3129  * This should be called before unloading the driver
3130  */
3131 static void ice_shutdown_prof_masks(struct ice_hw *hw, enum ice_block blk)
3132 {
3133 	u16 i;
3134 
3135 	mutex_lock(&hw->blk[blk].masks.lock);
3136 
3137 	for (i = hw->blk[blk].masks.first;
3138 	     i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++) {
3139 		ice_write_prof_mask_reg(hw, blk, i, 0, 0);
3140 
3141 		hw->blk[blk].masks.masks[i].in_use = false;
3142 		hw->blk[blk].masks.masks[i].idx = 0;
3143 		hw->blk[blk].masks.masks[i].mask = 0;
3144 	}
3145 
3146 	mutex_unlock(&hw->blk[blk].masks.lock);
3147 	mutex_destroy(&hw->blk[blk].masks.lock);
3148 }
3149 
3150 /**
3151  * ice_shutdown_all_prof_masks - releases all locks for masking
3152  * @hw: pointer to the HW struct
3153  *
3154  * This should be called before unloading the driver
3155  */
3156 static void ice_shutdown_all_prof_masks(struct ice_hw *hw)
3157 {
3158 	ice_shutdown_prof_masks(hw, ICE_BLK_RSS);
3159 	ice_shutdown_prof_masks(hw, ICE_BLK_FD);
3160 }
3161 
3162 /**
3163  * ice_update_prof_masking - set registers according to masking
3164  * @hw: pointer to the HW struct
3165  * @blk: hardware block
3166  * @prof_id: profile ID
3167  * @masks: masks
3168  */
3169 static enum ice_status
3170 ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id,
3171 			u16 *masks)
3172 {
3173 	bool err = false;
3174 	u32 ena_mask = 0;
3175 	u16 idx;
3176 	u16 i;
3177 
3178 	/* Only support FD and RSS masking, otherwise nothing to be done */
3179 	if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
3180 		return 0;
3181 
3182 	for (i = 0; i < hw->blk[blk].es.fvw; i++)
3183 		if (masks[i] && masks[i] != 0xFFFF) {
3184 			if (!ice_alloc_prof_mask(hw, blk, i, masks[i], &idx)) {
3185 				ena_mask |= BIT(idx);
3186 			} else {
3187 				/* not enough bitmaps */
3188 				err = true;
3189 				break;
3190 			}
3191 		}
3192 
3193 	if (err) {
3194 		/* free any bitmaps we have allocated */
3195 		for (i = 0; i < BITS_PER_BYTE * sizeof(ena_mask); i++)
3196 			if (ena_mask & BIT(i))
3197 				ice_free_prof_mask(hw, blk, i);
3198 
3199 		return ICE_ERR_OUT_OF_RANGE;
3200 	}
3201 
3202 	/* enable the masks for this profile */
3203 	ice_write_prof_mask_enable_res(hw, blk, prof_id, ena_mask);
3204 
3205 	/* store enabled masks with profile so that they can be freed later */
3206 	hw->blk[blk].es.mask_ena[prof_id] = ena_mask;
3207 
3208 	return 0;
3209 }
3210 
3211 /**
3212  * ice_write_es - write an extraction sequence to hardware
3213  * @hw: pointer to the HW struct
3214  * @blk: the block in which to write the extraction sequence
3215  * @prof_id: the profile ID to write
3216  * @fv: pointer to the extraction sequence to write - NULL to clear extraction
3217  */
3218 static void
3219 ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
3220 	     struct ice_fv_word *fv)
3221 {
3222 	u16 off;
3223 
3224 	off = prof_id * hw->blk[blk].es.fvw;
3225 	if (!fv) {
3226 		memset(&hw->blk[blk].es.t[off], 0,
3227 		       hw->blk[blk].es.fvw * sizeof(*fv));
3228 		hw->blk[blk].es.written[prof_id] = false;
3229 	} else {
3230 		memcpy(&hw->blk[blk].es.t[off], fv,
3231 		       hw->blk[blk].es.fvw * sizeof(*fv));
3232 	}
3233 }
3234 
3235 /**
3236  * ice_prof_dec_ref - decrement reference count for profile
3237  * @hw: pointer to the HW struct
3238  * @blk: the block from which to free the profile ID
3239  * @prof_id: the profile ID for which to decrement the reference count
3240  */
3241 static enum ice_status
3242 ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
3243 {
3244 	if (prof_id > hw->blk[blk].es.count)
3245 		return ICE_ERR_PARAM;
3246 
3247 	if (hw->blk[blk].es.ref_count[prof_id] > 0) {
3248 		if (!--hw->blk[blk].es.ref_count[prof_id]) {
3249 			ice_write_es(hw, blk, prof_id, NULL);
3250 			ice_free_prof_masks(hw, blk, prof_id);
3251 			return ice_free_prof_id(hw, blk, prof_id);
3252 		}
3253 	}
3254 
3255 	return 0;
3256 }
3257 
3258 /* Block / table section IDs */
3259 static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = {
3260 	/* SWITCH */
3261 	{	ICE_SID_XLT1_SW,
3262 		ICE_SID_XLT2_SW,
3263 		ICE_SID_PROFID_TCAM_SW,
3264 		ICE_SID_PROFID_REDIR_SW,
3265 		ICE_SID_FLD_VEC_SW
3266 	},
3267 
3268 	/* ACL */
3269 	{	ICE_SID_XLT1_ACL,
3270 		ICE_SID_XLT2_ACL,
3271 		ICE_SID_PROFID_TCAM_ACL,
3272 		ICE_SID_PROFID_REDIR_ACL,
3273 		ICE_SID_FLD_VEC_ACL
3274 	},
3275 
3276 	/* FD */
3277 	{	ICE_SID_XLT1_FD,
3278 		ICE_SID_XLT2_FD,
3279 		ICE_SID_PROFID_TCAM_FD,
3280 		ICE_SID_PROFID_REDIR_FD,
3281 		ICE_SID_FLD_VEC_FD
3282 	},
3283 
3284 	/* RSS */
3285 	{	ICE_SID_XLT1_RSS,
3286 		ICE_SID_XLT2_RSS,
3287 		ICE_SID_PROFID_TCAM_RSS,
3288 		ICE_SID_PROFID_REDIR_RSS,
3289 		ICE_SID_FLD_VEC_RSS
3290 	},
3291 
3292 	/* PE */
3293 	{	ICE_SID_XLT1_PE,
3294 		ICE_SID_XLT2_PE,
3295 		ICE_SID_PROFID_TCAM_PE,
3296 		ICE_SID_PROFID_REDIR_PE,
3297 		ICE_SID_FLD_VEC_PE
3298 	}
3299 };
3300 
3301 /**
3302  * ice_init_sw_xlt1_db - init software XLT1 database from HW tables
3303  * @hw: pointer to the hardware structure
3304  * @blk: the HW block to initialize
3305  */
3306 static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk)
3307 {
3308 	u16 pt;
3309 
3310 	for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) {
3311 		u8 ptg;
3312 
3313 		ptg = hw->blk[blk].xlt1.t[pt];
3314 		if (ptg != ICE_DEFAULT_PTG) {
3315 			ice_ptg_alloc_val(hw, blk, ptg);
3316 			ice_ptg_add_mv_ptype(hw, blk, pt, ptg);
3317 		}
3318 	}
3319 }
3320 
3321 /**
3322  * ice_init_sw_xlt2_db - init software XLT2 database from HW tables
3323  * @hw: pointer to the hardware structure
3324  * @blk: the HW block to initialize
3325  */
3326 static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk)
3327 {
3328 	u16 vsi;
3329 
3330 	for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) {
3331 		u16 vsig;
3332 
3333 		vsig = hw->blk[blk].xlt2.t[vsi];
3334 		if (vsig) {
3335 			ice_vsig_alloc_val(hw, blk, vsig);
3336 			ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
3337 			/* no changes at this time, since this has been
3338 			 * initialized from the original package
3339 			 */
3340 			hw->blk[blk].xlt2.vsis[vsi].changed = 0;
3341 		}
3342 	}
3343 }
3344 
3345 /**
3346  * ice_init_sw_db - init software database from HW tables
3347  * @hw: pointer to the hardware structure
3348  */
3349 static void ice_init_sw_db(struct ice_hw *hw)
3350 {
3351 	u16 i;
3352 
3353 	for (i = 0; i < ICE_BLK_COUNT; i++) {
3354 		ice_init_sw_xlt1_db(hw, (enum ice_block)i);
3355 		ice_init_sw_xlt2_db(hw, (enum ice_block)i);
3356 	}
3357 }
3358 
3359 /**
3360  * ice_fill_tbl - Reads content of a single table type into database
3361  * @hw: pointer to the hardware structure
3362  * @block_id: Block ID of the table to copy
3363  * @sid: Section ID of the table to copy
3364  *
3365  * Will attempt to read the entire content of a given table of a single block
3366  * into the driver database. We assume that the buffer will always
3367  * be as large or larger than the data contained in the package. If
3368  * this condition is not met, there is most likely an error in the package
3369  * contents.
3370  */
3371 static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
3372 {
3373 	u32 dst_len, sect_len, offset = 0;
3374 	struct ice_prof_redir_section *pr;
3375 	struct ice_prof_id_section *pid;
3376 	struct ice_xlt1_section *xlt1;
3377 	struct ice_xlt2_section *xlt2;
3378 	struct ice_sw_fv_section *es;
3379 	struct ice_pkg_enum state;
3380 	u8 *src, *dst;
3381 	void *sect;
3382 
3383 	/* if the HW segment pointer is null then the first iteration of
3384 	 * ice_pkg_enum_section() will fail. In this case the HW tables will
3385 	 * not be filled and return success.
3386 	 */
3387 	if (!hw->seg) {
3388 		ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n");
3389 		return;
3390 	}
3391 
3392 	memset(&state, 0, sizeof(state));
3393 
3394 	sect = ice_pkg_enum_section(hw->seg, &state, sid);
3395 
3396 	while (sect) {
3397 		switch (sid) {
3398 		case ICE_SID_XLT1_SW:
3399 		case ICE_SID_XLT1_FD:
3400 		case ICE_SID_XLT1_RSS:
3401 		case ICE_SID_XLT1_ACL:
3402 		case ICE_SID_XLT1_PE:
3403 			xlt1 = sect;
3404 			src = xlt1->value;
3405 			sect_len = le16_to_cpu(xlt1->count) *
3406 				sizeof(*hw->blk[block_id].xlt1.t);
3407 			dst = hw->blk[block_id].xlt1.t;
3408 			dst_len = hw->blk[block_id].xlt1.count *
3409 				sizeof(*hw->blk[block_id].xlt1.t);
3410 			break;
3411 		case ICE_SID_XLT2_SW:
3412 		case ICE_SID_XLT2_FD:
3413 		case ICE_SID_XLT2_RSS:
3414 		case ICE_SID_XLT2_ACL:
3415 		case ICE_SID_XLT2_PE:
3416 			xlt2 = sect;
3417 			src = (__force u8 *)xlt2->value;
3418 			sect_len = le16_to_cpu(xlt2->count) *
3419 				sizeof(*hw->blk[block_id].xlt2.t);
3420 			dst = (u8 *)hw->blk[block_id].xlt2.t;
3421 			dst_len = hw->blk[block_id].xlt2.count *
3422 				sizeof(*hw->blk[block_id].xlt2.t);
3423 			break;
3424 		case ICE_SID_PROFID_TCAM_SW:
3425 		case ICE_SID_PROFID_TCAM_FD:
3426 		case ICE_SID_PROFID_TCAM_RSS:
3427 		case ICE_SID_PROFID_TCAM_ACL:
3428 		case ICE_SID_PROFID_TCAM_PE:
3429 			pid = sect;
3430 			src = (u8 *)pid->entry;
3431 			sect_len = le16_to_cpu(pid->count) *
3432 				sizeof(*hw->blk[block_id].prof.t);
3433 			dst = (u8 *)hw->blk[block_id].prof.t;
3434 			dst_len = hw->blk[block_id].prof.count *
3435 				sizeof(*hw->blk[block_id].prof.t);
3436 			break;
3437 		case ICE_SID_PROFID_REDIR_SW:
3438 		case ICE_SID_PROFID_REDIR_FD:
3439 		case ICE_SID_PROFID_REDIR_RSS:
3440 		case ICE_SID_PROFID_REDIR_ACL:
3441 		case ICE_SID_PROFID_REDIR_PE:
3442 			pr = sect;
3443 			src = pr->redir_value;
3444 			sect_len = le16_to_cpu(pr->count) *
3445 				sizeof(*hw->blk[block_id].prof_redir.t);
3446 			dst = hw->blk[block_id].prof_redir.t;
3447 			dst_len = hw->blk[block_id].prof_redir.count *
3448 				sizeof(*hw->blk[block_id].prof_redir.t);
3449 			break;
3450 		case ICE_SID_FLD_VEC_SW:
3451 		case ICE_SID_FLD_VEC_FD:
3452 		case ICE_SID_FLD_VEC_RSS:
3453 		case ICE_SID_FLD_VEC_ACL:
3454 		case ICE_SID_FLD_VEC_PE:
3455 			es = sect;
3456 			src = (u8 *)es->fv;
3457 			sect_len = (u32)(le16_to_cpu(es->count) *
3458 					 hw->blk[block_id].es.fvw) *
3459 				sizeof(*hw->blk[block_id].es.t);
3460 			dst = (u8 *)hw->blk[block_id].es.t;
3461 			dst_len = (u32)(hw->blk[block_id].es.count *
3462 					hw->blk[block_id].es.fvw) *
3463 				sizeof(*hw->blk[block_id].es.t);
3464 			break;
3465 		default:
3466 			return;
3467 		}
3468 
3469 		/* if the section offset exceeds destination length, terminate
3470 		 * table fill.
3471 		 */
3472 		if (offset > dst_len)
3473 			return;
3474 
3475 		/* if the sum of section size and offset exceed destination size
3476 		 * then we are out of bounds of the HW table size for that PF.
3477 		 * Changing section length to fill the remaining table space
3478 		 * of that PF.
3479 		 */
3480 		if ((offset + sect_len) > dst_len)
3481 			sect_len = dst_len - offset;
3482 
3483 		memcpy(dst + offset, src, sect_len);
3484 		offset += sect_len;
3485 		sect = ice_pkg_enum_section(NULL, &state, sid);
3486 	}
3487 }
3488 
3489 /**
3490  * ice_fill_blk_tbls - Read package context for tables
3491  * @hw: pointer to the hardware structure
3492  *
3493  * Reads the current package contents and populates the driver
3494  * database with the data iteratively for all advanced feature
3495  * blocks. Assume that the HW tables have been allocated.
3496  */
3497 void ice_fill_blk_tbls(struct ice_hw *hw)
3498 {
3499 	u8 i;
3500 
3501 	for (i = 0; i < ICE_BLK_COUNT; i++) {
3502 		enum ice_block blk_id = (enum ice_block)i;
3503 
3504 		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid);
3505 		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid);
3506 		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid);
3507 		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid);
3508 		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid);
3509 	}
3510 
3511 	ice_init_sw_db(hw);
3512 }
3513 
3514 /**
3515  * ice_free_prof_map - free profile map
3516  * @hw: pointer to the hardware structure
3517  * @blk_idx: HW block index
3518  */
3519 static void ice_free_prof_map(struct ice_hw *hw, u8 blk_idx)
3520 {
3521 	struct ice_es *es = &hw->blk[blk_idx].es;
3522 	struct ice_prof_map *del, *tmp;
3523 
3524 	mutex_lock(&es->prof_map_lock);
3525 	list_for_each_entry_safe(del, tmp, &es->prof_map, list) {
3526 		list_del(&del->list);
3527 		devm_kfree(ice_hw_to_dev(hw), del);
3528 	}
3529 	INIT_LIST_HEAD(&es->prof_map);
3530 	mutex_unlock(&es->prof_map_lock);
3531 }
3532 
3533 /**
3534  * ice_free_flow_profs - free flow profile entries
3535  * @hw: pointer to the hardware structure
3536  * @blk_idx: HW block index
3537  */
3538 static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx)
3539 {
3540 	struct ice_flow_prof *p, *tmp;
3541 
3542 	mutex_lock(&hw->fl_profs_locks[blk_idx]);
3543 	list_for_each_entry_safe(p, tmp, &hw->fl_profs[blk_idx], l_entry) {
3544 		struct ice_flow_entry *e, *t;
3545 
3546 		list_for_each_entry_safe(e, t, &p->entries, l_entry)
3547 			ice_flow_rem_entry(hw, (enum ice_block)blk_idx,
3548 					   ICE_FLOW_ENTRY_HNDL(e));
3549 
3550 		list_del(&p->l_entry);
3551 
3552 		mutex_destroy(&p->entries_lock);
3553 		devm_kfree(ice_hw_to_dev(hw), p);
3554 	}
3555 	mutex_unlock(&hw->fl_profs_locks[blk_idx]);
3556 
3557 	/* if driver is in reset and tables are being cleared
3558 	 * re-initialize the flow profile list heads
3559 	 */
3560 	INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
3561 }
3562 
3563 /**
3564  * ice_free_vsig_tbl - free complete VSIG table entries
3565  * @hw: pointer to the hardware structure
3566  * @blk: the HW block on which to free the VSIG table entries
3567  */
3568 static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk)
3569 {
3570 	u16 i;
3571 
3572 	if (!hw->blk[blk].xlt2.vsig_tbl)
3573 		return;
3574 
3575 	for (i = 1; i < ICE_MAX_VSIGS; i++)
3576 		if (hw->blk[blk].xlt2.vsig_tbl[i].in_use)
3577 			ice_vsig_free(hw, blk, i);
3578 }
3579 
3580 /**
3581  * ice_free_hw_tbls - free hardware table memory
3582  * @hw: pointer to the hardware structure
3583  */
3584 void ice_free_hw_tbls(struct ice_hw *hw)
3585 {
3586 	struct ice_rss_cfg *r, *rt;
3587 	u8 i;
3588 
3589 	for (i = 0; i < ICE_BLK_COUNT; i++) {
3590 		if (hw->blk[i].is_list_init) {
3591 			struct ice_es *es = &hw->blk[i].es;
3592 
3593 			ice_free_prof_map(hw, i);
3594 			mutex_destroy(&es->prof_map_lock);
3595 
3596 			ice_free_flow_profs(hw, i);
3597 			mutex_destroy(&hw->fl_profs_locks[i]);
3598 
3599 			hw->blk[i].is_list_init = false;
3600 		}
3601 		ice_free_vsig_tbl(hw, (enum ice_block)i);
3602 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptypes);
3603 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptg_tbl);
3604 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.t);
3605 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.t);
3606 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsig_tbl);
3607 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsis);
3608 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof.t);
3609 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_redir.t);
3610 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.t);
3611 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.ref_count);
3612 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written);
3613 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.mask_ena);
3614 	}
3615 
3616 	list_for_each_entry_safe(r, rt, &hw->rss_list_head, l_entry) {
3617 		list_del(&r->l_entry);
3618 		devm_kfree(ice_hw_to_dev(hw), r);
3619 	}
3620 	mutex_destroy(&hw->rss_locks);
3621 	ice_shutdown_all_prof_masks(hw);
3622 	memset(hw->blk, 0, sizeof(hw->blk));
3623 }
3624 
3625 /**
3626  * ice_init_flow_profs - init flow profile locks and list heads
3627  * @hw: pointer to the hardware structure
3628  * @blk_idx: HW block index
3629  */
3630 static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx)
3631 {
3632 	mutex_init(&hw->fl_profs_locks[blk_idx]);
3633 	INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
3634 }
3635 
3636 /**
3637  * ice_clear_hw_tbls - clear HW tables and flow profiles
3638  * @hw: pointer to the hardware structure
3639  */
3640 void ice_clear_hw_tbls(struct ice_hw *hw)
3641 {
3642 	u8 i;
3643 
3644 	for (i = 0; i < ICE_BLK_COUNT; i++) {
3645 		struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
3646 		struct ice_prof_tcam *prof = &hw->blk[i].prof;
3647 		struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
3648 		struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
3649 		struct ice_es *es = &hw->blk[i].es;
3650 
3651 		if (hw->blk[i].is_list_init) {
3652 			ice_free_prof_map(hw, i);
3653 			ice_free_flow_profs(hw, i);
3654 		}
3655 
3656 		ice_free_vsig_tbl(hw, (enum ice_block)i);
3657 
3658 		memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes));
3659 		memset(xlt1->ptg_tbl, 0,
3660 		       ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl));
3661 		memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t));
3662 
3663 		memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis));
3664 		memset(xlt2->vsig_tbl, 0,
3665 		       xlt2->count * sizeof(*xlt2->vsig_tbl));
3666 		memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t));
3667 
3668 		memset(prof->t, 0, prof->count * sizeof(*prof->t));
3669 		memset(prof_redir->t, 0,
3670 		       prof_redir->count * sizeof(*prof_redir->t));
3671 
3672 		memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw);
3673 		memset(es->ref_count, 0, es->count * sizeof(*es->ref_count));
3674 		memset(es->written, 0, es->count * sizeof(*es->written));
3675 		memset(es->mask_ena, 0, es->count * sizeof(*es->mask_ena));
3676 	}
3677 }
3678 
3679 /**
3680  * ice_init_hw_tbls - init hardware table memory
3681  * @hw: pointer to the hardware structure
3682  */
3683 enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
3684 {
3685 	u8 i;
3686 
3687 	mutex_init(&hw->rss_locks);
3688 	INIT_LIST_HEAD(&hw->rss_list_head);
3689 	ice_init_all_prof_masks(hw);
3690 	for (i = 0; i < ICE_BLK_COUNT; i++) {
3691 		struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
3692 		struct ice_prof_tcam *prof = &hw->blk[i].prof;
3693 		struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
3694 		struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
3695 		struct ice_es *es = &hw->blk[i].es;
3696 		u16 j;
3697 
3698 		if (hw->blk[i].is_list_init)
3699 			continue;
3700 
3701 		ice_init_flow_profs(hw, i);
3702 		mutex_init(&es->prof_map_lock);
3703 		INIT_LIST_HEAD(&es->prof_map);
3704 		hw->blk[i].is_list_init = true;
3705 
3706 		hw->blk[i].overwrite = blk_sizes[i].overwrite;
3707 		es->reverse = blk_sizes[i].reverse;
3708 
3709 		xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF];
3710 		xlt1->count = blk_sizes[i].xlt1;
3711 
3712 		xlt1->ptypes = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count,
3713 					    sizeof(*xlt1->ptypes), GFP_KERNEL);
3714 
3715 		if (!xlt1->ptypes)
3716 			goto err;
3717 
3718 		xlt1->ptg_tbl = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_PTGS,
3719 					     sizeof(*xlt1->ptg_tbl),
3720 					     GFP_KERNEL);
3721 
3722 		if (!xlt1->ptg_tbl)
3723 			goto err;
3724 
3725 		xlt1->t = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count,
3726 				       sizeof(*xlt1->t), GFP_KERNEL);
3727 		if (!xlt1->t)
3728 			goto err;
3729 
3730 		xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF];
3731 		xlt2->count = blk_sizes[i].xlt2;
3732 
3733 		xlt2->vsis = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
3734 					  sizeof(*xlt2->vsis), GFP_KERNEL);
3735 
3736 		if (!xlt2->vsis)
3737 			goto err;
3738 
3739 		xlt2->vsig_tbl = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
3740 					      sizeof(*xlt2->vsig_tbl),
3741 					      GFP_KERNEL);
3742 		if (!xlt2->vsig_tbl)
3743 			goto err;
3744 
3745 		for (j = 0; j < xlt2->count; j++)
3746 			INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst);
3747 
3748 		xlt2->t = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
3749 				       sizeof(*xlt2->t), GFP_KERNEL);
3750 		if (!xlt2->t)
3751 			goto err;
3752 
3753 		prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF];
3754 		prof->count = blk_sizes[i].prof_tcam;
3755 		prof->max_prof_id = blk_sizes[i].prof_id;
3756 		prof->cdid_bits = blk_sizes[i].prof_cdid_bits;
3757 		prof->t = devm_kcalloc(ice_hw_to_dev(hw), prof->count,
3758 				       sizeof(*prof->t), GFP_KERNEL);
3759 
3760 		if (!prof->t)
3761 			goto err;
3762 
3763 		prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF];
3764 		prof_redir->count = blk_sizes[i].prof_redir;
3765 		prof_redir->t = devm_kcalloc(ice_hw_to_dev(hw),
3766 					     prof_redir->count,
3767 					     sizeof(*prof_redir->t),
3768 					     GFP_KERNEL);
3769 
3770 		if (!prof_redir->t)
3771 			goto err;
3772 
3773 		es->sid = ice_blk_sids[i][ICE_SID_ES_OFF];
3774 		es->count = blk_sizes[i].es;
3775 		es->fvw = blk_sizes[i].fvw;
3776 		es->t = devm_kcalloc(ice_hw_to_dev(hw),
3777 				     (u32)(es->count * es->fvw),
3778 				     sizeof(*es->t), GFP_KERNEL);
3779 		if (!es->t)
3780 			goto err;
3781 
3782 		es->ref_count = devm_kcalloc(ice_hw_to_dev(hw), es->count,
3783 					     sizeof(*es->ref_count),
3784 					     GFP_KERNEL);
3785 		if (!es->ref_count)
3786 			goto err;
3787 
3788 		es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count,
3789 					   sizeof(*es->written), GFP_KERNEL);
3790 		if (!es->written)
3791 			goto err;
3792 
3793 		es->mask_ena = devm_kcalloc(ice_hw_to_dev(hw), es->count,
3794 					    sizeof(*es->mask_ena), GFP_KERNEL);
3795 		if (!es->mask_ena)
3796 			goto err;
3797 	}
3798 	return 0;
3799 
3800 err:
3801 	ice_free_hw_tbls(hw);
3802 	return ICE_ERR_NO_MEMORY;
3803 }
3804 
3805 /**
3806  * ice_prof_gen_key - generate profile ID key
3807  * @hw: pointer to the HW struct
3808  * @blk: the block in which to write profile ID to
3809  * @ptg: packet type group (PTG) portion of key
3810  * @vsig: VSIG portion of key
3811  * @cdid: CDID portion of key
3812  * @flags: flag portion of key
3813  * @vl_msk: valid mask
3814  * @dc_msk: don't care mask
3815  * @nm_msk: never match mask
3816  * @key: output of profile ID key
3817  */
3818 static enum ice_status
3819 ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
3820 		 u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
3821 		 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ],
3822 		 u8 key[ICE_TCAM_KEY_SZ])
3823 {
3824 	struct ice_prof_id_key inkey;
3825 
3826 	inkey.xlt1 = ptg;
3827 	inkey.xlt2_cdid = cpu_to_le16(vsig);
3828 	inkey.flags = cpu_to_le16(flags);
3829 
3830 	switch (hw->blk[blk].prof.cdid_bits) {
3831 	case 0:
3832 		break;
3833 	case 2:
3834 #define ICE_CD_2_M 0xC000U
3835 #define ICE_CD_2_S 14
3836 		inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_2_M);
3837 		inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_2_S);
3838 		break;
3839 	case 4:
3840 #define ICE_CD_4_M 0xF000U
3841 #define ICE_CD_4_S 12
3842 		inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_4_M);
3843 		inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_4_S);
3844 		break;
3845 	case 8:
3846 #define ICE_CD_8_M 0xFF00U
3847 #define ICE_CD_8_S 16
3848 		inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_8_M);
3849 		inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_8_S);
3850 		break;
3851 	default:
3852 		ice_debug(hw, ICE_DBG_PKG, "Error in profile config\n");
3853 		break;
3854 	}
3855 
3856 	return ice_set_key(key, ICE_TCAM_KEY_SZ, (u8 *)&inkey, vl_msk, dc_msk,
3857 			   nm_msk, 0, ICE_TCAM_KEY_SZ / 2);
3858 }
3859 
3860 /**
3861  * ice_tcam_write_entry - write TCAM entry
3862  * @hw: pointer to the HW struct
3863  * @blk: the block in which to write profile ID to
3864  * @idx: the entry index to write to
3865  * @prof_id: profile ID
3866  * @ptg: packet type group (PTG) portion of key
3867  * @vsig: VSIG portion of key
3868  * @cdid: CDID portion of key
3869  * @flags: flag portion of key
3870  * @vl_msk: valid mask
3871  * @dc_msk: don't care mask
3872  * @nm_msk: never match mask
3873  */
3874 static enum ice_status
3875 ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
3876 		     u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags,
3877 		     u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
3878 		     u8 dc_msk[ICE_TCAM_KEY_VAL_SZ],
3879 		     u8 nm_msk[ICE_TCAM_KEY_VAL_SZ])
3880 {
3881 	struct ice_prof_tcam_entry;
3882 	enum ice_status status;
3883 
3884 	status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk,
3885 				  dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key);
3886 	if (!status) {
3887 		hw->blk[blk].prof.t[idx].addr = cpu_to_le16(idx);
3888 		hw->blk[blk].prof.t[idx].prof_id = prof_id;
3889 	}
3890 
3891 	return status;
3892 }
3893 
3894 /**
3895  * ice_vsig_get_ref - returns number of VSIs belong to a VSIG
3896  * @hw: pointer to the hardware structure
3897  * @blk: HW block
3898  * @vsig: VSIG to query
3899  * @refs: pointer to variable to receive the reference count
3900  */
3901 static enum ice_status
3902 ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
3903 {
3904 	u16 idx = vsig & ICE_VSIG_IDX_M;
3905 	struct ice_vsig_vsi *ptr;
3906 
3907 	*refs = 0;
3908 
3909 	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
3910 		return ICE_ERR_DOES_NOT_EXIST;
3911 
3912 	ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
3913 	while (ptr) {
3914 		(*refs)++;
3915 		ptr = ptr->next_vsi;
3916 	}
3917 
3918 	return 0;
3919 }
3920 
3921 /**
3922  * ice_has_prof_vsig - check to see if VSIG has a specific profile
3923  * @hw: pointer to the hardware structure
3924  * @blk: HW block
3925  * @vsig: VSIG to check against
3926  * @hdl: profile handle
3927  */
3928 static bool
3929 ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl)
3930 {
3931 	u16 idx = vsig & ICE_VSIG_IDX_M;
3932 	struct ice_vsig_prof *ent;
3933 
3934 	list_for_each_entry(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
3935 			    list)
3936 		if (ent->profile_cookie == hdl)
3937 			return true;
3938 
3939 	ice_debug(hw, ICE_DBG_INIT, "Characteristic list for VSI group %d not found.\n",
3940 		  vsig);
3941 	return false;
3942 }
3943 
3944 /**
3945  * ice_prof_bld_es - build profile ID extraction sequence changes
3946  * @hw: pointer to the HW struct
3947  * @blk: hardware block
3948  * @bld: the update package buffer build to add to
3949  * @chgs: the list of changes to make in hardware
3950  */
3951 static enum ice_status
3952 ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
3953 		struct ice_buf_build *bld, struct list_head *chgs)
3954 {
3955 	u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word);
3956 	struct ice_chs_chg *tmp;
3957 
3958 	list_for_each_entry(tmp, chgs, list_entry)
3959 		if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) {
3960 			u16 off = tmp->prof_id * hw->blk[blk].es.fvw;
3961 			struct ice_pkg_es *p;
3962 			u32 id;
3963 
3964 			id = ice_sect_id(blk, ICE_VEC_TBL);
3965 			p = ice_pkg_buf_alloc_section(bld, id,
3966 						      struct_size(p, es, 1) +
3967 						      vec_size -
3968 						      sizeof(p->es[0]));
3969 
3970 			if (!p)
3971 				return ICE_ERR_MAX_LIMIT;
3972 
3973 			p->count = cpu_to_le16(1);
3974 			p->offset = cpu_to_le16(tmp->prof_id);
3975 
3976 			memcpy(p->es, &hw->blk[blk].es.t[off], vec_size);
3977 		}
3978 
3979 	return 0;
3980 }
3981 
3982 /**
3983  * ice_prof_bld_tcam - build profile ID TCAM changes
3984  * @hw: pointer to the HW struct
3985  * @blk: hardware block
3986  * @bld: the update package buffer build to add to
3987  * @chgs: the list of changes to make in hardware
3988  */
3989 static enum ice_status
3990 ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
3991 		  struct ice_buf_build *bld, struct list_head *chgs)
3992 {
3993 	struct ice_chs_chg *tmp;
3994 
3995 	list_for_each_entry(tmp, chgs, list_entry)
3996 		if (tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) {
3997 			struct ice_prof_id_section *p;
3998 			u32 id;
3999 
4000 			id = ice_sect_id(blk, ICE_PROF_TCAM);
4001 			p = ice_pkg_buf_alloc_section(bld, id,
4002 						      struct_size(p, entry, 1));
4003 
4004 			if (!p)
4005 				return ICE_ERR_MAX_LIMIT;
4006 
4007 			p->count = cpu_to_le16(1);
4008 			p->entry[0].addr = cpu_to_le16(tmp->tcam_idx);
4009 			p->entry[0].prof_id = tmp->prof_id;
4010 
4011 			memcpy(p->entry[0].key,
4012 			       &hw->blk[blk].prof.t[tmp->tcam_idx].key,
4013 			       sizeof(hw->blk[blk].prof.t->key));
4014 		}
4015 
4016 	return 0;
4017 }
4018 
4019 /**
4020  * ice_prof_bld_xlt1 - build XLT1 changes
4021  * @blk: hardware block
4022  * @bld: the update package buffer build to add to
4023  * @chgs: the list of changes to make in hardware
4024  */
4025 static enum ice_status
4026 ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
4027 		  struct list_head *chgs)
4028 {
4029 	struct ice_chs_chg *tmp;
4030 
4031 	list_for_each_entry(tmp, chgs, list_entry)
4032 		if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) {
4033 			struct ice_xlt1_section *p;
4034 			u32 id;
4035 
4036 			id = ice_sect_id(blk, ICE_XLT1);
4037 			p = ice_pkg_buf_alloc_section(bld, id,
4038 						      struct_size(p, value, 1));
4039 
4040 			if (!p)
4041 				return ICE_ERR_MAX_LIMIT;
4042 
4043 			p->count = cpu_to_le16(1);
4044 			p->offset = cpu_to_le16(tmp->ptype);
4045 			p->value[0] = tmp->ptg;
4046 		}
4047 
4048 	return 0;
4049 }
4050 
4051 /**
4052  * ice_prof_bld_xlt2 - build XLT2 changes
4053  * @blk: hardware block
4054  * @bld: the update package buffer build to add to
4055  * @chgs: the list of changes to make in hardware
4056  */
4057 static enum ice_status
4058 ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
4059 		  struct list_head *chgs)
4060 {
4061 	struct ice_chs_chg *tmp;
4062 
4063 	list_for_each_entry(tmp, chgs, list_entry) {
4064 		struct ice_xlt2_section *p;
4065 		u32 id;
4066 
4067 		switch (tmp->type) {
4068 		case ICE_VSIG_ADD:
4069 		case ICE_VSI_MOVE:
4070 		case ICE_VSIG_REM:
4071 			id = ice_sect_id(blk, ICE_XLT2);
4072 			p = ice_pkg_buf_alloc_section(bld, id,
4073 						      struct_size(p, value, 1));
4074 
4075 			if (!p)
4076 				return ICE_ERR_MAX_LIMIT;
4077 
4078 			p->count = cpu_to_le16(1);
4079 			p->offset = cpu_to_le16(tmp->vsi);
4080 			p->value[0] = cpu_to_le16(tmp->vsig);
4081 			break;
4082 		default:
4083 			break;
4084 		}
4085 	}
4086 
4087 	return 0;
4088 }
4089 
4090 /**
4091  * ice_upd_prof_hw - update hardware using the change list
4092  * @hw: pointer to the HW struct
4093  * @blk: hardware block
4094  * @chgs: the list of changes to make in hardware
4095  */
4096 static enum ice_status
4097 ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
4098 		struct list_head *chgs)
4099 {
4100 	struct ice_buf_build *b;
4101 	struct ice_chs_chg *tmp;
4102 	enum ice_status status;
4103 	u16 pkg_sects;
4104 	u16 xlt1 = 0;
4105 	u16 xlt2 = 0;
4106 	u16 tcam = 0;
4107 	u16 es = 0;
4108 	u16 sects;
4109 
4110 	/* count number of sections we need */
4111 	list_for_each_entry(tmp, chgs, list_entry) {
4112 		switch (tmp->type) {
4113 		case ICE_PTG_ES_ADD:
4114 			if (tmp->add_ptg)
4115 				xlt1++;
4116 			if (tmp->add_prof)
4117 				es++;
4118 			break;
4119 		case ICE_TCAM_ADD:
4120 			tcam++;
4121 			break;
4122 		case ICE_VSIG_ADD:
4123 		case ICE_VSI_MOVE:
4124 		case ICE_VSIG_REM:
4125 			xlt2++;
4126 			break;
4127 		default:
4128 			break;
4129 		}
4130 	}
4131 	sects = xlt1 + xlt2 + tcam + es;
4132 
4133 	if (!sects)
4134 		return 0;
4135 
4136 	/* Build update package buffer */
4137 	b = ice_pkg_buf_alloc(hw);
4138 	if (!b)
4139 		return ICE_ERR_NO_MEMORY;
4140 
4141 	status = ice_pkg_buf_reserve_section(b, sects);
4142 	if (status)
4143 		goto error_tmp;
4144 
4145 	/* Preserve order of table update: ES, TCAM, PTG, VSIG */
4146 	if (es) {
4147 		status = ice_prof_bld_es(hw, blk, b, chgs);
4148 		if (status)
4149 			goto error_tmp;
4150 	}
4151 
4152 	if (tcam) {
4153 		status = ice_prof_bld_tcam(hw, blk, b, chgs);
4154 		if (status)
4155 			goto error_tmp;
4156 	}
4157 
4158 	if (xlt1) {
4159 		status = ice_prof_bld_xlt1(blk, b, chgs);
4160 		if (status)
4161 			goto error_tmp;
4162 	}
4163 
4164 	if (xlt2) {
4165 		status = ice_prof_bld_xlt2(blk, b, chgs);
4166 		if (status)
4167 			goto error_tmp;
4168 	}
4169 
4170 	/* After package buffer build check if the section count in buffer is
4171 	 * non-zero and matches the number of sections detected for package
4172 	 * update.
4173 	 */
4174 	pkg_sects = ice_pkg_buf_get_active_sections(b);
4175 	if (!pkg_sects || pkg_sects != sects) {
4176 		status = ICE_ERR_INVAL_SIZE;
4177 		goto error_tmp;
4178 	}
4179 
4180 	/* update package */
4181 	status = ice_update_pkg(hw, ice_pkg_buf(b), 1);
4182 	if (status == ICE_ERR_AQ_ERROR)
4183 		ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n");
4184 
4185 error_tmp:
4186 	ice_pkg_buf_free(hw, b);
4187 	return status;
4188 }
4189 
4190 /**
4191  * ice_update_fd_mask - set Flow Director Field Vector mask for a profile
4192  * @hw: pointer to the HW struct
4193  * @prof_id: profile ID
4194  * @mask_sel: mask select
4195  *
4196  * This function enable any of the masks selected by the mask select parameter
4197  * for the profile specified.
4198  */
4199 static void ice_update_fd_mask(struct ice_hw *hw, u16 prof_id, u32 mask_sel)
4200 {
4201 	wr32(hw, GLQF_FDMASK_SEL(prof_id), mask_sel);
4202 
4203 	ice_debug(hw, ICE_DBG_INIT, "fd mask(%d): %x = %x\n", prof_id,
4204 		  GLQF_FDMASK_SEL(prof_id), mask_sel);
4205 }
4206 
4207 struct ice_fd_src_dst_pair {
4208 	u8 prot_id;
4209 	u8 count;
4210 	u16 off;
4211 };
4212 
4213 static const struct ice_fd_src_dst_pair ice_fd_pairs[] = {
4214 	/* These are defined in pairs */
4215 	{ ICE_PROT_IPV4_OF_OR_S, 2, 12 },
4216 	{ ICE_PROT_IPV4_OF_OR_S, 2, 16 },
4217 
4218 	{ ICE_PROT_IPV4_IL, 2, 12 },
4219 	{ ICE_PROT_IPV4_IL, 2, 16 },
4220 
4221 	{ ICE_PROT_IPV6_OF_OR_S, 8, 8 },
4222 	{ ICE_PROT_IPV6_OF_OR_S, 8, 24 },
4223 
4224 	{ ICE_PROT_IPV6_IL, 8, 8 },
4225 	{ ICE_PROT_IPV6_IL, 8, 24 },
4226 
4227 	{ ICE_PROT_TCP_IL, 1, 0 },
4228 	{ ICE_PROT_TCP_IL, 1, 2 },
4229 
4230 	{ ICE_PROT_UDP_OF, 1, 0 },
4231 	{ ICE_PROT_UDP_OF, 1, 2 },
4232 
4233 	{ ICE_PROT_UDP_IL_OR_S, 1, 0 },
4234 	{ ICE_PROT_UDP_IL_OR_S, 1, 2 },
4235 
4236 	{ ICE_PROT_SCTP_IL, 1, 0 },
4237 	{ ICE_PROT_SCTP_IL, 1, 2 }
4238 };
4239 
4240 #define ICE_FD_SRC_DST_PAIR_COUNT	ARRAY_SIZE(ice_fd_pairs)
4241 
4242 /**
4243  * ice_update_fd_swap - set register appropriately for a FD FV extraction
4244  * @hw: pointer to the HW struct
4245  * @prof_id: profile ID
4246  * @es: extraction sequence (length of array is determined by the block)
4247  */
4248 static enum ice_status
4249 ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
4250 {
4251 	DECLARE_BITMAP(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
4252 	u8 pair_start[ICE_FD_SRC_DST_PAIR_COUNT] = { 0 };
4253 #define ICE_FD_FV_NOT_FOUND (-2)
4254 	s8 first_free = ICE_FD_FV_NOT_FOUND;
4255 	u8 used[ICE_MAX_FV_WORDS] = { 0 };
4256 	s8 orig_free, si;
4257 	u32 mask_sel = 0;
4258 	u8 i, j, k;
4259 
4260 	bitmap_zero(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
4261 
4262 	/* This code assumes that the Flow Director field vectors are assigned
4263 	 * from the end of the FV indexes working towards the zero index, that
4264 	 * only complete fields will be included and will be consecutive, and
4265 	 * that there are no gaps between valid indexes.
4266 	 */
4267 
4268 	/* Determine swap fields present */
4269 	for (i = 0; i < hw->blk[ICE_BLK_FD].es.fvw; i++) {
4270 		/* Find the first free entry, assuming right to left population.
4271 		 * This is where we can start adding additional pairs if needed.
4272 		 */
4273 		if (first_free == ICE_FD_FV_NOT_FOUND && es[i].prot_id !=
4274 		    ICE_PROT_INVALID)
4275 			first_free = i - 1;
4276 
4277 		for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
4278 			if (es[i].prot_id == ice_fd_pairs[j].prot_id &&
4279 			    es[i].off == ice_fd_pairs[j].off) {
4280 				set_bit(j, pair_list);
4281 				pair_start[j] = i;
4282 			}
4283 	}
4284 
4285 	orig_free = first_free;
4286 
4287 	/* determine missing swap fields that need to be added */
4288 	for (i = 0; i < ICE_FD_SRC_DST_PAIR_COUNT; i += 2) {
4289 		u8 bit1 = test_bit(i + 1, pair_list);
4290 		u8 bit0 = test_bit(i, pair_list);
4291 
4292 		if (bit0 ^ bit1) {
4293 			u8 index;
4294 
4295 			/* add the appropriate 'paired' entry */
4296 			if (!bit0)
4297 				index = i;
4298 			else
4299 				index = i + 1;
4300 
4301 			/* check for room */
4302 			if (first_free + 1 < (s8)ice_fd_pairs[index].count)
4303 				return ICE_ERR_MAX_LIMIT;
4304 
4305 			/* place in extraction sequence */
4306 			for (k = 0; k < ice_fd_pairs[index].count; k++) {
4307 				es[first_free - k].prot_id =
4308 					ice_fd_pairs[index].prot_id;
4309 				es[first_free - k].off =
4310 					ice_fd_pairs[index].off + (k * 2);
4311 
4312 				if (k > first_free)
4313 					return ICE_ERR_OUT_OF_RANGE;
4314 
4315 				/* keep track of non-relevant fields */
4316 				mask_sel |= BIT(first_free - k);
4317 			}
4318 
4319 			pair_start[index] = first_free;
4320 			first_free -= ice_fd_pairs[index].count;
4321 		}
4322 	}
4323 
4324 	/* fill in the swap array */
4325 	si = hw->blk[ICE_BLK_FD].es.fvw - 1;
4326 	while (si >= 0) {
4327 		u8 indexes_used = 1;
4328 
4329 		/* assume flat at this index */
4330 #define ICE_SWAP_VALID	0x80
4331 		used[si] = si | ICE_SWAP_VALID;
4332 
4333 		if (orig_free == ICE_FD_FV_NOT_FOUND || si <= orig_free) {
4334 			si -= indexes_used;
4335 			continue;
4336 		}
4337 
4338 		/* check for a swap location */
4339 		for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
4340 			if (es[si].prot_id == ice_fd_pairs[j].prot_id &&
4341 			    es[si].off == ice_fd_pairs[j].off) {
4342 				u8 idx;
4343 
4344 				/* determine the appropriate matching field */
4345 				idx = j + ((j % 2) ? -1 : 1);
4346 
4347 				indexes_used = ice_fd_pairs[idx].count;
4348 				for (k = 0; k < indexes_used; k++) {
4349 					used[si - k] = (pair_start[idx] - k) |
4350 						ICE_SWAP_VALID;
4351 				}
4352 
4353 				break;
4354 			}
4355 
4356 		si -= indexes_used;
4357 	}
4358 
4359 	/* for each set of 4 swap and 4 inset indexes, write the appropriate
4360 	 * register
4361 	 */
4362 	for (j = 0; j < hw->blk[ICE_BLK_FD].es.fvw / 4; j++) {
4363 		u32 raw_swap = 0;
4364 		u32 raw_in = 0;
4365 
4366 		for (k = 0; k < 4; k++) {
4367 			u8 idx;
4368 
4369 			idx = (j * 4) + k;
4370 			if (used[idx] && !(mask_sel & BIT(idx))) {
4371 				raw_swap |= used[idx] << (k * BITS_PER_BYTE);
4372 #define ICE_INSET_DFLT 0x9f
4373 				raw_in |= ICE_INSET_DFLT << (k * BITS_PER_BYTE);
4374 			}
4375 		}
4376 
4377 		/* write the appropriate swap register set */
4378 		wr32(hw, GLQF_FDSWAP(prof_id, j), raw_swap);
4379 
4380 		ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): %x = %08x\n",
4381 			  prof_id, j, GLQF_FDSWAP(prof_id, j), raw_swap);
4382 
4383 		/* write the appropriate inset register set */
4384 		wr32(hw, GLQF_FDINSET(prof_id, j), raw_in);
4385 
4386 		ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): %x = %08x\n",
4387 			  prof_id, j, GLQF_FDINSET(prof_id, j), raw_in);
4388 	}
4389 
4390 	/* initially clear the mask select for this profile */
4391 	ice_update_fd_mask(hw, prof_id, 0);
4392 
4393 	return 0;
4394 }
4395 
4396 /* The entries here needs to match the order of enum ice_ptype_attrib */
4397 static const struct ice_ptype_attrib_info ice_ptype_attributes[] = {
4398 	{ ICE_GTP_PDU_EH,	ICE_GTP_PDU_FLAG_MASK },
4399 	{ ICE_GTP_SESSION,	ICE_GTP_FLAGS_MASK },
4400 	{ ICE_GTP_DOWNLINK,	ICE_GTP_FLAGS_MASK },
4401 	{ ICE_GTP_UPLINK,	ICE_GTP_FLAGS_MASK },
4402 };
4403 
4404 /**
4405  * ice_get_ptype_attrib_info - get PTYPE attribute information
4406  * @type: attribute type
4407  * @info: pointer to variable to the attribute information
4408  */
4409 static void
4410 ice_get_ptype_attrib_info(enum ice_ptype_attrib_type type,
4411 			  struct ice_ptype_attrib_info *info)
4412 {
4413 	*info = ice_ptype_attributes[type];
4414 }
4415 
4416 /**
4417  * ice_add_prof_attrib - add any PTG with attributes to profile
4418  * @prof: pointer to the profile to which PTG entries will be added
4419  * @ptg: PTG to be added
4420  * @ptype: PTYPE that needs to be looked up
4421  * @attr: array of attributes that will be considered
4422  * @attr_cnt: number of elements in the attribute array
4423  */
4424 static enum ice_status
4425 ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
4426 		    const struct ice_ptype_attributes *attr, u16 attr_cnt)
4427 {
4428 	bool found = false;
4429 	u16 i;
4430 
4431 	for (i = 0; i < attr_cnt; i++)
4432 		if (attr[i].ptype == ptype) {
4433 			found = true;
4434 
4435 			prof->ptg[prof->ptg_cnt] = ptg;
4436 			ice_get_ptype_attrib_info(attr[i].attrib,
4437 						  &prof->attr[prof->ptg_cnt]);
4438 
4439 			if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
4440 				return ICE_ERR_MAX_LIMIT;
4441 		}
4442 
4443 	if (!found)
4444 		return ICE_ERR_DOES_NOT_EXIST;
4445 
4446 	return 0;
4447 }
4448 
4449 /**
4450  * ice_add_prof - add profile
4451  * @hw: pointer to the HW struct
4452  * @blk: hardware block
4453  * @id: profile tracking ID
4454  * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
4455  * @attr: array of attributes
4456  * @attr_cnt: number of elements in attr array
4457  * @es: extraction sequence (length of array is determined by the block)
4458  * @masks: mask for extraction sequence
4459  *
4460  * This function registers a profile, which matches a set of PTYPES with a
4461  * particular extraction sequence. While the hardware profile is allocated
4462  * it will not be written until the first call to ice_add_flow that specifies
4463  * the ID value used here.
4464  */
4465 enum ice_status
4466 ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
4467 	     const struct ice_ptype_attributes *attr, u16 attr_cnt,
4468 	     struct ice_fv_word *es, u16 *masks)
4469 {
4470 	u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
4471 	DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
4472 	struct ice_prof_map *prof;
4473 	enum ice_status status;
4474 	u8 byte = 0;
4475 	u8 prof_id;
4476 
4477 	bitmap_zero(ptgs_used, ICE_XLT1_CNT);
4478 
4479 	mutex_lock(&hw->blk[blk].es.prof_map_lock);
4480 
4481 	/* search for existing profile */
4482 	status = ice_find_prof_id_with_mask(hw, blk, es, masks, &prof_id);
4483 	if (status) {
4484 		/* allocate profile ID */
4485 		status = ice_alloc_prof_id(hw, blk, &prof_id);
4486 		if (status)
4487 			goto err_ice_add_prof;
4488 		if (blk == ICE_BLK_FD) {
4489 			/* For Flow Director block, the extraction sequence may
4490 			 * need to be altered in the case where there are paired
4491 			 * fields that have no match. This is necessary because
4492 			 * for Flow Director, src and dest fields need to paired
4493 			 * for filter programming and these values are swapped
4494 			 * during Tx.
4495 			 */
4496 			status = ice_update_fd_swap(hw, prof_id, es);
4497 			if (status)
4498 				goto err_ice_add_prof;
4499 		}
4500 		status = ice_update_prof_masking(hw, blk, prof_id, masks);
4501 		if (status)
4502 			goto err_ice_add_prof;
4503 
4504 		/* and write new es */
4505 		ice_write_es(hw, blk, prof_id, es);
4506 	}
4507 
4508 	ice_prof_inc_ref(hw, blk, prof_id);
4509 
4510 	/* add profile info */
4511 	prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*prof), GFP_KERNEL);
4512 	if (!prof) {
4513 		status = ICE_ERR_NO_MEMORY;
4514 		goto err_ice_add_prof;
4515 	}
4516 
4517 	prof->profile_cookie = id;
4518 	prof->prof_id = prof_id;
4519 	prof->ptg_cnt = 0;
4520 	prof->context = 0;
4521 
4522 	/* build list of ptgs */
4523 	while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
4524 		u8 bit;
4525 
4526 		if (!ptypes[byte]) {
4527 			bytes--;
4528 			byte++;
4529 			continue;
4530 		}
4531 
4532 		/* Examine 8 bits per byte */
4533 		for_each_set_bit(bit, (unsigned long *)&ptypes[byte],
4534 				 BITS_PER_BYTE) {
4535 			u16 ptype;
4536 			u8 ptg;
4537 
4538 			ptype = byte * BITS_PER_BYTE + bit;
4539 
4540 			/* The package should place all ptypes in a non-zero
4541 			 * PTG, so the following call should never fail.
4542 			 */
4543 			if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
4544 				continue;
4545 
4546 			/* If PTG is already added, skip and continue */
4547 			if (test_bit(ptg, ptgs_used))
4548 				continue;
4549 
4550 			set_bit(ptg, ptgs_used);
4551 			/* Check to see there are any attributes for
4552 			 * this PTYPE, and add them if found.
4553 			 */
4554 			status = ice_add_prof_attrib(prof, ptg, ptype,
4555 						     attr, attr_cnt);
4556 			if (status == ICE_ERR_MAX_LIMIT)
4557 				break;
4558 			if (status) {
4559 				/* This is simple a PTYPE/PTG with no
4560 				 * attribute
4561 				 */
4562 				prof->ptg[prof->ptg_cnt] = ptg;
4563 				prof->attr[prof->ptg_cnt].flags = 0;
4564 				prof->attr[prof->ptg_cnt].mask = 0;
4565 
4566 				if (++prof->ptg_cnt >=
4567 				    ICE_MAX_PTG_PER_PROFILE)
4568 					break;
4569 			}
4570 		}
4571 
4572 		bytes--;
4573 		byte++;
4574 	}
4575 
4576 	list_add(&prof->list, &hw->blk[blk].es.prof_map);
4577 	status = 0;
4578 
4579 err_ice_add_prof:
4580 	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
4581 	return status;
4582 }
4583 
4584 /**
4585  * ice_search_prof_id - Search for a profile tracking ID
4586  * @hw: pointer to the HW struct
4587  * @blk: hardware block
4588  * @id: profile tracking ID
4589  *
4590  * This will search for a profile tracking ID which was previously added.
4591  * The profile map lock should be held before calling this function.
4592  */
4593 static struct ice_prof_map *
4594 ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
4595 {
4596 	struct ice_prof_map *entry = NULL;
4597 	struct ice_prof_map *map;
4598 
4599 	list_for_each_entry(map, &hw->blk[blk].es.prof_map, list)
4600 		if (map->profile_cookie == id) {
4601 			entry = map;
4602 			break;
4603 		}
4604 
4605 	return entry;
4606 }
4607 
4608 /**
4609  * ice_vsig_prof_id_count - count profiles in a VSIG
4610  * @hw: pointer to the HW struct
4611  * @blk: hardware block
4612  * @vsig: VSIG to remove the profile from
4613  */
4614 static u16
4615 ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig)
4616 {
4617 	u16 idx = vsig & ICE_VSIG_IDX_M, count = 0;
4618 	struct ice_vsig_prof *p;
4619 
4620 	list_for_each_entry(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4621 			    list)
4622 		count++;
4623 
4624 	return count;
4625 }
4626 
4627 /**
4628  * ice_rel_tcam_idx - release a TCAM index
4629  * @hw: pointer to the HW struct
4630  * @blk: hardware block
4631  * @idx: the index to release
4632  */
4633 static enum ice_status
4634 ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
4635 {
4636 	/* Masks to invoke a never match entry */
4637 	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
4638 	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF };
4639 	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
4640 	enum ice_status status;
4641 
4642 	/* write the TCAM entry */
4643 	status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk,
4644 				      dc_msk, nm_msk);
4645 	if (status)
4646 		return status;
4647 
4648 	/* release the TCAM entry */
4649 	status = ice_free_tcam_ent(hw, blk, idx);
4650 
4651 	return status;
4652 }
4653 
4654 /**
4655  * ice_rem_prof_id - remove one profile from a VSIG
4656  * @hw: pointer to the HW struct
4657  * @blk: hardware block
4658  * @prof: pointer to profile structure to remove
4659  */
4660 static enum ice_status
4661 ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
4662 		struct ice_vsig_prof *prof)
4663 {
4664 	enum ice_status status;
4665 	u16 i;
4666 
4667 	for (i = 0; i < prof->tcam_count; i++)
4668 		if (prof->tcam[i].in_use) {
4669 			prof->tcam[i].in_use = false;
4670 			status = ice_rel_tcam_idx(hw, blk,
4671 						  prof->tcam[i].tcam_idx);
4672 			if (status)
4673 				return ICE_ERR_HW_TABLE;
4674 		}
4675 
4676 	return 0;
4677 }
4678 
4679 /**
4680  * ice_rem_vsig - remove VSIG
4681  * @hw: pointer to the HW struct
4682  * @blk: hardware block
4683  * @vsig: the VSIG to remove
4684  * @chg: the change list
4685  */
4686 static enum ice_status
4687 ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
4688 	     struct list_head *chg)
4689 {
4690 	u16 idx = vsig & ICE_VSIG_IDX_M;
4691 	struct ice_vsig_vsi *vsi_cur;
4692 	struct ice_vsig_prof *d, *t;
4693 	enum ice_status status;
4694 
4695 	/* remove TCAM entries */
4696 	list_for_each_entry_safe(d, t,
4697 				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4698 				 list) {
4699 		status = ice_rem_prof_id(hw, blk, d);
4700 		if (status)
4701 			return status;
4702 
4703 		list_del(&d->list);
4704 		devm_kfree(ice_hw_to_dev(hw), d);
4705 	}
4706 
4707 	/* Move all VSIS associated with this VSIG to the default VSIG */
4708 	vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
4709 	/* If the VSIG has at least 1 VSI then iterate through the list
4710 	 * and remove the VSIs before deleting the group.
4711 	 */
4712 	if (vsi_cur)
4713 		do {
4714 			struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
4715 			struct ice_chs_chg *p;
4716 
4717 			p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
4718 					 GFP_KERNEL);
4719 			if (!p)
4720 				return ICE_ERR_NO_MEMORY;
4721 
4722 			p->type = ICE_VSIG_REM;
4723 			p->orig_vsig = vsig;
4724 			p->vsig = ICE_DEFAULT_VSIG;
4725 			p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis;
4726 
4727 			list_add(&p->list_entry, chg);
4728 
4729 			vsi_cur = tmp;
4730 		} while (vsi_cur);
4731 
4732 	return ice_vsig_free(hw, blk, vsig);
4733 }
4734 
4735 /**
4736  * ice_rem_prof_id_vsig - remove a specific profile from a VSIG
4737  * @hw: pointer to the HW struct
4738  * @blk: hardware block
4739  * @vsig: VSIG to remove the profile from
4740  * @hdl: profile handle indicating which profile to remove
4741  * @chg: list to receive a record of changes
4742  */
4743 static enum ice_status
4744 ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
4745 		     struct list_head *chg)
4746 {
4747 	u16 idx = vsig & ICE_VSIG_IDX_M;
4748 	struct ice_vsig_prof *p, *t;
4749 	enum ice_status status;
4750 
4751 	list_for_each_entry_safe(p, t,
4752 				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4753 				 list)
4754 		if (p->profile_cookie == hdl) {
4755 			if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
4756 				/* this is the last profile, remove the VSIG */
4757 				return ice_rem_vsig(hw, blk, vsig, chg);
4758 
4759 			status = ice_rem_prof_id(hw, blk, p);
4760 			if (!status) {
4761 				list_del(&p->list);
4762 				devm_kfree(ice_hw_to_dev(hw), p);
4763 			}
4764 			return status;
4765 		}
4766 
4767 	return ICE_ERR_DOES_NOT_EXIST;
4768 }
4769 
4770 /**
4771  * ice_rem_flow_all - remove all flows with a particular profile
4772  * @hw: pointer to the HW struct
4773  * @blk: hardware block
4774  * @id: profile tracking ID
4775  */
4776 static enum ice_status
4777 ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
4778 {
4779 	struct ice_chs_chg *del, *tmp;
4780 	enum ice_status status;
4781 	struct list_head chg;
4782 	u16 i;
4783 
4784 	INIT_LIST_HEAD(&chg);
4785 
4786 	for (i = 1; i < ICE_MAX_VSIGS; i++)
4787 		if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) {
4788 			if (ice_has_prof_vsig(hw, blk, i, id)) {
4789 				status = ice_rem_prof_id_vsig(hw, blk, i, id,
4790 							      &chg);
4791 				if (status)
4792 					goto err_ice_rem_flow_all;
4793 			}
4794 		}
4795 
4796 	status = ice_upd_prof_hw(hw, blk, &chg);
4797 
4798 err_ice_rem_flow_all:
4799 	list_for_each_entry_safe(del, tmp, &chg, list_entry) {
4800 		list_del(&del->list_entry);
4801 		devm_kfree(ice_hw_to_dev(hw), del);
4802 	}
4803 
4804 	return status;
4805 }
4806 
4807 /**
4808  * ice_rem_prof - remove profile
4809  * @hw: pointer to the HW struct
4810  * @blk: hardware block
4811  * @id: profile tracking ID
4812  *
4813  * This will remove the profile specified by the ID parameter, which was
4814  * previously created through ice_add_prof. If any existing entries
4815  * are associated with this profile, they will be removed as well.
4816  */
4817 enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
4818 {
4819 	struct ice_prof_map *pmap;
4820 	enum ice_status status;
4821 
4822 	mutex_lock(&hw->blk[blk].es.prof_map_lock);
4823 
4824 	pmap = ice_search_prof_id(hw, blk, id);
4825 	if (!pmap) {
4826 		status = ICE_ERR_DOES_NOT_EXIST;
4827 		goto err_ice_rem_prof;
4828 	}
4829 
4830 	/* remove all flows with this profile */
4831 	status = ice_rem_flow_all(hw, blk, pmap->profile_cookie);
4832 	if (status)
4833 		goto err_ice_rem_prof;
4834 
4835 	/* dereference profile, and possibly remove */
4836 	ice_prof_dec_ref(hw, blk, pmap->prof_id);
4837 
4838 	list_del(&pmap->list);
4839 	devm_kfree(ice_hw_to_dev(hw), pmap);
4840 
4841 err_ice_rem_prof:
4842 	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
4843 	return status;
4844 }
4845 
4846 /**
4847  * ice_get_prof - get profile
4848  * @hw: pointer to the HW struct
4849  * @blk: hardware block
4850  * @hdl: profile handle
4851  * @chg: change list
4852  */
4853 static enum ice_status
4854 ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
4855 	     struct list_head *chg)
4856 {
4857 	enum ice_status status = 0;
4858 	struct ice_prof_map *map;
4859 	struct ice_chs_chg *p;
4860 	u16 i;
4861 
4862 	mutex_lock(&hw->blk[blk].es.prof_map_lock);
4863 	/* Get the details on the profile specified by the handle ID */
4864 	map = ice_search_prof_id(hw, blk, hdl);
4865 	if (!map) {
4866 		status = ICE_ERR_DOES_NOT_EXIST;
4867 		goto err_ice_get_prof;
4868 	}
4869 
4870 	for (i = 0; i < map->ptg_cnt; i++)
4871 		if (!hw->blk[blk].es.written[map->prof_id]) {
4872 			/* add ES to change list */
4873 			p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
4874 					 GFP_KERNEL);
4875 			if (!p) {
4876 				status = ICE_ERR_NO_MEMORY;
4877 				goto err_ice_get_prof;
4878 			}
4879 
4880 			p->type = ICE_PTG_ES_ADD;
4881 			p->ptype = 0;
4882 			p->ptg = map->ptg[i];
4883 			p->add_ptg = 0;
4884 
4885 			p->add_prof = 1;
4886 			p->prof_id = map->prof_id;
4887 
4888 			hw->blk[blk].es.written[map->prof_id] = true;
4889 
4890 			list_add(&p->list_entry, chg);
4891 		}
4892 
4893 err_ice_get_prof:
4894 	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
4895 	/* let caller clean up the change list */
4896 	return status;
4897 }
4898 
4899 /**
4900  * ice_get_profs_vsig - get a copy of the list of profiles from a VSIG
4901  * @hw: pointer to the HW struct
4902  * @blk: hardware block
4903  * @vsig: VSIG from which to copy the list
4904  * @lst: output list
4905  *
4906  * This routine makes a copy of the list of profiles in the specified VSIG.
4907  */
4908 static enum ice_status
4909 ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
4910 		   struct list_head *lst)
4911 {
4912 	struct ice_vsig_prof *ent1, *ent2;
4913 	u16 idx = vsig & ICE_VSIG_IDX_M;
4914 
4915 	list_for_each_entry(ent1, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4916 			    list) {
4917 		struct ice_vsig_prof *p;
4918 
4919 		/* copy to the input list */
4920 		p = devm_kmemdup(ice_hw_to_dev(hw), ent1, sizeof(*p),
4921 				 GFP_KERNEL);
4922 		if (!p)
4923 			goto err_ice_get_profs_vsig;
4924 
4925 		list_add_tail(&p->list, lst);
4926 	}
4927 
4928 	return 0;
4929 
4930 err_ice_get_profs_vsig:
4931 	list_for_each_entry_safe(ent1, ent2, lst, list) {
4932 		list_del(&ent1->list);
4933 		devm_kfree(ice_hw_to_dev(hw), ent1);
4934 	}
4935 
4936 	return ICE_ERR_NO_MEMORY;
4937 }
4938 
4939 /**
4940  * ice_add_prof_to_lst - add profile entry to a list
4941  * @hw: pointer to the HW struct
4942  * @blk: hardware block
4943  * @lst: the list to be added to
4944  * @hdl: profile handle of entry to add
4945  */
4946 static enum ice_status
4947 ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
4948 		    struct list_head *lst, u64 hdl)
4949 {
4950 	enum ice_status status = 0;
4951 	struct ice_prof_map *map;
4952 	struct ice_vsig_prof *p;
4953 	u16 i;
4954 
4955 	mutex_lock(&hw->blk[blk].es.prof_map_lock);
4956 	map = ice_search_prof_id(hw, blk, hdl);
4957 	if (!map) {
4958 		status = ICE_ERR_DOES_NOT_EXIST;
4959 		goto err_ice_add_prof_to_lst;
4960 	}
4961 
4962 	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
4963 	if (!p) {
4964 		status = ICE_ERR_NO_MEMORY;
4965 		goto err_ice_add_prof_to_lst;
4966 	}
4967 
4968 	p->profile_cookie = map->profile_cookie;
4969 	p->prof_id = map->prof_id;
4970 	p->tcam_count = map->ptg_cnt;
4971 
4972 	for (i = 0; i < map->ptg_cnt; i++) {
4973 		p->tcam[i].prof_id = map->prof_id;
4974 		p->tcam[i].tcam_idx = ICE_INVALID_TCAM;
4975 		p->tcam[i].ptg = map->ptg[i];
4976 	}
4977 
4978 	list_add(&p->list, lst);
4979 
4980 err_ice_add_prof_to_lst:
4981 	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
4982 	return status;
4983 }
4984 
4985 /**
4986  * ice_move_vsi - move VSI to another VSIG
4987  * @hw: pointer to the HW struct
4988  * @blk: hardware block
4989  * @vsi: the VSI to move
4990  * @vsig: the VSIG to move the VSI to
4991  * @chg: the change list
4992  */
4993 static enum ice_status
4994 ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
4995 	     struct list_head *chg)
4996 {
4997 	enum ice_status status;
4998 	struct ice_chs_chg *p;
4999 	u16 orig_vsig;
5000 
5001 	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
5002 	if (!p)
5003 		return ICE_ERR_NO_MEMORY;
5004 
5005 	status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
5006 	if (!status)
5007 		status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
5008 
5009 	if (status) {
5010 		devm_kfree(ice_hw_to_dev(hw), p);
5011 		return status;
5012 	}
5013 
5014 	p->type = ICE_VSI_MOVE;
5015 	p->vsi = vsi;
5016 	p->orig_vsig = orig_vsig;
5017 	p->vsig = vsig;
5018 
5019 	list_add(&p->list_entry, chg);
5020 
5021 	return 0;
5022 }
5023 
5024 /**
5025  * ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list
5026  * @hw: pointer to the HW struct
5027  * @idx: the index of the TCAM entry to remove
5028  * @chg: the list of change structures to search
5029  */
5030 static void
5031 ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct list_head *chg)
5032 {
5033 	struct ice_chs_chg *pos, *tmp;
5034 
5035 	list_for_each_entry_safe(tmp, pos, chg, list_entry)
5036 		if (tmp->type == ICE_TCAM_ADD && tmp->tcam_idx == idx) {
5037 			list_del(&tmp->list_entry);
5038 			devm_kfree(ice_hw_to_dev(hw), tmp);
5039 		}
5040 }
5041 
5042 /**
5043  * ice_prof_tcam_ena_dis - add enable or disable TCAM change
5044  * @hw: pointer to the HW struct
5045  * @blk: hardware block
5046  * @enable: true to enable, false to disable
5047  * @vsig: the VSIG of the TCAM entry
5048  * @tcam: pointer the TCAM info structure of the TCAM to disable
5049  * @chg: the change list
5050  *
5051  * This function appends an enable or disable TCAM entry in the change log
5052  */
5053 static enum ice_status
5054 ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
5055 		      u16 vsig, struct ice_tcam_inf *tcam,
5056 		      struct list_head *chg)
5057 {
5058 	enum ice_status status;
5059 	struct ice_chs_chg *p;
5060 
5061 	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
5062 	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
5063 	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
5064 
5065 	/* if disabling, free the TCAM */
5066 	if (!enable) {
5067 		status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx);
5068 
5069 		/* if we have already created a change for this TCAM entry, then
5070 		 * we need to remove that entry, in order to prevent writing to
5071 		 * a TCAM entry we no longer will have ownership of.
5072 		 */
5073 		ice_rem_chg_tcam_ent(hw, tcam->tcam_idx, chg);
5074 		tcam->tcam_idx = 0;
5075 		tcam->in_use = 0;
5076 		return status;
5077 	}
5078 
5079 	/* for re-enabling, reallocate a TCAM */
5080 	/* for entries with empty attribute masks, allocate entry from
5081 	 * the bottom of the TCAM table; otherwise, allocate from the
5082 	 * top of the table in order to give it higher priority
5083 	 */
5084 	status = ice_alloc_tcam_ent(hw, blk, tcam->attr.mask == 0,
5085 				    &tcam->tcam_idx);
5086 	if (status)
5087 		return status;
5088 
5089 	/* add TCAM to change list */
5090 	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
5091 	if (!p)
5092 		return ICE_ERR_NO_MEMORY;
5093 
5094 	status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
5095 				      tcam->ptg, vsig, 0, tcam->attr.flags,
5096 				      vl_msk, dc_msk, nm_msk);
5097 	if (status)
5098 		goto err_ice_prof_tcam_ena_dis;
5099 
5100 	tcam->in_use = 1;
5101 
5102 	p->type = ICE_TCAM_ADD;
5103 	p->add_tcam_idx = true;
5104 	p->prof_id = tcam->prof_id;
5105 	p->ptg = tcam->ptg;
5106 	p->vsig = 0;
5107 	p->tcam_idx = tcam->tcam_idx;
5108 
5109 	/* log change */
5110 	list_add(&p->list_entry, chg);
5111 
5112 	return 0;
5113 
5114 err_ice_prof_tcam_ena_dis:
5115 	devm_kfree(ice_hw_to_dev(hw), p);
5116 	return status;
5117 }
5118 
5119 /**
5120  * ice_adj_prof_priorities - adjust profile based on priorities
5121  * @hw: pointer to the HW struct
5122  * @blk: hardware block
5123  * @vsig: the VSIG for which to adjust profile priorities
5124  * @chg: the change list
5125  */
5126 static enum ice_status
5127 ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
5128 			struct list_head *chg)
5129 {
5130 	DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
5131 	struct ice_vsig_prof *t;
5132 	enum ice_status status;
5133 	u16 idx;
5134 
5135 	bitmap_zero(ptgs_used, ICE_XLT1_CNT);
5136 	idx = vsig & ICE_VSIG_IDX_M;
5137 
5138 	/* Priority is based on the order in which the profiles are added. The
5139 	 * newest added profile has highest priority and the oldest added
5140 	 * profile has the lowest priority. Since the profile property list for
5141 	 * a VSIG is sorted from newest to oldest, this code traverses the list
5142 	 * in order and enables the first of each PTG that it finds (that is not
5143 	 * already enabled); it also disables any duplicate PTGs that it finds
5144 	 * in the older profiles (that are currently enabled).
5145 	 */
5146 
5147 	list_for_each_entry(t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
5148 			    list) {
5149 		u16 i;
5150 
5151 		for (i = 0; i < t->tcam_count; i++) {
5152 			/* Scan the priorities from newest to oldest.
5153 			 * Make sure that the newest profiles take priority.
5154 			 */
5155 			if (test_bit(t->tcam[i].ptg, ptgs_used) &&
5156 			    t->tcam[i].in_use) {
5157 				/* need to mark this PTG as never match, as it
5158 				 * was already in use and therefore duplicate
5159 				 * (and lower priority)
5160 				 */
5161 				status = ice_prof_tcam_ena_dis(hw, blk, false,
5162 							       vsig,
5163 							       &t->tcam[i],
5164 							       chg);
5165 				if (status)
5166 					return status;
5167 			} else if (!test_bit(t->tcam[i].ptg, ptgs_used) &&
5168 				   !t->tcam[i].in_use) {
5169 				/* need to enable this PTG, as it in not in use
5170 				 * and not enabled (highest priority)
5171 				 */
5172 				status = ice_prof_tcam_ena_dis(hw, blk, true,
5173 							       vsig,
5174 							       &t->tcam[i],
5175 							       chg);
5176 				if (status)
5177 					return status;
5178 			}
5179 
5180 			/* keep track of used ptgs */
5181 			set_bit(t->tcam[i].ptg, ptgs_used);
5182 		}
5183 	}
5184 
5185 	return 0;
5186 }
5187 
5188 /**
5189  * ice_add_prof_id_vsig - add profile to VSIG
5190  * @hw: pointer to the HW struct
5191  * @blk: hardware block
5192  * @vsig: the VSIG to which this profile is to be added
5193  * @hdl: the profile handle indicating the profile to add
5194  * @rev: true to add entries to the end of the list
5195  * @chg: the change list
5196  */
5197 static enum ice_status
5198 ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
5199 		     bool rev, struct list_head *chg)
5200 {
5201 	/* Masks that ignore flags */
5202 	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
5203 	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
5204 	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
5205 	enum ice_status status = 0;
5206 	struct ice_prof_map *map;
5207 	struct ice_vsig_prof *t;
5208 	struct ice_chs_chg *p;
5209 	u16 vsig_idx, i;
5210 
5211 	/* Error, if this VSIG already has this profile */
5212 	if (ice_has_prof_vsig(hw, blk, vsig, hdl))
5213 		return ICE_ERR_ALREADY_EXISTS;
5214 
5215 	/* new VSIG profile structure */
5216 	t = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*t), GFP_KERNEL);
5217 	if (!t)
5218 		return ICE_ERR_NO_MEMORY;
5219 
5220 	mutex_lock(&hw->blk[blk].es.prof_map_lock);
5221 	/* Get the details on the profile specified by the handle ID */
5222 	map = ice_search_prof_id(hw, blk, hdl);
5223 	if (!map) {
5224 		status = ICE_ERR_DOES_NOT_EXIST;
5225 		goto err_ice_add_prof_id_vsig;
5226 	}
5227 
5228 	t->profile_cookie = map->profile_cookie;
5229 	t->prof_id = map->prof_id;
5230 	t->tcam_count = map->ptg_cnt;
5231 
5232 	/* create TCAM entries */
5233 	for (i = 0; i < map->ptg_cnt; i++) {
5234 		u16 tcam_idx;
5235 
5236 		/* add TCAM to change list */
5237 		p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
5238 		if (!p) {
5239 			status = ICE_ERR_NO_MEMORY;
5240 			goto err_ice_add_prof_id_vsig;
5241 		}
5242 
5243 		/* allocate the TCAM entry index */
5244 		/* for entries with empty attribute masks, allocate entry from
5245 		 * the bottom of the TCAM table; otherwise, allocate from the
5246 		 * top of the table in order to give it higher priority
5247 		 */
5248 		status = ice_alloc_tcam_ent(hw, blk, map->attr[i].mask == 0,
5249 					    &tcam_idx);
5250 		if (status) {
5251 			devm_kfree(ice_hw_to_dev(hw), p);
5252 			goto err_ice_add_prof_id_vsig;
5253 		}
5254 
5255 		t->tcam[i].ptg = map->ptg[i];
5256 		t->tcam[i].prof_id = map->prof_id;
5257 		t->tcam[i].tcam_idx = tcam_idx;
5258 		t->tcam[i].attr = map->attr[i];
5259 		t->tcam[i].in_use = true;
5260 
5261 		p->type = ICE_TCAM_ADD;
5262 		p->add_tcam_idx = true;
5263 		p->prof_id = t->tcam[i].prof_id;
5264 		p->ptg = t->tcam[i].ptg;
5265 		p->vsig = vsig;
5266 		p->tcam_idx = t->tcam[i].tcam_idx;
5267 
5268 		/* write the TCAM entry */
5269 		status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx,
5270 					      t->tcam[i].prof_id,
5271 					      t->tcam[i].ptg, vsig, 0, 0,
5272 					      vl_msk, dc_msk, nm_msk);
5273 		if (status) {
5274 			devm_kfree(ice_hw_to_dev(hw), p);
5275 			goto err_ice_add_prof_id_vsig;
5276 		}
5277 
5278 		/* log change */
5279 		list_add(&p->list_entry, chg);
5280 	}
5281 
5282 	/* add profile to VSIG */
5283 	vsig_idx = vsig & ICE_VSIG_IDX_M;
5284 	if (rev)
5285 		list_add_tail(&t->list,
5286 			      &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
5287 	else
5288 		list_add(&t->list,
5289 			 &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
5290 
5291 	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
5292 	return status;
5293 
5294 err_ice_add_prof_id_vsig:
5295 	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
5296 	/* let caller clean up the change list */
5297 	devm_kfree(ice_hw_to_dev(hw), t);
5298 	return status;
5299 }
5300 
5301 /**
5302  * ice_create_prof_id_vsig - add a new VSIG with a single profile
5303  * @hw: pointer to the HW struct
5304  * @blk: hardware block
5305  * @vsi: the initial VSI that will be in VSIG
5306  * @hdl: the profile handle of the profile that will be added to the VSIG
5307  * @chg: the change list
5308  */
5309 static enum ice_status
5310 ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
5311 			struct list_head *chg)
5312 {
5313 	enum ice_status status;
5314 	struct ice_chs_chg *p;
5315 	u16 new_vsig;
5316 
5317 	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
5318 	if (!p)
5319 		return ICE_ERR_NO_MEMORY;
5320 
5321 	new_vsig = ice_vsig_alloc(hw, blk);
5322 	if (!new_vsig) {
5323 		status = ICE_ERR_HW_TABLE;
5324 		goto err_ice_create_prof_id_vsig;
5325 	}
5326 
5327 	status = ice_move_vsi(hw, blk, vsi, new_vsig, chg);
5328 	if (status)
5329 		goto err_ice_create_prof_id_vsig;
5330 
5331 	status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg);
5332 	if (status)
5333 		goto err_ice_create_prof_id_vsig;
5334 
5335 	p->type = ICE_VSIG_ADD;
5336 	p->vsi = vsi;
5337 	p->orig_vsig = ICE_DEFAULT_VSIG;
5338 	p->vsig = new_vsig;
5339 
5340 	list_add(&p->list_entry, chg);
5341 
5342 	return 0;
5343 
5344 err_ice_create_prof_id_vsig:
5345 	/* let caller clean up the change list */
5346 	devm_kfree(ice_hw_to_dev(hw), p);
5347 	return status;
5348 }
5349 
5350 /**
5351  * ice_create_vsig_from_lst - create a new VSIG with a list of profiles
5352  * @hw: pointer to the HW struct
5353  * @blk: hardware block
5354  * @vsi: the initial VSI that will be in VSIG
5355  * @lst: the list of profile that will be added to the VSIG
5356  * @new_vsig: return of new VSIG
5357  * @chg: the change list
5358  */
5359 static enum ice_status
5360 ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
5361 			 struct list_head *lst, u16 *new_vsig,
5362 			 struct list_head *chg)
5363 {
5364 	struct ice_vsig_prof *t;
5365 	enum ice_status status;
5366 	u16 vsig;
5367 
5368 	vsig = ice_vsig_alloc(hw, blk);
5369 	if (!vsig)
5370 		return ICE_ERR_HW_TABLE;
5371 
5372 	status = ice_move_vsi(hw, blk, vsi, vsig, chg);
5373 	if (status)
5374 		return status;
5375 
5376 	list_for_each_entry(t, lst, list) {
5377 		/* Reverse the order here since we are copying the list */
5378 		status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
5379 					      true, chg);
5380 		if (status)
5381 			return status;
5382 	}
5383 
5384 	*new_vsig = vsig;
5385 
5386 	return 0;
5387 }
5388 
5389 /**
5390  * ice_find_prof_vsig - find a VSIG with a specific profile handle
5391  * @hw: pointer to the HW struct
5392  * @blk: hardware block
5393  * @hdl: the profile handle of the profile to search for
5394  * @vsig: returns the VSIG with the matching profile
5395  */
5396 static bool
5397 ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
5398 {
5399 	struct ice_vsig_prof *t;
5400 	enum ice_status status;
5401 	struct list_head lst;
5402 
5403 	INIT_LIST_HEAD(&lst);
5404 
5405 	t = kzalloc(sizeof(*t), GFP_KERNEL);
5406 	if (!t)
5407 		return false;
5408 
5409 	t->profile_cookie = hdl;
5410 	list_add(&t->list, &lst);
5411 
5412 	status = ice_find_dup_props_vsig(hw, blk, &lst, vsig);
5413 
5414 	list_del(&t->list);
5415 	kfree(t);
5416 
5417 	return !status;
5418 }
5419 
5420 /**
5421  * ice_add_prof_id_flow - add profile flow
5422  * @hw: pointer to the HW struct
5423  * @blk: hardware block
5424  * @vsi: the VSI to enable with the profile specified by ID
5425  * @hdl: profile handle
5426  *
5427  * Calling this function will update the hardware tables to enable the
5428  * profile indicated by the ID parameter for the VSIs specified in the VSI
5429  * array. Once successfully called, the flow will be enabled.
5430  */
5431 enum ice_status
5432 ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
5433 {
5434 	struct ice_vsig_prof *tmp1, *del1;
5435 	struct ice_chs_chg *tmp, *del;
5436 	struct list_head union_lst;
5437 	enum ice_status status;
5438 	struct list_head chg;
5439 	u16 vsig;
5440 
5441 	INIT_LIST_HEAD(&union_lst);
5442 	INIT_LIST_HEAD(&chg);
5443 
5444 	/* Get profile */
5445 	status = ice_get_prof(hw, blk, hdl, &chg);
5446 	if (status)
5447 		return status;
5448 
5449 	/* determine if VSI is already part of a VSIG */
5450 	status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
5451 	if (!status && vsig) {
5452 		bool only_vsi;
5453 		u16 or_vsig;
5454 		u16 ref;
5455 
5456 		/* found in VSIG */
5457 		or_vsig = vsig;
5458 
5459 		/* make sure that there is no overlap/conflict between the new
5460 		 * characteristics and the existing ones; we don't support that
5461 		 * scenario
5462 		 */
5463 		if (ice_has_prof_vsig(hw, blk, vsig, hdl)) {
5464 			status = ICE_ERR_ALREADY_EXISTS;
5465 			goto err_ice_add_prof_id_flow;
5466 		}
5467 
5468 		/* last VSI in the VSIG? */
5469 		status = ice_vsig_get_ref(hw, blk, vsig, &ref);
5470 		if (status)
5471 			goto err_ice_add_prof_id_flow;
5472 		only_vsi = (ref == 1);
5473 
5474 		/* create a union of the current profiles and the one being
5475 		 * added
5476 		 */
5477 		status = ice_get_profs_vsig(hw, blk, vsig, &union_lst);
5478 		if (status)
5479 			goto err_ice_add_prof_id_flow;
5480 
5481 		status = ice_add_prof_to_lst(hw, blk, &union_lst, hdl);
5482 		if (status)
5483 			goto err_ice_add_prof_id_flow;
5484 
5485 		/* search for an existing VSIG with an exact charc match */
5486 		status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig);
5487 		if (!status) {
5488 			/* move VSI to the VSIG that matches */
5489 			status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
5490 			if (status)
5491 				goto err_ice_add_prof_id_flow;
5492 
5493 			/* VSI has been moved out of or_vsig. If the or_vsig had
5494 			 * only that VSI it is now empty and can be removed.
5495 			 */
5496 			if (only_vsi) {
5497 				status = ice_rem_vsig(hw, blk, or_vsig, &chg);
5498 				if (status)
5499 					goto err_ice_add_prof_id_flow;
5500 			}
5501 		} else if (only_vsi) {
5502 			/* If the original VSIG only contains one VSI, then it
5503 			 * will be the requesting VSI. In this case the VSI is
5504 			 * not sharing entries and we can simply add the new
5505 			 * profile to the VSIG.
5506 			 */
5507 			status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false,
5508 						      &chg);
5509 			if (status)
5510 				goto err_ice_add_prof_id_flow;
5511 
5512 			/* Adjust priorities */
5513 			status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
5514 			if (status)
5515 				goto err_ice_add_prof_id_flow;
5516 		} else {
5517 			/* No match, so we need a new VSIG */
5518 			status = ice_create_vsig_from_lst(hw, blk, vsi,
5519 							  &union_lst, &vsig,
5520 							  &chg);
5521 			if (status)
5522 				goto err_ice_add_prof_id_flow;
5523 
5524 			/* Adjust priorities */
5525 			status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
5526 			if (status)
5527 				goto err_ice_add_prof_id_flow;
5528 		}
5529 	} else {
5530 		/* need to find or add a VSIG */
5531 		/* search for an existing VSIG with an exact charc match */
5532 		if (ice_find_prof_vsig(hw, blk, hdl, &vsig)) {
5533 			/* found an exact match */
5534 			/* add or move VSI to the VSIG that matches */
5535 			status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
5536 			if (status)
5537 				goto err_ice_add_prof_id_flow;
5538 		} else {
5539 			/* we did not find an exact match */
5540 			/* we need to add a VSIG */
5541 			status = ice_create_prof_id_vsig(hw, blk, vsi, hdl,
5542 							 &chg);
5543 			if (status)
5544 				goto err_ice_add_prof_id_flow;
5545 		}
5546 	}
5547 
5548 	/* update hardware */
5549 	if (!status)
5550 		status = ice_upd_prof_hw(hw, blk, &chg);
5551 
5552 err_ice_add_prof_id_flow:
5553 	list_for_each_entry_safe(del, tmp, &chg, list_entry) {
5554 		list_del(&del->list_entry);
5555 		devm_kfree(ice_hw_to_dev(hw), del);
5556 	}
5557 
5558 	list_for_each_entry_safe(del1, tmp1, &union_lst, list) {
5559 		list_del(&del1->list);
5560 		devm_kfree(ice_hw_to_dev(hw), del1);
5561 	}
5562 
5563 	return status;
5564 }
5565 
5566 /**
5567  * ice_rem_prof_from_list - remove a profile from list
5568  * @hw: pointer to the HW struct
5569  * @lst: list to remove the profile from
5570  * @hdl: the profile handle indicating the profile to remove
5571  */
5572 static enum ice_status
5573 ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl)
5574 {
5575 	struct ice_vsig_prof *ent, *tmp;
5576 
5577 	list_for_each_entry_safe(ent, tmp, lst, list)
5578 		if (ent->profile_cookie == hdl) {
5579 			list_del(&ent->list);
5580 			devm_kfree(ice_hw_to_dev(hw), ent);
5581 			return 0;
5582 		}
5583 
5584 	return ICE_ERR_DOES_NOT_EXIST;
5585 }
5586 
5587 /**
5588  * ice_rem_prof_id_flow - remove flow
5589  * @hw: pointer to the HW struct
5590  * @blk: hardware block
5591  * @vsi: the VSI from which to remove the profile specified by ID
5592  * @hdl: profile tracking handle
5593  *
5594  * Calling this function will update the hardware tables to remove the
5595  * profile indicated by the ID parameter for the VSIs specified in the VSI
5596  * array. Once successfully called, the flow will be disabled.
5597  */
5598 enum ice_status
5599 ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
5600 {
5601 	struct ice_vsig_prof *tmp1, *del1;
5602 	struct ice_chs_chg *tmp, *del;
5603 	struct list_head chg, copy;
5604 	enum ice_status status;
5605 	u16 vsig;
5606 
5607 	INIT_LIST_HEAD(&copy);
5608 	INIT_LIST_HEAD(&chg);
5609 
5610 	/* determine if VSI is already part of a VSIG */
5611 	status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
5612 	if (!status && vsig) {
5613 		bool last_profile;
5614 		bool only_vsi;
5615 		u16 ref;
5616 
5617 		/* found in VSIG */
5618 		last_profile = ice_vsig_prof_id_count(hw, blk, vsig) == 1;
5619 		status = ice_vsig_get_ref(hw, blk, vsig, &ref);
5620 		if (status)
5621 			goto err_ice_rem_prof_id_flow;
5622 		only_vsi = (ref == 1);
5623 
5624 		if (only_vsi) {
5625 			/* If the original VSIG only contains one reference,
5626 			 * which will be the requesting VSI, then the VSI is not
5627 			 * sharing entries and we can simply remove the specific
5628 			 * characteristics from the VSIG.
5629 			 */
5630 
5631 			if (last_profile) {
5632 				/* If there are no profiles left for this VSIG,
5633 				 * then simply remove the VSIG.
5634 				 */
5635 				status = ice_rem_vsig(hw, blk, vsig, &chg);
5636 				if (status)
5637 					goto err_ice_rem_prof_id_flow;
5638 			} else {
5639 				status = ice_rem_prof_id_vsig(hw, blk, vsig,
5640 							      hdl, &chg);
5641 				if (status)
5642 					goto err_ice_rem_prof_id_flow;
5643 
5644 				/* Adjust priorities */
5645 				status = ice_adj_prof_priorities(hw, blk, vsig,
5646 								 &chg);
5647 				if (status)
5648 					goto err_ice_rem_prof_id_flow;
5649 			}
5650 
5651 		} else {
5652 			/* Make a copy of the VSIG's list of Profiles */
5653 			status = ice_get_profs_vsig(hw, blk, vsig, &copy);
5654 			if (status)
5655 				goto err_ice_rem_prof_id_flow;
5656 
5657 			/* Remove specified profile entry from the list */
5658 			status = ice_rem_prof_from_list(hw, &copy, hdl);
5659 			if (status)
5660 				goto err_ice_rem_prof_id_flow;
5661 
5662 			if (list_empty(&copy)) {
5663 				status = ice_move_vsi(hw, blk, vsi,
5664 						      ICE_DEFAULT_VSIG, &chg);
5665 				if (status)
5666 					goto err_ice_rem_prof_id_flow;
5667 
5668 			} else if (!ice_find_dup_props_vsig(hw, blk, &copy,
5669 							    &vsig)) {
5670 				/* found an exact match */
5671 				/* add or move VSI to the VSIG that matches */
5672 				/* Search for a VSIG with a matching profile
5673 				 * list
5674 				 */
5675 
5676 				/* Found match, move VSI to the matching VSIG */
5677 				status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
5678 				if (status)
5679 					goto err_ice_rem_prof_id_flow;
5680 			} else {
5681 				/* since no existing VSIG supports this
5682 				 * characteristic pattern, we need to create a
5683 				 * new VSIG and TCAM entries
5684 				 */
5685 				status = ice_create_vsig_from_lst(hw, blk, vsi,
5686 								  &copy, &vsig,
5687 								  &chg);
5688 				if (status)
5689 					goto err_ice_rem_prof_id_flow;
5690 
5691 				/* Adjust priorities */
5692 				status = ice_adj_prof_priorities(hw, blk, vsig,
5693 								 &chg);
5694 				if (status)
5695 					goto err_ice_rem_prof_id_flow;
5696 			}
5697 		}
5698 	} else {
5699 		status = ICE_ERR_DOES_NOT_EXIST;
5700 	}
5701 
5702 	/* update hardware tables */
5703 	if (!status)
5704 		status = ice_upd_prof_hw(hw, blk, &chg);
5705 
5706 err_ice_rem_prof_id_flow:
5707 	list_for_each_entry_safe(del, tmp, &chg, list_entry) {
5708 		list_del(&del->list_entry);
5709 		devm_kfree(ice_hw_to_dev(hw), del);
5710 	}
5711 
5712 	list_for_each_entry_safe(del1, tmp1, &copy, list) {
5713 		list_del(&del1->list);
5714 		devm_kfree(ice_hw_to_dev(hw), del1);
5715 	}
5716 
5717 	return status;
5718 }
5719