xref: /openbmc/linux/drivers/net/ethernet/intel/ice/ice_ddp.c (revision c900529f3d9161bfde5cca0754f83b4d3c3e0220)
1  // SPDX-License-Identifier: GPL-2.0
2  /* Copyright (c) 2022, Intel Corporation. */
3  
4  #include "ice_common.h"
5  #include "ice.h"
6  #include "ice_ddp.h"
7  
8  /* For supporting double VLAN mode, it is necessary to enable or disable certain
9   * boost tcam entries. The metadata label names that match the following
10   * prefixes will be saved to allow enabling double VLAN mode.
11   */
12  #define ICE_DVM_PRE "BOOST_MAC_VLAN_DVM" /* enable these entries */
13  #define ICE_SVM_PRE "BOOST_MAC_VLAN_SVM" /* disable these entries */
14  
15  /* To support tunneling entries by PF, the package will append the PF number to
16   * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
17   */
18  #define ICE_TNL_PRE "TNL_"
19  static const struct ice_tunnel_type_scan tnls[] = {
20  	{ TNL_VXLAN, "TNL_VXLAN_PF" },
21  	{ TNL_GENEVE, "TNL_GENEVE_PF" },
22  	{ TNL_LAST, "" }
23  };
24  
25  /**
26   * ice_verify_pkg - verify package
27   * @pkg: pointer to the package buffer
28   * @len: size of the package buffer
29   *
30   * Verifies various attributes of the package file, including length, format
31   * version, and the requirement of at least one segment.
32   */
33  static enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
34  {
35  	u32 seg_count;
36  	u32 i;
37  
38  	if (len < struct_size(pkg, seg_offset, 1))
39  		return ICE_DDP_PKG_INVALID_FILE;
40  
41  	if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
42  	    pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
43  	    pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
44  	    pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
45  		return ICE_DDP_PKG_INVALID_FILE;
46  
47  	/* pkg must have at least one segment */
48  	seg_count = le32_to_cpu(pkg->seg_count);
49  	if (seg_count < 1)
50  		return ICE_DDP_PKG_INVALID_FILE;
51  
52  	/* make sure segment array fits in package length */
53  	if (len < struct_size(pkg, seg_offset, seg_count))
54  		return ICE_DDP_PKG_INVALID_FILE;
55  
56  	/* all segments must fit within length */
57  	for (i = 0; i < seg_count; i++) {
58  		u32 off = le32_to_cpu(pkg->seg_offset[i]);
59  		struct ice_generic_seg_hdr *seg;
60  
61  		/* segment header must fit */
62  		if (len < off + sizeof(*seg))
63  			return ICE_DDP_PKG_INVALID_FILE;
64  
65  		seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
66  
67  		/* segment body must fit */
68  		if (len < off + le32_to_cpu(seg->seg_size))
69  			return ICE_DDP_PKG_INVALID_FILE;
70  	}
71  
72  	return ICE_DDP_PKG_SUCCESS;
73  }
74  
75  /**
76   * ice_free_seg - free package segment pointer
77   * @hw: pointer to the hardware structure
78   *
79   * Frees the package segment pointer in the proper manner, depending on if the
80   * segment was allocated or just the passed in pointer was stored.
81   */
82  void ice_free_seg(struct ice_hw *hw)
83  {
84  	if (hw->pkg_copy) {
85  		devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy);
86  		hw->pkg_copy = NULL;
87  		hw->pkg_size = 0;
88  	}
89  	hw->seg = NULL;
90  }
91  
92  /**
93   * ice_chk_pkg_version - check package version for compatibility with driver
94   * @pkg_ver: pointer to a version structure to check
95   *
96   * Check to make sure that the package about to be downloaded is compatible with
97   * the driver. To be compatible, the major and minor components of the package
98   * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
99   * definitions.
100   */
101  static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
102  {
103  	if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ ||
104  	    (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
105  	     pkg_ver->minor > ICE_PKG_SUPP_VER_MNR))
106  		return ICE_DDP_PKG_FILE_VERSION_TOO_HIGH;
107  	else if (pkg_ver->major < ICE_PKG_SUPP_VER_MAJ ||
108  		 (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
109  		  pkg_ver->minor < ICE_PKG_SUPP_VER_MNR))
110  		return ICE_DDP_PKG_FILE_VERSION_TOO_LOW;
111  
112  	return ICE_DDP_PKG_SUCCESS;
113  }
114  
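/* Worked example (illustrative only, not part of the driver): assuming, for
 * illustration, that ICE_PKG_SUPP_VER_MAJ is 1 and ICE_PKG_SUPP_VER_MNR is 3
 * (the real values are defined in the DDP headers), a 1.4.x.x package returns
 * ICE_DDP_PKG_FILE_VERSION_TOO_HIGH, a 1.2.x.x package returns
 * ICE_DDP_PKG_FILE_VERSION_TOO_LOW, and only a 1.3.x.x package returns
 * ICE_DDP_PKG_SUCCESS; the update and draft fields are not compared here.
 */
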
115  /**
116   * ice_pkg_val_buf
117   * @buf: pointer to the ice buffer
118   *
119   * This helper function validates a buffer's header.
120   */
121  static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
122  {
123  	struct ice_buf_hdr *hdr;
124  	u16 section_count;
125  	u16 data_end;
126  
127  	hdr = (struct ice_buf_hdr *)buf->buf;
128  	/* verify data */
129  	section_count = le16_to_cpu(hdr->section_count);
130  	if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
131  		return NULL;
132  
133  	data_end = le16_to_cpu(hdr->data_end);
134  	if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
135  		return NULL;
136  
137  	return hdr;
138  }
139  
140  /**
141   * ice_find_buf_table
142   * @ice_seg: pointer to the ice segment
143   *
144   * Returns the address of the buffer table within the ice segment.
145   */
146  static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
147  {
148  	struct ice_nvm_table *nvms = (struct ice_nvm_table *)
149  		(ice_seg->device_table + le32_to_cpu(ice_seg->device_table_count));
150  
151  	return (__force struct ice_buf_table *)(nvms->vers +
152  						le32_to_cpu(nvms->table_count));
153  }
154  
155  /**
156   * ice_pkg_enum_buf
157   * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
158   * @state: pointer to the enum state
159   *
160   * This function will enumerate all the buffers in the ice segment. The first
161   * call is made with the ice_seg parameter non-NULL; on subsequent calls,
162   * ice_seg is set to NULL which continues the enumeration. When the function
163   * returns a NULL pointer, then the end of the buffers has been reached, or an
164   * unexpected value has been detected (for example an invalid section count or
165   * an invalid buffer end value).
166   */
167  static struct ice_buf_hdr *ice_pkg_enum_buf(struct ice_seg *ice_seg,
168  					    struct ice_pkg_enum *state)
169  {
170  	if (ice_seg) {
171  		state->buf_table = ice_find_buf_table(ice_seg);
172  		if (!state->buf_table)
173  			return NULL;
174  
175  		state->buf_idx = 0;
176  		return ice_pkg_val_buf(state->buf_table->buf_array);
177  	}
178  
179  	if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count))
180  		return ice_pkg_val_buf(state->buf_table->buf_array +
181  				       state->buf_idx);
182  	else
183  		return NULL;
184  }
185  
186  /**
187   * ice_pkg_advance_sect
188   * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
189   * @state: pointer to the enum state
190   *
191   * This helper function will advance the section within the ice segment,
192   * also advancing the buffer if needed.
193   */
194  static bool ice_pkg_advance_sect(struct ice_seg *ice_seg,
195  				 struct ice_pkg_enum *state)
196  {
197  	if (!ice_seg && !state->buf)
198  		return false;
199  
200  	if (!ice_seg && state->buf)
201  		if (++state->sect_idx < le16_to_cpu(state->buf->section_count))
202  			return true;
203  
204  	state->buf = ice_pkg_enum_buf(ice_seg, state);
205  	if (!state->buf)
206  		return false;
207  
208  	/* start of new buffer, reset section index */
209  	state->sect_idx = 0;
210  	return true;
211  }
212  
213  /**
214   * ice_pkg_enum_section
215   * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
216   * @state: pointer to the enum state
217   * @sect_type: section type to enumerate
218   *
219   * This function will enumerate all the sections of a particular type in the
220   * ice segment. The first call is made with the ice_seg parameter non-NULL;
221   * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
222   * When the function returns a NULL pointer, then the end of the matching
223   * sections has been reached.
224   */
225  void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
226  			   u32 sect_type)
227  {
228  	u16 offset, size;
229  
230  	if (ice_seg)
231  		state->type = sect_type;
232  
233  	if (!ice_pkg_advance_sect(ice_seg, state))
234  		return NULL;
235  
236  	/* scan for next matching section */
237  	while (state->buf->section_entry[state->sect_idx].type !=
238  	       cpu_to_le32(state->type))
239  		if (!ice_pkg_advance_sect(NULL, state))
240  			return NULL;
241  
242  	/* validate section */
243  	offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
244  	if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
245  		return NULL;
246  
247  	size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size);
248  	if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
249  		return NULL;
250  
251  	/* make sure the section fits in the buffer */
252  	if (offset + size > ICE_PKG_BUF_SIZE)
253  		return NULL;
254  
255  	state->sect_type =
256  		le32_to_cpu(state->buf->section_entry[state->sect_idx].type);
257  
258  	/* calc pointer to this section */
259  	state->sect =
260  		((u8 *)state->buf) +
261  		le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
262  
263  	return state->sect;
264  }
265  
266  /**
267   * ice_pkg_enum_entry
268   * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
269   * @state: pointer to the enum state
270   * @sect_type: section type to enumerate
271   * @offset: pointer to variable that receives the offset in the table (optional)
272   * @handler: function that handles access to the entries into the section type
273   *
274   * This function will enumerate all the entries in particular section type in
275   * the ice segment. The first call is made with the ice_seg parameter non-NULL;
276   * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
277   * When the function returns a NULL pointer, then the end of the entries has
278   * been reached.
279   *
280   * Since each section may have a different header and entry size, the handler
281   * function is needed to determine the number and location entries in each
282   * section.
283   *
284   * The offset parameter is optional, but should be used for sections that
285   * contain an offset for each section table. For such cases, the section handler
286   * function must return the appropriate offset + index to give the absolution
287   * offset for each entry. For example, if the base for a section's header
288   * indicates a base offset of 10, and the index for the entry is 2, then
289   * section handler function should set the offset to 10 + 2 = 12.
290   */
291  static void *ice_pkg_enum_entry(struct ice_seg *ice_seg,
292  				struct ice_pkg_enum *state, u32 sect_type,
293  				u32 *offset,
294  				void *(*handler)(u32 sect_type, void *section,
295  						 u32 index, u32 *offset))
296  {
297  	void *entry;
298  
299  	if (ice_seg) {
300  		if (!handler)
301  			return NULL;
302  
303  		if (!ice_pkg_enum_section(ice_seg, state, sect_type))
304  			return NULL;
305  
306  		state->entry_idx = 0;
307  		state->handler = handler;
308  	} else {
309  		state->entry_idx++;
310  	}
311  
312  	if (!state->handler)
313  		return NULL;
314  
315  	/* get entry */
316  	entry = state->handler(state->sect_type, state->sect, state->entry_idx,
317  			       offset);
318  	if (!entry) {
319  		/* end of a section, look for another section of this type */
320  		if (!ice_pkg_enum_section(NULL, state, 0))
321  			return NULL;
322  
323  		state->entry_idx = 0;
324  		entry = state->handler(state->sect_type, state->sect,
325  				       state->entry_idx, offset);
326  	}
327  
328  	return entry;
329  }
330  
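/* Usage sketch (illustrative only, not part of the driver): callers walk all
 * entries of one section type by priming the enumeration with a non-NULL
 * ice_seg and then passing NULL on every later call, for example:
 *
 *	struct ice_pkg_enum state;
 *	struct ice_seg *seg = hw->seg;
 *	struct ice_fv *fv;
 *	u32 offset;
 *
 *	memset(&state, 0, sizeof(state));
 *	do {
 *		fv = ice_pkg_enum_entry(seg, &state, ICE_SID_FLD_VEC_SW,
 *					&offset, ice_sw_fv_handler);
 *		seg = NULL;
 *		if (fv)
 *			consume_fv(fv, offset);   (consume_fv() is hypothetical)
 *	} while (fv);
 *
 * The same first-call/continuation-call convention applies to
 * ice_pkg_enum_section() and ice_enum_labels() below.
 */
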
331  /**
332   * ice_sw_fv_handler
333   * @sect_type: section type
334   * @section: pointer to section
335   * @index: index of the field vector entry to be returned
336   * @offset: ptr to variable that receives the offset in the field vector table
337   *
338   * This is a callback function that can be passed to ice_pkg_enum_entry.
339   * This function treats the given section as being of type ice_sw_fv_section
340   * and enumerates its entries. "offset" is an index into the field vector table.
341   */
342  static void *ice_sw_fv_handler(u32 sect_type, void *section, u32 index,
343  			       u32 *offset)
344  {
345  	struct ice_sw_fv_section *fv_section = section;
346  
347  	if (!section || sect_type != ICE_SID_FLD_VEC_SW)
348  		return NULL;
349  	if (index >= le16_to_cpu(fv_section->count))
350  		return NULL;
351  	if (offset)
352  		/* "index" passed in to this function is relative to a given
353  		 * 4k block. To get to the true index into the field vector
354  		 * table, we need to add the relative index to the base_offset
355  		 * field of this section.
356  		 */
357  		*offset = le16_to_cpu(fv_section->base_offset) + index;
358  	return fv_section->fv + index;
359  }
360  
361  /**
362   * ice_get_prof_index_max - get the max index of the profiles in use
363   * @hw: pointer to the HW struct
364   *
365   * Calling this function will find the maximum index among the profiles in use
366   * and store that index in struct ice_switch_info *switch_info
367   * in HW for later use.
368   */
369  static int ice_get_prof_index_max(struct ice_hw *hw)
370  {
371  	u16 prof_index = 0, j, max_prof_index = 0;
372  	struct ice_pkg_enum state;
373  	struct ice_seg *ice_seg;
374  	bool flag = false;
375  	struct ice_fv *fv;
376  	u32 offset;
377  
378  	memset(&state, 0, sizeof(state));
379  
380  	if (!hw->seg)
381  		return -EINVAL;
382  
383  	ice_seg = hw->seg;
384  
385  	do {
386  		fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
387  					&offset, ice_sw_fv_handler);
388  		if (!fv)
389  			break;
390  		ice_seg = NULL;
391  
392  		/* in a profile that is not used, the prot_id is set to 0xff
393  		 * and the off is set to 0x1ff for all the field vector words.
394  		 */
395  		for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
396  			if (fv->ew[j].prot_id != ICE_PROT_INVALID ||
397  			    fv->ew[j].off != ICE_FV_OFFSET_INVAL)
398  				flag = true;
399  		if (flag && prof_index > max_prof_index)
400  			max_prof_index = prof_index;
401  
402  		prof_index++;
403  		flag = false;
404  	} while (fv);
405  
406  	hw->switch_info->max_used_prof_index = max_prof_index;
407  
408  	return 0;
409  }
410  
411  /**
412   * ice_get_ddp_pkg_state - get DDP pkg state after download
413   * @hw: pointer to the HW struct
414   * @already_loaded: indicates if pkg was already loaded onto the device
415   */
416  static enum ice_ddp_state ice_get_ddp_pkg_state(struct ice_hw *hw,
417  						bool already_loaded)
418  {
419  	if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
420  	    hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
421  	    hw->pkg_ver.update == hw->active_pkg_ver.update &&
422  	    hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
423  	    !memcmp(hw->pkg_name, hw->active_pkg_name, sizeof(hw->pkg_name))) {
424  		if (already_loaded)
425  			return ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED;
426  		else
427  			return ICE_DDP_PKG_SUCCESS;
428  	} else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
429  		   hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
430  		return ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED;
431  	} else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
432  		   hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
433  		return ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED;
434  	} else {
435  		return ICE_DDP_PKG_ERR;
436  	}
437  }
438  
439  /**
440   * ice_init_pkg_regs - initialize additional package registers
441   * @hw: pointer to the hardware structure
442   */
443  static void ice_init_pkg_regs(struct ice_hw *hw)
444  {
445  #define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
446  #define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
447  #define ICE_SW_BLK_IDX 0
448  
449  	/* setup Switch block input mask, which is 48-bits in two parts */
450  	wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
451  	wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
452  }
453  
454  /**
455   * ice_marker_ptype_tcam_handler
456   * @sect_type: section type
457   * @section: pointer to section
458   * @index: index of the Marker PType TCAM entry to be returned
459   * @offset: pointer to receive absolute offset, always 0 for ptype TCAM sections
460   *
461   * This is a callback function that can be passed to ice_pkg_enum_entry.
462   * Handles enumeration of individual Marker PType TCAM entries.
463   */
464  static void *ice_marker_ptype_tcam_handler(u32 sect_type, void *section,
465  					   u32 index, u32 *offset)
466  {
467  	struct ice_marker_ptype_tcam_section *marker_ptype;
468  
469  	if (sect_type != ICE_SID_RXPARSER_MARKER_PTYPE)
470  		return NULL;
471  
472  	if (index > ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF)
473  		return NULL;
474  
475  	if (offset)
476  		*offset = 0;
477  
478  	marker_ptype = section;
479  	if (index >= le16_to_cpu(marker_ptype->count))
480  		return NULL;
481  
482  	return marker_ptype->tcam + index;
483  }
484  
485  /**
486   * ice_add_dvm_hint
487   * @hw: pointer to the HW structure
488   * @val: value of the boost entry
489   * @enable: true if the entry is to be enabled, false if it is to be disabled
490   */
491  static void ice_add_dvm_hint(struct ice_hw *hw, u16 val, bool enable)
492  {
493  	if (hw->dvm_upd.count < ICE_DVM_MAX_ENTRIES) {
494  		hw->dvm_upd.tbl[hw->dvm_upd.count].boost_addr = val;
495  		hw->dvm_upd.tbl[hw->dvm_upd.count].enable = enable;
496  		hw->dvm_upd.count++;
497  	}
498  }
499  
500  /**
501   * ice_add_tunnel_hint
502   * @hw: pointer to the HW structure
503   * @label_name: label text
504   * @val: value of the tunnel port boost entry
505   */
506  static void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val)
507  {
508  	if (hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
509  		u16 i;
510  
511  		for (i = 0; tnls[i].type != TNL_LAST; i++) {
512  			size_t len = strlen(tnls[i].label_prefix);
513  
514  			/* Look for matching label start, before continuing */
515  			if (strncmp(label_name, tnls[i].label_prefix, len))
516  				continue;
517  
518  			/* Make sure this label matches our PF. Note that the PF
519  			 * character ('0' - '7') will be located where our
520  			 * prefix string's null terminator is located.
521  			 */
522  			if ((label_name[len] - '0') == hw->pf_id) {
523  				hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
524  				hw->tnl.tbl[hw->tnl.count].valid = false;
525  				hw->tnl.tbl[hw->tnl.count].boost_addr = val;
526  				hw->tnl.tbl[hw->tnl.count].port = 0;
527  				hw->tnl.count++;
528  				break;
529  			}
530  		}
531  	}
532  }
533  
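/* Example (illustrative only): with the "TNL_VXLAN_PF" prefix from tnls[]
 * above, a label named "TNL_VXLAN_PF2" matches only on the function whose
 * hw->pf_id is 2, because label_name[len] is the character '2' located right
 * where the prefix string ends; that PF then records the label's value as the
 * boost TCAM address of its VXLAN tunnel entry.
 */
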
534  /**
535   * ice_label_enum_handler
536   * @sect_type: section type
537   * @section: pointer to section
538   * @index: index of the label entry to be returned
539   * @offset: pointer to receive absolute offset, always zero for label sections
540   *
541   * This is a callback function that can be passed to ice_pkg_enum_entry.
542   * Handles enumeration of individual label entries.
543   */
544  static void *ice_label_enum_handler(u32 __always_unused sect_type,
545  				    void *section, u32 index, u32 *offset)
546  {
547  	struct ice_label_section *labels;
548  
549  	if (!section)
550  		return NULL;
551  
552  	if (index > ICE_MAX_LABELS_IN_BUF)
553  		return NULL;
554  
555  	if (offset)
556  		*offset = 0;
557  
558  	labels = section;
559  	if (index >= le16_to_cpu(labels->count))
560  		return NULL;
561  
562  	return labels->label + index;
563  }
564  
565  /**
566   * ice_enum_labels
567   * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
568   * @type: the section type that will contain the label (0 on subsequent calls)
569   * @state: ice_pkg_enum structure that will hold the state of the enumeration
570   * @value: pointer to a value that will return the label's value if found
571   *
572   * Enumerates a list of labels in the package. The caller will call
573   * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
574   * ice_enum_labels(NULL, 0, ...) to continue. When the function returns NULL,
575   * the end of the list has been reached.
576   */
577  static char *ice_enum_labels(struct ice_seg *ice_seg, u32 type,
578  			     struct ice_pkg_enum *state, u16 *value)
579  {
580  	struct ice_label *label;
581  
582  	/* Check for valid label section on first call */
583  	if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
584  		return NULL;
585  
586  	label = ice_pkg_enum_entry(ice_seg, state, type, NULL,
587  				   ice_label_enum_handler);
588  	if (!label)
589  		return NULL;
590  
591  	*value = le16_to_cpu(label->value);
592  	return label->name;
593  }
594  
595  /**
596   * ice_boost_tcam_handler
597   * @sect_type: section type
598   * @section: pointer to section
599   * @index: index of the boost TCAM entry to be returned
600   * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
601   *
602   * This is a callback function that can be passed to ice_pkg_enum_entry.
603   * Handles enumeration of individual boost TCAM entries.
604   */
605  static void *ice_boost_tcam_handler(u32 sect_type, void *section, u32 index,
606  				    u32 *offset)
607  {
608  	struct ice_boost_tcam_section *boost;
609  
610  	if (!section)
611  		return NULL;
612  
613  	if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
614  		return NULL;
615  
616  	if (index > ICE_MAX_BST_TCAMS_IN_BUF)
617  		return NULL;
618  
619  	if (offset)
620  		*offset = 0;
621  
622  	boost = section;
623  	if (index >= le16_to_cpu(boost->count))
624  		return NULL;
625  
626  	return boost->tcam + index;
627  }
628  
629  /**
630   * ice_find_boost_entry
631   * @ice_seg: pointer to the ice segment (non-NULL)
632   * @addr: Boost TCAM address of entry to search for
633   * @entry: returns pointer to the entry
634   *
635   * Finds a particular Boost TCAM entry and returns a pointer to that entry
636   * if it is found. The ice_seg parameter must not be NULL since the first call
637   * to ice_pkg_enum_entry requires a pointer to an actual ice_seg structure.
638   */
639  static int ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
640  				struct ice_boost_tcam_entry **entry)
641  {
642  	struct ice_boost_tcam_entry *tcam;
643  	struct ice_pkg_enum state;
644  
645  	memset(&state, 0, sizeof(state));
646  
647  	if (!ice_seg)
648  		return -EINVAL;
649  
650  	do {
651  		tcam = ice_pkg_enum_entry(ice_seg, &state,
652  					  ICE_SID_RXPARSER_BOOST_TCAM, NULL,
653  					  ice_boost_tcam_handler);
654  		if (tcam && le16_to_cpu(tcam->addr) == addr) {
655  			*entry = tcam;
656  			return 0;
657  		}
658  
659  		ice_seg = NULL;
660  	} while (tcam);
661  
662  	*entry = NULL;
663  	return -EIO;
664  }
665  
666  /**
667   * ice_is_init_pkg_successful - check if DDP init was successful
668   * @state: state of the DDP pkg after download
669   */
670  bool ice_is_init_pkg_successful(enum ice_ddp_state state)
671  {
672  	switch (state) {
673  	case ICE_DDP_PKG_SUCCESS:
674  	case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
675  	case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
676  		return true;
677  	default:
678  		return false;
679  	}
680  }
681  
682  /**
683   * ice_pkg_buf_alloc
684   * @hw: pointer to the HW structure
685   *
686   * Allocates a package buffer and returns a pointer to the buffer header.
687   * Note: all package contents must be in Little Endian form.
688   */
689  struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
690  {
691  	struct ice_buf_build *bld;
692  	struct ice_buf_hdr *buf;
693  
694  	bld = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*bld), GFP_KERNEL);
695  	if (!bld)
696  		return NULL;
697  
698  	buf = (struct ice_buf_hdr *)bld;
699  	buf->data_end =
700  		cpu_to_le16(offsetof(struct ice_buf_hdr, section_entry));
701  	return bld;
702  }
703  
704  static bool ice_is_gtp_u_profile(u16 prof_idx)
705  {
706  	return (prof_idx >= ICE_PROFID_IPV6_GTPU_TEID &&
707  		prof_idx <= ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER) ||
708  	       prof_idx == ICE_PROFID_IPV4_GTPU_TEID;
709  }
710  
711  static bool ice_is_gtp_c_profile(u16 prof_idx)
712  {
713  	switch (prof_idx) {
714  	case ICE_PROFID_IPV4_GTPC_TEID:
715  	case ICE_PROFID_IPV4_GTPC_NO_TEID:
716  	case ICE_PROFID_IPV6_GTPC_TEID:
717  	case ICE_PROFID_IPV6_GTPC_NO_TEID:
718  		return true;
719  	default:
720  		return false;
721  	}
722  }
723  
724  /**
725   * ice_get_sw_prof_type - determine switch profile type
726   * @hw: pointer to the HW structure
727   * @fv: pointer to the switch field vector
728   * @prof_idx: profile index to check
729   */
730  static enum ice_prof_type ice_get_sw_prof_type(struct ice_hw *hw,
731  					       struct ice_fv *fv, u32 prof_idx)
732  {
733  	u16 i;
734  
735  	if (ice_is_gtp_c_profile(prof_idx))
736  		return ICE_PROF_TUN_GTPC;
737  
738  	if (ice_is_gtp_u_profile(prof_idx))
739  		return ICE_PROF_TUN_GTPU;
740  
741  	for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
742  		/* UDP tunnel will have UDP_OF protocol ID and VNI offset */
743  		if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
744  		    fv->ew[i].off == ICE_VNI_OFFSET)
745  			return ICE_PROF_TUN_UDP;
746  
747  		/* GRE tunnel will have GRE protocol */
748  		if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF)
749  			return ICE_PROF_TUN_GRE;
750  	}
751  
752  	return ICE_PROF_NON_TUN;
753  }
754  
755  /**
756   * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
757   * @hw: pointer to hardware structure
758   * @req_profs: type of profiles requested
759   * @bm: pointer to memory for returning the bitmap of field vectors
760   */
761  void ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
762  			  unsigned long *bm)
763  {
764  	struct ice_pkg_enum state;
765  	struct ice_seg *ice_seg;
766  	struct ice_fv *fv;
767  
768  	if (req_profs == ICE_PROF_ALL) {
769  		bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES);
770  		return;
771  	}
772  
773  	memset(&state, 0, sizeof(state));
774  	bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
775  	ice_seg = hw->seg;
776  	do {
777  		enum ice_prof_type prof_type;
778  		u32 offset;
779  
780  		fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
781  					&offset, ice_sw_fv_handler);
782  		ice_seg = NULL;
783  
784  		if (fv) {
785  			/* Determine field vector type */
786  			prof_type = ice_get_sw_prof_type(hw, fv, offset);
787  
788  			if (req_profs & prof_type)
789  				set_bit((u16)offset, bm);
790  		}
791  	} while (fv);
792  }
793  
794  /**
795   * ice_get_sw_fv_list
796   * @hw: pointer to the HW structure
797   * @lkups: list of protocol types
798   * @bm: bitmap of field vectors to consider
799   * @fv_list: Head of a list
800   *
801   * Finds all the field vector entries from the switch block that contain
802   * a given protocol ID and offset and returns a list of structures of type
803   * "ice_sw_fv_list_entry". Every structure in the list has a field vector
804   * definition and profile ID information.
805   * NOTE: The caller of the function is responsible for freeing the memory
806   * allocated for every list entry.
807   */
808  int ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
809  		       unsigned long *bm, struct list_head *fv_list)
810  {
811  	struct ice_sw_fv_list_entry *fvl;
812  	struct ice_sw_fv_list_entry *tmp;
813  	struct ice_pkg_enum state;
814  	struct ice_seg *ice_seg;
815  	struct ice_fv *fv;
816  	u32 offset;
817  
818  	memset(&state, 0, sizeof(state));
819  
820  	if (!lkups->n_val_words || !hw->seg)
821  		return -EINVAL;
822  
823  	ice_seg = hw->seg;
824  	do {
825  		u16 i;
826  
827  		fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
828  					&offset, ice_sw_fv_handler);
829  		if (!fv)
830  			break;
831  		ice_seg = NULL;
832  
833  		/* If field vector is not in the bitmap list, then skip this
834  		 * profile.
835  		 */
836  		if (!test_bit((u16)offset, bm))
837  			continue;
838  
839  		for (i = 0; i < lkups->n_val_words; i++) {
840  			int j;
841  
842  			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
843  				if (fv->ew[j].prot_id ==
844  					    lkups->fv_words[i].prot_id &&
845  				    fv->ew[j].off == lkups->fv_words[i].off)
846  					break;
847  			if (j >= hw->blk[ICE_BLK_SW].es.fvw)
848  				break;
849  			if (i + 1 == lkups->n_val_words) {
850  				fvl = devm_kzalloc(ice_hw_to_dev(hw),
851  						   sizeof(*fvl), GFP_KERNEL);
852  				if (!fvl)
853  					goto err;
854  				fvl->fv_ptr = fv;
855  				fvl->profile_id = offset;
856  				list_add(&fvl->list_entry, fv_list);
857  				break;
858  			}
859  		}
860  	} while (fv);
861  	if (list_empty(fv_list)) {
862  		dev_warn(ice_hw_to_dev(hw),
863  			 "Required profiles not found in currently loaded DDP package");
864  		return -EIO;
865  	}
866  
867  	return 0;
868  
869  err:
870  	list_for_each_entry_safe(fvl, tmp, fv_list, list_entry) {
871  		list_del(&fvl->list_entry);
872  		devm_kfree(ice_hw_to_dev(hw), fvl);
873  	}
874  
875  	return -ENOMEM;
876  }
877  
878  /**
879   * ice_init_prof_result_bm - Initialize the profile result index bitmap
880   * @hw: pointer to hardware structure
881   */
882  void ice_init_prof_result_bm(struct ice_hw *hw)
883  {
884  	struct ice_pkg_enum state;
885  	struct ice_seg *ice_seg;
886  	struct ice_fv *fv;
887  
888  	memset(&state, 0, sizeof(state));
889  
890  	if (!hw->seg)
891  		return;
892  
893  	ice_seg = hw->seg;
894  	do {
895  		u32 off;
896  		u16 i;
897  
898  		fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
899  					&off, ice_sw_fv_handler);
900  		ice_seg = NULL;
901  		if (!fv)
902  			break;
903  
904  		bitmap_zero(hw->switch_info->prof_res_bm[off],
905  			    ICE_MAX_FV_WORDS);
906  
907  		/* Determine empty field vector indices, these can be
908  		 * used for recipe results. Skip index 0, since it is
909  		 * always used for Switch ID.
910  		 */
911  		for (i = 1; i < ICE_MAX_FV_WORDS; i++)
912  			if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
913  			    fv->ew[i].off == ICE_FV_OFFSET_INVAL)
914  				set_bit(i, hw->switch_info->prof_res_bm[off]);
915  	} while (fv);
916  }
917  
918  /**
919   * ice_pkg_buf_free
920   * @hw: pointer to the HW structure
921   * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
922   *
923   * Frees a package buffer
924   */
925  void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
926  {
927  	devm_kfree(ice_hw_to_dev(hw), bld);
928  }
929  
930  /**
931   * ice_pkg_buf_reserve_section
932   * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
933   * @count: the number of sections to reserve
934   *
935   * Reserves one or more section table entries in a package buffer. This routine
936   * can be called multiple times as long as all calls are made before calling
937   * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() has been
938   * called, the number of sections that can be allocated can no longer be
939   * increased; not using all reserved sections is fine, but this will
940   * result in some wasted space in the buffer.
941   * Note: all package contents must be in Little Endian form.
942   */
943  int ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
944  {
945  	struct ice_buf_hdr *buf;
946  	u16 section_count;
947  	u16 data_end;
948  
949  	if (!bld)
950  		return -EINVAL;
951  
952  	buf = (struct ice_buf_hdr *)&bld->buf;
953  
954  	/* already an active section, can't increase table size */
955  	section_count = le16_to_cpu(buf->section_count);
956  	if (section_count > 0)
957  		return -EIO;
958  
959  	if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
960  		return -EIO;
961  	bld->reserved_section_table_entries += count;
962  
963  	data_end = le16_to_cpu(buf->data_end) +
964  		   flex_array_size(buf, section_entry, count);
965  	buf->data_end = cpu_to_le16(data_end);
966  
967  	return 0;
968  }
969  
970  /**
971   * ice_pkg_buf_alloc_section
972   * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
973   * @type: the section type value
974   * @size: the size of the section to reserve (in bytes)
975   *
976   * Reserves memory in the buffer for a section's content and updates the
977   * buffer's status accordingly. This routine returns a pointer to the first
978   * byte of the section start within the buffer, which is used to fill in the
979   * section contents.
980   * Note: all package contents must be in Little Endian form.
981   */
982  void *ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
983  {
984  	struct ice_buf_hdr *buf;
985  	u16 sect_count;
986  	u16 data_end;
987  
988  	if (!bld || !type || !size)
989  		return NULL;
990  
991  	buf = (struct ice_buf_hdr *)&bld->buf;
992  
993  	/* check for enough space left in buffer */
994  	data_end = le16_to_cpu(buf->data_end);
995  
996  	/* section start must align on 4 byte boundary */
997  	data_end = ALIGN(data_end, 4);
998  
999  	if ((data_end + size) > ICE_MAX_S_DATA_END)
1000  		return NULL;
1001  
1002  	/* check for more available section table entries */
1003  	sect_count = le16_to_cpu(buf->section_count);
1004  	if (sect_count < bld->reserved_section_table_entries) {
1005  		void *section_ptr = ((u8 *)buf) + data_end;
1006  
1007  		buf->section_entry[sect_count].offset = cpu_to_le16(data_end);
1008  		buf->section_entry[sect_count].size = cpu_to_le16(size);
1009  		buf->section_entry[sect_count].type = cpu_to_le32(type);
1010  
1011  		data_end += size;
1012  		buf->data_end = cpu_to_le16(data_end);
1013  
1014  		buf->section_count = cpu_to_le16(sect_count + 1);
1015  		return section_ptr;
1016  	}
1017  
1018  	/* no free section table entries */
1019  	return NULL;
1020  }
1021  
1022  /**
1023   * ice_pkg_buf_alloc_single_section
1024   * @hw: pointer to the HW structure
1025   * @type: the section type value
1026   * @size: the size of the section to reserve (in bytes)
1027   * @section: returns pointer to the section
1028   *
1029   * Allocates a package buffer with a single section.
1030   * Note: all package contents must be in Little Endian form.
1031   */
1032  struct ice_buf_build *ice_pkg_buf_alloc_single_section(struct ice_hw *hw,
1033  						       u32 type, u16 size,
1034  						       void **section)
1035  {
1036  	struct ice_buf_build *buf;
1037  
1038  	if (!section)
1039  		return NULL;
1040  
1041  	buf = ice_pkg_buf_alloc(hw);
1042  	if (!buf)
1043  		return NULL;
1044  
1045  	if (ice_pkg_buf_reserve_section(buf, 1))
1046  		goto ice_pkg_buf_alloc_single_section_err;
1047  
1048  	*section = ice_pkg_buf_alloc_section(buf, type, size);
1049  	if (!*section)
1050  		goto ice_pkg_buf_alloc_single_section_err;
1051  
1052  	return buf;
1053  
1054  ice_pkg_buf_alloc_single_section_err:
1055  	ice_pkg_buf_free(hw, buf);
1056  	return NULL;
1057  }
1058  
1059  /**
1060   * ice_pkg_buf_get_active_sections
1061   * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1062   *
1063   * Returns the number of active sections. Before using the package buffer
1064   * in an update package command, the caller should make sure that there is at
1065   * least one active section - otherwise, the buffer is not legal and should
1066   * not be used.
1067   * Note: all package contents must be in Little Endian form.
1068   */
1069  u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
1070  {
1071  	struct ice_buf_hdr *buf;
1072  
1073  	if (!bld)
1074  		return 0;
1075  
1076  	buf = (struct ice_buf_hdr *)&bld->buf;
1077  	return le16_to_cpu(buf->section_count);
1078  }
1079  
1080  /**
1081   * ice_pkg_buf
1082   * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1083   *
1084   * Return a pointer to the buffer's header
1085   */
1086  struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
1087  {
1088  	if (!bld)
1089  		return NULL;
1090  
1091  	return &bld->buf;
1092  }
1093  
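/* Usage sketch (illustrative only, not part of the driver): building and
 * applying an update buffer follows the alloc/reserve/alloc_section ordering
 * described above, roughly:
 *
 *	struct ice_boost_tcam_section *sect;
 *	struct ice_buf_build *bld;
 *
 *	bld = ice_pkg_buf_alloc(hw);
 *	if (!bld)
 *		return -ENOMEM;
 *	if (ice_pkg_buf_reserve_section(bld, 1))
 *		goto out;
 *	sect = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
 *					 struct_size(sect, tcam, 1));
 *	if (!sect)
 *		goto out;
 *	... fill in sect, all fields in Little Endian ...
 *	ice_update_pkg(hw, ice_pkg_buf(bld), 1);
 * out:
 *	ice_pkg_buf_free(hw, bld);
 *
 * ice_pkg_buf_alloc_single_section() above wraps the first three steps for
 * the common single-section case.
 */
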
1094  static enum ice_ddp_state ice_map_aq_err_to_ddp_state(enum ice_aq_err aq_err)
1095  {
1096  	switch (aq_err) {
1097  	case ICE_AQ_RC_ENOSEC:
1098  	case ICE_AQ_RC_EBADSIG:
1099  		return ICE_DDP_PKG_FILE_SIGNATURE_INVALID;
1100  	case ICE_AQ_RC_ESVN:
1101  		return ICE_DDP_PKG_FILE_REVISION_TOO_LOW;
1102  	case ICE_AQ_RC_EBADMAN:
1103  	case ICE_AQ_RC_EBADBUF:
1104  		return ICE_DDP_PKG_LOAD_ERROR;
1105  	default:
1106  		return ICE_DDP_PKG_ERR;
1107  	}
1108  }
1109  
1110  /**
1111   * ice_acquire_global_cfg_lock
1112   * @hw: pointer to the HW structure
1113   * @access: access type (read or write)
1114   *
1115   * This function will request ownership of the global config lock for reading
1116   * or writing of the package. When attempting to obtain write access, the
1117   * caller must check for the following two return values:
1118   *
1119   * 0         -  Means the caller has acquired the global config lock
1120   *              and can perform writing of the package.
1121   * -EALREADY - Indicates another driver has already written the
1122   *             package or has found that no update was necessary; in
1123   *             this case, the caller can just skip performing any
1124   *             update of the package.
1125   */
1126  static int ice_acquire_global_cfg_lock(struct ice_hw *hw,
1127  				       enum ice_aq_res_access_type access)
1128  {
1129  	int status;
1130  
1131  	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
1132  				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);
1133  
1134  	if (!status)
1135  		mutex_lock(&ice_global_cfg_lock_sw);
1136  	else if (status == -EALREADY)
1137  		ice_debug(hw, ICE_DBG_PKG,
1138  			  "Global config lock: No work to do\n");
1139  
1140  	return status;
1141  }
1142  
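/* Caller sketch (illustrative only): write-access callers are expected to
 * treat -EALREADY as "no work to do" rather than as a failure, e.g.:
 *
 *	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
 *	if (status == -EALREADY)
 *		return ICE_DDP_PKG_ALREADY_LOADED;
 *	else if (status)
 *		return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status);
 *	... download the package buffers ...
 *	ice_release_global_cfg_lock(hw);
 *
 * ice_dwnld_cfg_bufs() below follows exactly this pattern.
 */
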
1143  /**
1144   * ice_release_global_cfg_lock
1145   * @hw: pointer to the HW structure
1146   *
1147   * This function will release the global config lock.
1148   */
1149  static void ice_release_global_cfg_lock(struct ice_hw *hw)
1150  {
1151  	mutex_unlock(&ice_global_cfg_lock_sw);
1152  	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
1153  }
1154  
1155  /**
1156   * ice_aq_download_pkg
1157   * @hw: pointer to the hardware structure
1158   * @pkg_buf: the package buffer to transfer
1159   * @buf_size: the size of the package buffer
1160   * @last_buf: last buffer indicator
1161   * @error_offset: returns error offset
1162   * @error_info: returns error information
1163   * @cd: pointer to command details structure or NULL
1164   *
1165   * Download Package (0x0C40)
1166   */
1167  static int
1168  ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
1169  		    u16 buf_size, bool last_buf, u32 *error_offset,
1170  		    u32 *error_info, struct ice_sq_cd *cd)
1171  {
1172  	struct ice_aqc_download_pkg *cmd;
1173  	struct ice_aq_desc desc;
1174  	int status;
1175  
1176  	if (error_offset)
1177  		*error_offset = 0;
1178  	if (error_info)
1179  		*error_info = 0;
1180  
1181  	cmd = &desc.params.download_pkg;
1182  	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
1183  	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1184  
1185  	if (last_buf)
1186  		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
1187  
1188  	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
1189  	if (status == -EIO) {
1190  		/* Read error from buffer only when the FW returned an error */
1191  		struct ice_aqc_download_pkg_resp *resp;
1192  
1193  		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
1194  		if (error_offset)
1195  			*error_offset = le32_to_cpu(resp->error_offset);
1196  		if (error_info)
1197  			*error_info = le32_to_cpu(resp->error_info);
1198  	}
1199  
1200  	return status;
1201  }
1202  
1203  /**
1204   * ice_dwnld_cfg_bufs
1205   * @hw: pointer to the hardware structure
1206   * @bufs: pointer to an array of buffers
1207   * @count: the number of buffers in the array
1208   *
1209   * Obtains global config lock and downloads the package configuration buffers
1210   * to the firmware. Metadata buffers are skipped, and the first metadata buffer
1211   * found indicates that the rest of the buffers are all metadata buffers.
1212   */
1213  static enum ice_ddp_state ice_dwnld_cfg_bufs(struct ice_hw *hw,
1214  					     struct ice_buf *bufs, u32 count)
1215  {
1216  	enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
1217  	struct ice_buf_hdr *bh;
1218  	enum ice_aq_err err;
1219  	u32 offset, info, i;
1220  	int status;
1221  
1222  	if (!bufs || !count)
1223  		return ICE_DDP_PKG_ERR;
1224  
1225  	/* If the first buffer's first section has its metadata bit set
1226  	 * then there are no buffers to be downloaded, and the operation is
1227  	 * considered a success.
1228  	 */
1229  	bh = (struct ice_buf_hdr *)bufs;
1230  	if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
1231  		return ICE_DDP_PKG_SUCCESS;
1232  
1233  	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
1234  	if (status) {
1235  		if (status == -EALREADY)
1236  			return ICE_DDP_PKG_ALREADY_LOADED;
1237  		return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status);
1238  	}
1239  
1240  	for (i = 0; i < count; i++) {
1241  		bool last = ((i + 1) == count);
1242  
1243  		if (!last) {
1244  			/* check next buffer for metadata flag */
1245  			bh = (struct ice_buf_hdr *)(bufs + i + 1);
1246  
1247  			/* A set metadata flag in the next buffer will signal
1248  			 * that the current buffer will be the last buffer
1249  			 * downloaded
1250  			 */
1251  			if (le16_to_cpu(bh->section_count))
1252  				if (le32_to_cpu(bh->section_entry[0].type) &
1253  				    ICE_METADATA_BUF)
1254  					last = true;
1255  		}
1256  
1257  		bh = (struct ice_buf_hdr *)(bufs + i);
1258  
1259  		status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
1260  					     &offset, &info, NULL);
1261  
1262  		/* Save AQ status from download package */
1263  		if (status) {
1264  			ice_debug(hw, ICE_DBG_PKG,
1265  				  "Pkg download failed: err %d off %d inf %d\n",
1266  				  status, offset, info);
1267  			err = hw->adminq.sq_last_status;
1268  			state = ice_map_aq_err_to_ddp_state(err);
1269  			break;
1270  		}
1271  
1272  		if (last)
1273  			break;
1274  	}
1275  
1276  	if (!status) {
1277  		status = ice_set_vlan_mode(hw);
1278  		if (status)
1279  			ice_debug(hw, ICE_DBG_PKG,
1280  				  "Failed to set VLAN mode: err %d\n", status);
1281  	}
1282  
1283  	ice_release_global_cfg_lock(hw);
1284  
1285  	return state;
1286  }
1287  
1288  /**
1289   * ice_aq_get_pkg_info_list
1290   * @hw: pointer to the hardware structure
1291   * @pkg_info: the buffer which will receive the information list
1292   * @buf_size: the size of the pkg_info information buffer
1293   * @cd: pointer to command details structure or NULL
1294   *
1295   * Get Package Info List (0x0C43)
1296   */
1297  static int ice_aq_get_pkg_info_list(struct ice_hw *hw,
1298  				    struct ice_aqc_get_pkg_info_resp *pkg_info,
1299  				    u16 buf_size, struct ice_sq_cd *cd)
1300  {
1301  	struct ice_aq_desc desc;
1302  
1303  	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
1304  
1305  	return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
1306  }
1307  
1308  /**
1309   * ice_download_pkg
1310   * @hw: pointer to the hardware structure
1311   * @ice_seg: pointer to the segment of the package to be downloaded
1312   *
1313   * Handles the download of a complete package.
1314   */
1315  static enum ice_ddp_state ice_download_pkg(struct ice_hw *hw,
1316  					   struct ice_seg *ice_seg)
1317  {
1318  	struct ice_buf_table *ice_buf_tbl;
1319  	int status;
1320  
1321  	ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
1322  		  ice_seg->hdr.seg_format_ver.major,
1323  		  ice_seg->hdr.seg_format_ver.minor,
1324  		  ice_seg->hdr.seg_format_ver.update,
1325  		  ice_seg->hdr.seg_format_ver.draft);
1326  
1327  	ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
1328  		  le32_to_cpu(ice_seg->hdr.seg_type),
1329  		  le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);
1330  
1331  	ice_buf_tbl = ice_find_buf_table(ice_seg);
1332  
1333  	ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
1334  		  le32_to_cpu(ice_buf_tbl->buf_count));
1335  
1336  	status = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
1337  				    le32_to_cpu(ice_buf_tbl->buf_count));
1338  
1339  	ice_post_pkg_dwnld_vlan_mode_cfg(hw);
1340  
1341  	return status;
1342  }
1343  
1344  /**
1345   * ice_aq_update_pkg
1346   * @hw: pointer to the hardware structure
1347   * @pkg_buf: the package cmd buffer
1348   * @buf_size: the size of the package cmd buffer
1349   * @last_buf: last buffer indicator
1350   * @error_offset: returns error offset
1351   * @error_info: returns error information
1352   * @cd: pointer to command details structure or NULL
1353   *
1354   * Update Package (0x0C42)
1355   */
1356  static int ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
1357  			     u16 buf_size, bool last_buf, u32 *error_offset,
1358  			     u32 *error_info, struct ice_sq_cd *cd)
1359  {
1360  	struct ice_aqc_download_pkg *cmd;
1361  	struct ice_aq_desc desc;
1362  	int status;
1363  
1364  	if (error_offset)
1365  		*error_offset = 0;
1366  	if (error_info)
1367  		*error_info = 0;
1368  
1369  	cmd = &desc.params.download_pkg;
1370  	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
1371  	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1372  
1373  	if (last_buf)
1374  		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
1375  
1376  	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
1377  	if (status == -EIO) {
1378  		/* Read error from buffer only when the FW returned an error */
1379  		struct ice_aqc_download_pkg_resp *resp;
1380  
1381  		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
1382  		if (error_offset)
1383  			*error_offset = le32_to_cpu(resp->error_offset);
1384  		if (error_info)
1385  			*error_info = le32_to_cpu(resp->error_info);
1386  	}
1387  
1388  	return status;
1389  }
1390  
1391  /**
1392   * ice_aq_upload_section
1393   * @hw: pointer to the hardware structure
1394   * @pkg_buf: the package buffer which will receive the section
1395   * @buf_size: the size of the package buffer
1396   * @cd: pointer to command details structure or NULL
1397   *
1398   * Upload Section (0x0C41)
1399   */
1400  int ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
1401  			  u16 buf_size, struct ice_sq_cd *cd)
1402  {
1403  	struct ice_aq_desc desc;
1404  
1405  	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
1406  	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1407  
1408  	return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
1409  }
1410  
1411  /**
1412   * ice_update_pkg_no_lock
1413   * @hw: pointer to the hardware structure
1414   * @bufs: pointer to an array of buffers
1415   * @count: the number of buffers in the array
1416   */
1417  int ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
1418  {
1419  	int status = 0;
1420  	u32 i;
1421  
1422  	for (i = 0; i < count; i++) {
1423  		struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
1424  		bool last = ((i + 1) == count);
1425  		u32 offset, info;
1426  
1427  		status = ice_aq_update_pkg(hw, bh, le16_to_cpu(bh->data_end),
1428  					   last, &offset, &info, NULL);
1429  
1430  		if (status) {
1431  			ice_debug(hw, ICE_DBG_PKG,
1432  				  "Update pkg failed: err %d off %d inf %d\n",
1433  				  status, offset, info);
1434  			break;
1435  		}
1436  	}
1437  
1438  	return status;
1439  }
1440  
1441  /**
1442   * ice_update_pkg
1443   * @hw: pointer to the hardware structure
1444   * @bufs: pointer to an array of buffers
1445   * @count: the number of buffers in the array
1446   *
1447   * Obtains change lock and updates package.
1448   */
1449  int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
1450  {
1451  	int status;
1452  
1453  	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
1454  	if (status)
1455  		return status;
1456  
1457  	status = ice_update_pkg_no_lock(hw, bufs, count);
1458  
1459  	ice_release_change_lock(hw);
1460  
1461  	return status;
1462  }
1463  
1464  /**
1465   * ice_find_seg_in_pkg
1466   * @hw: pointer to the hardware structure
1467   * @seg_type: the segment type to search for (e.g., SEGMENT_TYPE_CPK)
1468   * @pkg_hdr: pointer to the package header to be searched
1469   *
1470   * This function searches a package file for a particular segment type. On
1471   * success it returns a pointer to the segment header, otherwise it will
1472   * return NULL.
1473   */
1474  static struct ice_generic_seg_hdr *
1475  ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
1476  		    struct ice_pkg_hdr *pkg_hdr)
1477  {
1478  	u32 i;
1479  
1480  	ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
1481  		  pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
1482  		  pkg_hdr->pkg_format_ver.update,
1483  		  pkg_hdr->pkg_format_ver.draft);
1484  
1485  	/* Search all package segments for the requested segment type */
1486  	for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) {
1487  		struct ice_generic_seg_hdr *seg;
1488  
1489  		seg = (struct ice_generic_seg_hdr
1490  			       *)((u8 *)pkg_hdr +
1491  				  le32_to_cpu(pkg_hdr->seg_offset[i]));
1492  
1493  		if (le32_to_cpu(seg->seg_type) == seg_type)
1494  			return seg;
1495  	}
1496  
1497  	return NULL;
1498  }
1499  
1500  /**
1501   * ice_init_pkg_info
1502   * @hw: pointer to the hardware structure
1503   * @pkg_hdr: pointer to the driver's package hdr
1504   *
1505   * Saves off the package details into the HW structure.
1506   */
1507  static enum ice_ddp_state ice_init_pkg_info(struct ice_hw *hw,
1508  					    struct ice_pkg_hdr *pkg_hdr)
1509  {
1510  	struct ice_generic_seg_hdr *seg_hdr;
1511  
1512  	if (!pkg_hdr)
1513  		return ICE_DDP_PKG_ERR;
1514  
1515  	seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
1516  	if (seg_hdr) {
1517  		struct ice_meta_sect *meta;
1518  		struct ice_pkg_enum state;
1519  
1520  		memset(&state, 0, sizeof(state));
1521  
1522  		/* Get package information from the Metadata Section */
1523  		meta = ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state,
1524  					    ICE_SID_METADATA);
1525  		if (!meta) {
1526  			ice_debug(hw, ICE_DBG_INIT,
1527  				  "Did not find ice metadata section in package\n");
1528  			return ICE_DDP_PKG_INVALID_FILE;
1529  		}
1530  
1531  		hw->pkg_ver = meta->ver;
1532  		memcpy(hw->pkg_name, meta->name, sizeof(meta->name));
1533  
1534  		ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
1535  			  meta->ver.major, meta->ver.minor, meta->ver.update,
1536  			  meta->ver.draft, meta->name);
1537  
1538  		hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver;
1539  		memcpy(hw->ice_seg_id, seg_hdr->seg_id, sizeof(hw->ice_seg_id));
1540  
1541  		ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
1542  			  seg_hdr->seg_format_ver.major,
1543  			  seg_hdr->seg_format_ver.minor,
1544  			  seg_hdr->seg_format_ver.update,
1545  			  seg_hdr->seg_format_ver.draft, seg_hdr->seg_id);
1546  	} else {
1547  		ice_debug(hw, ICE_DBG_INIT,
1548  			  "Did not find ice segment in driver package\n");
1549  		return ICE_DDP_PKG_INVALID_FILE;
1550  	}
1551  
1552  	return ICE_DDP_PKG_SUCCESS;
1553  }
1554  
1555  /**
1556   * ice_get_pkg_info
1557   * @hw: pointer to the hardware structure
1558   *
1559   * Store details of the package currently loaded in HW into the HW structure.
1560   */
1561  static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw)
1562  {
1563  	enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
1564  	struct ice_aqc_get_pkg_info_resp *pkg_info;
1565  	u16 size;
1566  	u32 i;
1567  
1568  	size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
1569  	pkg_info = kzalloc(size, GFP_KERNEL);
1570  	if (!pkg_info)
1571  		return ICE_DDP_PKG_ERR;
1572  
1573  	if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL)) {
1574  		state = ICE_DDP_PKG_ERR;
1575  		goto init_pkg_free_alloc;
1576  	}
1577  
1578  	for (i = 0; i < le32_to_cpu(pkg_info->count); i++) {
1579  #define ICE_PKG_FLAG_COUNT 4
1580  		char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
1581  		u8 place = 0;
1582  
1583  		if (pkg_info->pkg_info[i].is_active) {
1584  			flags[place++] = 'A';
1585  			hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
1586  			hw->active_track_id =
1587  				le32_to_cpu(pkg_info->pkg_info[i].track_id);
1588  			memcpy(hw->active_pkg_name, pkg_info->pkg_info[i].name,
1589  			       sizeof(pkg_info->pkg_info[i].name));
1590  			hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
1591  		}
1592  		if (pkg_info->pkg_info[i].is_active_at_boot)
1593  			flags[place++] = 'B';
1594  		if (pkg_info->pkg_info[i].is_modified)
1595  			flags[place++] = 'M';
1596  		if (pkg_info->pkg_info[i].is_in_nvm)
1597  			flags[place++] = 'N';
1598  
1599  		ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n", i,
1600  			  pkg_info->pkg_info[i].ver.major,
1601  			  pkg_info->pkg_info[i].ver.minor,
1602  			  pkg_info->pkg_info[i].ver.update,
1603  			  pkg_info->pkg_info[i].ver.draft,
1604  			  pkg_info->pkg_info[i].name, flags);
1605  	}
1606  
1607  init_pkg_free_alloc:
1608  	kfree(pkg_info);
1609  
1610  	return state;
1611  }
1612  
1613  /**
1614   * ice_chk_pkg_compat
1615   * @hw: pointer to the hardware structure
1616   * @ospkg: pointer to the package hdr
1617   * @seg: pointer to the package segment hdr
1618   *
1619   * This function checks the package version compatibility with driver and NVM
1620   */
1621  static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw,
1622  					     struct ice_pkg_hdr *ospkg,
1623  					     struct ice_seg **seg)
1624  {
1625  	struct ice_aqc_get_pkg_info_resp *pkg;
1626  	enum ice_ddp_state state;
1627  	u16 size;
1628  	u32 i;
1629  
1630  	/* Check package version compatibility */
1631  	state = ice_chk_pkg_version(&hw->pkg_ver);
1632  	if (state) {
1633  		ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
1634  		return state;
1635  	}
1636  
1637  	/* find ICE segment in given package */
1638  	*seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE,
1639  						     ospkg);
1640  	if (!*seg) {
1641  		ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
1642  		return ICE_DDP_PKG_INVALID_FILE;
1643  	}
1644  
1645  	/* Check if FW is compatible with the OS package */
1646  	size = struct_size(pkg, pkg_info, ICE_PKG_CNT);
1647  	pkg = kzalloc(size, GFP_KERNEL);
1648  	if (!pkg)
1649  		return ICE_DDP_PKG_ERR;
1650  
1651  	if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL)) {
1652  		state = ICE_DDP_PKG_LOAD_ERROR;
1653  		goto fw_ddp_compat_free_alloc;
1654  	}
1655  
1656  	for (i = 0; i < le32_to_cpu(pkg->count); i++) {
1657  		/* loop till we find the NVM package */
1658  		if (!pkg->pkg_info[i].is_in_nvm)
1659  			continue;
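		/* The OS package is compatible only when its ICE segment major
		 * version matches the NVM package exactly and its minor
		 * version is no newer than the NVM package's minor version.
		 */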
1660  		if ((*seg)->hdr.seg_format_ver.major !=
1661  			    pkg->pkg_info[i].ver.major ||
1662  		    (*seg)->hdr.seg_format_ver.minor >
1663  			    pkg->pkg_info[i].ver.minor) {
1664  			state = ICE_DDP_PKG_FW_MISMATCH;
1665  			ice_debug(hw, ICE_DBG_INIT,
1666  				  "OS package is not compatible with NVM.\n");
1667  		}
1668  		/* done processing NVM package so break */
1669  		break;
1670  	}
1671  fw_ddp_compat_free_alloc:
1672  	kfree(pkg);
1673  	return state;
1674  }
1675  
1676  /**
1677   * ice_init_pkg_hints - scan the package and save hints for driver use
1678   * @hw: pointer to the HW structure
1679   * @ice_seg: pointer to the segment of the package scan (non-NULL)
1680   *
1681   * This function will scan the package and save off relevant information
1682   * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
1683   * since the first call to ice_enum_labels requires a pointer to an actual
1684   * ice_seg structure.
1685   */
1686  static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
1687  {
1688  	struct ice_pkg_enum state;
1689  	char *label_name;
1690  	u16 val;
1691  	int i;
1692  
1693  	memset(&hw->tnl, 0, sizeof(hw->tnl));
1694  	memset(&state, 0, sizeof(state));
1695  
1696  	if (!ice_seg)
1697  		return;
1698  
1699  	label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
1700  				     &val);
1701  
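	/* The call above starts the label scan from the given segment;
	 * passing a NULL segment on later calls continues the same scan.
	 */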
1702  	while (label_name) {
1703  		if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE)))
1704  			/* check for a tunnel entry */
1705  			ice_add_tunnel_hint(hw, label_name, val);
1706  
1707  		/* check for a dvm mode entry */
1708  		else if (!strncmp(label_name, ICE_DVM_PRE, strlen(ICE_DVM_PRE)))
1709  			ice_add_dvm_hint(hw, val, true);
1710  
1711  		/* check for a svm mode entry */
1712  		else if (!strncmp(label_name, ICE_SVM_PRE, strlen(ICE_SVM_PRE)))
1713  			ice_add_dvm_hint(hw, val, false);
1714  
1715  		label_name = ice_enum_labels(NULL, 0, &state, &val);
1716  	}
1717  
1718  	/* Cache the appropriate boost TCAM entry pointers for tunnels */
1719  	for (i = 0; i < hw->tnl.count; i++) {
1720  		ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
1721  				     &hw->tnl.tbl[i].boost_entry);
1722  		if (hw->tnl.tbl[i].boost_entry) {
1723  			hw->tnl.tbl[i].valid = true;
1724  			if (hw->tnl.tbl[i].type < __TNL_TYPE_CNT)
1725  				hw->tnl.valid_count[hw->tnl.tbl[i].type]++;
1726  		}
1727  	}
1728  
1729  	/* Cache the appropriate boost TCAM entry pointers for DVM and SVM */
1730  	for (i = 0; i < hw->dvm_upd.count; i++)
1731  		ice_find_boost_entry(ice_seg, hw->dvm_upd.tbl[i].boost_addr,
1732  				     &hw->dvm_upd.tbl[i].boost_entry);
1733  }
1734  
1735  /**
1736   * ice_fill_hw_ptype - fill the enabled PTYPE bit information
1737   * @hw: pointer to the HW structure
1738   */
1739  static void ice_fill_hw_ptype(struct ice_hw *hw)
1740  {
1741  	struct ice_marker_ptype_tcam_entry *tcam;
1742  	struct ice_seg *seg = hw->seg;
1743  	struct ice_pkg_enum state;
1744  
1745  	bitmap_zero(hw->hw_ptype, ICE_FLOW_PTYPE_MAX);
1746  	if (!seg)
1747  		return;
1748  
1749  	memset(&state, 0, sizeof(state));
1750  
1751  	do {
1752  		tcam = ice_pkg_enum_entry(seg, &state,
1753  					  ICE_SID_RXPARSER_MARKER_PTYPE, NULL,
1754  					  ice_marker_ptype_tcam_handler);
1755  		if (tcam &&
1756  		    le16_to_cpu(tcam->addr) < ICE_MARKER_PTYPE_TCAM_ADDR_MAX &&
1757  		    le16_to_cpu(tcam->ptype) < ICE_FLOW_PTYPE_MAX)
1758  			set_bit(le16_to_cpu(tcam->ptype), hw->hw_ptype);
1759  
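		/* Clear seg so the next ice_pkg_enum_entry() call continues
		 * the enumeration in progress rather than starting over.
		 */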
1760  		seg = NULL;
1761  	} while (tcam);
1762  }
1763  
1764  /**
1765   * ice_init_pkg - initialize/download package
1766   * @hw: pointer to the hardware structure
1767   * @buf: pointer to the package buffer
1768   * @len: size of the package buffer
1769   *
1770   * This function initializes a package. The package contains HW tables
1771   * required to do packet processing. First, the function extracts package
1772   * information such as version. Then it finds the ice configuration segment
1773   * within the package; this function then saves a copy of the segment pointer
1774   * within the supplied package buffer. Next, the function will cache any hints
1775   * from the package, followed by downloading the package itself. Note that if
1776   * a previous PF driver has already downloaded the package successfully, then
1777   * the current driver will not have to download the package again.
1778   *
1779   * The local package contents will be used to query default behavior and to
1780   * update specific sections of the HW's version of the package (e.g. to update
1781   * the parse graph to understand new protocols).
1782   *
1783   * This function stores a pointer to the package buffer memory, and it is
1784   * expected that the supplied buffer will not be freed immediately. If the
1785   * package buffer needs to be freed, such as when read from a file, use
1786   * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
1787   * case.
1788   */
1789  enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
1790  {
1791  	bool already_loaded = false;
1792  	enum ice_ddp_state state;
1793  	struct ice_pkg_hdr *pkg;
1794  	struct ice_seg *seg;
1795  
1796  	if (!buf || !len)
1797  		return ICE_DDP_PKG_ERR;
1798  
1799  	pkg = (struct ice_pkg_hdr *)buf;
1800  	state = ice_verify_pkg(pkg, len);
1801  	if (state) {
1802  		ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
1803  			  state);
1804  		return state;
1805  	}
1806  
1807  	/* initialize package info */
1808  	state = ice_init_pkg_info(hw, pkg);
1809  	if (state)
1810  		return state;
1811  
1812  	/* before downloading the package, check package version for
1813  	 * compatibility with driver
1814  	 */
1815  	state = ice_chk_pkg_compat(hw, pkg, &seg);
1816  	if (state)
1817  		return state;
1818  
1819  	/* initialize package hints and then download package */
1820  	ice_init_pkg_hints(hw, seg);
1821  	state = ice_download_pkg(hw, seg);
1822  	if (state == ICE_DDP_PKG_ALREADY_LOADED) {
1823  		ice_debug(hw, ICE_DBG_INIT,
1824  			  "package previously loaded - no work.\n");
1825  		already_loaded = true;
1826  	}
1827  
1828  	/* Get information on the package currently loaded in HW, then make sure
1829  	 * the driver is compatible with this version.
1830  	 */
1831  	if (!state || state == ICE_DDP_PKG_ALREADY_LOADED) {
1832  		state = ice_get_pkg_info(hw);
1833  		if (!state)
1834  			state = ice_get_ddp_pkg_state(hw, already_loaded);
1835  	}
1836  
1837  	if (ice_is_init_pkg_successful(state)) {
1838  		hw->seg = seg;
1839  		/* on successful package download update other required
1840  		 * registers to support the package and fill HW tables
1841  		 * with package content.
1842  		 */
1843  		ice_init_pkg_regs(hw);
1844  		ice_fill_blk_tbls(hw);
1845  		ice_fill_hw_ptype(hw);
1846  		ice_get_prof_index_max(hw);
1847  	} else {
1848  		ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n", state);
1849  	}
1850  
1851  	return state;
1852  }
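
/* Usage sketch (illustrative only, not part of the driver): when the caller
 * owns a package buffer that remains allocated for the lifetime of the ice_hw
 * instance, ice_init_pkg() may be called on it directly. Everything below
 * except ice_init_pkg() and ice_is_init_pkg_successful() is hypothetical.
 *
 *	static int example_init_ddp(struct ice_hw *hw, u8 *pkg_buf, u32 pkg_len)
 *	{
 *		enum ice_ddp_state state;
 *
 *		state = ice_init_pkg(hw, pkg_buf, pkg_len);
 *		if (!ice_is_init_pkg_successful(state))
 *			return -EIO;
 *
 *		return 0;
 *	}
 */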
1853  
1854  /**
1855   * ice_copy_and_init_pkg - initialize/download a copy of the package
1856   * @hw: pointer to the hardware structure
1857   * @buf: pointer to the package buffer
1858   * @len: size of the package buffer
1859   *
1860   * This function copies the package buffer, and then calls ice_init_pkg() to
1861   * initialize the copied package contents.
1862   *
1863   * The copying is necessary if the package buffer supplied is constant, or if
1864   * the memory may disappear shortly after calling this function.
1865   *
1866   * If the package buffer resides in the data segment and can be modified, the
1867   * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
1868   *
1869   * However, if the package buffer needs to be copied first, such as when being
1870   * read from a file, the caller should use ice_copy_and_init_pkg().
1871   *
1872   * Since the package buffer is copied before ice_init_pkg() is called, the
1873   * caller is free to destroy the original package buffer immediately after
1874   * this call returns; the new copy will be managed by this function and
1875   * related routines.
1876   */
1877  enum ice_ddp_state ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf,
1878  					 u32 len)
1879  {
1880  	enum ice_ddp_state state;
1881  	u8 *buf_copy;
1882  
1883  	if (!buf || !len)
1884  		return ICE_DDP_PKG_ERR;
1885  
1886  	buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL);
1887  
1888  	state = ice_init_pkg(hw, buf_copy, len);
1889  	if (!ice_is_init_pkg_successful(state)) {
1890  		/* Free the copy, since we failed to initialize the package */
1891  		devm_kfree(ice_hw_to_dev(hw), buf_copy);
1892  	} else {
1893  		/* Track the copied pkg so we can free it later */
1894  		hw->pkg_copy = buf_copy;
1895  		hw->pkg_size = len;
1896  	}
1897  
1898  	return state;
1899  }
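
/* Usage sketch (illustrative only, not part of the driver): a DDP package
 * obtained via request_firmware() lives in a const buffer that is released
 * right after loading, so ice_copy_and_init_pkg() is the appropriate entry
 * point. example_load_ddp() and the firmware path below are hypothetical; the
 * in-tree driver performs a similar sequence when it loads the package.
 *
 *	static int example_load_ddp(struct ice_hw *hw, struct device *dev)
 *	{
 *		const struct firmware *fw;
 *		enum ice_ddp_state state;
 *		int err;
 *
 *		err = request_firmware(&fw, "intel/ice/ddp/ice.pkg", dev);
 *		if (err)
 *			return err;
 *
 *		state = ice_copy_and_init_pkg(hw, fw->data, fw->size);
 *		release_firmware(fw);
 *
 *		return ice_is_init_pkg_successful(state) ? 0 : -EIO;
 *	}
 */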
1900