1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 #include "ice_common.h"
5 #include "ice_lib.h"
6 #include "ice_sched.h"
7 #include "ice_adminq_cmd.h"
8 #include "ice_flow.h"
9 
10 #define ICE_PF_RESET_WAIT_COUNT	300
11 
12 /**
13  * ice_set_mac_type - Sets MAC type
14  * @hw: pointer to the HW structure
15  *
16  * This function sets the MAC type of the adapter based on the
17  * vendor ID and device ID stored in the HW structure.
18  */
19 static enum ice_status ice_set_mac_type(struct ice_hw *hw)
20 {
21 	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
22 		return ICE_ERR_DEVICE_NOT_SUPPORTED;
23 
24 	switch (hw->device_id) {
25 	case ICE_DEV_ID_E810C_BACKPLANE:
26 	case ICE_DEV_ID_E810C_QSFP:
27 	case ICE_DEV_ID_E810C_SFP:
28 	case ICE_DEV_ID_E810_XXV_BACKPLANE:
29 	case ICE_DEV_ID_E810_XXV_QSFP:
30 	case ICE_DEV_ID_E810_XXV_SFP:
31 		hw->mac_type = ICE_MAC_E810;
32 		break;
33 	case ICE_DEV_ID_E823C_10G_BASE_T:
34 	case ICE_DEV_ID_E823C_BACKPLANE:
35 	case ICE_DEV_ID_E823C_QSFP:
36 	case ICE_DEV_ID_E823C_SFP:
37 	case ICE_DEV_ID_E823C_SGMII:
38 	case ICE_DEV_ID_E822C_10G_BASE_T:
39 	case ICE_DEV_ID_E822C_BACKPLANE:
40 	case ICE_DEV_ID_E822C_QSFP:
41 	case ICE_DEV_ID_E822C_SFP:
42 	case ICE_DEV_ID_E822C_SGMII:
43 	case ICE_DEV_ID_E822L_10G_BASE_T:
44 	case ICE_DEV_ID_E822L_BACKPLANE:
45 	case ICE_DEV_ID_E822L_SFP:
46 	case ICE_DEV_ID_E822L_SGMII:
47 	case ICE_DEV_ID_E823L_10G_BASE_T:
48 	case ICE_DEV_ID_E823L_1GBE:
49 	case ICE_DEV_ID_E823L_BACKPLANE:
50 	case ICE_DEV_ID_E823L_QSFP:
51 	case ICE_DEV_ID_E823L_SFP:
52 		hw->mac_type = ICE_MAC_GENERIC;
53 		break;
54 	default:
55 		hw->mac_type = ICE_MAC_UNKNOWN;
56 		break;
57 	}
58 
59 	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
60 	return 0;
61 }
62 
63 /**
64  * ice_is_e810
65  * @hw: pointer to the hardware structure
66  *
67  * returns true if the device is E810 based, false if not.
68  */
69 bool ice_is_e810(struct ice_hw *hw)
70 {
71 	return hw->mac_type == ICE_MAC_E810;
72 }
73 
74 /**
75  * ice_is_e810t
76  * @hw: pointer to the hardware structure
77  *
78  * returns true if the device is E810T based, false if not.
79  */
80 bool ice_is_e810t(struct ice_hw *hw)
81 {
82 	switch (hw->device_id) {
83 	case ICE_DEV_ID_E810C_SFP:
84 		if (hw->subsystem_device_id == ICE_SUBDEV_ID_E810T ||
85 		    hw->subsystem_device_id == ICE_SUBDEV_ID_E810T2)
86 			return true;
87 		break;
88 	default:
89 		break;
90 	}
91 
92 	return false;
93 }
94 
95 /**
96  * ice_clear_pf_cfg - Clear PF configuration
97  * @hw: pointer to the hardware structure
98  *
99  * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
100  * configuration, flow director filters, etc.).
101  */
102 enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
103 {
104 	struct ice_aq_desc desc;
105 
106 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
107 
108 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
109 }
110 
111 /**
112  * ice_aq_manage_mac_read - manage MAC address read command
113  * @hw: pointer to the HW struct
114  * @buf: a virtual buffer to hold the manage MAC read response
115  * @buf_size: Size of the virtual buffer
116  * @cd: pointer to command details structure or NULL
117  *
118  * This function is used to return per PF station MAC address (0x0107).
119  * NOTE: Upon successful completion of this command, MAC address information
120  * is returned in user specified buffer. Please interpret user specified
121  * buffer as "manage_mac_read" response.
122  * Response such as various MAC addresses are stored in HW struct (port.mac)
123  * ice_discover_dev_caps is expected to be called before this function is
124  * called.
125  */
126 static enum ice_status
127 ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
128 		       struct ice_sq_cd *cd)
129 {
130 	struct ice_aqc_manage_mac_read_resp *resp;
131 	struct ice_aqc_manage_mac_read *cmd;
132 	struct ice_aq_desc desc;
133 	enum ice_status status;
134 	u16 flags;
135 	u8 i;
136 
137 	cmd = &desc.params.mac_read;
138 
139 	if (buf_size < sizeof(*resp))
140 		return ICE_ERR_BUF_TOO_SHORT;
141 
142 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
143 
144 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
145 	if (status)
146 		return status;
147 
148 	resp = buf;
149 	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
150 
151 	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
152 		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
153 		return ICE_ERR_CFG;
154 	}
155 
156 	/* A single port can report up to two (LAN and WoL) addresses */
157 	for (i = 0; i < cmd->num_addr; i++)
158 		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
159 			ether_addr_copy(hw->port_info->mac.lan_addr,
160 					resp[i].mac_addr);
161 			ether_addr_copy(hw->port_info->mac.perm_addr,
162 					resp[i].mac_addr);
163 			break;
164 		}
165 
166 	return 0;
167 }
168 
169 /**
170  * ice_aq_get_phy_caps - returns PHY capabilities
171  * @pi: port information structure
172  * @qual_mods: report qualified modules
173  * @report_mode: report mode capabilities
174  * @pcaps: structure for PHY capabilities to be filled
175  * @cd: pointer to command details structure or NULL
176  *
177  * Returns the various PHY capabilities supported on the Port (0x0600)
178  */
179 enum ice_status
180 ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
181 		    struct ice_aqc_get_phy_caps_data *pcaps,
182 		    struct ice_sq_cd *cd)
183 {
184 	struct ice_aqc_get_phy_caps *cmd;
185 	u16 pcaps_size = sizeof(*pcaps);
186 	struct ice_aq_desc desc;
187 	enum ice_status status;
188 	struct ice_hw *hw;
189 
190 	cmd = &desc.params.get_phy;
191 
192 	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
193 		return ICE_ERR_PARAM;
194 	hw = pi->hw;
195 
196 	if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
197 	    !ice_fw_supports_report_dflt_cfg(hw))
198 		return ICE_ERR_PARAM;
199 
200 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
201 
202 	if (qual_mods)
203 		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);
204 
205 	cmd->param0 |= cpu_to_le16(report_mode);
206 	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);
207 
208 	ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
209 		  report_mode);
210 	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
211 		  (unsigned long long)le64_to_cpu(pcaps->phy_type_low));
212 	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
213 		  (unsigned long long)le64_to_cpu(pcaps->phy_type_high));
214 	ice_debug(hw, ICE_DBG_LINK, "	caps = 0x%x\n", pcaps->caps);
215 	ice_debug(hw, ICE_DBG_LINK, "	low_power_ctrl_an = 0x%x\n",
216 		  pcaps->low_power_ctrl_an);
217 	ice_debug(hw, ICE_DBG_LINK, "	eee_cap = 0x%x\n", pcaps->eee_cap);
218 	ice_debug(hw, ICE_DBG_LINK, "	eeer_value = 0x%x\n",
219 		  pcaps->eeer_value);
220 	ice_debug(hw, ICE_DBG_LINK, "	link_fec_options = 0x%x\n",
221 		  pcaps->link_fec_options);
222 	ice_debug(hw, ICE_DBG_LINK, "	module_compliance_enforcement = 0x%x\n",
223 		  pcaps->module_compliance_enforcement);
224 	ice_debug(hw, ICE_DBG_LINK, "   extended_compliance_code = 0x%x\n",
225 		  pcaps->extended_compliance_code);
226 	ice_debug(hw, ICE_DBG_LINK, "   module_type[0] = 0x%x\n",
227 		  pcaps->module_type[0]);
228 	ice_debug(hw, ICE_DBG_LINK, "   module_type[1] = 0x%x\n",
229 		  pcaps->module_type[1]);
230 	ice_debug(hw, ICE_DBG_LINK, "   module_type[2] = 0x%x\n",
231 		  pcaps->module_type[2]);
232 
233 	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
234 		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
235 		pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
236 		memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
237 		       sizeof(pi->phy.link_info.module_type));
238 	}
239 
240 	return status;
241 }
242 
243 /**
244  * ice_aq_get_link_topo_handle - get link topology node return status
245  * @pi: port information structure
246  * @node_type: requested node type
247  * @cd: pointer to command details structure or NULL
248  *
249  * Get link topology node return status for specified node type (0x06E0)
250  *
251  * Node type cage can be used to determine if cage is present. If AQC
252  * returns error (ENOENT), then no cage present. If no cage present, then
253  * connection type is backplane or BASE-T.
254  */
255 static enum ice_status
256 ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
257 			    struct ice_sq_cd *cd)
258 {
259 	struct ice_aqc_get_link_topo *cmd;
260 	struct ice_aq_desc desc;
261 
262 	cmd = &desc.params.get_link_topo;
263 
264 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
265 
266 	cmd->addr.topo_params.node_type_ctx =
267 		(ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
268 		 ICE_AQC_LINK_TOPO_NODE_CTX_S);
269 
270 	/* set node type */
271 	cmd->addr.topo_params.node_type_ctx |=
272 		(ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);
273 
274 	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
275 }
276 
277 /**
278  * ice_is_media_cage_present
279  * @pi: port information structure
280  *
281  * Returns true if media cage is present, else false. If no cage, then
282  * media type is backplane or BASE-T.
283  */
284 static bool ice_is_media_cage_present(struct ice_port_info *pi)
285 {
286 	/* Node type cage can be used to determine if cage is present. If AQC
287 	 * returns error (ENOENT), then no cage present. If no cage present then
288 	 * connection type is backplane or BASE-T.
289 	 */
290 	return !ice_aq_get_link_topo_handle(pi,
291 					    ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
292 					    NULL);
293 }
294 
295 /**
296  * ice_get_media_type - Gets media type
297  * @pi: port information structure
298  */
299 static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
300 {
301 	struct ice_link_status *hw_link_info;
302 
303 	if (!pi)
304 		return ICE_MEDIA_UNKNOWN;
305 
306 	hw_link_info = &pi->phy.link_info;
307 	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
308 		/* If more than one media type is selected, report unknown */
309 		return ICE_MEDIA_UNKNOWN;
310 
311 	if (hw_link_info->phy_type_low) {
312 		/* 1G SGMII is a special case where some DA cable PHYs
313 		 * may show this as an option when it really shouldn't
314 		 * be since SGMII is meant to be between a MAC and a PHY
315 		 * in a backplane. Try to detect this case and handle it
316 		 */
317 		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
318 		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
319 		    ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
320 		    hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
321 		    ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
322 			return ICE_MEDIA_DA;
323 
324 		switch (hw_link_info->phy_type_low) {
325 		case ICE_PHY_TYPE_LOW_1000BASE_SX:
326 		case ICE_PHY_TYPE_LOW_1000BASE_LX:
327 		case ICE_PHY_TYPE_LOW_10GBASE_SR:
328 		case ICE_PHY_TYPE_LOW_10GBASE_LR:
329 		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
330 		case ICE_PHY_TYPE_LOW_25GBASE_SR:
331 		case ICE_PHY_TYPE_LOW_25GBASE_LR:
332 		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
333 		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
334 		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
335 		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
336 		case ICE_PHY_TYPE_LOW_50GBASE_SR:
337 		case ICE_PHY_TYPE_LOW_50GBASE_FR:
338 		case ICE_PHY_TYPE_LOW_50GBASE_LR:
339 		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
340 		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
341 		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
342 		case ICE_PHY_TYPE_LOW_100GBASE_DR:
343 		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
344 		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
345 		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
346 		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
347 		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
348 		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
349 		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
350 		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
351 			return ICE_MEDIA_FIBER;
352 		case ICE_PHY_TYPE_LOW_100BASE_TX:
353 		case ICE_PHY_TYPE_LOW_1000BASE_T:
354 		case ICE_PHY_TYPE_LOW_2500BASE_T:
355 		case ICE_PHY_TYPE_LOW_5GBASE_T:
356 		case ICE_PHY_TYPE_LOW_10GBASE_T:
357 		case ICE_PHY_TYPE_LOW_25GBASE_T:
358 			return ICE_MEDIA_BASET;
359 		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
360 		case ICE_PHY_TYPE_LOW_25GBASE_CR:
361 		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
362 		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
363 		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
364 		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
365 		case ICE_PHY_TYPE_LOW_50GBASE_CP:
366 		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
367 		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
368 		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
369 			return ICE_MEDIA_DA;
370 		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
371 		case ICE_PHY_TYPE_LOW_40G_XLAUI:
372 		case ICE_PHY_TYPE_LOW_50G_LAUI2:
373 		case ICE_PHY_TYPE_LOW_50G_AUI2:
374 		case ICE_PHY_TYPE_LOW_50G_AUI1:
375 		case ICE_PHY_TYPE_LOW_100G_AUI4:
376 		case ICE_PHY_TYPE_LOW_100G_CAUI4:
377 			if (ice_is_media_cage_present(pi))
378 				return ICE_MEDIA_DA;
379 			fallthrough;
380 		case ICE_PHY_TYPE_LOW_1000BASE_KX:
381 		case ICE_PHY_TYPE_LOW_2500BASE_KX:
382 		case ICE_PHY_TYPE_LOW_2500BASE_X:
383 		case ICE_PHY_TYPE_LOW_5GBASE_KR:
384 		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
385 		case ICE_PHY_TYPE_LOW_25GBASE_KR:
386 		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
387 		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
388 		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
389 		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
390 		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
391 		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
392 		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
393 			return ICE_MEDIA_BACKPLANE;
394 		}
395 	} else {
396 		switch (hw_link_info->phy_type_high) {
397 		case ICE_PHY_TYPE_HIGH_100G_AUI2:
398 		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
399 			if (ice_is_media_cage_present(pi))
400 				return ICE_MEDIA_DA;
401 			fallthrough;
402 		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
403 			return ICE_MEDIA_BACKPLANE;
404 		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
405 		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
406 			return ICE_MEDIA_FIBER;
407 		}
408 	}
409 	return ICE_MEDIA_UNKNOWN;
410 }
411 
412 /**
413  * ice_aq_get_link_info
414  * @pi: port information structure
415  * @ena_lse: enable/disable LinkStatusEvent reporting
416  * @link: pointer to link status structure - optional
417  * @cd: pointer to command details structure or NULL
418  *
419  * Get Link Status (0x607). Returns the link status of the adapter.
420  */
421 enum ice_status
422 ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
423 		     struct ice_link_status *link, struct ice_sq_cd *cd)
424 {
425 	struct ice_aqc_get_link_status_data link_data = { 0 };
426 	struct ice_aqc_get_link_status *resp;
427 	struct ice_link_status *li_old, *li;
428 	enum ice_media_type *hw_media_type;
429 	struct ice_fc_info *hw_fc_info;
430 	bool tx_pause, rx_pause;
431 	struct ice_aq_desc desc;
432 	enum ice_status status;
433 	struct ice_hw *hw;
434 	u16 cmd_flags;
435 
436 	if (!pi)
437 		return ICE_ERR_PARAM;
438 	hw = pi->hw;
439 	li_old = &pi->phy.link_info_old;
440 	hw_media_type = &pi->phy.media_type;
441 	li = &pi->phy.link_info;
442 	hw_fc_info = &pi->fc;
443 
444 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
445 	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
446 	resp = &desc.params.get_link_status;
447 	resp->cmd_flags = cpu_to_le16(cmd_flags);
448 	resp->lport_num = pi->lport;
449 
450 	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
451 
452 	if (status)
453 		return status;
454 
455 	/* save off old link status information */
456 	*li_old = *li;
457 
458 	/* update current link status information */
459 	li->link_speed = le16_to_cpu(link_data.link_speed);
460 	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
461 	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
462 	*hw_media_type = ice_get_media_type(pi);
463 	li->link_info = link_data.link_info;
464 	li->link_cfg_err = link_data.link_cfg_err;
465 	li->an_info = link_data.an_info;
466 	li->ext_info = link_data.ext_info;
467 	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
468 	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
469 	li->topo_media_conflict = link_data.topo_media_conflict;
470 	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
471 				      ICE_AQ_CFG_PACING_TYPE_M);
472 
473 	/* update fc info */
474 	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
475 	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
476 	if (tx_pause && rx_pause)
477 		hw_fc_info->current_mode = ICE_FC_FULL;
478 	else if (tx_pause)
479 		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
480 	else if (rx_pause)
481 		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
482 	else
483 		hw_fc_info->current_mode = ICE_FC_NONE;
484 
485 	li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));
486 
487 	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
488 	ice_debug(hw, ICE_DBG_LINK, "	link_speed = 0x%x\n", li->link_speed);
489 	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
490 		  (unsigned long long)li->phy_type_low);
491 	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
492 		  (unsigned long long)li->phy_type_high);
493 	ice_debug(hw, ICE_DBG_LINK, "	media_type = 0x%x\n", *hw_media_type);
494 	ice_debug(hw, ICE_DBG_LINK, "	link_info = 0x%x\n", li->link_info);
495 	ice_debug(hw, ICE_DBG_LINK, "	link_cfg_err = 0x%x\n", li->link_cfg_err);
496 	ice_debug(hw, ICE_DBG_LINK, "	an_info = 0x%x\n", li->an_info);
497 	ice_debug(hw, ICE_DBG_LINK, "	ext_info = 0x%x\n", li->ext_info);
498 	ice_debug(hw, ICE_DBG_LINK, "	fec_info = 0x%x\n", li->fec_info);
499 	ice_debug(hw, ICE_DBG_LINK, "	lse_ena = 0x%x\n", li->lse_ena);
500 	ice_debug(hw, ICE_DBG_LINK, "	max_frame = 0x%x\n",
501 		  li->max_frame_size);
502 	ice_debug(hw, ICE_DBG_LINK, "	pacing = 0x%x\n", li->pacing);
503 
504 	/* save link status information */
505 	if (link)
506 		*link = *li;
507 
508 	/* flag cleared so calling functions don't call AQ again */
509 	pi->phy.get_link_info = false;
510 
511 	return 0;
512 }
513 
514 /**
515  * ice_fill_tx_timer_and_fc_thresh
516  * @hw: pointer to the HW struct
517  * @cmd: pointer to MAC cfg structure
518  *
519  * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
520  * descriptor
521  */
522 static void
523 ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
524 				struct ice_aqc_set_mac_cfg *cmd)
525 {
526 	u16 fc_thres_val, tx_timer_val;
527 	u32 val;
528 
529 	/* We read back the transmit timer and FC threshold value of
530 	 * LFC. Thus, we will use index =
531 	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
532 	 *
533 	 * Also, because we are operating on transmit timer and FC
534 	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
535 	 */
536 #define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
537 
538 	/* Retrieve the transmit timer */
539 	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
540 	tx_timer_val = val &
541 		PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
542 	cmd->tx_tmr_value = cpu_to_le16(tx_timer_val);
543 
544 	/* Retrieve the FC threshold */
545 	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
546 	fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;
547 
548 	cmd->fc_refresh_threshold = cpu_to_le16(fc_thres_val);
549 }
550 
551 /**
552  * ice_aq_set_mac_cfg
553  * @hw: pointer to the HW struct
554  * @max_frame_size: Maximum Frame Size to be supported
555  * @cd: pointer to command details structure or NULL
556  *
557  * Set MAC configuration (0x0603)
558  */
559 enum ice_status
560 ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
561 {
562 	struct ice_aqc_set_mac_cfg *cmd;
563 	struct ice_aq_desc desc;
564 
565 	cmd = &desc.params.set_mac_cfg;
566 
567 	if (max_frame_size == 0)
568 		return ICE_ERR_PARAM;
569 
570 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);
571 
572 	cmd->max_frame_size = cpu_to_le16(max_frame_size);
573 
574 	ice_fill_tx_timer_and_fc_thresh(hw, cmd);
575 
576 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
577 }
578 
579 /**
580  * ice_init_fltr_mgmt_struct - initializes filter management list and locks
581  * @hw: pointer to the HW struct
582  */
583 static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
584 {
585 	struct ice_switch_info *sw;
586 	enum ice_status status;
587 
588 	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
589 				       sizeof(*hw->switch_info), GFP_KERNEL);
590 	sw = hw->switch_info;
591 
592 	if (!sw)
593 		return ICE_ERR_NO_MEMORY;
594 
595 	INIT_LIST_HEAD(&sw->vsi_list_map_head);
596 	sw->prof_res_bm_init = 0;
597 
598 	status = ice_init_def_sw_recp(hw);
599 	if (status) {
600 		devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
601 		return status;
602 	}
603 	return 0;
604 }
605 
606 /**
607  * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
608  * @hw: pointer to the HW struct
609  */
610 static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
611 {
612 	struct ice_switch_info *sw = hw->switch_info;
613 	struct ice_vsi_list_map_info *v_pos_map;
614 	struct ice_vsi_list_map_info *v_tmp_map;
615 	struct ice_sw_recipe *recps;
616 	u8 i;
617 
618 	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
619 				 list_entry) {
620 		list_del(&v_pos_map->list_entry);
621 		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
622 	}
623 	recps = sw->recp_list;
624 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
625 		struct ice_recp_grp_entry *rg_entry, *tmprg_entry;
626 
627 		recps[i].root_rid = i;
628 		list_for_each_entry_safe(rg_entry, tmprg_entry,
629 					 &recps[i].rg_list, l_entry) {
630 			list_del(&rg_entry->l_entry);
631 			devm_kfree(ice_hw_to_dev(hw), rg_entry);
632 		}
633 
634 		if (recps[i].adv_rule) {
635 			struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
636 			struct ice_adv_fltr_mgmt_list_entry *lst_itr;
637 
638 			mutex_destroy(&recps[i].filt_rule_lock);
639 			list_for_each_entry_safe(lst_itr, tmp_entry,
640 						 &recps[i].filt_rules,
641 						 list_entry) {
642 				list_del(&lst_itr->list_entry);
643 				devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
644 				devm_kfree(ice_hw_to_dev(hw), lst_itr);
645 			}
646 		} else {
647 			struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
648 
649 			mutex_destroy(&recps[i].filt_rule_lock);
650 			list_for_each_entry_safe(lst_itr, tmp_entry,
651 						 &recps[i].filt_rules,
652 						 list_entry) {
653 				list_del(&lst_itr->list_entry);
654 				devm_kfree(ice_hw_to_dev(hw), lst_itr);
655 			}
656 		}
657 		if (recps[i].root_buf)
658 			devm_kfree(ice_hw_to_dev(hw), recps[i].root_buf);
659 	}
660 	ice_rm_all_sw_replay_rule_info(hw);
661 	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
662 	devm_kfree(ice_hw_to_dev(hw), sw);
663 }
664 
665 /**
666  * ice_get_fw_log_cfg - get FW logging configuration
667  * @hw: pointer to the HW struct
668  */
669 static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
670 {
671 	struct ice_aq_desc desc;
672 	enum ice_status status;
673 	__le16 *config;
674 	u16 size;
675 
676 	size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
677 	config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
678 	if (!config)
679 		return ICE_ERR_NO_MEMORY;
680 
681 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);
682 
683 	status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
684 	if (!status) {
685 		u16 i;
686 
687 		/* Save FW logging information into the HW structure */
688 		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
689 			u16 v, m, flgs;
690 
691 			v = le16_to_cpu(config[i]);
692 			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
693 			flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;
694 
695 			if (m < ICE_AQC_FW_LOG_ID_MAX)
696 				hw->fw_log.evnts[m].cur = flgs;
697 		}
698 	}
699 
700 	devm_kfree(ice_hw_to_dev(hw), config);
701 
702 	return status;
703 }
704 
705 /**
706  * ice_cfg_fw_log - configure FW logging
707  * @hw: pointer to the HW struct
708  * @enable: enable certain FW logging events if true, disable all if false
709  *
710  * This function enables/disables the FW logging via Rx CQ events and a UART
711  * port based on predetermined configurations. FW logging via the Rx CQ can be
712  * enabled/disabled for individual PF's. However, FW logging via the UART can
713  * only be enabled/disabled for all PFs on the same device.
714  *
715  * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
716  * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
717  * before initializing the device.
718  *
719  * When re/configuring FW logging, callers need to update the "cfg" elements of
720  * the hw->fw_log.evnts array with the desired logging event configurations for
721  * modules of interest. When disabling FW logging completely, the callers can
722  * just pass false in the "enable" parameter. On completion, the function will
723  * update the "cur" element of the hw->fw_log.evnts array with the resulting
724  * logging event configurations of the modules that are being re/configured. FW
725  * logging modules that are not part of a reconfiguration operation retain their
726  * previous states.
727  *
728  * Before resetting the device, it is recommended that the driver disables FW
729  * logging before shutting down the control queue. When disabling FW logging
730  * ("enable" = false), the latest configurations of FW logging events stored in
731  * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
732  * a device reset.
733  *
734  * When enabling FW logging to emit log messages via the Rx CQ during the
735  * device's initialization phase, a mechanism alternative to interrupt handlers
736  * needs to be used to extract FW log messages from the Rx CQ periodically and
737  * to prevent the Rx CQ from being full and stalling other types of control
738  * messages from FW to SW. Interrupts are typically disabled during the device's
739  * initialization phase.
740  */
741 static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
742 {
743 	struct ice_aqc_fw_logging *cmd;
744 	enum ice_status status = 0;
745 	u16 i, chgs = 0, len = 0;
746 	struct ice_aq_desc desc;
747 	__le16 *data = NULL;
748 	u8 actv_evnts = 0;
749 	void *buf = NULL;
750 
751 	if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
752 		return 0;
753 
754 	/* Disable FW logging only when the control queue is still responsive */
755 	if (!enable &&
756 	    (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
757 		return 0;
758 
759 	/* Get current FW log settings */
760 	status = ice_get_fw_log_cfg(hw);
761 	if (status)
762 		return status;
763 
764 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
765 	cmd = &desc.params.fw_logging;
766 
767 	/* Indicate which controls are valid */
768 	if (hw->fw_log.cq_en)
769 		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;
770 
771 	if (hw->fw_log.uart_en)
772 		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;
773 
774 	if (enable) {
775 		/* Fill in an array of entries with FW logging modules and
776 		 * logging events being reconfigured.
777 		 */
778 		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
779 			u16 val;
780 
781 			/* Keep track of enabled event types */
782 			actv_evnts |= hw->fw_log.evnts[i].cfg;
783 
784 			if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
785 				continue;
786 
787 			if (!data) {
788 				data = devm_kcalloc(ice_hw_to_dev(hw),
789 						    ICE_AQC_FW_LOG_ID_MAX,
790 						    sizeof(*data),
791 						    GFP_KERNEL);
792 				if (!data)
793 					return ICE_ERR_NO_MEMORY;
794 			}
795 
796 			val = i << ICE_AQC_FW_LOG_ID_S;
797 			val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
798 			data[chgs++] = cpu_to_le16(val);
799 		}
800 
801 		/* Only enable FW logging if at least one module is specified.
802 		 * If FW logging is currently enabled but all modules are not
803 		 * enabled to emit log messages, disable FW logging altogether.
804 		 */
805 		if (actv_evnts) {
806 			/* Leave if there is effectively no change */
807 			if (!chgs)
808 				goto out;
809 
810 			if (hw->fw_log.cq_en)
811 				cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;
812 
813 			if (hw->fw_log.uart_en)
814 				cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;
815 
816 			buf = data;
817 			len = sizeof(*data) * chgs;
818 			desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
819 		}
820 	}
821 
822 	status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
823 	if (!status) {
824 		/* Update the current configuration to reflect events enabled.
825 		 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
826 		 * logging mode is enabled for the device. They do not reflect
827 		 * actual modules being enabled to emit log messages. So, their
828 		 * values remain unchanged even when all modules are disabled.
829 		 */
830 		u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;
831 
832 		hw->fw_log.actv_evnts = actv_evnts;
833 		for (i = 0; i < cnt; i++) {
834 			u16 v, m;
835 
836 			if (!enable) {
837 				/* When disabling all FW logging events as part
838 				 * of device's de-initialization, the original
839 				 * configurations are retained, and can be used
840 				 * to reconfigure FW logging later if the device
841 				 * is re-initialized.
842 				 */
843 				hw->fw_log.evnts[i].cur = 0;
844 				continue;
845 			}
846 
847 			v = le16_to_cpu(data[i]);
848 			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
849 			hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
850 		}
851 	}
852 
853 out:
854 	if (data)
855 		devm_kfree(ice_hw_to_dev(hw), data);
856 
857 	return status;
858 }
859 
860 /**
861  * ice_output_fw_log
862  * @hw: pointer to the HW struct
863  * @desc: pointer to the AQ message descriptor
864  * @buf: pointer to the buffer accompanying the AQ message
865  *
866  * Formats a FW Log message and outputs it via the standard driver logs.
867  */
868 void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
869 {
870 	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
871 	ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
872 			le16_to_cpu(desc->datalen));
873 	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
874 }
875 
876 /**
877  * ice_get_itr_intrl_gran
878  * @hw: pointer to the HW struct
879  *
880  * Determines the ITR/INTRL granularities based on the maximum aggregate
881  * bandwidth according to the device's configuration during power-on.
882  */
883 static void ice_get_itr_intrl_gran(struct ice_hw *hw)
884 {
885 	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
886 			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
887 			GL_PWR_MODE_CTL_CAR_MAX_BW_S;
888 
889 	switch (max_agg_bw) {
890 	case ICE_MAX_AGG_BW_200G:
891 	case ICE_MAX_AGG_BW_100G:
892 	case ICE_MAX_AGG_BW_50G:
893 		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
894 		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
895 		break;
896 	case ICE_MAX_AGG_BW_25G:
897 		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
898 		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
899 		break;
900 	}
901 }
902 
903 /**
904  * ice_init_hw - main hardware initialization routine
905  * @hw: pointer to the hardware structure
906  */
907 enum ice_status ice_init_hw(struct ice_hw *hw)
908 {
909 	struct ice_aqc_get_phy_caps_data *pcaps;
910 	enum ice_status status;
911 	u16 mac_buf_len;
912 	void *mac_buf;
913 
914 	/* Set MAC type based on DeviceID */
915 	status = ice_set_mac_type(hw);
916 	if (status)
917 		return status;
918 
919 	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
920 			 PF_FUNC_RID_FUNC_NUM_M) >>
921 		PF_FUNC_RID_FUNC_NUM_S;
922 
923 	status = ice_reset(hw, ICE_RESET_PFR);
924 	if (status)
925 		return status;
926 
927 	ice_get_itr_intrl_gran(hw);
928 
929 	status = ice_create_all_ctrlq(hw);
930 	if (status)
931 		goto err_unroll_cqinit;
932 
933 	/* Enable FW logging. Not fatal if this fails. */
934 	status = ice_cfg_fw_log(hw, true);
935 	if (status)
936 		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");
937 
938 	status = ice_clear_pf_cfg(hw);
939 	if (status)
940 		goto err_unroll_cqinit;
941 
942 	/* Set bit to enable Flow Director filters */
943 	wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
944 	INIT_LIST_HEAD(&hw->fdir_list_head);
945 
946 	ice_clear_pxe_mode(hw);
947 
948 	status = ice_init_nvm(hw);
949 	if (status)
950 		goto err_unroll_cqinit;
951 
952 	status = ice_get_caps(hw);
953 	if (status)
954 		goto err_unroll_cqinit;
955 
956 	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
957 				     sizeof(*hw->port_info), GFP_KERNEL);
958 	if (!hw->port_info) {
959 		status = ICE_ERR_NO_MEMORY;
960 		goto err_unroll_cqinit;
961 	}
962 
963 	/* set the back pointer to HW */
964 	hw->port_info->hw = hw;
965 
966 	/* Initialize port_info struct with switch configuration data */
967 	status = ice_get_initial_sw_cfg(hw);
968 	if (status)
969 		goto err_unroll_alloc;
970 
971 	hw->evb_veb = true;
972 
973 	/* Query the allocated resources for Tx scheduler */
974 	status = ice_sched_query_res_alloc(hw);
975 	if (status) {
976 		ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
977 		goto err_unroll_alloc;
978 	}
979 	ice_sched_get_psm_clk_freq(hw);
980 
981 	/* Initialize port_info struct with scheduler data */
982 	status = ice_sched_init_port(hw->port_info);
983 	if (status)
984 		goto err_unroll_sched;
985 
986 	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
987 	if (!pcaps) {
988 		status = ICE_ERR_NO_MEMORY;
989 		goto err_unroll_sched;
990 	}
991 
992 	/* Initialize port_info struct with PHY capabilities */
993 	status = ice_aq_get_phy_caps(hw->port_info, false,
994 				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
995 				     NULL);
996 	devm_kfree(ice_hw_to_dev(hw), pcaps);
997 	if (status)
998 		dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
999 			 status);
1000 
1001 	/* Initialize port_info struct with link information */
1002 	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
1003 	if (status)
1004 		goto err_unroll_sched;
1005 
1006 	/* need a valid SW entry point to build a Tx tree */
1007 	if (!hw->sw_entry_point_layer) {
1008 		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
1009 		status = ICE_ERR_CFG;
1010 		goto err_unroll_sched;
1011 	}
1012 	INIT_LIST_HEAD(&hw->agg_list);
1013 	/* Initialize max burst size */
1014 	if (!hw->max_burst_size)
1015 		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
1016 
1017 	status = ice_init_fltr_mgmt_struct(hw);
1018 	if (status)
1019 		goto err_unroll_sched;
1020 
1021 	/* Get MAC information */
1022 	/* A single port can report up to two (LAN and WoL) addresses */
1023 	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
1024 			       sizeof(struct ice_aqc_manage_mac_read_resp),
1025 			       GFP_KERNEL);
1026 	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
1027 
1028 	if (!mac_buf) {
1029 		status = ICE_ERR_NO_MEMORY;
1030 		goto err_unroll_fltr_mgmt_struct;
1031 	}
1032 
1033 	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
1034 	devm_kfree(ice_hw_to_dev(hw), mac_buf);
1035 
1036 	if (status)
1037 		goto err_unroll_fltr_mgmt_struct;
1038 	/* enable jumbo frame support at MAC level */
1039 	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
1040 	if (status)
1041 		goto err_unroll_fltr_mgmt_struct;
1042 	/* Obtain counter base index which would be used by flow director */
1043 	status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
1044 	if (status)
1045 		goto err_unroll_fltr_mgmt_struct;
1046 	status = ice_init_hw_tbls(hw);
1047 	if (status)
1048 		goto err_unroll_fltr_mgmt_struct;
1049 	mutex_init(&hw->tnl_lock);
1050 	return 0;
1051 
1052 err_unroll_fltr_mgmt_struct:
1053 	ice_cleanup_fltr_mgmt_struct(hw);
1054 err_unroll_sched:
1055 	ice_sched_cleanup_all(hw);
1056 err_unroll_alloc:
1057 	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
1058 err_unroll_cqinit:
1059 	ice_destroy_all_ctrlq(hw);
1060 	return status;
1061 }
1062 
1063 /**
1064  * ice_deinit_hw - unroll initialization operations done by ice_init_hw
1065  * @hw: pointer to the hardware structure
1066  *
1067  * This should be called only during nominal operation, not as a result of
1068  * ice_init_hw() failing since ice_init_hw() will take care of unrolling
1069  * applicable initializations if it fails for any reason.
1070  */
1071 void ice_deinit_hw(struct ice_hw *hw)
1072 {
1073 	ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
1074 	ice_cleanup_fltr_mgmt_struct(hw);
1075 
1076 	ice_sched_cleanup_all(hw);
1077 	ice_sched_clear_agg(hw);
1078 	ice_free_seg(hw);
1079 	ice_free_hw_tbls(hw);
1080 	mutex_destroy(&hw->tnl_lock);
1081 
1082 	if (hw->port_info) {
1083 		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
1084 		hw->port_info = NULL;
1085 	}
1086 
1087 	/* Attempt to disable FW logging before shutting down control queues */
1088 	ice_cfg_fw_log(hw, false);
1089 	ice_destroy_all_ctrlq(hw);
1090 
1091 	/* Clear VSI contexts if not already cleared */
1092 	ice_clear_all_vsi_ctx(hw);
1093 }
1094 
1095 /**
1096  * ice_check_reset - Check to see if a global reset is complete
1097  * @hw: pointer to the hardware structure
1098  */
1099 enum ice_status ice_check_reset(struct ice_hw *hw)
1100 {
1101 	u32 cnt, reg = 0, grst_timeout, uld_mask;
1102 
1103 	/* Poll for Device Active state in case a recent CORER, GLOBR,
1104 	 * or EMPR has occurred. The grst delay value is in 100ms units.
1105 	 * Add 1sec for outstanding AQ commands that can take a long time.
1106 	 */
1107 	grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
1108 			GLGEN_RSTCTL_GRSTDEL_S) + 10;
1109 
1110 	for (cnt = 0; cnt < grst_timeout; cnt++) {
1111 		mdelay(100);
1112 		reg = rd32(hw, GLGEN_RSTAT);
1113 		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
1114 			break;
1115 	}
1116 
1117 	if (cnt == grst_timeout) {
1118 		ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
1119 		return ICE_ERR_RESET_FAILED;
1120 	}
1121 
1122 #define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
1123 				 GLNVM_ULD_PCIER_DONE_1_M |\
1124 				 GLNVM_ULD_CORER_DONE_M |\
1125 				 GLNVM_ULD_GLOBR_DONE_M |\
1126 				 GLNVM_ULD_POR_DONE_M |\
1127 				 GLNVM_ULD_POR_DONE_1_M |\
1128 				 GLNVM_ULD_PCIER_DONE_2_M)
1129 
1130 	uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.rdma ?
1131 					  GLNVM_ULD_PE_DONE_M : 0);
1132 
1133 	/* Device is Active; check Global Reset processes are done */
1134 	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
1135 		reg = rd32(hw, GLNVM_ULD) & uld_mask;
1136 		if (reg == uld_mask) {
1137 			ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
1138 			break;
1139 		}
1140 		mdelay(10);
1141 	}
1142 
1143 	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
1144 		ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
1145 			  reg);
1146 		return ICE_ERR_RESET_FAILED;
1147 	}
1148 
1149 	return 0;
1150 }
1151 
1152 /**
1153  * ice_pf_reset - Reset the PF
1154  * @hw: pointer to the hardware structure
1155  *
1156  * If a global reset has been triggered, this function checks
1157  * for its completion and then issues the PF reset
1158  */
1159 static enum ice_status ice_pf_reset(struct ice_hw *hw)
1160 {
1161 	u32 cnt, reg;
1162 
1163 	/* If at function entry a global reset was already in progress, i.e.
1164 	 * state is not 'device active' or any of the reset done bits are not
1165 	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
1166 	 * global reset is done.
1167 	 */
1168 	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
1169 	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
1170 		/* poll on global reset currently in progress until done */
1171 		if (ice_check_reset(hw))
1172 			return ICE_ERR_RESET_FAILED;
1173 
1174 		return 0;
1175 	}
1176 
1177 	/* Reset the PF */
1178 	reg = rd32(hw, PFGEN_CTRL);
1179 
1180 	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
1181 
1182 	/* Wait for the PFR to complete. The wait time is the global config lock
1183 	 * timeout plus the PFR timeout which will account for a possible reset
1184 	 * that is occurring during a download package operation.
1185 	 */
1186 	for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
1187 	     ICE_PF_RESET_WAIT_COUNT; cnt++) {
1188 		reg = rd32(hw, PFGEN_CTRL);
1189 		if (!(reg & PFGEN_CTRL_PFSWR_M))
1190 			break;
1191 
1192 		mdelay(1);
1193 	}
1194 
1195 	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
1196 		ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
1197 		return ICE_ERR_RESET_FAILED;
1198 	}
1199 
1200 	return 0;
1201 }
1202 
1203 /**
1204  * ice_reset - Perform different types of reset
1205  * @hw: pointer to the hardware structure
1206  * @req: reset request
1207  *
1208  * This function triggers a reset as specified by the req parameter.
1209  *
1210  * Note:
1211  * If anything other than a PF reset is triggered, PXE mode is restored.
1212  * This has to be cleared using ice_clear_pxe_mode again, once the AQ
1213  * interface has been restored in the rebuild flow.
1214  */
1215 enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
1216 {
1217 	u32 val = 0;
1218 
1219 	switch (req) {
1220 	case ICE_RESET_PFR:
1221 		return ice_pf_reset(hw);
1222 	case ICE_RESET_CORER:
1223 		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
1224 		val = GLGEN_RTRIG_CORER_M;
1225 		break;
1226 	case ICE_RESET_GLOBR:
1227 		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
1228 		val = GLGEN_RTRIG_GLOBR_M;
1229 		break;
1230 	default:
1231 		return ICE_ERR_PARAM;
1232 	}
1233 
1234 	val |= rd32(hw, GLGEN_RTRIG);
1235 	wr32(hw, GLGEN_RTRIG, val);
1236 	ice_flush(hw);
1237 
1238 	/* wait for the FW to be ready */
1239 	return ice_check_reset(hw);
1240 }
1241 
1242 /**
1243  * ice_copy_rxq_ctx_to_hw
1244  * @hw: pointer to the hardware structure
1245  * @ice_rxq_ctx: pointer to the rxq context
1246  * @rxq_index: the index of the Rx queue
1247  *
1248  * Copies rxq context from dense structure to HW register space
1249  */
1250 static enum ice_status
1251 ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
1252 {
1253 	u8 i;
1254 
1255 	if (!ice_rxq_ctx)
1256 		return ICE_ERR_BAD_PTR;
1257 
1258 	if (rxq_index > QRX_CTRL_MAX_INDEX)
1259 		return ICE_ERR_PARAM;
1260 
1261 	/* Copy each dword separately to HW */
1262 	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
1263 		wr32(hw, QRX_CONTEXT(i, rxq_index),
1264 		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1265 
1266 		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
1267 			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1268 	}
1269 
1270 	return 0;
1271 }
1272 
1273 /* LAN Rx Queue Context */
1274 static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
1275 	/* Field		Width	LSB */
1276 	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
1277 	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
1278 	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
1279 	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
1280 	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
1281 	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
1282 	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
1283 	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
1284 	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
1285 	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
1286 	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
1287 	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
1288 	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
1289 	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
1290 	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
1291 	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
1292 	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
1293 	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
1294 	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
1295 	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
1296 	{ 0 }
1297 };
1298 
1299 /**
1300  * ice_write_rxq_ctx
1301  * @hw: pointer to the hardware structure
1302  * @rlan_ctx: pointer to the rxq context
1303  * @rxq_index: the index of the Rx queue
1304  *
1305  * Converts rxq context from sparse to dense structure and then writes
1306  * it to HW register space and enables the hardware to prefetch descriptors
1307  * instead of only fetching them on demand
1308  */
1309 enum ice_status
1310 ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
1311 		  u32 rxq_index)
1312 {
1313 	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
1314 
1315 	if (!rlan_ctx)
1316 		return ICE_ERR_BAD_PTR;
1317 
1318 	rlan_ctx->prefena = 1;
1319 
1320 	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
1321 	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
1322 }
1323 
1324 /* LAN Tx Queue Context */
1325 const struct ice_ctx_ele ice_tlan_ctx_info[] = {
1326 				    /* Field			Width	LSB */
1327 	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
1328 	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
1329 	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
1330 	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
1331 	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
1332 	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
1333 	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
1334 	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
1335 	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
1336 	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
1337 	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
1338 	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
1339 	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
1340 	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
1341 	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
1342 	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
1343 	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
1344 	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
1345 	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
1346 	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
1347 	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
1348 	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
1349 	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
1350 	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
1351 	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
1352 	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
1353 	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
1354 	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
1355 	{ 0 }
1356 };
1357 
1358 /* Sideband Queue command wrappers */
1359 
1360 /**
1361  * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
1362  * @hw: pointer to the HW struct
1363  * @desc: descriptor describing the command
1364  * @buf: buffer to use for indirect commands (NULL for direct commands)
1365  * @buf_size: size of buffer for indirect commands (0 for direct commands)
1366  * @cd: pointer to command details structure
1367  */
1368 static int
1369 ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
1370 		 void *buf, u16 buf_size, struct ice_sq_cd *cd)
1371 {
1372 	return ice_status_to_errno(ice_sq_send_cmd(hw, ice_get_sbq(hw),
1373 						   (struct ice_aq_desc *)desc,
1374 						   buf, buf_size, cd));
1375 }
1376 
1377 /**
1378  * ice_sbq_rw_reg - Fill Sideband Queue command
1379  * @hw: pointer to the HW struct
1380  * @in: message info to be filled in descriptor
1381  */
1382 int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
1383 {
1384 	struct ice_sbq_cmd_desc desc = {0};
1385 	struct ice_sbq_msg_req msg = {0};
1386 	u16 msg_len;
1387 	int status;
1388 
1389 	msg_len = sizeof(msg);
1390 
1391 	msg.dest_dev = in->dest_dev;
1392 	msg.opcode = in->opcode;
1393 	msg.flags = ICE_SBQ_MSG_FLAGS;
1394 	msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
1395 	msg.msg_addr_low = cpu_to_le16(in->msg_addr_low);
1396 	msg.msg_addr_high = cpu_to_le32(in->msg_addr_high);
1397 
1398 	if (in->opcode)
1399 		msg.data = cpu_to_le32(in->data);
1400 	else
1401 		/* data read comes back in completion, so shorten the struct by
1402 		 * sizeof(msg.data)
1403 		 */
1404 		msg_len -= sizeof(msg.data);
1405 
1406 	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
1407 	desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
1408 	desc.param0.cmd_len = cpu_to_le16(msg_len);
1409 	status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
1410 	if (!status && !in->opcode)
1411 		in->data = le32_to_cpu
1412 			(((struct ice_sbq_msg_cmpl *)&msg)->data);
1413 	return status;
1414 }
1415 
1416 /* FW Admin Queue command wrappers */
1417 
1418 /* Software lock/mutex that is meant to be held while the Global Config Lock
1419  * in firmware is acquired by the software to prevent most (but not all) types
1420  * of AQ commands from being sent to FW
1421  */
1422 DEFINE_MUTEX(ice_global_cfg_lock_sw);
1423 
1424 /**
1425  * ice_should_retry_sq_send_cmd
1426  * @opcode: AQ opcode
1427  *
1428  * Decide if we should retry the send command routine for the ATQ, depending
1429  * on the opcode.
1430  */
1431 static bool ice_should_retry_sq_send_cmd(u16 opcode)
1432 {
1433 	switch (opcode) {
1434 	case ice_aqc_opc_get_link_topo:
1435 	case ice_aqc_opc_lldp_stop:
1436 	case ice_aqc_opc_lldp_start:
1437 	case ice_aqc_opc_lldp_filter_ctrl:
1438 		return true;
1439 	}
1440 
1441 	return false;
1442 }
1443 
1444 /**
1445  * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
1446  * @hw: pointer to the HW struct
1447  * @cq: pointer to the specific Control queue
1448  * @desc: prefilled descriptor describing the command
1449  * @buf: buffer to use for indirect commands (or NULL for direct commands)
1450  * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
1451  * @cd: pointer to command details structure
1452  *
1453  * Retry sending the FW Admin Queue command, multiple times, to the FW Admin
1454  * Queue if the EBUSY AQ error is returned.
1455  */
1456 static enum ice_status
1457 ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1458 		      struct ice_aq_desc *desc, void *buf, u16 buf_size,
1459 		      struct ice_sq_cd *cd)
1460 {
1461 	struct ice_aq_desc desc_cpy;
1462 	enum ice_status status;
1463 	bool is_cmd_for_retry;
1464 	u8 *buf_cpy = NULL;
1465 	u8 idx = 0;
1466 	u16 opcode;
1467 
1468 	opcode = le16_to_cpu(desc->opcode);
1469 	is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
1470 	memset(&desc_cpy, 0, sizeof(desc_cpy));
1471 
1472 	if (is_cmd_for_retry) {
1473 		if (buf) {
1474 			buf_cpy = kzalloc(buf_size, GFP_KERNEL);
1475 			if (!buf_cpy)
1476 				return ICE_ERR_NO_MEMORY;
1477 		}
1478 
1479 		memcpy(&desc_cpy, desc, sizeof(desc_cpy));
1480 	}
1481 
1482 	do {
1483 		status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);
1484 
1485 		if (!is_cmd_for_retry || !status ||
1486 		    hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
1487 			break;
1488 
1489 		if (buf_cpy)
1490 			memcpy(buf, buf_cpy, buf_size);
1491 
1492 		memcpy(desc, &desc_cpy, sizeof(desc_cpy));
1493 
1494 		mdelay(ICE_SQ_SEND_DELAY_TIME_MS);
1495 
1496 	} while (++idx < ICE_SQ_SEND_MAX_EXECUTE);
1497 
1498 	kfree(buf_cpy);
1499 
1500 	return status;
1501 }
1502 
1503 /**
1504  * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
1505  * @hw: pointer to the HW struct
1506  * @desc: descriptor describing the command
1507  * @buf: buffer to use for indirect commands (NULL for direct commands)
1508  * @buf_size: size of buffer for indirect commands (0 for direct commands)
1509  * @cd: pointer to command details structure
1510  *
1511  * Helper function to send FW Admin Queue commands to the FW Admin Queue.
1512  */
1513 enum ice_status
1514 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1515 		u16 buf_size, struct ice_sq_cd *cd)
1516 {
1517 	struct ice_aqc_req_res *cmd = &desc->params.res_owner;
1518 	bool lock_acquired = false;
1519 	enum ice_status status;
1520 
1521 	/* When a package download is in process (i.e. when the firmware's
1522 	 * Global Configuration Lock resource is held), only the Download
1523 	 * Package, Get Version, Get Package Info List and Release Resource
1524 	 * (with resource ID set to Global Config Lock) AdminQ commands are
1525 	 * allowed; all others must block until the package download completes
1526 	 * and the Global Config Lock is released.  See also
1527 	 * ice_acquire_global_cfg_lock().
1528 	 */
1529 	switch (le16_to_cpu(desc->opcode)) {
1530 	case ice_aqc_opc_download_pkg:
1531 	case ice_aqc_opc_get_pkg_info_list:
1532 	case ice_aqc_opc_get_ver:
1533 		break;
1534 	case ice_aqc_opc_release_res:
1535 		if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
1536 			break;
1537 		fallthrough;
1538 	default:
1539 		mutex_lock(&ice_global_cfg_lock_sw);
1540 		lock_acquired = true;
1541 		break;
1542 	}
1543 
1544 	status = ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
1545 	if (lock_acquired)
1546 		mutex_unlock(&ice_global_cfg_lock_sw);
1547 
1548 	return status;
1549 }
1550 
1551 /**
1552  * ice_aq_get_fw_ver
1553  * @hw: pointer to the HW struct
1554  * @cd: pointer to command details structure or NULL
1555  *
1556  * Get the firmware version (0x0001) from the admin queue commands
1557  */
1558 enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1559 {
1560 	struct ice_aqc_get_ver *resp;
1561 	struct ice_aq_desc desc;
1562 	enum ice_status status;
1563 
1564 	resp = &desc.params.get_ver;
1565 
1566 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1567 
1568 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1569 
1570 	if (!status) {
1571 		hw->fw_branch = resp->fw_branch;
1572 		hw->fw_maj_ver = resp->fw_major;
1573 		hw->fw_min_ver = resp->fw_minor;
1574 		hw->fw_patch = resp->fw_patch;
1575 		hw->fw_build = le32_to_cpu(resp->fw_build);
1576 		hw->api_branch = resp->api_branch;
1577 		hw->api_maj_ver = resp->api_major;
1578 		hw->api_min_ver = resp->api_minor;
1579 		hw->api_patch = resp->api_patch;
1580 	}
1581 
1582 	return status;
1583 }
1584 
1585 /**
1586  * ice_aq_send_driver_ver
1587  * @hw: pointer to the HW struct
1588  * @dv: driver's major, minor version
1589  * @cd: pointer to command details structure or NULL
1590  *
1591  * Send the driver version (0x0002) to the firmware
1592  */
1593 enum ice_status
1594 ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
1595 		       struct ice_sq_cd *cd)
1596 {
1597 	struct ice_aqc_driver_ver *cmd;
1598 	struct ice_aq_desc desc;
1599 	u16 len;
1600 
1601 	cmd = &desc.params.driver_ver;
1602 
1603 	if (!dv)
1604 		return ICE_ERR_PARAM;
1605 
1606 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
1607 
1608 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1609 	cmd->major_ver = dv->major_ver;
1610 	cmd->minor_ver = dv->minor_ver;
1611 	cmd->build_ver = dv->build_ver;
1612 	cmd->subbuild_ver = dv->subbuild_ver;
1613 
1614 	len = 0;
1615 	while (len < sizeof(dv->driver_string) &&
1616 	       isascii(dv->driver_string[len]) && dv->driver_string[len])
1617 		len++;
1618 
1619 	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
1620 }
1621 
1622 /**
1623  * ice_aq_q_shutdown
1624  * @hw: pointer to the HW struct
1625  * @unloading: is the driver unloading itself
1626  *
1627  * Tell the Firmware that we're shutting down the AdminQ and whether
1628  * or not the driver is unloading as well (0x0003).
1629  */
1630 enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
1631 {
1632 	struct ice_aqc_q_shutdown *cmd;
1633 	struct ice_aq_desc desc;
1634 
1635 	cmd = &desc.params.q_shutdown;
1636 
1637 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1638 
1639 	if (unloading)
1640 		cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
1641 
1642 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1643 }
1644 
1645 /**
1646  * ice_aq_req_res
1647  * @hw: pointer to the HW struct
1648  * @res: resource ID
1649  * @access: access type
1650  * @sdp_number: resource number
1651  * @timeout: the maximum time in ms that the driver may hold the resource
1652  * @cd: pointer to command details structure or NULL
1653  *
1654  * Requests common resource using the admin queue commands (0x0008).
1655  * When attempting to acquire the Global Config Lock, the driver can
1656  * learn of three states:
1657  *  1) ICE_SUCCESS -        acquired lock, and can perform download package
1658  *  2) ICE_ERR_AQ_ERROR -   did not get lock, driver should fail to load
1659  *  3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
1660  *                          successfully downloaded the package; the driver does
1661  *                          not have to download the package and can continue
1662  *                          loading
1663  *
1664  * Note that if the caller is in an acquire lock, perform action, release lock
1665  * phase of operation, it is possible that the FW may detect a timeout and issue
1666  * a CORER. In this case, the driver will receive a CORER interrupt and will
1667  * have to determine its cause. The calling thread that is handling this flow
1668  * will likely get an error propagated back to it indicating the Download
1669  * Package, Update Package or the Release Resource AQ commands timed out.
1670  */
1671 static enum ice_status
1672 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1673 	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
1674 	       struct ice_sq_cd *cd)
1675 {
1676 	struct ice_aqc_req_res *cmd_resp;
1677 	struct ice_aq_desc desc;
1678 	enum ice_status status;
1679 
1680 	cmd_resp = &desc.params.res_owner;
1681 
1682 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
1683 
1684 	cmd_resp->res_id = cpu_to_le16(res);
1685 	cmd_resp->access_type = cpu_to_le16(access);
1686 	cmd_resp->res_number = cpu_to_le32(sdp_number);
1687 	cmd_resp->timeout = cpu_to_le32(*timeout);
1688 	*timeout = 0;
1689 
1690 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1691 
1692 	/* The completion specifies the maximum time in ms that the driver
1693 	 * may hold the resource in the Timeout field.
1694 	 */
1695 
1696 	/* Global config lock response utilizes an additional status field.
1697 	 *
1698 	 * If the Global config lock resource is held by some other driver, the
1699 	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
1700 	 * and the timeout field indicates the maximum time the current owner
1701 	 * of the resource has to free it.
1702 	 */
1703 	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1704 		if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1705 			*timeout = le32_to_cpu(cmd_resp->timeout);
1706 			return 0;
1707 		} else if (le16_to_cpu(cmd_resp->status) ==
1708 			   ICE_AQ_RES_GLBL_IN_PROG) {
1709 			*timeout = le32_to_cpu(cmd_resp->timeout);
1710 			return ICE_ERR_AQ_ERROR;
1711 		} else if (le16_to_cpu(cmd_resp->status) ==
1712 			   ICE_AQ_RES_GLBL_DONE) {
1713 			return ICE_ERR_AQ_NO_WORK;
1714 		}
1715 
1716 		/* invalid FW response, force a timeout immediately */
1717 		*timeout = 0;
1718 		return ICE_ERR_AQ_ERROR;
1719 	}
1720 
1721 	/* If the resource is held by some other driver, the command completes
1722 	 * with a busy return value and the timeout field indicates the maximum
1723 	 * time the current owner of the resource has to free it.
1724 	 */
1725 	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1726 		*timeout = le32_to_cpu(cmd_resp->timeout);
1727 
1728 	return status;
1729 }
1730 
1731 /**
1732  * ice_aq_release_res
1733  * @hw: pointer to the HW struct
1734  * @res: resource ID
1735  * @sdp_number: resource number
1736  * @cd: pointer to command details structure or NULL
1737  *
1738  * release common resource using the admin queue commands (0x0009)
1739  */
1740 static enum ice_status
1741 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1742 		   struct ice_sq_cd *cd)
1743 {
1744 	struct ice_aqc_req_res *cmd;
1745 	struct ice_aq_desc desc;
1746 
1747 	cmd = &desc.params.res_owner;
1748 
1749 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1750 
1751 	cmd->res_id = cpu_to_le16(res);
1752 	cmd->res_number = cpu_to_le32(sdp_number);
1753 
1754 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1755 }
1756 
1757 /**
1758  * ice_acquire_res
1759  * @hw: pointer to the HW structure
1760  * @res: resource ID
1761  * @access: access type (read or write)
1762  * @timeout: timeout in milliseconds
1763  *
1764  * This function will attempt to acquire the ownership of a resource.
1765  */
1766 enum ice_status
1767 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1768 		enum ice_aq_res_access_type access, u32 timeout)
1769 {
1770 #define ICE_RES_POLLING_DELAY_MS	10
1771 	u32 delay = ICE_RES_POLLING_DELAY_MS;
1772 	u32 time_left = timeout;
1773 	enum ice_status status;
1774 
1775 	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1776 
1777 	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
1778 	 * previously acquired the resource and performed any necessary updates;
1779 	 * in this case the caller does not obtain the resource and has no
1780 	 * further work to do.
1781 	 */
1782 	if (status == ICE_ERR_AQ_NO_WORK)
1783 		goto ice_acquire_res_exit;
1784 
1785 	if (status)
1786 		ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);
1787 
1788 	/* If necessary, poll until the current lock owner timeouts */
1789 	timeout = time_left;
1790 	while (status && timeout && time_left) {
1791 		mdelay(delay);
1792 		timeout = (timeout > delay) ? timeout - delay : 0;
1793 		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1794 
1795 		if (status == ICE_ERR_AQ_NO_WORK)
1796 			/* lock free, but no work to do */
1797 			break;
1798 
1799 		if (!status)
1800 			/* lock acquired */
1801 			break;
1802 	}
1803 	if (status && status != ICE_ERR_AQ_NO_WORK)
1804 		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
1805 
1806 ice_acquire_res_exit:
1807 	if (status == ICE_ERR_AQ_NO_WORK) {
1808 		if (access == ICE_RES_WRITE)
1809 			ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
1810 		else
1811 			ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
1812 	}
1813 	return status;
1814 }
1815 
1816 /**
1817  * ice_release_res
1818  * @hw: pointer to the HW structure
1819  * @res: resource ID
1820  *
1821  * This function will release a resource using the proper Admin Command.
1822  */
1823 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
1824 {
1825 	enum ice_status status;
1826 	u32 total_delay = 0;
1827 
1828 	status = ice_aq_release_res(hw, res, 0, NULL);
1829 
1830 	/* there are some rare cases when trying to release the resource
1831 	 * results in an admin queue timeout, so handle them correctly
1832 	 */
1833 	while ((status == ICE_ERR_AQ_TIMEOUT) &&
1834 	       (total_delay < hw->adminq.sq_cmd_timeout)) {
1835 		mdelay(1);
1836 		status = ice_aq_release_res(hw, res, 0, NULL);
1837 		total_delay++;
1838 	}
1839 }
1840 
1841 /**
1842  * ice_aq_alloc_free_res - command to allocate/free resources
1843  * @hw: pointer to the HW struct
1844  * @num_entries: number of resource entries in buffer
1845  * @buf: Indirect buffer to hold data parameters and response
1846  * @buf_size: size of buffer for indirect commands
1847  * @opc: pass in the command opcode
1848  * @cd: pointer to command details structure or NULL
1849  *
1850  * Helper function to allocate/free resources using the admin queue commands
1851  */
1852 enum ice_status
1853 ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
1854 		      struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
1855 		      enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1856 {
1857 	struct ice_aqc_alloc_free_res_cmd *cmd;
1858 	struct ice_aq_desc desc;
1859 
1860 	cmd = &desc.params.sw_res_ctrl;
1861 
1862 	if (!buf)
1863 		return ICE_ERR_PARAM;
1864 
1865 	if (buf_size < flex_array_size(buf, elem, num_entries))
1866 		return ICE_ERR_PARAM;
1867 
1868 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
1869 
1870 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1871 
1872 	cmd->num_entries = cpu_to_le16(num_entries);
1873 
1874 	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1875 }
1876 
1877 /**
1878  * ice_alloc_hw_res - allocate resource
1879  * @hw: pointer to the HW struct
1880  * @type: type of resource
1881  * @num: number of resources to allocate
1882  * @btm: allocate from bottom
1883  * @res: pointer to array that will receive the resources
1884  */
1885 enum ice_status
1886 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
1887 {
1888 	struct ice_aqc_alloc_free_res_elem *buf;
1889 	enum ice_status status;
1890 	u16 buf_len;
1891 
1892 	buf_len = struct_size(buf, elem, num);
1893 	buf = kzalloc(buf_len, GFP_KERNEL);
1894 	if (!buf)
1895 		return ICE_ERR_NO_MEMORY;
1896 
1897 	/* Prepare buffer to allocate resource. */
1898 	buf->num_elems = cpu_to_le16(num);
1899 	buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
1900 				    ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
1901 	if (btm)
1902 		buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
1903 
1904 	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
1905 				       ice_aqc_opc_alloc_res, NULL);
1906 	if (status)
1907 		goto ice_alloc_res_exit;
1908 
1909 	memcpy(res, buf->elem, sizeof(*buf->elem) * num);
1910 
1911 ice_alloc_res_exit:
1912 	kfree(buf);
1913 	return status;
1914 }
1915 
1916 /**
1917  * ice_free_hw_res - free allocated HW resource
1918  * @hw: pointer to the HW struct
1919  * @type: type of resource to free
1920  * @num: number of resources
1921  * @res: pointer to array that contains the resources to free
1922  */
1923 enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
1924 {
1925 	struct ice_aqc_alloc_free_res_elem *buf;
1926 	enum ice_status status;
1927 	u16 buf_len;
1928 
1929 	buf_len = struct_size(buf, elem, num);
1930 	buf = kzalloc(buf_len, GFP_KERNEL);
1931 	if (!buf)
1932 		return ICE_ERR_NO_MEMORY;
1933 
1934 	/* Prepare buffer to free resource. */
1935 	buf->num_elems = cpu_to_le16(num);
1936 	buf->res_type = cpu_to_le16(type);
1937 	memcpy(buf->elem, res, sizeof(*buf->elem) * num);
1938 
1939 	status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
1940 				       ice_aqc_opc_free_res, NULL);
1941 	if (status)
1942 		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1943 
1944 	kfree(buf);
1945 	return status;
1946 }
1947 
1948 /**
1949  * ice_get_num_per_func - determine number of resources per PF
1950  * @hw: pointer to the HW structure
1951  * @max: value to be evenly split between each PF
1952  *
1953  * Determine the number of valid functions by going through the bitmap returned
1954  * from parsing capabilities and use this to calculate the number of resources
1955  * per PF based on the max value passed in.
1956  */
1957 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
1958 {
1959 	u8 funcs;
1960 
1961 #define ICE_CAPS_VALID_FUNCS_M	0xFF
1962 	funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
1963 			 ICE_CAPS_VALID_FUNCS_M);
1964 
1965 	if (!funcs)
1966 		return 0;
1967 
1968 	return max / funcs;
1969 }
1970 
1971 /**
1972  * ice_parse_common_caps - parse common device/function capabilities
1973  * @hw: pointer to the HW struct
1974  * @caps: pointer to common capabilities structure
1975  * @elem: the capability element to parse
1976  * @prefix: message prefix for tracing capabilities
1977  *
1978  * Given a capability element, extract relevant details into the common
1979  * capability structure.
1980  *
1981  * Returns: true if the capability matches one of the common capability ids,
1982  * false otherwise.
1983  */
1984 static bool
1985 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
1986 		      struct ice_aqc_list_caps_elem *elem, const char *prefix)
1987 {
1988 	u32 logical_id = le32_to_cpu(elem->logical_id);
1989 	u32 phys_id = le32_to_cpu(elem->phys_id);
1990 	u32 number = le32_to_cpu(elem->number);
1991 	u16 cap = le16_to_cpu(elem->cap);
1992 	bool found = true;
1993 
1994 	switch (cap) {
1995 	case ICE_AQC_CAPS_VALID_FUNCTIONS:
1996 		caps->valid_functions = number;
1997 		ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
1998 			  caps->valid_functions);
1999 		break;
2000 	case ICE_AQC_CAPS_SRIOV:
2001 		caps->sr_iov_1_1 = (number == 1);
2002 		ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix,
2003 			  caps->sr_iov_1_1);
2004 		break;
2005 	case ICE_AQC_CAPS_DCB:
2006 		caps->dcb = (number == 1);
2007 		caps->active_tc_bitmap = logical_id;
2008 		caps->maxtc = phys_id;
2009 		ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb);
2010 		ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix,
2011 			  caps->active_tc_bitmap);
2012 		ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
2013 		break;
2014 	case ICE_AQC_CAPS_RSS:
2015 		caps->rss_table_size = number;
2016 		caps->rss_table_entry_width = logical_id;
2017 		ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
2018 			  caps->rss_table_size);
2019 		ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
2020 			  caps->rss_table_entry_width);
2021 		break;
2022 	case ICE_AQC_CAPS_RXQS:
2023 		caps->num_rxq = number;
2024 		caps->rxq_first_id = phys_id;
2025 		ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
2026 			  caps->num_rxq);
2027 		ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
2028 			  caps->rxq_first_id);
2029 		break;
2030 	case ICE_AQC_CAPS_TXQS:
2031 		caps->num_txq = number;
2032 		caps->txq_first_id = phys_id;
2033 		ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
2034 			  caps->num_txq);
2035 		ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
2036 			  caps->txq_first_id);
2037 		break;
2038 	case ICE_AQC_CAPS_MSIX:
2039 		caps->num_msix_vectors = number;
2040 		caps->msix_vector_first_id = phys_id;
2041 		ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
2042 			  caps->num_msix_vectors);
2043 		ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
2044 			  caps->msix_vector_first_id);
2045 		break;
2046 	case ICE_AQC_CAPS_PENDING_NVM_VER:
2047 		caps->nvm_update_pending_nvm = true;
2048 		ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix);
2049 		break;
2050 	case ICE_AQC_CAPS_PENDING_OROM_VER:
2051 		caps->nvm_update_pending_orom = true;
2052 		ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix);
2053 		break;
2054 	case ICE_AQC_CAPS_PENDING_NET_VER:
2055 		caps->nvm_update_pending_netlist = true;
2056 		ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix);
2057 		break;
2058 	case ICE_AQC_CAPS_NVM_MGMT:
2059 		caps->nvm_unified_update =
2060 			(number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
2061 			true : false;
2062 		ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
2063 			  caps->nvm_unified_update);
2064 		break;
2065 	case ICE_AQC_CAPS_RDMA:
2066 		caps->rdma = (number == 1);
2067 		ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma);
2068 		break;
2069 	case ICE_AQC_CAPS_MAX_MTU:
2070 		caps->max_mtu = number;
2071 		ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
2072 			  prefix, caps->max_mtu);
2073 		break;
2074 	default:
2075 		/* Not one of the recognized common capabilities */
2076 		found = false;
2077 	}
2078 
2079 	return found;
2080 }
2081 
2082 /**
2083  * ice_recalc_port_limited_caps - Recalculate port limited capabilities
2084  * @hw: pointer to the HW structure
2085  * @caps: pointer to capabilities structure to fix
2086  *
2087  * Re-calculate the capabilities that are dependent on the number of physical
2088  * ports; i.e. some features are not supported or function differently on
2089  * devices with more than 4 ports.
2090  */
2091 static void
2092 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
2093 {
2094 	/* This assumes device capabilities are always scanned before function
2095 	 * capabilities during the initialization flow.
2096 	 */
2097 	if (hw->dev_caps.num_funcs > 4) {
2098 		/* Max 4 TCs per port */
2099 		caps->maxtc = 4;
2100 		ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
2101 			  caps->maxtc);
2102 		if (caps->rdma) {
2103 			ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n");
2104 			caps->rdma = 0;
2105 		}
2106 
2107 		/* print message only when processing device capabilities
2108 		 * during initialization.
2109 		 */
2110 		if (caps == &hw->dev_caps.common_cap)
2111 			dev_info(ice_hw_to_dev(hw), "RDMA functionality is not available with the current device configuration.\n");
2112 	}
2113 }
2114 
2115 /**
2116  * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps
2117  * @hw: pointer to the HW struct
2118  * @func_p: pointer to function capabilities structure
2119  * @cap: pointer to the capability element to parse
2120  *
2121  * Extract function capabilities for ICE_AQC_CAPS_VF.
2122  */
2123 static void
2124 ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2125 		       struct ice_aqc_list_caps_elem *cap)
2126 {
2127 	u32 logical_id = le32_to_cpu(cap->logical_id);
2128 	u32 number = le32_to_cpu(cap->number);
2129 
2130 	func_p->num_allocd_vfs = number;
2131 	func_p->vf_base_id = logical_id;
2132 	ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n",
2133 		  func_p->num_allocd_vfs);
2134 	ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n",
2135 		  func_p->vf_base_id);
2136 }
2137 
2138 /**
2139  * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
2140  * @hw: pointer to the HW struct
2141  * @func_p: pointer to function capabilities structure
2142  * @cap: pointer to the capability element to parse
2143  *
2144  * Extract function capabilities for ICE_AQC_CAPS_VSI.
2145  */
2146 static void
2147 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2148 			struct ice_aqc_list_caps_elem *cap)
2149 {
2150 	func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
2151 	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
2152 		  le32_to_cpu(cap->number));
2153 	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
2154 		  func_p->guar_num_vsi);
2155 }
2156 
2157 /**
2158  * ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps
2159  * @hw: pointer to the HW struct
2160  * @func_p: pointer to function capabilities structure
2161  * @cap: pointer to the capability element to parse
2162  *
2163  * Extract function capabilities for ICE_AQC_CAPS_1588.
2164  */
2165 static void
2166 ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2167 			 struct ice_aqc_list_caps_elem *cap)
2168 {
2169 	struct ice_ts_func_info *info = &func_p->ts_func_info;
2170 	u32 number = le32_to_cpu(cap->number);
2171 
2172 	info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0);
2173 	func_p->common_cap.ieee_1588 = info->ena;
2174 
2175 	info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0);
2176 	info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0);
2177 	info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
2178 	info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);
2179 
2180 	info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S;
2181 	info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
2182 
2183 	ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n",
2184 		  func_p->common_cap.ieee_1588);
2185 	ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n",
2186 		  info->src_tmr_owned);
2187 	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n",
2188 		  info->tmr_ena);
2189 	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n",
2190 		  info->tmr_index_owned);
2191 	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n",
2192 		  info->tmr_index_assoc);
2193 	ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n",
2194 		  info->clk_freq);
2195 	ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n",
2196 		  info->clk_src);
2197 }
2198 
2199 /**
2200  * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
2201  * @hw: pointer to the HW struct
2202  * @func_p: pointer to function capabilities structure
2203  *
2204  * Extract function capabilities for ICE_AQC_CAPS_FD.
2205  */
2206 static void
2207 ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
2208 {
2209 	u32 reg_val, val;
2210 
2211 	reg_val = rd32(hw, GLQF_FD_SIZE);
2212 	val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
2213 		GLQF_FD_SIZE_FD_GSIZE_S;
2214 	func_p->fd_fltr_guar =
2215 		ice_get_num_per_func(hw, val);
2216 	val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
2217 		GLQF_FD_SIZE_FD_BSIZE_S;
2218 	func_p->fd_fltr_best_effort = val;
2219 
2220 	ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n",
2221 		  func_p->fd_fltr_guar);
2222 	ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n",
2223 		  func_p->fd_fltr_best_effort);
2224 }
2225 
2226 /**
2227  * ice_parse_func_caps - Parse function capabilities
2228  * @hw: pointer to the HW struct
2229  * @func_p: pointer to function capabilities structure
2230  * @buf: buffer containing the function capability records
2231  * @cap_count: the number of capabilities
2232  *
2233  * Helper function to parse function (0x000A) capabilities list. For
2234  * capabilities shared between device and function, this relies on
2235  * ice_parse_common_caps.
2236  *
2237  * Loop through the list of provided capabilities and extract the relevant
2238  * data into the function capabilities structured.
2239  */
2240 static void
2241 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2242 		    void *buf, u32 cap_count)
2243 {
2244 	struct ice_aqc_list_caps_elem *cap_resp;
2245 	u32 i;
2246 
2247 	cap_resp = buf;
2248 
2249 	memset(func_p, 0, sizeof(*func_p));
2250 
2251 	for (i = 0; i < cap_count; i++) {
2252 		u16 cap = le16_to_cpu(cap_resp[i].cap);
2253 		bool found;
2254 
2255 		found = ice_parse_common_caps(hw, &func_p->common_cap,
2256 					      &cap_resp[i], "func caps");
2257 
2258 		switch (cap) {
2259 		case ICE_AQC_CAPS_VF:
2260 			ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
2261 			break;
2262 		case ICE_AQC_CAPS_VSI:
2263 			ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
2264 			break;
2265 		case ICE_AQC_CAPS_1588:
2266 			ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]);
2267 			break;
2268 		case ICE_AQC_CAPS_FD:
2269 			ice_parse_fdir_func_caps(hw, func_p);
2270 			break;
2271 		default:
2272 			/* Don't list common capabilities as unknown */
2273 			if (!found)
2274 				ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
2275 					  i, cap);
2276 			break;
2277 		}
2278 	}
2279 
2280 	ice_recalc_port_limited_caps(hw, &func_p->common_cap);
2281 }
2282 
2283 /**
2284  * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
2285  * @hw: pointer to the HW struct
2286  * @dev_p: pointer to device capabilities structure
2287  * @cap: capability element to parse
2288  *
2289  * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
2290  */
2291 static void
2292 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2293 			      struct ice_aqc_list_caps_elem *cap)
2294 {
2295 	u32 number = le32_to_cpu(cap->number);
2296 
2297 	dev_p->num_funcs = hweight32(number);
2298 	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
2299 		  dev_p->num_funcs);
2300 }
2301 
2302 /**
2303  * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps
2304  * @hw: pointer to the HW struct
2305  * @dev_p: pointer to device capabilities structure
2306  * @cap: capability element to parse
2307  *
2308  * Parse ICE_AQC_CAPS_VF for device capabilities.
2309  */
2310 static void
2311 ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2312 		      struct ice_aqc_list_caps_elem *cap)
2313 {
2314 	u32 number = le32_to_cpu(cap->number);
2315 
2316 	dev_p->num_vfs_exposed = number;
2317 	ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n",
2318 		  dev_p->num_vfs_exposed);
2319 }
2320 
2321 /**
2322  * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
2323  * @hw: pointer to the HW struct
2324  * @dev_p: pointer to device capabilities structure
2325  * @cap: capability element to parse
2326  *
2327  * Parse ICE_AQC_CAPS_VSI for device capabilities.
2328  */
2329 static void
2330 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2331 		       struct ice_aqc_list_caps_elem *cap)
2332 {
2333 	u32 number = le32_to_cpu(cap->number);
2334 
2335 	dev_p->num_vsi_allocd_to_host = number;
2336 	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
2337 		  dev_p->num_vsi_allocd_to_host);
2338 }
2339 
2340 /**
2341  * ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps
2342  * @hw: pointer to the HW struct
2343  * @dev_p: pointer to device capabilities structure
2344  * @cap: capability element to parse
2345  *
2346  * Parse ICE_AQC_CAPS_1588 for device capabilities.
2347  */
2348 static void
2349 ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2350 			struct ice_aqc_list_caps_elem *cap)
2351 {
2352 	struct ice_ts_dev_info *info = &dev_p->ts_dev_info;
2353 	u32 logical_id = le32_to_cpu(cap->logical_id);
2354 	u32 phys_id = le32_to_cpu(cap->phys_id);
2355 	u32 number = le32_to_cpu(cap->number);
2356 
2357 	info->ena = ((number & ICE_TS_DEV_ENA_M) != 0);
2358 	dev_p->common_cap.ieee_1588 = info->ena;
2359 
2360 	info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M;
2361 	info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0);
2362 	info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0);
2363 
2364 	info->tmr1_owner = (number & ICE_TS_TMR1_OWNR_M) >> ICE_TS_TMR1_OWNR_S;
2365 	info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0);
2366 	info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0);
2367 
2368 	info->ena_ports = logical_id;
2369 	info->tmr_own_map = phys_id;
2370 
2371 	ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n",
2372 		  dev_p->common_cap.ieee_1588);
2373 	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n",
2374 		  info->tmr0_owner);
2375 	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n",
2376 		  info->tmr0_owned);
2377 	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n",
2378 		  info->tmr0_ena);
2379 	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n",
2380 		  info->tmr1_owner);
2381 	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n",
2382 		  info->tmr1_owned);
2383 	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n",
2384 		  info->tmr1_ena);
2385 	ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
2386 		  info->ena_ports);
2387 	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
2388 		  info->tmr_own_map);
2389 }
2390 
2391 /**
2392  * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps
2393  * @hw: pointer to the HW struct
2394  * @dev_p: pointer to device capabilities structure
2395  * @cap: capability element to parse
2396  *
2397  * Parse ICE_AQC_CAPS_FD for device capabilities.
2398  */
2399 static void
2400 ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2401 			struct ice_aqc_list_caps_elem *cap)
2402 {
2403 	u32 number = le32_to_cpu(cap->number);
2404 
2405 	dev_p->num_flow_director_fltr = number;
2406 	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
2407 		  dev_p->num_flow_director_fltr);
2408 }
2409 
2410 /**
2411  * ice_parse_dev_caps - Parse device capabilities
2412  * @hw: pointer to the HW struct
2413  * @dev_p: pointer to device capabilities structure
2414  * @buf: buffer containing the device capability records
2415  * @cap_count: the number of capabilities
2416  *
2417  * Helper device to parse device (0x000B) capabilities list. For
2418  * capabilities shared between device and function, this relies on
2419  * ice_parse_common_caps.
2420  *
2421  * Loop through the list of provided capabilities and extract the relevant
2422  * data into the device capabilities structured.
2423  */
2424 static void
2425 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2426 		   void *buf, u32 cap_count)
2427 {
2428 	struct ice_aqc_list_caps_elem *cap_resp;
2429 	u32 i;
2430 
2431 	cap_resp = buf;
2432 
2433 	memset(dev_p, 0, sizeof(*dev_p));
2434 
2435 	for (i = 0; i < cap_count; i++) {
2436 		u16 cap = le16_to_cpu(cap_resp[i].cap);
2437 		bool found;
2438 
2439 		found = ice_parse_common_caps(hw, &dev_p->common_cap,
2440 					      &cap_resp[i], "dev caps");
2441 
2442 		switch (cap) {
2443 		case ICE_AQC_CAPS_VALID_FUNCTIONS:
2444 			ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
2445 			break;
2446 		case ICE_AQC_CAPS_VF:
2447 			ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
2448 			break;
2449 		case ICE_AQC_CAPS_VSI:
2450 			ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
2451 			break;
2452 		case ICE_AQC_CAPS_1588:
2453 			ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]);
2454 			break;
2455 		case  ICE_AQC_CAPS_FD:
2456 			ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
2457 			break;
2458 		default:
2459 			/* Don't list common capabilities as unknown */
2460 			if (!found)
2461 				ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n",
2462 					  i, cap);
2463 			break;
2464 		}
2465 	}
2466 
2467 	ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
2468 }
2469 
2470 /**
2471  * ice_aq_list_caps - query function/device capabilities
2472  * @hw: pointer to the HW struct
2473  * @buf: a buffer to hold the capabilities
2474  * @buf_size: size of the buffer
2475  * @cap_count: if not NULL, set to the number of capabilities reported
2476  * @opc: capabilities type to discover, device or function
2477  * @cd: pointer to command details structure or NULL
2478  *
2479  * Get the function (0x000A) or device (0x000B) capabilities description from
2480  * firmware and store it in the buffer.
2481  *
2482  * If the cap_count pointer is not NULL, then it is set to the number of
2483  * capabilities firmware will report. Note that if the buffer size is too
2484  * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
2485  * cap_count will still be updated in this case. It is recommended that the
2486  * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
2487  * firmware could return) to avoid this.
2488  */
2489 enum ice_status
2490 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
2491 		 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2492 {
2493 	struct ice_aqc_list_caps *cmd;
2494 	struct ice_aq_desc desc;
2495 	enum ice_status status;
2496 
2497 	cmd = &desc.params.get_cap;
2498 
2499 	if (opc != ice_aqc_opc_list_func_caps &&
2500 	    opc != ice_aqc_opc_list_dev_caps)
2501 		return ICE_ERR_PARAM;
2502 
2503 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
2504 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2505 
2506 	if (cap_count)
2507 		*cap_count = le32_to_cpu(cmd->count);
2508 
2509 	return status;
2510 }
2511 
2512 /**
2513  * ice_discover_dev_caps - Read and extract device capabilities
2514  * @hw: pointer to the hardware structure
2515  * @dev_caps: pointer to device capabilities structure
2516  *
2517  * Read the device capabilities and extract them into the dev_caps structure
2518  * for later use.
2519  */
2520 enum ice_status
2521 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
2522 {
2523 	enum ice_status status;
2524 	u32 cap_count = 0;
2525 	void *cbuf;
2526 
2527 	cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
2528 	if (!cbuf)
2529 		return ICE_ERR_NO_MEMORY;
2530 
2531 	/* Although the driver doesn't know the number of capabilities the
2532 	 * device will return, we can simply send a 4KB buffer, the maximum
2533 	 * possible size that firmware can return.
2534 	 */
2535 	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2536 
2537 	status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2538 				  ice_aqc_opc_list_dev_caps, NULL);
2539 	if (!status)
2540 		ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
2541 	kfree(cbuf);
2542 
2543 	return status;
2544 }
2545 
2546 /**
2547  * ice_discover_func_caps - Read and extract function capabilities
2548  * @hw: pointer to the hardware structure
2549  * @func_caps: pointer to function capabilities structure
2550  *
2551  * Read the function capabilities and extract them into the func_caps structure
2552  * for later use.
2553  */
2554 static enum ice_status
2555 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
2556 {
2557 	enum ice_status status;
2558 	u32 cap_count = 0;
2559 	void *cbuf;
2560 
2561 	cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
2562 	if (!cbuf)
2563 		return ICE_ERR_NO_MEMORY;
2564 
2565 	/* Although the driver doesn't know the number of capabilities the
2566 	 * device will return, we can simply send a 4KB buffer, the maximum
2567 	 * possible size that firmware can return.
2568 	 */
2569 	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2570 
2571 	status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2572 				  ice_aqc_opc_list_func_caps, NULL);
2573 	if (!status)
2574 		ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
2575 	kfree(cbuf);
2576 
2577 	return status;
2578 }
2579 
2580 /**
2581  * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
2582  * @hw: pointer to the hardware structure
2583  */
2584 void ice_set_safe_mode_caps(struct ice_hw *hw)
2585 {
2586 	struct ice_hw_func_caps *func_caps = &hw->func_caps;
2587 	struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
2588 	struct ice_hw_common_caps cached_caps;
2589 	u32 num_funcs;
2590 
2591 	/* cache some func_caps values that should be restored after memset */
2592 	cached_caps = func_caps->common_cap;
2593 
2594 	/* unset func capabilities */
2595 	memset(func_caps, 0, sizeof(*func_caps));
2596 
2597 #define ICE_RESTORE_FUNC_CAP(name) \
2598 	func_caps->common_cap.name = cached_caps.name
2599 
2600 	/* restore cached values */
2601 	ICE_RESTORE_FUNC_CAP(valid_functions);
2602 	ICE_RESTORE_FUNC_CAP(txq_first_id);
2603 	ICE_RESTORE_FUNC_CAP(rxq_first_id);
2604 	ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
2605 	ICE_RESTORE_FUNC_CAP(max_mtu);
2606 	ICE_RESTORE_FUNC_CAP(nvm_unified_update);
2607 	ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm);
2608 	ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom);
2609 	ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist);
2610 
2611 	/* one Tx and one Rx queue in safe mode */
2612 	func_caps->common_cap.num_rxq = 1;
2613 	func_caps->common_cap.num_txq = 1;
2614 
2615 	/* two MSIX vectors, one for traffic and one for misc causes */
2616 	func_caps->common_cap.num_msix_vectors = 2;
2617 	func_caps->guar_num_vsi = 1;
2618 
2619 	/* cache some dev_caps values that should be restored after memset */
2620 	cached_caps = dev_caps->common_cap;
2621 	num_funcs = dev_caps->num_funcs;
2622 
2623 	/* unset dev capabilities */
2624 	memset(dev_caps, 0, sizeof(*dev_caps));
2625 
2626 #define ICE_RESTORE_DEV_CAP(name) \
2627 	dev_caps->common_cap.name = cached_caps.name
2628 
2629 	/* restore cached values */
2630 	ICE_RESTORE_DEV_CAP(valid_functions);
2631 	ICE_RESTORE_DEV_CAP(txq_first_id);
2632 	ICE_RESTORE_DEV_CAP(rxq_first_id);
2633 	ICE_RESTORE_DEV_CAP(msix_vector_first_id);
2634 	ICE_RESTORE_DEV_CAP(max_mtu);
2635 	ICE_RESTORE_DEV_CAP(nvm_unified_update);
2636 	ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm);
2637 	ICE_RESTORE_DEV_CAP(nvm_update_pending_orom);
2638 	ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist);
2639 	dev_caps->num_funcs = num_funcs;
2640 
2641 	/* one Tx and one Rx queue per function in safe mode */
2642 	dev_caps->common_cap.num_rxq = num_funcs;
2643 	dev_caps->common_cap.num_txq = num_funcs;
2644 
2645 	/* two MSIX vectors per function */
2646 	dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
2647 }
2648 
2649 /**
2650  * ice_get_caps - get info about the HW
2651  * @hw: pointer to the hardware structure
2652  */
2653 enum ice_status ice_get_caps(struct ice_hw *hw)
2654 {
2655 	enum ice_status status;
2656 
2657 	status = ice_discover_dev_caps(hw, &hw->dev_caps);
2658 	if (status)
2659 		return status;
2660 
2661 	return ice_discover_func_caps(hw, &hw->func_caps);
2662 }
2663 
2664 /**
2665  * ice_aq_manage_mac_write - manage MAC address write command
2666  * @hw: pointer to the HW struct
2667  * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
2668  * @flags: flags to control write behavior
2669  * @cd: pointer to command details structure or NULL
2670  *
2671  * This function is used to write MAC address to the NVM (0x0108).
2672  */
2673 enum ice_status
2674 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
2675 			struct ice_sq_cd *cd)
2676 {
2677 	struct ice_aqc_manage_mac_write *cmd;
2678 	struct ice_aq_desc desc;
2679 
2680 	cmd = &desc.params.mac_write;
2681 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
2682 
2683 	cmd->flags = flags;
2684 	ether_addr_copy(cmd->mac_addr, mac_addr);
2685 
2686 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2687 }
2688 
2689 /**
2690  * ice_aq_clear_pxe_mode
2691  * @hw: pointer to the HW struct
2692  *
2693  * Tell the firmware that the driver is taking over from PXE (0x0110).
2694  */
2695 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
2696 {
2697 	struct ice_aq_desc desc;
2698 
2699 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
2700 	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
2701 
2702 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2703 }
2704 
2705 /**
2706  * ice_clear_pxe_mode - clear pxe operations mode
2707  * @hw: pointer to the HW struct
2708  *
2709  * Make sure all PXE mode settings are cleared, including things
2710  * like descriptor fetch/write-back mode.
2711  */
2712 void ice_clear_pxe_mode(struct ice_hw *hw)
2713 {
2714 	if (ice_check_sq_alive(hw, &hw->adminq))
2715 		ice_aq_clear_pxe_mode(hw);
2716 }
2717 
2718 /**
2719  * ice_get_link_speed_based_on_phy_type - returns link speed
2720  * @phy_type_low: lower part of phy_type
2721  * @phy_type_high: higher part of phy_type
2722  *
2723  * This helper function will convert an entry in PHY type structure
2724  * [phy_type_low, phy_type_high] to its corresponding link speed.
2725  * Note: In the structure of [phy_type_low, phy_type_high], there should
2726  * be one bit set, as this function will convert one PHY type to its
2727  * speed.
2728  * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
2729  * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
2730  */
2731 static u16
2732 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
2733 {
2734 	u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2735 	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2736 
2737 	switch (phy_type_low) {
2738 	case ICE_PHY_TYPE_LOW_100BASE_TX:
2739 	case ICE_PHY_TYPE_LOW_100M_SGMII:
2740 		speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
2741 		break;
2742 	case ICE_PHY_TYPE_LOW_1000BASE_T:
2743 	case ICE_PHY_TYPE_LOW_1000BASE_SX:
2744 	case ICE_PHY_TYPE_LOW_1000BASE_LX:
2745 	case ICE_PHY_TYPE_LOW_1000BASE_KX:
2746 	case ICE_PHY_TYPE_LOW_1G_SGMII:
2747 		speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
2748 		break;
2749 	case ICE_PHY_TYPE_LOW_2500BASE_T:
2750 	case ICE_PHY_TYPE_LOW_2500BASE_X:
2751 	case ICE_PHY_TYPE_LOW_2500BASE_KX:
2752 		speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
2753 		break;
2754 	case ICE_PHY_TYPE_LOW_5GBASE_T:
2755 	case ICE_PHY_TYPE_LOW_5GBASE_KR:
2756 		speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
2757 		break;
2758 	case ICE_PHY_TYPE_LOW_10GBASE_T:
2759 	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
2760 	case ICE_PHY_TYPE_LOW_10GBASE_SR:
2761 	case ICE_PHY_TYPE_LOW_10GBASE_LR:
2762 	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
2763 	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
2764 	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
2765 		speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
2766 		break;
2767 	case ICE_PHY_TYPE_LOW_25GBASE_T:
2768 	case ICE_PHY_TYPE_LOW_25GBASE_CR:
2769 	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
2770 	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
2771 	case ICE_PHY_TYPE_LOW_25GBASE_SR:
2772 	case ICE_PHY_TYPE_LOW_25GBASE_LR:
2773 	case ICE_PHY_TYPE_LOW_25GBASE_KR:
2774 	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
2775 	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
2776 	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
2777 	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
2778 		speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
2779 		break;
2780 	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
2781 	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
2782 	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
2783 	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
2784 	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
2785 	case ICE_PHY_TYPE_LOW_40G_XLAUI:
2786 		speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
2787 		break;
2788 	case ICE_PHY_TYPE_LOW_50GBASE_CR2:
2789 	case ICE_PHY_TYPE_LOW_50GBASE_SR2:
2790 	case ICE_PHY_TYPE_LOW_50GBASE_LR2:
2791 	case ICE_PHY_TYPE_LOW_50GBASE_KR2:
2792 	case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
2793 	case ICE_PHY_TYPE_LOW_50G_LAUI2:
2794 	case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
2795 	case ICE_PHY_TYPE_LOW_50G_AUI2:
2796 	case ICE_PHY_TYPE_LOW_50GBASE_CP:
2797 	case ICE_PHY_TYPE_LOW_50GBASE_SR:
2798 	case ICE_PHY_TYPE_LOW_50GBASE_FR:
2799 	case ICE_PHY_TYPE_LOW_50GBASE_LR:
2800 	case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
2801 	case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
2802 	case ICE_PHY_TYPE_LOW_50G_AUI1:
2803 		speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
2804 		break;
2805 	case ICE_PHY_TYPE_LOW_100GBASE_CR4:
2806 	case ICE_PHY_TYPE_LOW_100GBASE_SR4:
2807 	case ICE_PHY_TYPE_LOW_100GBASE_LR4:
2808 	case ICE_PHY_TYPE_LOW_100GBASE_KR4:
2809 	case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
2810 	case ICE_PHY_TYPE_LOW_100G_CAUI4:
2811 	case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
2812 	case ICE_PHY_TYPE_LOW_100G_AUI4:
2813 	case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
2814 	case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
2815 	case ICE_PHY_TYPE_LOW_100GBASE_CP2:
2816 	case ICE_PHY_TYPE_LOW_100GBASE_SR2:
2817 	case ICE_PHY_TYPE_LOW_100GBASE_DR:
2818 		speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
2819 		break;
2820 	default:
2821 		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2822 		break;
2823 	}
2824 
2825 	switch (phy_type_high) {
2826 	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
2827 	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
2828 	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
2829 	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
2830 	case ICE_PHY_TYPE_HIGH_100G_AUI2:
2831 		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
2832 		break;
2833 	default:
2834 		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2835 		break;
2836 	}
2837 
2838 	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
2839 	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2840 		return ICE_AQ_LINK_SPEED_UNKNOWN;
2841 	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2842 		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
2843 		return ICE_AQ_LINK_SPEED_UNKNOWN;
2844 	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2845 		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2846 		return speed_phy_type_low;
2847 	else
2848 		return speed_phy_type_high;
2849 }
2850 
2851 /**
2852  * ice_update_phy_type
2853  * @phy_type_low: pointer to the lower part of phy_type
2854  * @phy_type_high: pointer to the higher part of phy_type
2855  * @link_speeds_bitmap: targeted link speeds bitmap
2856  *
2857  * Note: For the link_speeds_bitmap structure, you can check it at
2858  * [ice_aqc_get_link_status->link_speed]. Caller can pass in
2859  * link_speeds_bitmap include multiple speeds.
2860  *
2861  * Each entry in this [phy_type_low, phy_type_high] structure will
2862  * present a certain link speed. This helper function will turn on bits
2863  * in [phy_type_low, phy_type_high] structure based on the value of
2864  * link_speeds_bitmap input parameter.
2865  */
2866 void
2867 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
2868 		    u16 link_speeds_bitmap)
2869 {
2870 	u64 pt_high;
2871 	u64 pt_low;
2872 	int index;
2873 	u16 speed;
2874 
2875 	/* We first check with low part of phy_type */
2876 	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
2877 		pt_low = BIT_ULL(index);
2878 		speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
2879 
2880 		if (link_speeds_bitmap & speed)
2881 			*phy_type_low |= BIT_ULL(index);
2882 	}
2883 
2884 	/* We then check with high part of phy_type */
2885 	for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
2886 		pt_high = BIT_ULL(index);
2887 		speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
2888 
2889 		if (link_speeds_bitmap & speed)
2890 			*phy_type_high |= BIT_ULL(index);
2891 	}
2892 }
2893 
2894 /**
2895  * ice_aq_set_phy_cfg
2896  * @hw: pointer to the HW struct
2897  * @pi: port info structure of the interested logical port
2898  * @cfg: structure with PHY configuration data to be set
2899  * @cd: pointer to command details structure or NULL
2900  *
2901  * Set the various PHY configuration parameters supported on the Port.
2902  * One or more of the Set PHY config parameters may be ignored in an MFP
2903  * mode as the PF may not have the privilege to set some of the PHY Config
2904  * parameters. This status will be indicated by the command response (0x0601).
2905  */
2906 enum ice_status
2907 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
2908 		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
2909 {
2910 	struct ice_aq_desc desc;
2911 	enum ice_status status;
2912 
2913 	if (!cfg)
2914 		return ICE_ERR_PARAM;
2915 
2916 	/* Ensure that only valid bits of cfg->caps can be turned on. */
2917 	if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
2918 		ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
2919 			  cfg->caps);
2920 
2921 		cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
2922 	}
2923 
2924 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
2925 	desc.params.set_phy.lport_num = pi->lport;
2926 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2927 
2928 	ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
2929 	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
2930 		  (unsigned long long)le64_to_cpu(cfg->phy_type_low));
2931 	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
2932 		  (unsigned long long)le64_to_cpu(cfg->phy_type_high));
2933 	ice_debug(hw, ICE_DBG_LINK, "	caps = 0x%x\n", cfg->caps);
2934 	ice_debug(hw, ICE_DBG_LINK, "	low_power_ctrl_an = 0x%x\n",
2935 		  cfg->low_power_ctrl_an);
2936 	ice_debug(hw, ICE_DBG_LINK, "	eee_cap = 0x%x\n", cfg->eee_cap);
2937 	ice_debug(hw, ICE_DBG_LINK, "	eeer_value = 0x%x\n", cfg->eeer_value);
2938 	ice_debug(hw, ICE_DBG_LINK, "	link_fec_opt = 0x%x\n",
2939 		  cfg->link_fec_opt);
2940 
2941 	status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
2942 	if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
2943 		status = 0;
2944 
2945 	if (!status)
2946 		pi->phy.curr_user_phy_cfg = *cfg;
2947 
2948 	return status;
2949 }
2950 
2951 /**
2952  * ice_update_link_info - update status of the HW network link
2953  * @pi: port info structure of the interested logical port
2954  */
2955 enum ice_status ice_update_link_info(struct ice_port_info *pi)
2956 {
2957 	struct ice_link_status *li;
2958 	enum ice_status status;
2959 
2960 	if (!pi)
2961 		return ICE_ERR_PARAM;
2962 
2963 	li = &pi->phy.link_info;
2964 
2965 	status = ice_aq_get_link_info(pi, true, NULL, NULL);
2966 	if (status)
2967 		return status;
2968 
2969 	if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
2970 		struct ice_aqc_get_phy_caps_data *pcaps;
2971 		struct ice_hw *hw;
2972 
2973 		hw = pi->hw;
2974 		pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
2975 				     GFP_KERNEL);
2976 		if (!pcaps)
2977 			return ICE_ERR_NO_MEMORY;
2978 
2979 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2980 					     pcaps, NULL);
2981 
2982 		devm_kfree(ice_hw_to_dev(hw), pcaps);
2983 	}
2984 
2985 	return status;
2986 }
2987 
2988 /**
2989  * ice_cache_phy_user_req
2990  * @pi: port information structure
2991  * @cache_data: PHY logging data
2992  * @cache_mode: PHY logging mode
2993  *
2994  * Log the user request on (FC, FEC, SPEED) for later use.
2995  */
2996 static void
2997 ice_cache_phy_user_req(struct ice_port_info *pi,
2998 		       struct ice_phy_cache_mode_data cache_data,
2999 		       enum ice_phy_cache_mode cache_mode)
3000 {
3001 	if (!pi)
3002 		return;
3003 
3004 	switch (cache_mode) {
3005 	case ICE_FC_MODE:
3006 		pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
3007 		break;
3008 	case ICE_SPEED_MODE:
3009 		pi->phy.curr_user_speed_req =
3010 			cache_data.data.curr_user_speed_req;
3011 		break;
3012 	case ICE_FEC_MODE:
3013 		pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
3014 		break;
3015 	default:
3016 		break;
3017 	}
3018 }
3019 
3020 /**
3021  * ice_caps_to_fc_mode
3022  * @caps: PHY capabilities
3023  *
3024  * Convert PHY FC capabilities to ice FC mode
3025  */
3026 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
3027 {
3028 	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
3029 	    caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
3030 		return ICE_FC_FULL;
3031 
3032 	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
3033 		return ICE_FC_TX_PAUSE;
3034 
3035 	if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
3036 		return ICE_FC_RX_PAUSE;
3037 
3038 	return ICE_FC_NONE;
3039 }
3040 
3041 /**
3042  * ice_caps_to_fec_mode
3043  * @caps: PHY capabilities
3044  * @fec_options: Link FEC options
3045  *
3046  * Convert PHY FEC capabilities to ice FEC mode
3047  */
3048 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
3049 {
3050 	if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
3051 		return ICE_FEC_AUTO;
3052 
3053 	if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3054 			   ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3055 			   ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
3056 			   ICE_AQC_PHY_FEC_25G_KR_REQ))
3057 		return ICE_FEC_BASER;
3058 
3059 	if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3060 			   ICE_AQC_PHY_FEC_25G_RS_544_REQ |
3061 			   ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
3062 		return ICE_FEC_RS;
3063 
3064 	return ICE_FEC_NONE;
3065 }
3066 
3067 /**
3068  * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
3069  * @pi: port information structure
3070  * @cfg: PHY configuration data to set FC mode
3071  * @req_mode: FC mode to configure
3072  */
3073 enum ice_status
3074 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3075 	       enum ice_fc_mode req_mode)
3076 {
3077 	struct ice_phy_cache_mode_data cache_data;
3078 	u8 pause_mask = 0x0;
3079 
3080 	if (!pi || !cfg)
3081 		return ICE_ERR_BAD_PTR;
3082 
3083 	switch (req_mode) {
3084 	case ICE_FC_FULL:
3085 		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3086 		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3087 		break;
3088 	case ICE_FC_RX_PAUSE:
3089 		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3090 		break;
3091 	case ICE_FC_TX_PAUSE:
3092 		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3093 		break;
3094 	default:
3095 		break;
3096 	}
3097 
3098 	/* clear the old pause settings */
3099 	cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
3100 		ICE_AQC_PHY_EN_RX_LINK_PAUSE);
3101 
3102 	/* set the new capabilities */
3103 	cfg->caps |= pause_mask;
3104 
3105 	/* Cache user FC request */
3106 	cache_data.data.curr_user_fc_req = req_mode;
3107 	ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
3108 
3109 	return 0;
3110 }
3111 
3112 /**
3113  * ice_set_fc
3114  * @pi: port information structure
3115  * @aq_failures: pointer to status code, specific to ice_set_fc routine
3116  * @ena_auto_link_update: enable automatic link update
3117  *
3118  * Set the requested flow control mode.
3119  */
3120 enum ice_status
3121 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
3122 {
3123 	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
3124 	struct ice_aqc_get_phy_caps_data *pcaps;
3125 	enum ice_status status;
3126 	struct ice_hw *hw;
3127 
3128 	if (!pi || !aq_failures)
3129 		return ICE_ERR_BAD_PTR;
3130 
3131 	*aq_failures = 0;
3132 	hw = pi->hw;
3133 
3134 	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
3135 	if (!pcaps)
3136 		return ICE_ERR_NO_MEMORY;
3137 
3138 	/* Get the current PHY config */
3139 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
3140 				     pcaps, NULL);
3141 	if (status) {
3142 		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
3143 		goto out;
3144 	}
3145 
3146 	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
3147 
3148 	/* Configure the set PHY data */
3149 	status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
3150 	if (status)
3151 		goto out;
3152 
3153 	/* If the capabilities have changed, then set the new config */
3154 	if (cfg.caps != pcaps->caps) {
3155 		int retry_count, retry_max = 10;
3156 
3157 		/* Auto restart link so settings take effect */
3158 		if (ena_auto_link_update)
3159 			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3160 
3161 		status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
3162 		if (status) {
3163 			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
3164 			goto out;
3165 		}
3166 
3167 		/* Update the link info
3168 		 * It sometimes takes a really long time for link to
3169 		 * come back from the atomic reset. Thus, we wait a
3170 		 * little bit.
3171 		 */
3172 		for (retry_count = 0; retry_count < retry_max; retry_count++) {
3173 			status = ice_update_link_info(pi);
3174 
3175 			if (!status)
3176 				break;
3177 
3178 			mdelay(100);
3179 		}
3180 
3181 		if (status)
3182 			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
3183 	}
3184 
3185 out:
3186 	devm_kfree(ice_hw_to_dev(hw), pcaps);
3187 	return status;
3188 }
3189 
3190 /**
3191  * ice_phy_caps_equals_cfg
3192  * @phy_caps: PHY capabilities
3193  * @phy_cfg: PHY configuration
3194  *
3195  * Helper function to determine if PHY capabilities matches PHY
3196  * configuration
3197  */
3198 bool
3199 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
3200 			struct ice_aqc_set_phy_cfg_data *phy_cfg)
3201 {
3202 	u8 caps_mask, cfg_mask;
3203 
3204 	if (!phy_caps || !phy_cfg)
3205 		return false;
3206 
3207 	/* These bits are not common between capabilities and configuration.
3208 	 * Do not use them to determine equality.
3209 	 */
3210 	caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
3211 					      ICE_AQC_GET_PHY_EN_MOD_QUAL);
3212 	cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3213 
3214 	if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
3215 	    phy_caps->phy_type_high != phy_cfg->phy_type_high ||
3216 	    ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
3217 	    phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
3218 	    phy_caps->eee_cap != phy_cfg->eee_cap ||
3219 	    phy_caps->eeer_value != phy_cfg->eeer_value ||
3220 	    phy_caps->link_fec_options != phy_cfg->link_fec_opt)
3221 		return false;
3222 
3223 	return true;
3224 }
3225 
3226 /**
3227  * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
3228  * @pi: port information structure
3229  * @caps: PHY ability structure to copy date from
3230  * @cfg: PHY configuration structure to copy data to
3231  *
3232  * Helper function to copy AQC PHY get ability data to PHY set configuration
3233  * data structure
3234  */
3235 void
3236 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
3237 			 struct ice_aqc_get_phy_caps_data *caps,
3238 			 struct ice_aqc_set_phy_cfg_data *cfg)
3239 {
3240 	if (!pi || !caps || !cfg)
3241 		return;
3242 
3243 	memset(cfg, 0, sizeof(*cfg));
3244 	cfg->phy_type_low = caps->phy_type_low;
3245 	cfg->phy_type_high = caps->phy_type_high;
3246 	cfg->caps = caps->caps;
3247 	cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
3248 	cfg->eee_cap = caps->eee_cap;
3249 	cfg->eeer_value = caps->eeer_value;
3250 	cfg->link_fec_opt = caps->link_fec_options;
3251 	cfg->module_compliance_enforcement =
3252 		caps->module_compliance_enforcement;
3253 }
3254 
3255 /**
3256  * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
3257  * @pi: port information structure
3258  * @cfg: PHY configuration data to set FEC mode
3259  * @fec: FEC mode to configure
3260  */
3261 enum ice_status
3262 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3263 		enum ice_fec_mode fec)
3264 {
3265 	struct ice_aqc_get_phy_caps_data *pcaps;
3266 	enum ice_status status;
3267 	struct ice_hw *hw;
3268 
3269 	if (!pi || !cfg)
3270 		return ICE_ERR_BAD_PTR;
3271 
3272 	hw = pi->hw;
3273 
3274 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
3275 	if (!pcaps)
3276 		return ICE_ERR_NO_MEMORY;
3277 
3278 	status = ice_aq_get_phy_caps(pi, false,
3279 				     (ice_fw_supports_report_dflt_cfg(hw) ?
3280 				      ICE_AQC_REPORT_DFLT_CFG :
3281 				      ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
3282 	if (status)
3283 		goto out;
3284 
3285 	cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
3286 	cfg->link_fec_opt = pcaps->link_fec_options;
3287 
3288 	switch (fec) {
3289 	case ICE_FEC_BASER:
3290 		/* Clear RS bits, and AND BASE-R ability
3291 		 * bits and OR request bits.
3292 		 */
3293 		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3294 			ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
3295 		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3296 			ICE_AQC_PHY_FEC_25G_KR_REQ;
3297 		break;
3298 	case ICE_FEC_RS:
3299 		/* Clear BASE-R bits, and AND RS ability
3300 		 * bits and OR request bits.
3301 		 */
3302 		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
3303 		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3304 			ICE_AQC_PHY_FEC_25G_RS_544_REQ;
3305 		break;
3306 	case ICE_FEC_NONE:
3307 		/* Clear all FEC option bits. */
3308 		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
3309 		break;
3310 	case ICE_FEC_AUTO:
3311 		/* AND auto FEC bit, and all caps bits. */
3312 		cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
3313 		cfg->link_fec_opt |= pcaps->link_fec_options;
3314 		break;
3315 	default:
3316 		status = ICE_ERR_PARAM;
3317 		break;
3318 	}
3319 
3320 	if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) &&
3321 	    !ice_fw_supports_report_dflt_cfg(hw)) {
3322 		struct ice_link_default_override_tlv tlv;
3323 
3324 		if (ice_get_link_default_override(&tlv, pi))
3325 			goto out;
3326 
3327 		if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
3328 		    (tlv.options & ICE_LINK_OVERRIDE_EN))
3329 			cfg->link_fec_opt = tlv.fec_options;
3330 	}
3331 
3332 out:
3333 	kfree(pcaps);
3334 
3335 	return status;
3336 }
3337 
3338 /**
3339  * ice_get_link_status - get status of the HW network link
3340  * @pi: port information structure
3341  * @link_up: pointer to bool (true/false = linkup/linkdown)
3342  *
3343  * Variable link_up is true if link is up, false if link is down.
3344  * The variable link_up is invalid if status is non zero. As a
3345  * result of this call, link status reporting becomes enabled
3346  */
3347 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
3348 {
3349 	struct ice_phy_info *phy_info;
3350 	enum ice_status status = 0;
3351 
3352 	if (!pi || !link_up)
3353 		return ICE_ERR_PARAM;
3354 
3355 	phy_info = &pi->phy;
3356 
3357 	if (phy_info->get_link_info) {
3358 		status = ice_update_link_info(pi);
3359 
3360 		if (status)
3361 			ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
3362 				  status);
3363 	}
3364 
3365 	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
3366 
3367 	return status;
3368 }
3369 
3370 /**
3371  * ice_aq_set_link_restart_an
3372  * @pi: pointer to the port information structure
3373  * @ena_link: if true: enable link, if false: disable link
3374  * @cd: pointer to command details structure or NULL
3375  *
3376  * Sets up the link and restarts the Auto-Negotiation over the link.
3377  */
3378 enum ice_status
3379 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
3380 			   struct ice_sq_cd *cd)
3381 {
3382 	struct ice_aqc_restart_an *cmd;
3383 	struct ice_aq_desc desc;
3384 
3385 	cmd = &desc.params.restart_an;
3386 
3387 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
3388 
3389 	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
3390 	cmd->lport_num = pi->lport;
3391 	if (ena_link)
3392 		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
3393 	else
3394 		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
3395 
3396 	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
3397 }
3398 
3399 /**
3400  * ice_aq_set_event_mask
3401  * @hw: pointer to the HW struct
3402  * @port_num: port number of the physical function
3403  * @mask: event mask to be set
3404  * @cd: pointer to command details structure or NULL
3405  *
3406  * Set event mask (0x0613)
3407  */
3408 enum ice_status
3409 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
3410 		      struct ice_sq_cd *cd)
3411 {
3412 	struct ice_aqc_set_event_mask *cmd;
3413 	struct ice_aq_desc desc;
3414 
3415 	cmd = &desc.params.set_event_mask;
3416 
3417 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
3418 
3419 	cmd->lport_num = port_num;
3420 
3421 	cmd->event_mask = cpu_to_le16(mask);
3422 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3423 }
3424 
3425 /**
3426  * ice_aq_set_mac_loopback
3427  * @hw: pointer to the HW struct
3428  * @ena_lpbk: Enable or Disable loopback
3429  * @cd: pointer to command details structure or NULL
3430  *
3431  * Enable/disable loopback on a given port
3432  */
3433 enum ice_status
3434 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
3435 {
3436 	struct ice_aqc_set_mac_lb *cmd;
3437 	struct ice_aq_desc desc;
3438 
3439 	cmd = &desc.params.set_mac_lb;
3440 
3441 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
3442 	if (ena_lpbk)
3443 		cmd->lb_mode = ICE_AQ_MAC_LB_EN;
3444 
3445 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3446 }
3447 
3448 /**
3449  * ice_aq_set_port_id_led
3450  * @pi: pointer to the port information
3451  * @is_orig_mode: is this LED set to original mode (by the net-list)
3452  * @cd: pointer to command details structure or NULL
3453  *
3454  * Set LED value for the given port (0x06e9)
3455  */
3456 enum ice_status
3457 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
3458 		       struct ice_sq_cd *cd)
3459 {
3460 	struct ice_aqc_set_port_id_led *cmd;
3461 	struct ice_hw *hw = pi->hw;
3462 	struct ice_aq_desc desc;
3463 
3464 	cmd = &desc.params.set_port_id_led;
3465 
3466 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
3467 
3468 	if (is_orig_mode)
3469 		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
3470 	else
3471 		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
3472 
3473 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3474 }
3475 
3476 /**
3477  * ice_aq_sff_eeprom
3478  * @hw: pointer to the HW struct
3479  * @lport: bits [7:0] = logical port, bit [8] = logical port valid
3480  * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
3481  * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding.
3482  * @page: QSFP page
3483  * @set_page: set or ignore the page
3484  * @data: pointer to data buffer to be read/written to the I2C device.
3485  * @length: 1-16 for read, 1 for write.
3486  * @write: 0 read, 1 for write.
3487  * @cd: pointer to command details structure or NULL
3488  *
3489  * Read/Write SFF EEPROM (0x06EE)
3490  */
3491 enum ice_status
3492 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
3493 		  u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
3494 		  bool write, struct ice_sq_cd *cd)
3495 {
3496 	struct ice_aqc_sff_eeprom *cmd;
3497 	struct ice_aq_desc desc;
3498 	enum ice_status status;
3499 
3500 	if (!data || (mem_addr & 0xff00))
3501 		return ICE_ERR_PARAM;
3502 
3503 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
3504 	cmd = &desc.params.read_write_sff_param;
3505 	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
3506 	cmd->lport_num = (u8)(lport & 0xff);
3507 	cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
3508 	cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) &
3509 					 ICE_AQC_SFF_I2CBUS_7BIT_M) |
3510 					((set_page <<
3511 					  ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
3512 					 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
3513 	cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
3514 	cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
3515 	if (write)
3516 		cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE);
3517 
3518 	status = ice_aq_send_cmd(hw, &desc, data, length, cd);
3519 	return status;
3520 }
3521 
3522 /**
3523  * __ice_aq_get_set_rss_lut
3524  * @hw: pointer to the hardware structure
3525  * @params: RSS LUT parameters
3526  * @set: set true to set the table, false to get the table
3527  *
3528  * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
3529  */
3530 static enum ice_status
3531 __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
3532 {
3533 	u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle;
3534 	struct ice_aqc_get_set_rss_lut *cmd_resp;
3535 	struct ice_aq_desc desc;
3536 	enum ice_status status;
3537 	u8 *lut;
3538 
3539 	if (!params)
3540 		return ICE_ERR_PARAM;
3541 
3542 	vsi_handle = params->vsi_handle;
3543 	lut = params->lut;
3544 
3545 	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3546 		return ICE_ERR_PARAM;
3547 
3548 	lut_size = params->lut_size;
3549 	lut_type = params->lut_type;
3550 	glob_lut_idx = params->global_lut_id;
3551 	vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3552 
3553 	cmd_resp = &desc.params.get_set_rss_lut;
3554 
3555 	if (set) {
3556 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
3557 		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3558 	} else {
3559 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
3560 	}
3561 
3562 	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
3563 					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
3564 					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
3565 				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);
3566 
3567 	switch (lut_type) {
3568 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
3569 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
3570 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
3571 		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
3572 			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
3573 		break;
3574 	default:
3575 		status = ICE_ERR_PARAM;
3576 		goto ice_aq_get_set_rss_lut_exit;
3577 	}
3578 
3579 	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
3580 		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
3581 			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
3582 
3583 		if (!set)
3584 			goto ice_aq_get_set_rss_lut_send;
3585 	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3586 		if (!set)
3587 			goto ice_aq_get_set_rss_lut_send;
3588 	} else {
3589 		goto ice_aq_get_set_rss_lut_send;
3590 	}
3591 
3592 	/* LUT size is only valid for Global and PF table types */
3593 	switch (lut_size) {
3594 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
3595 		break;
3596 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
3597 		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
3598 			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3599 			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3600 		break;
3601 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
3602 		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3603 			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
3604 				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3605 				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3606 			break;
3607 		}
3608 		fallthrough;
3609 	default:
3610 		status = ICE_ERR_PARAM;
3611 		goto ice_aq_get_set_rss_lut_exit;
3612 	}
3613 
3614 ice_aq_get_set_rss_lut_send:
3615 	cmd_resp->flags = cpu_to_le16(flags);
3616 	status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
3617 
3618 ice_aq_get_set_rss_lut_exit:
3619 	return status;
3620 }
3621 
3622 /**
3623  * ice_aq_get_rss_lut
3624  * @hw: pointer to the hardware structure
3625  * @get_params: RSS LUT parameters used to specify which RSS LUT to get
3626  *
3627  * get the RSS lookup table, PF or VSI type
3628  */
3629 enum ice_status
3630 ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
3631 {
3632 	return __ice_aq_get_set_rss_lut(hw, get_params, false);
3633 }
3634 
3635 /**
3636  * ice_aq_set_rss_lut
3637  * @hw: pointer to the hardware structure
3638  * @set_params: RSS LUT parameters used to specify how to set the RSS LUT
3639  *
3640  * set the RSS lookup table, PF or VSI type
3641  */
3642 enum ice_status
3643 ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
3644 {
3645 	return __ice_aq_get_set_rss_lut(hw, set_params, true);
3646 }
3647 
3648 /**
3649  * __ice_aq_get_set_rss_key
3650  * @hw: pointer to the HW struct
3651  * @vsi_id: VSI FW index
3652  * @key: pointer to key info struct
3653  * @set: set true to set the key, false to get the key
3654  *
3655  * get (0x0B04) or set (0x0B02) the RSS key per VSI
3656  */
3657 static enum
3658 ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
3659 				    struct ice_aqc_get_set_rss_keys *key,
3660 				    bool set)
3661 {
3662 	struct ice_aqc_get_set_rss_key *cmd_resp;
3663 	u16 key_size = sizeof(*key);
3664 	struct ice_aq_desc desc;
3665 
3666 	cmd_resp = &desc.params.get_set_rss_key;
3667 
3668 	if (set) {
3669 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
3670 		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3671 	} else {
3672 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
3673 	}
3674 
3675 	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
3676 					 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
3677 					ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
3678 				       ICE_AQC_GSET_RSS_KEY_VSI_VALID);
3679 
3680 	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
3681 }
3682 
3683 /**
3684  * ice_aq_get_rss_key
3685  * @hw: pointer to the HW struct
3686  * @vsi_handle: software VSI handle
3687  * @key: pointer to key info struct
3688  *
3689  * get the RSS key per VSI
3690  */
3691 enum ice_status
3692 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
3693 		   struct ice_aqc_get_set_rss_keys *key)
3694 {
3695 	if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
3696 		return ICE_ERR_PARAM;
3697 
3698 	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3699 					key, false);
3700 }
3701 
3702 /**
3703  * ice_aq_set_rss_key
3704  * @hw: pointer to the HW struct
3705  * @vsi_handle: software VSI handle
3706  * @keys: pointer to key info struct
3707  *
3708  * set the RSS key per VSI
3709  */
3710 enum ice_status
3711 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
3712 		   struct ice_aqc_get_set_rss_keys *keys)
3713 {
3714 	if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
3715 		return ICE_ERR_PARAM;
3716 
3717 	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3718 					keys, true);
3719 }
3720 
3721 /**
3722  * ice_aq_add_lan_txq
3723  * @hw: pointer to the hardware structure
3724  * @num_qgrps: Number of added queue groups
3725  * @qg_list: list of queue groups to be added
3726  * @buf_size: size of buffer for indirect command
3727  * @cd: pointer to command details structure or NULL
3728  *
3729  * Add Tx LAN queue (0x0C30)
3730  *
3731  * NOTE:
3732  * Prior to calling add Tx LAN queue:
3733  * Initialize the following as part of the Tx queue context:
3734  * Completion queue ID if the queue uses Completion queue, Quanta profile,
3735  * Cache profile and Packet shaper profile.
3736  *
3737  * After add Tx LAN queue AQ command is completed:
3738  * Interrupts should be associated with specific queues,
3739  * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
3740  * flow.
3741  */
3742 static enum ice_status
3743 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3744 		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
3745 		   struct ice_sq_cd *cd)
3746 {
3747 	struct ice_aqc_add_tx_qgrp *list;
3748 	struct ice_aqc_add_txqs *cmd;
3749 	struct ice_aq_desc desc;
3750 	u16 i, sum_size = 0;
3751 
3752 	cmd = &desc.params.add_txqs;
3753 
3754 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
3755 
3756 	if (!qg_list)
3757 		return ICE_ERR_PARAM;
3758 
3759 	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3760 		return ICE_ERR_PARAM;
3761 
3762 	for (i = 0, list = qg_list; i < num_qgrps; i++) {
3763 		sum_size += struct_size(list, txqs, list->num_txqs);
3764 		list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
3765 						      list->num_txqs);
3766 	}
3767 
3768 	if (buf_size != sum_size)
3769 		return ICE_ERR_PARAM;
3770 
3771 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3772 
3773 	cmd->num_qgrps = num_qgrps;
3774 
3775 	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3776 }
3777 
3778 /**
3779  * ice_aq_dis_lan_txq
3780  * @hw: pointer to the hardware structure
3781  * @num_qgrps: number of groups in the list
3782  * @qg_list: the list of groups to disable
3783  * @buf_size: the total size of the qg_list buffer in bytes
3784  * @rst_src: if called due to reset, specifies the reset source
3785  * @vmvf_num: the relative VM or VF number that is undergoing the reset
3786  * @cd: pointer to command details structure or NULL
3787  *
3788  * Disable LAN Tx queue (0x0C31)
3789  */
3790 static enum ice_status
3791 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3792 		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
3793 		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
3794 		   struct ice_sq_cd *cd)
3795 {
3796 	struct ice_aqc_dis_txq_item *item;
3797 	struct ice_aqc_dis_txqs *cmd;
3798 	struct ice_aq_desc desc;
3799 	enum ice_status status;
3800 	u16 i, sz = 0;
3801 
3802 	cmd = &desc.params.dis_txqs;
3803 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
3804 
3805 	/* qg_list can be NULL only in VM/VF reset flow */
3806 	if (!qg_list && !rst_src)
3807 		return ICE_ERR_PARAM;
3808 
3809 	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3810 		return ICE_ERR_PARAM;
3811 
3812 	cmd->num_entries = num_qgrps;
3813 
3814 	cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
3815 					    ICE_AQC_Q_DIS_TIMEOUT_M);
3816 
3817 	switch (rst_src) {
3818 	case ICE_VM_RESET:
3819 		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
3820 		cmd->vmvf_and_timeout |=
3821 			cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
3822 		break;
3823 	case ICE_VF_RESET:
3824 		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
3825 		/* In this case, FW expects vmvf_num to be absolute VF ID */
3826 		cmd->vmvf_and_timeout |=
3827 			cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
3828 				    ICE_AQC_Q_DIS_VMVF_NUM_M);
3829 		break;
3830 	case ICE_NO_RESET:
3831 	default:
3832 		break;
3833 	}
3834 
3835 	/* flush pipe on time out */
3836 	cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
3837 	/* If no queue group info, we are in a reset flow. Issue the AQ */
3838 	if (!qg_list)
3839 		goto do_aq;
3840 
3841 	/* set RD bit to indicate that command buffer is provided by the driver
3842 	 * and it needs to be read by the firmware
3843 	 */
3844 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3845 
3846 	for (i = 0, item = qg_list; i < num_qgrps; i++) {
3847 		u16 item_size = struct_size(item, q_id, item->num_qs);
3848 
3849 		/* If the num of queues is even, add 2 bytes of padding */
3850 		if ((item->num_qs % 2) == 0)
3851 			item_size += 2;
3852 
3853 		sz += item_size;
3854 
3855 		item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
3856 	}
3857 
3858 	if (buf_size != sz)
3859 		return ICE_ERR_PARAM;
3860 
3861 do_aq:
3862 	status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3863 	if (status) {
3864 		if (!qg_list)
3865 			ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
3866 				  vmvf_num, hw->adminq.sq_last_status);
3867 		else
3868 			ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
3869 				  le16_to_cpu(qg_list[0].q_id[0]),
3870 				  hw->adminq.sq_last_status);
3871 	}
3872 	return status;
3873 }
3874 
3875 /**
3876  * ice_aq_add_rdma_qsets
3877  * @hw: pointer to the hardware structure
3878  * @num_qset_grps: Number of RDMA Qset groups
3879  * @qset_list: list of Qset groups to be added
3880  * @buf_size: size of buffer for indirect command
3881  * @cd: pointer to command details structure or NULL
3882  *
3883  * Add Tx RDMA Qsets (0x0C33)
3884  */
3885 static int
3886 ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
3887 		      struct ice_aqc_add_rdma_qset_data *qset_list,
3888 		      u16 buf_size, struct ice_sq_cd *cd)
3889 {
3890 	struct ice_aqc_add_rdma_qset_data *list;
3891 	struct ice_aqc_add_rdma_qset *cmd;
3892 	struct ice_aq_desc desc;
3893 	u16 i, sum_size = 0;
3894 
3895 	cmd = &desc.params.add_rdma_qset;
3896 
3897 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset);
3898 
3899 	if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS)
3900 		return -EINVAL;
3901 
3902 	for (i = 0, list = qset_list; i < num_qset_grps; i++) {
3903 		u16 num_qsets = le16_to_cpu(list->num_qsets);
3904 
3905 		sum_size += struct_size(list, rdma_qsets, num_qsets);
3906 		list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets +
3907 							     num_qsets);
3908 	}
3909 
3910 	if (buf_size != sum_size)
3911 		return -EINVAL;
3912 
3913 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3914 
3915 	cmd->num_qset_grps = num_qset_grps;
3916 
3917 	return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, qset_list,
3918 						   buf_size, cd));
3919 }
3920 
3921 /* End of FW Admin Queue command wrappers */
3922 
3923 /**
3924  * ice_write_byte - write a byte to a packed context structure
3925  * @src_ctx:  the context structure to read from
3926  * @dest_ctx: the context to be written to
3927  * @ce_info:  a description of the struct to be filled
3928  */
3929 static void
3930 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3931 {
3932 	u8 src_byte, dest_byte, mask;
3933 	u8 *from, *dest;
3934 	u16 shift_width;
3935 
3936 	/* copy from the next struct field */
3937 	from = src_ctx + ce_info->offset;
3938 
3939 	/* prepare the bits and mask */
3940 	shift_width = ce_info->lsb % 8;
3941 	mask = (u8)(BIT(ce_info->width) - 1);
3942 
3943 	src_byte = *from;
3944 	src_byte &= mask;
3945 
3946 	/* shift to correct alignment */
3947 	mask <<= shift_width;
3948 	src_byte <<= shift_width;
3949 
3950 	/* get the current bits from the target bit string */
3951 	dest = dest_ctx + (ce_info->lsb / 8);
3952 
3953 	memcpy(&dest_byte, dest, sizeof(dest_byte));
3954 
3955 	dest_byte &= ~mask;	/* get the bits not changing */
3956 	dest_byte |= src_byte;	/* add in the new bits */
3957 
3958 	/* put it all back */
3959 	memcpy(dest, &dest_byte, sizeof(dest_byte));
3960 }
3961 
3962 /**
3963  * ice_write_word - write a word to a packed context structure
3964  * @src_ctx:  the context structure to read from
3965  * @dest_ctx: the context to be written to
3966  * @ce_info:  a description of the struct to be filled
3967  */
3968 static void
3969 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3970 {
3971 	u16 src_word, mask;
3972 	__le16 dest_word;
3973 	u8 *from, *dest;
3974 	u16 shift_width;
3975 
3976 	/* copy from the next struct field */
3977 	from = src_ctx + ce_info->offset;
3978 
3979 	/* prepare the bits and mask */
3980 	shift_width = ce_info->lsb % 8;
3981 	mask = BIT(ce_info->width) - 1;
3982 
3983 	/* don't swizzle the bits until after the mask because the mask bits
3984 	 * will be in a different bit position on big endian machines
3985 	 */
3986 	src_word = *(u16 *)from;
3987 	src_word &= mask;
3988 
3989 	/* shift to correct alignment */
3990 	mask <<= shift_width;
3991 	src_word <<= shift_width;
3992 
3993 	/* get the current bits from the target bit string */
3994 	dest = dest_ctx + (ce_info->lsb / 8);
3995 
3996 	memcpy(&dest_word, dest, sizeof(dest_word));
3997 
3998 	dest_word &= ~(cpu_to_le16(mask));	/* get the bits not changing */
3999 	dest_word |= cpu_to_le16(src_word);	/* add in the new bits */
4000 
4001 	/* put it all back */
4002 	memcpy(dest, &dest_word, sizeof(dest_word));
4003 }
4004 
4005 /**
4006  * ice_write_dword - write a dword to a packed context structure
4007  * @src_ctx:  the context structure to read from
4008  * @dest_ctx: the context to be written to
4009  * @ce_info:  a description of the struct to be filled
4010  */
4011 static void
4012 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4013 {
4014 	u32 src_dword, mask;
4015 	__le32 dest_dword;
4016 	u8 *from, *dest;
4017 	u16 shift_width;
4018 
4019 	/* copy from the next struct field */
4020 	from = src_ctx + ce_info->offset;
4021 
4022 	/* prepare the bits and mask */
4023 	shift_width = ce_info->lsb % 8;
4024 
4025 	/* if the field width is exactly 32 on an x86 machine, then the shift
4026 	 * operation will not work because the SHL instructions count is masked
4027 	 * to 5 bits so the shift will do nothing
4028 	 */
4029 	if (ce_info->width < 32)
4030 		mask = BIT(ce_info->width) - 1;
4031 	else
4032 		mask = (u32)~0;
4033 
4034 	/* don't swizzle the bits until after the mask because the mask bits
4035 	 * will be in a different bit position on big endian machines
4036 	 */
4037 	src_dword = *(u32 *)from;
4038 	src_dword &= mask;
4039 
4040 	/* shift to correct alignment */
4041 	mask <<= shift_width;
4042 	src_dword <<= shift_width;
4043 
4044 	/* get the current bits from the target bit string */
4045 	dest = dest_ctx + (ce_info->lsb / 8);
4046 
4047 	memcpy(&dest_dword, dest, sizeof(dest_dword));
4048 
4049 	dest_dword &= ~(cpu_to_le32(mask));	/* get the bits not changing */
4050 	dest_dword |= cpu_to_le32(src_dword);	/* add in the new bits */
4051 
4052 	/* put it all back */
4053 	memcpy(dest, &dest_dword, sizeof(dest_dword));
4054 }
4055 
4056 /**
4057  * ice_write_qword - write a qword to a packed context structure
4058  * @src_ctx:  the context structure to read from
4059  * @dest_ctx: the context to be written to
4060  * @ce_info:  a description of the struct to be filled
4061  */
4062 static void
4063 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4064 {
4065 	u64 src_qword, mask;
4066 	__le64 dest_qword;
4067 	u8 *from, *dest;
4068 	u16 shift_width;
4069 
4070 	/* copy from the next struct field */
4071 	from = src_ctx + ce_info->offset;
4072 
4073 	/* prepare the bits and mask */
4074 	shift_width = ce_info->lsb % 8;
4075 
4076 	/* if the field width is exactly 64 on an x86 machine, then the shift
4077 	 * operation will not work because the SHL instructions count is masked
4078 	 * to 6 bits so the shift will do nothing
4079 	 */
4080 	if (ce_info->width < 64)
4081 		mask = BIT_ULL(ce_info->width) - 1;
4082 	else
4083 		mask = (u64)~0;
4084 
4085 	/* don't swizzle the bits until after the mask because the mask bits
4086 	 * will be in a different bit position on big endian machines
4087 	 */
4088 	src_qword = *(u64 *)from;
4089 	src_qword &= mask;
4090 
4091 	/* shift to correct alignment */
4092 	mask <<= shift_width;
4093 	src_qword <<= shift_width;
4094 
4095 	/* get the current bits from the target bit string */
4096 	dest = dest_ctx + (ce_info->lsb / 8);
4097 
4098 	memcpy(&dest_qword, dest, sizeof(dest_qword));
4099 
4100 	dest_qword &= ~(cpu_to_le64(mask));	/* get the bits not changing */
4101 	dest_qword |= cpu_to_le64(src_qword);	/* add in the new bits */
4102 
4103 	/* put it all back */
4104 	memcpy(dest, &dest_qword, sizeof(dest_qword));
4105 }
4106 
4107 /**
4108  * ice_set_ctx - set context bits in packed structure
4109  * @hw: pointer to the hardware structure
4110  * @src_ctx:  pointer to a generic non-packed context structure
4111  * @dest_ctx: pointer to memory for the packed structure
4112  * @ce_info:  a description of the structure to be transformed
4113  */
4114 enum ice_status
4115 ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
4116 	    const struct ice_ctx_ele *ce_info)
4117 {
4118 	int f;
4119 
4120 	for (f = 0; ce_info[f].width; f++) {
4121 		/* We have to deal with each element of the FW response
4122 		 * using the correct size so that we are correct regardless
4123 		 * of the endianness of the machine.
4124 		 */
4125 		if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
4126 			ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
4127 				  f, ce_info[f].width, ce_info[f].size_of);
4128 			continue;
4129 		}
4130 		switch (ce_info[f].size_of) {
4131 		case sizeof(u8):
4132 			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
4133 			break;
4134 		case sizeof(u16):
4135 			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
4136 			break;
4137 		case sizeof(u32):
4138 			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
4139 			break;
4140 		case sizeof(u64):
4141 			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
4142 			break;
4143 		default:
4144 			return ICE_ERR_INVAL_SIZE;
4145 		}
4146 	}
4147 
4148 	return 0;
4149 }
4150 
4151 /**
4152  * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
4153  * @hw: pointer to the HW struct
4154  * @vsi_handle: software VSI handle
4155  * @tc: TC number
4156  * @q_handle: software queue handle
4157  */
4158 struct ice_q_ctx *
4159 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
4160 {
4161 	struct ice_vsi_ctx *vsi;
4162 	struct ice_q_ctx *q_ctx;
4163 
4164 	vsi = ice_get_vsi_ctx(hw, vsi_handle);
4165 	if (!vsi)
4166 		return NULL;
4167 	if (q_handle >= vsi->num_lan_q_entries[tc])
4168 		return NULL;
4169 	if (!vsi->lan_q_ctx[tc])
4170 		return NULL;
4171 	q_ctx = vsi->lan_q_ctx[tc];
4172 	return &q_ctx[q_handle];
4173 }
4174 
4175 /**
4176  * ice_ena_vsi_txq
4177  * @pi: port information structure
4178  * @vsi_handle: software VSI handle
4179  * @tc: TC number
4180  * @q_handle: software queue handle
4181  * @num_qgrps: Number of added queue groups
4182  * @buf: list of queue groups to be added
4183  * @buf_size: size of buffer for indirect command
4184  * @cd: pointer to command details structure or NULL
4185  *
4186  * This function adds one LAN queue
4187  */
4188 enum ice_status
4189 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
4190 		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
4191 		struct ice_sq_cd *cd)
4192 {
4193 	struct ice_aqc_txsched_elem_data node = { 0 };
4194 	struct ice_sched_node *parent;
4195 	struct ice_q_ctx *q_ctx;
4196 	enum ice_status status;
4197 	struct ice_hw *hw;
4198 
4199 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4200 		return ICE_ERR_CFG;
4201 
4202 	if (num_qgrps > 1 || buf->num_txqs > 1)
4203 		return ICE_ERR_MAX_LIMIT;
4204 
4205 	hw = pi->hw;
4206 
4207 	if (!ice_is_vsi_valid(hw, vsi_handle))
4208 		return ICE_ERR_PARAM;
4209 
4210 	mutex_lock(&pi->sched_lock);
4211 
4212 	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
4213 	if (!q_ctx) {
4214 		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
4215 			  q_handle);
4216 		status = ICE_ERR_PARAM;
4217 		goto ena_txq_exit;
4218 	}
4219 
4220 	/* find a parent node */
4221 	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
4222 					    ICE_SCHED_NODE_OWNER_LAN);
4223 	if (!parent) {
4224 		status = ICE_ERR_PARAM;
4225 		goto ena_txq_exit;
4226 	}
4227 
4228 	buf->parent_teid = parent->info.node_teid;
4229 	node.parent_teid = parent->info.node_teid;
4230 	/* Mark that the values in the "generic" section as valid. The default
4231 	 * value in the "generic" section is zero. This means that :
4232 	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
4233 	 * - 0 priority among siblings, indicated by Bit 1-3.
4234 	 * - WFQ, indicated by Bit 4.
4235 	 * - 0 Adjustment value is used in PSM credit update flow, indicated by
4236 	 * Bit 5-6.
4237 	 * - Bit 7 is reserved.
4238 	 * Without setting the generic section as valid in valid_sections, the
4239 	 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
4240 	 */
4241 	buf->txqs[0].info.valid_sections =
4242 		ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
4243 		ICE_AQC_ELEM_VALID_EIR;
4244 	buf->txqs[0].info.generic = 0;
4245 	buf->txqs[0].info.cir_bw.bw_profile_idx =
4246 		cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
4247 	buf->txqs[0].info.cir_bw.bw_alloc =
4248 		cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
4249 	buf->txqs[0].info.eir_bw.bw_profile_idx =
4250 		cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
4251 	buf->txqs[0].info.eir_bw.bw_alloc =
4252 		cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
4253 
4254 	/* add the LAN queue */
4255 	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
4256 	if (status) {
4257 		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
4258 			  le16_to_cpu(buf->txqs[0].txq_id),
4259 			  hw->adminq.sq_last_status);
4260 		goto ena_txq_exit;
4261 	}
4262 
4263 	node.node_teid = buf->txqs[0].q_teid;
4264 	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
4265 	q_ctx->q_handle = q_handle;
4266 	q_ctx->q_teid = le32_to_cpu(node.node_teid);
4267 
4268 	/* add a leaf node into scheduler tree queue layer */
4269 	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
4270 	if (!status)
4271 		status = ice_sched_replay_q_bw(pi, q_ctx);
4272 
4273 ena_txq_exit:
4274 	mutex_unlock(&pi->sched_lock);
4275 	return status;
4276 }
4277 
4278 /**
4279  * ice_dis_vsi_txq
4280  * @pi: port information structure
4281  * @vsi_handle: software VSI handle
4282  * @tc: TC number
4283  * @num_queues: number of queues
4284  * @q_handles: pointer to software queue handle array
4285  * @q_ids: pointer to the q_id array
4286  * @q_teids: pointer to queue node teids
4287  * @rst_src: if called due to reset, specifies the reset source
4288  * @vmvf_num: the relative VM or VF number that is undergoing the reset
4289  * @cd: pointer to command details structure or NULL
4290  *
4291  * This function removes queues and their corresponding nodes in SW DB
4292  */
4293 enum ice_status
4294 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
4295 		u16 *q_handles, u16 *q_ids, u32 *q_teids,
4296 		enum ice_disq_rst_src rst_src, u16 vmvf_num,
4297 		struct ice_sq_cd *cd)
4298 {
4299 	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
4300 	struct ice_aqc_dis_txq_item *qg_list;
4301 	struct ice_q_ctx *q_ctx;
4302 	struct ice_hw *hw;
4303 	u16 i, buf_size;
4304 
4305 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4306 		return ICE_ERR_CFG;
4307 
4308 	hw = pi->hw;
4309 
4310 	if (!num_queues) {
4311 		/* if queue is disabled already yet the disable queue command
4312 		 * has to be sent to complete the VF reset, then call
4313 		 * ice_aq_dis_lan_txq without any queue information
4314 		 */
4315 		if (rst_src)
4316 			return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
4317 						  vmvf_num, NULL);
4318 		return ICE_ERR_CFG;
4319 	}
4320 
4321 	buf_size = struct_size(qg_list, q_id, 1);
4322 	qg_list = kzalloc(buf_size, GFP_KERNEL);
4323 	if (!qg_list)
4324 		return ICE_ERR_NO_MEMORY;
4325 
4326 	mutex_lock(&pi->sched_lock);
4327 
4328 	for (i = 0; i < num_queues; i++) {
4329 		struct ice_sched_node *node;
4330 
4331 		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
4332 		if (!node)
4333 			continue;
4334 		q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
4335 		if (!q_ctx) {
4336 			ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle%d\n",
4337 				  q_handles[i]);
4338 			continue;
4339 		}
4340 		if (q_ctx->q_handle != q_handles[i]) {
4341 			ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
4342 				  q_ctx->q_handle, q_handles[i]);
4343 			continue;
4344 		}
4345 		qg_list->parent_teid = node->info.parent_teid;
4346 		qg_list->num_qs = 1;
4347 		qg_list->q_id[0] = cpu_to_le16(q_ids[i]);
4348 		status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
4349 					    vmvf_num, cd);
4350 
4351 		if (status)
4352 			break;
4353 		ice_free_sched_node(pi, node);
4354 		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
4355 	}
4356 	mutex_unlock(&pi->sched_lock);
4357 	kfree(qg_list);
4358 	return status;
4359 }
4360 
4361 /**
4362  * ice_cfg_vsi_qs - configure the new/existing VSI queues
4363  * @pi: port information structure
4364  * @vsi_handle: software VSI handle
4365  * @tc_bitmap: TC bitmap
4366  * @maxqs: max queues array per TC
4367  * @owner: LAN or RDMA
4368  *
4369  * This function adds/updates the VSI queues per TC.
4370  */
4371 static enum ice_status
4372 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
4373 	       u16 *maxqs, u8 owner)
4374 {
4375 	enum ice_status status = 0;
4376 	u8 i;
4377 
4378 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4379 		return ICE_ERR_CFG;
4380 
4381 	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
4382 		return ICE_ERR_PARAM;
4383 
4384 	mutex_lock(&pi->sched_lock);
4385 
4386 	ice_for_each_traffic_class(i) {
4387 		/* configuration is possible only if TC node is present */
4388 		if (!ice_sched_get_tc_node(pi, i))
4389 			continue;
4390 
4391 		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
4392 					   ice_is_tc_ena(tc_bitmap, i));
4393 		if (status)
4394 			break;
4395 	}
4396 
4397 	mutex_unlock(&pi->sched_lock);
4398 	return status;
4399 }
4400 
4401 /**
4402  * ice_cfg_vsi_lan - configure VSI LAN queues
4403  * @pi: port information structure
4404  * @vsi_handle: software VSI handle
4405  * @tc_bitmap: TC bitmap
4406  * @max_lanqs: max LAN queues array per TC
4407  *
4408  * This function adds/updates the VSI LAN queues per TC.
4409  */
4410 enum ice_status
4411 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
4412 		u16 *max_lanqs)
4413 {
4414 	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
4415 			      ICE_SCHED_NODE_OWNER_LAN);
4416 }
4417 
4418 /**
4419  * ice_cfg_vsi_rdma - configure the VSI RDMA queues
4420  * @pi: port information structure
4421  * @vsi_handle: software VSI handle
4422  * @tc_bitmap: TC bitmap
4423  * @max_rdmaqs: max RDMA queues array per TC
4424  *
4425  * This function adds/updates the VSI RDMA queues per TC.
4426  */
4427 int
4428 ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
4429 		 u16 *max_rdmaqs)
4430 {
4431 	return ice_status_to_errno(ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap,
4432 						  max_rdmaqs,
4433 						  ICE_SCHED_NODE_OWNER_RDMA));
4434 }
4435 
4436 /**
4437  * ice_ena_vsi_rdma_qset
4438  * @pi: port information structure
4439  * @vsi_handle: software VSI handle
4440  * @tc: TC number
4441  * @rdma_qset: pointer to RDMA Qset
4442  * @num_qsets: number of RDMA Qsets
4443  * @qset_teid: pointer to Qset node TEIDs
4444  *
4445  * This function adds RDMA Qset
4446  */
4447 int
4448 ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
4449 		      u16 *rdma_qset, u16 num_qsets, u32 *qset_teid)
4450 {
4451 	struct ice_aqc_txsched_elem_data node = { 0 };
4452 	struct ice_aqc_add_rdma_qset_data *buf;
4453 	struct ice_sched_node *parent;
4454 	enum ice_status status;
4455 	struct ice_hw *hw;
4456 	u16 i, buf_size;
4457 	int ret;
4458 
4459 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4460 		return -EIO;
4461 	hw = pi->hw;
4462 
4463 	if (!ice_is_vsi_valid(hw, vsi_handle))
4464 		return -EINVAL;
4465 
4466 	buf_size = struct_size(buf, rdma_qsets, num_qsets);
4467 	buf = kzalloc(buf_size, GFP_KERNEL);
4468 	if (!buf)
4469 		return -ENOMEM;
4470 	mutex_lock(&pi->sched_lock);
4471 
4472 	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
4473 					    ICE_SCHED_NODE_OWNER_RDMA);
4474 	if (!parent) {
4475 		ret = -EINVAL;
4476 		goto rdma_error_exit;
4477 	}
4478 	buf->parent_teid = parent->info.node_teid;
4479 	node.parent_teid = parent->info.node_teid;
4480 
4481 	buf->num_qsets = cpu_to_le16(num_qsets);
4482 	for (i = 0; i < num_qsets; i++) {
4483 		buf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]);
4484 		buf->rdma_qsets[i].info.valid_sections =
4485 			ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
4486 			ICE_AQC_ELEM_VALID_EIR;
4487 		buf->rdma_qsets[i].info.generic = 0;
4488 		buf->rdma_qsets[i].info.cir_bw.bw_profile_idx =
4489 			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
4490 		buf->rdma_qsets[i].info.cir_bw.bw_alloc =
4491 			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
4492 		buf->rdma_qsets[i].info.eir_bw.bw_profile_idx =
4493 			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
4494 		buf->rdma_qsets[i].info.eir_bw.bw_alloc =
4495 			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
4496 	}
4497 	ret = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL);
4498 	if (ret) {
4499 		ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n");
4500 		goto rdma_error_exit;
4501 	}
4502 	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
4503 	for (i = 0; i < num_qsets; i++) {
4504 		node.node_teid = buf->rdma_qsets[i].qset_teid;
4505 		status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
4506 					    &node);
4507 		if (status) {
4508 			ret = ice_status_to_errno(status);
4509 			break;
4510 		}
4511 		qset_teid[i] = le32_to_cpu(node.node_teid);
4512 	}
4513 rdma_error_exit:
4514 	mutex_unlock(&pi->sched_lock);
4515 	kfree(buf);
4516 	return ret;
4517 }
4518 
4519 /**
4520  * ice_dis_vsi_rdma_qset - free RDMA resources
4521  * @pi: port_info struct
4522  * @count: number of RDMA Qsets to free
4523  * @qset_teid: TEID of Qset node
4524  * @q_id: list of queue IDs being disabled
4525  */
4526 int
4527 ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
4528 		      u16 *q_id)
4529 {
4530 	struct ice_aqc_dis_txq_item *qg_list;
4531 	enum ice_status status = 0;
4532 	struct ice_hw *hw;
4533 	u16 qg_size;
4534 	int i;
4535 
4536 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4537 		return -EIO;
4538 
4539 	hw = pi->hw;
4540 
4541 	qg_size = struct_size(qg_list, q_id, 1);
4542 	qg_list = kzalloc(qg_size, GFP_KERNEL);
4543 	if (!qg_list)
4544 		return -ENOMEM;
4545 
4546 	mutex_lock(&pi->sched_lock);
4547 
4548 	for (i = 0; i < count; i++) {
4549 		struct ice_sched_node *node;
4550 
4551 		node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]);
4552 		if (!node)
4553 			continue;
4554 
4555 		qg_list->parent_teid = node->info.parent_teid;
4556 		qg_list->num_qs = 1;
4557 		qg_list->q_id[0] =
4558 			cpu_to_le16(q_id[i] |
4559 				    ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET);
4560 
4561 		status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size,
4562 					    ICE_NO_RESET, 0, NULL);
4563 		if (status)
4564 			break;
4565 
4566 		ice_free_sched_node(pi, node);
4567 	}
4568 
4569 	mutex_unlock(&pi->sched_lock);
4570 	kfree(qg_list);
4571 	return ice_status_to_errno(status);
4572 }
4573 
4574 /**
4575  * ice_replay_pre_init - replay pre initialization
4576  * @hw: pointer to the HW struct
4577  *
4578  * Initializes required config data for VSI, FD, ACL, and RSS before replay.
4579  */
4580 static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
4581 {
4582 	struct ice_switch_info *sw = hw->switch_info;
4583 	u8 i;
4584 
4585 	/* Delete old entries from replay filter list head if there is any */
4586 	ice_rm_all_sw_replay_rule_info(hw);
4587 	/* In start of replay, move entries into replay_rules list, it
4588 	 * will allow adding rules entries back to filt_rules list,
4589 	 * which is operational list.
4590 	 */
4591 	for (i = 0; i < ICE_SW_LKUP_LAST; i++)
4592 		list_replace_init(&sw->recp_list[i].filt_rules,
4593 				  &sw->recp_list[i].filt_replay_rules);
4594 	ice_sched_replay_agg_vsi_preinit(hw);
4595 
4596 	return 0;
4597 }
4598 
4599 /**
4600  * ice_replay_vsi - replay VSI configuration
4601  * @hw: pointer to the HW struct
4602  * @vsi_handle: driver VSI handle
4603  *
4604  * Restore all VSI configuration after reset. It is required to call this
4605  * function with main VSI first.
4606  */
4607 enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
4608 {
4609 	enum ice_status status;
4610 
4611 	if (!ice_is_vsi_valid(hw, vsi_handle))
4612 		return ICE_ERR_PARAM;
4613 
4614 	/* Replay pre-initialization if there is any */
4615 	if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
4616 		status = ice_replay_pre_init(hw);
4617 		if (status)
4618 			return status;
4619 	}
4620 	/* Replay per VSI all RSS configurations */
4621 	status = ice_replay_rss_cfg(hw, vsi_handle);
4622 	if (status)
4623 		return status;
4624 	/* Replay per VSI all filters */
4625 	status = ice_replay_vsi_all_fltr(hw, vsi_handle);
4626 	if (!status)
4627 		status = ice_replay_vsi_agg(hw, vsi_handle);
4628 	return status;
4629 }
4630 
4631 /**
4632  * ice_replay_post - post replay configuration cleanup
4633  * @hw: pointer to the HW struct
4634  *
4635  * Post replay cleanup.
4636  */
4637 void ice_replay_post(struct ice_hw *hw)
4638 {
4639 	/* Delete old entries from replay filter list head */
4640 	ice_rm_all_sw_replay_rule_info(hw);
4641 	ice_sched_replay_agg(hw);
4642 }
4643 
4644 /**
4645  * ice_stat_update40 - read 40 bit stat from the chip and update stat values
4646  * @hw: ptr to the hardware info
4647  * @reg: offset of 64 bit HW register to read from
4648  * @prev_stat_loaded: bool to specify if previous stats are loaded
4649  * @prev_stat: ptr to previous loaded stat value
4650  * @cur_stat: ptr to current stat value
4651  */
4652 void
4653 ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4654 		  u64 *prev_stat, u64 *cur_stat)
4655 {
4656 	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
4657 
4658 	/* device stats are not reset at PFR, they likely will not be zeroed
4659 	 * when the driver starts. Thus, save the value from the first read
4660 	 * without adding to the statistic value so that we report stats which
4661 	 * count up from zero.
4662 	 */
4663 	if (!prev_stat_loaded) {
4664 		*prev_stat = new_data;
4665 		return;
4666 	}
4667 
4668 	/* Calculate the difference between the new and old values, and then
4669 	 * add it to the software stat value.
4670 	 */
4671 	if (new_data >= *prev_stat)
4672 		*cur_stat += new_data - *prev_stat;
4673 	else
4674 		/* to manage the potential roll-over */
4675 		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
4676 
4677 	/* Update the previously stored value to prepare for next read */
4678 	*prev_stat = new_data;
4679 }
4680 
4681 /**
4682  * ice_stat_update32 - read 32 bit stat from the chip and update stat values
4683  * @hw: ptr to the hardware info
4684  * @reg: offset of HW register to read from
4685  * @prev_stat_loaded: bool to specify if previous stats are loaded
4686  * @prev_stat: ptr to previous loaded stat value
4687  * @cur_stat: ptr to current stat value
4688  */
4689 void
4690 ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4691 		  u64 *prev_stat, u64 *cur_stat)
4692 {
4693 	u32 new_data;
4694 
4695 	new_data = rd32(hw, reg);
4696 
4697 	/* device stats are not reset at PFR, they likely will not be zeroed
4698 	 * when the driver starts. Thus, save the value from the first read
4699 	 * without adding to the statistic value so that we report stats which
4700 	 * count up from zero.
4701 	 */
4702 	if (!prev_stat_loaded) {
4703 		*prev_stat = new_data;
4704 		return;
4705 	}
4706 
4707 	/* Calculate the difference between the new and old values, and then
4708 	 * add it to the software stat value.
4709 	 */
4710 	if (new_data >= *prev_stat)
4711 		*cur_stat += new_data - *prev_stat;
4712 	else
4713 		/* to manage the potential roll-over */
4714 		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
4715 
4716 	/* Update the previously stored value to prepare for next read */
4717 	*prev_stat = new_data;
4718 }
4719 
4720 /**
4721  * ice_sched_query_elem - query element information from HW
4722  * @hw: pointer to the HW struct
4723  * @node_teid: node TEID to be queried
4724  * @buf: buffer to element information
4725  *
4726  * This function queries HW element information
4727  */
4728 enum ice_status
4729 ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
4730 		     struct ice_aqc_txsched_elem_data *buf)
4731 {
4732 	u16 buf_size, num_elem_ret = 0;
4733 	enum ice_status status;
4734 
4735 	buf_size = sizeof(*buf);
4736 	memset(buf, 0, buf_size);
4737 	buf->node_teid = cpu_to_le32(node_teid);
4738 	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
4739 					  NULL);
4740 	if (status || num_elem_ret != 1)
4741 		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
4742 	return status;
4743 }
4744 
4745 /**
4746  * ice_aq_set_driver_param - Set driver parameter to share via firmware
4747  * @hw: pointer to the HW struct
4748  * @idx: parameter index to set
4749  * @value: the value to set the parameter to
4750  * @cd: pointer to command details structure or NULL
4751  *
4752  * Set the value of one of the software defined parameters. All PFs connected
4753  * to this device can read the value using ice_aq_get_driver_param.
4754  *
4755  * Note that firmware provides no synchronization or locking, and will not
4756  * save the parameter value during a device reset. It is expected that
4757  * a single PF will write the parameter value, while all other PFs will only
4758  * read it.
4759  */
4760 int
4761 ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
4762 			u32 value, struct ice_sq_cd *cd)
4763 {
4764 	struct ice_aqc_driver_shared_params *cmd;
4765 	struct ice_aq_desc desc;
4766 
4767 	if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
4768 		return -EIO;
4769 
4770 	cmd = &desc.params.drv_shared_params;
4771 
4772 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);
4773 
4774 	cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_SET;
4775 	cmd->param_indx = idx;
4776 	cmd->param_val = cpu_to_le32(value);
4777 
4778 	return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, NULL, 0, cd));
4779 }
4780 
4781 /**
4782  * ice_aq_get_driver_param - Get driver parameter shared via firmware
4783  * @hw: pointer to the HW struct
4784  * @idx: parameter index to set
4785  * @value: storage to return the shared parameter
4786  * @cd: pointer to command details structure or NULL
4787  *
4788  * Get the value of one of the software defined parameters.
4789  *
4790  * Note that firmware provides no synchronization or locking. It is expected
4791  * that only a single PF will write a given parameter.
4792  */
4793 int
4794 ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
4795 			u32 *value, struct ice_sq_cd *cd)
4796 {
4797 	struct ice_aqc_driver_shared_params *cmd;
4798 	struct ice_aq_desc desc;
4799 	enum ice_status status;
4800 
4801 	if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
4802 		return -EIO;
4803 
4804 	cmd = &desc.params.drv_shared_params;
4805 
4806 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);
4807 
4808 	cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_GET;
4809 	cmd->param_indx = idx;
4810 
4811 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4812 	if (status)
4813 		return ice_status_to_errno(status);
4814 
4815 	*value = le32_to_cpu(cmd->param_val);
4816 
4817 	return 0;
4818 }
4819 
4820 /**
4821  * ice_aq_set_gpio
4822  * @hw: pointer to the hw struct
4823  * @gpio_ctrl_handle: GPIO controller node handle
4824  * @pin_idx: IO Number of the GPIO that needs to be set
4825  * @value: SW provide IO value to set in the LSB
4826  * @cd: pointer to command details structure or NULL
4827  *
4828  * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology
4829  */
4830 int
4831 ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
4832 		struct ice_sq_cd *cd)
4833 {
4834 	struct ice_aqc_gpio *cmd;
4835 	struct ice_aq_desc desc;
4836 
4837 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
4838 	cmd = &desc.params.read_write_gpio;
4839 	cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
4840 	cmd->gpio_num = pin_idx;
4841 	cmd->gpio_val = value ? 1 : 0;
4842 
4843 	return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, NULL, 0, cd));
4844 }
4845 
4846 /**
4847  * ice_aq_get_gpio
4848  * @hw: pointer to the hw struct
4849  * @gpio_ctrl_handle: GPIO controller node handle
4850  * @pin_idx: IO Number of the GPIO that needs to be set
4851  * @value: IO value read
4852  * @cd: pointer to command details structure or NULL
4853  *
4854  * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
4855  * the topology
4856  */
4857 int
4858 ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
4859 		bool *value, struct ice_sq_cd *cd)
4860 {
4861 	struct ice_aqc_gpio *cmd;
4862 	struct ice_aq_desc desc;
4863 	enum ice_status status;
4864 
4865 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
4866 	cmd = &desc.params.read_write_gpio;
4867 	cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
4868 	cmd->gpio_num = pin_idx;
4869 
4870 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4871 	if (status)
4872 		return ice_status_to_errno(status);
4873 
4874 	*value = !!cmd->gpio_val;
4875 	return 0;
4876 }
4877 
4878 /**
4879  * ice_fw_supports_link_override
4880  * @hw: pointer to the hardware structure
4881  *
4882  * Checks if the firmware supports link override
4883  */
4884 bool ice_fw_supports_link_override(struct ice_hw *hw)
4885 {
4886 	if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
4887 		if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
4888 			return true;
4889 		if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
4890 		    hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
4891 			return true;
4892 	} else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
4893 		return true;
4894 	}
4895 
4896 	return false;
4897 }
4898 
4899 /**
4900  * ice_get_link_default_override
4901  * @ldo: pointer to the link default override struct
4902  * @pi: pointer to the port info struct
4903  *
4904  * Gets the link default override for a port
4905  */
4906 enum ice_status
4907 ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
4908 			      struct ice_port_info *pi)
4909 {
4910 	u16 i, tlv, tlv_len, tlv_start, buf, offset;
4911 	struct ice_hw *hw = pi->hw;
4912 	enum ice_status status;
4913 
4914 	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
4915 					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
4916 	if (status) {
4917 		ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
4918 		return status;
4919 	}
4920 
4921 	/* Each port has its own config; calculate for our port */
4922 	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
4923 		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;
4924 
4925 	/* link options first */
4926 	status = ice_read_sr_word(hw, tlv_start, &buf);
4927 	if (status) {
4928 		ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
4929 		return status;
4930 	}
4931 	ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
4932 	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
4933 		ICE_LINK_OVERRIDE_PHY_CFG_S;
4934 
4935 	/* link PHY config */
4936 	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
4937 	status = ice_read_sr_word(hw, offset, &buf);
4938 	if (status) {
4939 		ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
4940 		return status;
4941 	}
4942 	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;
4943 
4944 	/* PHY types low */
4945 	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
4946 	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
4947 		status = ice_read_sr_word(hw, (offset + i), &buf);
4948 		if (status) {
4949 			ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
4950 			return status;
4951 		}
4952 		/* shift 16 bits at a time to fill 64 bits */
4953 		ldo->phy_type_low |= ((u64)buf << (i * 16));
4954 	}
4955 
4956 	/* PHY types high */
4957 	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
4958 		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
4959 	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
4960 		status = ice_read_sr_word(hw, (offset + i), &buf);
4961 		if (status) {
4962 			ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
4963 			return status;
4964 		}
4965 		/* shift 16 bits at a time to fill 64 bits */
4966 		ldo->phy_type_high |= ((u64)buf << (i * 16));
4967 	}
4968 
4969 	return status;
4970 }
4971 
4972 /**
4973  * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
4974  * @caps: get PHY capability data
4975  */
4976 bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
4977 {
4978 	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
4979 	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
4980 				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
4981 				       ICE_AQC_PHY_AN_EN_CLAUSE37))
4982 		return true;
4983 
4984 	return false;
4985 }
4986 
4987 /**
4988  * ice_aq_set_lldp_mib - Set the LLDP MIB
4989  * @hw: pointer to the HW struct
4990  * @mib_type: Local, Remote or both Local and Remote MIBs
4991  * @buf: pointer to the caller-supplied buffer to store the MIB block
4992  * @buf_size: size of the buffer (in bytes)
4993  * @cd: pointer to command details structure or NULL
4994  *
4995  * Set the LLDP MIB. (0x0A08)
4996  */
4997 enum ice_status
4998 ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
4999 		    struct ice_sq_cd *cd)
5000 {
5001 	struct ice_aqc_lldp_set_local_mib *cmd;
5002 	struct ice_aq_desc desc;
5003 
5004 	cmd = &desc.params.lldp_set_mib;
5005 
5006 	if (buf_size == 0 || !buf)
5007 		return ICE_ERR_PARAM;
5008 
5009 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
5010 
5011 	desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
5012 	desc.datalen = cpu_to_le16(buf_size);
5013 
5014 	cmd->type = mib_type;
5015 	cmd->length = cpu_to_le16(buf_size);
5016 
5017 	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
5018 }
5019 
5020 /**
5021  * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl
5022  * @hw: pointer to HW struct
5023  */
5024 bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
5025 {
5026 	if (hw->mac_type != ICE_MAC_E810)
5027 		return false;
5028 
5029 	if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) {
5030 		if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN)
5031 			return true;
5032 		if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN &&
5033 		    hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH)
5034 			return true;
5035 	} else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) {
5036 		return true;
5037 	}
5038 	return false;
5039 }
5040 
5041 /**
5042  * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
5043  * @hw: pointer to HW struct
5044  * @vsi_num: absolute HW index for VSI
5045  * @add: boolean for if adding or removing a filter
5046  */
5047 enum ice_status
5048 ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
5049 {
5050 	struct ice_aqc_lldp_filter_ctrl *cmd;
5051 	struct ice_aq_desc desc;
5052 
5053 	cmd = &desc.params.lldp_filter_ctrl;
5054 
5055 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);
5056 
5057 	if (add)
5058 		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
5059 	else
5060 		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;
5061 
5062 	cmd->vsi_num = cpu_to_le16(vsi_num);
5063 
5064 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5065 }
5066 
5067 /**
5068  * ice_fw_supports_report_dflt_cfg
5069  * @hw: pointer to the hardware structure
5070  *
5071  * Checks if the firmware supports report default configuration
5072  */
5073 bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
5074 {
5075 	if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
5076 		if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN)
5077 			return true;
5078 		if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN &&
5079 		    hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH)
5080 			return true;
5081 	} else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
5082 		return true;
5083 	}
5084 	return false;
5085 }
5086