1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 #include "ice_common.h"
5 #include "ice_sched.h"
6 #include "ice_adminq_cmd.h"
7 
8 #define ICE_PF_RESET_WAIT_COUNT	200
9 
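/* Program flex-descriptor word "idx" of Rx descriptor profile "rxdid" with
 * metadata ID "mdid", using the MDID extraction opcode.
 */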
10 #define ICE_PROG_FLEX_ENTRY(hw, rxdid, mdid, idx) \
11 	wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(rxdid), \
12 	     ((ICE_RX_OPC_MDID << \
13 	       GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_S) & \
14 	      GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_M) | \
15 	     (((mdid) << GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_S) & \
16 	      GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_M))
17 
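/* Program the four flex-flag IDs "flg_0".."flg_3" into flag register set
 * "idx" of Rx descriptor profile "rxdid".
 */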
18 #define ICE_PROG_FLG_ENTRY(hw, rxdid, flg_0, flg_1, flg_2, flg_3, idx) \
19 	wr32((hw), GLFLXP_RXDID_FLAGS(rxdid, idx), \
20 	     (((flg_0) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & \
21 	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) | \
22 	     (((flg_1) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) & \
23 	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M) | \
24 	     (((flg_2) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S) & \
25 	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M) | \
26 	     (((flg_3) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S) & \
27 	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M))
28 
29 /**
30  * ice_set_mac_type - Sets MAC type
31  * @hw: pointer to the HW structure
32  *
33  * This function sets the MAC type of the adapter based on the
34  * vendor ID and device ID stored in the HW structure.
35  */
36 static enum ice_status ice_set_mac_type(struct ice_hw *hw)
37 {
38 	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
39 		return ICE_ERR_DEVICE_NOT_SUPPORTED;
40 
41 	hw->mac_type = ICE_MAC_GENERIC;
42 	return 0;
43 }
44 
45 /**
46  * ice_dev_onetime_setup - Temporary HW/FW workarounds
47  * @hw: pointer to the HW structure
48  *
49  * This function provides temporary workarounds for certain issues
50  * that are expected to be fixed in the HW/FW.
51  */
52 void ice_dev_onetime_setup(struct ice_hw *hw)
53 {
54 #define MBX_PF_VT_PFALLOC	0x00231E80
55 	/* set VFs per PF */
56 	wr32(hw, MBX_PF_VT_PFALLOC, rd32(hw, PF_VT_PFALLOC_HIF));
57 }
58 
59 /**
60  * ice_clear_pf_cfg - Clear PF configuration
61  * @hw: pointer to the hardware structure
62  *
63  * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
64  * configuration, flow director filters, etc.).
65  */
66 enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
67 {
68 	struct ice_aq_desc desc;
69 
70 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
71 
72 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
73 }
74 
75 /**
76  * ice_aq_manage_mac_read - manage MAC address read command
77  * @hw: pointer to the HW struct
78  * @buf: a virtual buffer to hold the manage MAC read response
79  * @buf_size: Size of the virtual buffer
80  * @cd: pointer to command details structure or NULL
81  *
 * This function is used to return the per-PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in the user-specified buffer, which should be interpreted as a
 * "manage_mac_read" response. The reported LAN MAC address is also stored in
 * the HW struct (port_info->mac).
 * ice_aq_discover_caps is expected to be called before this function.
88  */
89 static enum ice_status
90 ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
91 		       struct ice_sq_cd *cd)
92 {
93 	struct ice_aqc_manage_mac_read_resp *resp;
94 	struct ice_aqc_manage_mac_read *cmd;
95 	struct ice_aq_desc desc;
96 	enum ice_status status;
97 	u16 flags;
98 	u8 i;
99 
100 	cmd = &desc.params.mac_read;
101 
102 	if (buf_size < sizeof(*resp))
103 		return ICE_ERR_BUF_TOO_SHORT;
104 
105 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
106 
107 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
108 	if (status)
109 		return status;
110 
111 	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
112 	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
113 
114 	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
115 		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
116 		return ICE_ERR_CFG;
117 	}
118 
119 	/* A single port can report up to two (LAN and WoL) addresses */
120 	for (i = 0; i < cmd->num_addr; i++)
121 		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
122 			ether_addr_copy(hw->port_info->mac.lan_addr,
123 					resp[i].mac_addr);
124 			ether_addr_copy(hw->port_info->mac.perm_addr,
125 					resp[i].mac_addr);
126 			break;
127 		}
128 
129 	return 0;
130 }
131 
132 /**
133  * ice_aq_get_phy_caps - returns PHY capabilities
134  * @pi: port information structure
135  * @qual_mods: report qualified modules
136  * @report_mode: report mode capabilities
137  * @pcaps: structure for PHY capabilities to be filled
138  * @cd: pointer to command details structure or NULL
139  *
140  * Returns the various PHY capabilities supported on the Port (0x0600)
141  */
142 enum ice_status
143 ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
144 		    struct ice_aqc_get_phy_caps_data *pcaps,
145 		    struct ice_sq_cd *cd)
146 {
147 	struct ice_aqc_get_phy_caps *cmd;
148 	u16 pcaps_size = sizeof(*pcaps);
149 	struct ice_aq_desc desc;
150 	enum ice_status status;
151 
152 	cmd = &desc.params.get_phy;
153 
154 	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
155 		return ICE_ERR_PARAM;
156 
157 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
158 
159 	if (qual_mods)
160 		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);
161 
162 	cmd->param0 |= cpu_to_le16(report_mode);
163 	status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);
164 
165 	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
166 		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
167 		pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
168 	}
169 
170 	return status;
171 }
172 
173 /**
174  * ice_get_media_type - Gets media type
175  * @pi: port information structure
176  */
177 static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
178 {
179 	struct ice_link_status *hw_link_info;
180 
181 	if (!pi)
182 		return ICE_MEDIA_UNKNOWN;
183 
184 	hw_link_info = &pi->phy.link_info;
185 	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
186 		/* If more than one media type is selected, report unknown */
187 		return ICE_MEDIA_UNKNOWN;
188 
189 	if (hw_link_info->phy_type_low) {
190 		switch (hw_link_info->phy_type_low) {
191 		case ICE_PHY_TYPE_LOW_1000BASE_SX:
192 		case ICE_PHY_TYPE_LOW_1000BASE_LX:
193 		case ICE_PHY_TYPE_LOW_10GBASE_SR:
194 		case ICE_PHY_TYPE_LOW_10GBASE_LR:
195 		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
196 		case ICE_PHY_TYPE_LOW_25GBASE_SR:
197 		case ICE_PHY_TYPE_LOW_25GBASE_LR:
198 		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
199 		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
200 		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
201 		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
202 		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
203 		case ICE_PHY_TYPE_LOW_50GBASE_SR:
204 		case ICE_PHY_TYPE_LOW_50GBASE_FR:
205 		case ICE_PHY_TYPE_LOW_50GBASE_LR:
206 		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
207 		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
208 		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
209 		case ICE_PHY_TYPE_LOW_100GBASE_DR:
210 			return ICE_MEDIA_FIBER;
211 		case ICE_PHY_TYPE_LOW_100BASE_TX:
212 		case ICE_PHY_TYPE_LOW_1000BASE_T:
213 		case ICE_PHY_TYPE_LOW_2500BASE_T:
214 		case ICE_PHY_TYPE_LOW_5GBASE_T:
215 		case ICE_PHY_TYPE_LOW_10GBASE_T:
216 		case ICE_PHY_TYPE_LOW_25GBASE_T:
217 			return ICE_MEDIA_BASET;
218 		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
219 		case ICE_PHY_TYPE_LOW_25GBASE_CR:
220 		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
221 		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
222 		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
223 		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
224 		case ICE_PHY_TYPE_LOW_50GBASE_CP:
225 		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
226 		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
227 		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
228 			return ICE_MEDIA_DA;
229 		case ICE_PHY_TYPE_LOW_1000BASE_KX:
230 		case ICE_PHY_TYPE_LOW_2500BASE_KX:
231 		case ICE_PHY_TYPE_LOW_2500BASE_X:
232 		case ICE_PHY_TYPE_LOW_5GBASE_KR:
233 		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
234 		case ICE_PHY_TYPE_LOW_25GBASE_KR:
235 		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
236 		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
237 		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
238 		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
239 		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
240 		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
241 		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
242 			return ICE_MEDIA_BACKPLANE;
243 		}
244 	} else {
245 		switch (hw_link_info->phy_type_high) {
246 		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
247 			return ICE_MEDIA_BACKPLANE;
248 		}
249 	}
250 	return ICE_MEDIA_UNKNOWN;
251 }
252 
253 /**
254  * ice_aq_get_link_info
255  * @pi: port information structure
256  * @ena_lse: enable/disable LinkStatusEvent reporting
257  * @link: pointer to link status structure - optional
258  * @cd: pointer to command details structure or NULL
259  *
 * Get Link Status (0x0607). Returns the link status of the adapter.
261  */
262 enum ice_status
263 ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
264 		     struct ice_link_status *link, struct ice_sq_cd *cd)
265 {
266 	struct ice_link_status *hw_link_info_old, *hw_link_info;
267 	struct ice_aqc_get_link_status_data link_data = { 0 };
268 	struct ice_aqc_get_link_status *resp;
269 	enum ice_media_type *hw_media_type;
270 	struct ice_fc_info *hw_fc_info;
271 	bool tx_pause, rx_pause;
272 	struct ice_aq_desc desc;
273 	enum ice_status status;
274 	u16 cmd_flags;
275 
276 	if (!pi)
277 		return ICE_ERR_PARAM;
278 	hw_link_info_old = &pi->phy.link_info_old;
279 	hw_media_type = &pi->phy.media_type;
280 	hw_link_info = &pi->phy.link_info;
281 	hw_fc_info = &pi->fc;
282 
283 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
284 	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
285 	resp = &desc.params.get_link_status;
286 	resp->cmd_flags = cpu_to_le16(cmd_flags);
287 	resp->lport_num = pi->lport;
288 
289 	status = ice_aq_send_cmd(pi->hw, &desc, &link_data, sizeof(link_data),
290 				 cd);
291 
292 	if (status)
293 		return status;
294 
295 	/* save off old link status information */
296 	*hw_link_info_old = *hw_link_info;
297 
298 	/* update current link status information */
299 	hw_link_info->link_speed = le16_to_cpu(link_data.link_speed);
300 	hw_link_info->phy_type_low = le64_to_cpu(link_data.phy_type_low);
301 	hw_link_info->phy_type_high = le64_to_cpu(link_data.phy_type_high);
302 	*hw_media_type = ice_get_media_type(pi);
303 	hw_link_info->link_info = link_data.link_info;
304 	hw_link_info->an_info = link_data.an_info;
305 	hw_link_info->ext_info = link_data.ext_info;
306 	hw_link_info->max_frame_size = le16_to_cpu(link_data.max_frame_size);
307 	hw_link_info->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
308 	hw_link_info->topo_media_conflict = link_data.topo_media_conflict;
309 	hw_link_info->pacing = link_data.cfg & ICE_AQ_CFG_PACING_M;
310 
311 	/* update fc info */
312 	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
313 	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
314 	if (tx_pause && rx_pause)
315 		hw_fc_info->current_mode = ICE_FC_FULL;
316 	else if (tx_pause)
317 		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
318 	else if (rx_pause)
319 		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
320 	else
321 		hw_fc_info->current_mode = ICE_FC_NONE;
322 
323 	hw_link_info->lse_ena =
324 		!!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));
325 
326 	/* save link status information */
327 	if (link)
328 		*link = *hw_link_info;
329 
330 	/* flag cleared so calling functions don't call AQ again */
331 	pi->phy.get_link_info = false;
332 
333 	return 0;
334 }
335 
336 /**
337  * ice_init_flex_flags
338  * @hw: pointer to the hardware structure
339  * @prof_id: Rx Descriptor Builder profile ID
340  *
341  * Function to initialize Rx flex flags
342  */
343 static void ice_init_flex_flags(struct ice_hw *hw, enum ice_rxdid prof_id)
344 {
345 	u8 idx = 0;
346 
347 	/* Flex-flag fields (0-2) are programmed with FLG64 bits with layout:
348 	 * flexiflags0[5:0] - TCP flags, is_packet_fragmented, is_packet_UDP_GRE
349 	 * flexiflags1[3:0] - Not used for flag programming
350 	 * flexiflags2[7:0] - Tunnel and VLAN types
351 	 * 2 invalid fields in last index
352 	 */
353 	switch (prof_id) {
354 	/* Rx flex flags are currently programmed for the NIC profiles only.
355 	 * Different flag bit programming configurations can be added per
356 	 * profile as needed.
357 	 */
358 	case ICE_RXDID_FLEX_NIC:
359 	case ICE_RXDID_FLEX_NIC_2:
360 		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_FRG,
361 				   ICE_FLG_UDP_GRE, ICE_FLG_PKT_DSI,
362 				   ICE_FLG_FIN, idx++);
363 		/* flex flag 1 is not used for flexi-flag programming, skipping
364 		 * these four FLG64 bits.
365 		 */
366 		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_SYN, ICE_FLG_RST,
367 				   ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx++);
368 		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_DSI,
369 				   ICE_FLG_PKT_DSI, ICE_FLG_EVLAN_x8100,
370 				   ICE_FLG_EVLAN_x9100, idx++);
371 		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_VLAN_x8100,
372 				   ICE_FLG_TNL_VLAN, ICE_FLG_TNL_MAC,
373 				   ICE_FLG_TNL0, idx++);
374 		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_TNL1, ICE_FLG_TNL2,
375 				   ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx);
376 		break;
377 
378 	default:
379 		ice_debug(hw, ICE_DBG_INIT,
380 			  "Flag programming for profile ID %d not supported\n",
381 			  prof_id);
382 	}
383 }
384 
385 /**
386  * ice_init_flex_flds
387  * @hw: pointer to the hardware structure
388  * @prof_id: Rx Descriptor Builder profile ID
389  *
390  * Function to initialize flex descriptors
391  */
392 static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id)
393 {
394 	enum ice_flex_rx_mdid mdid;
395 
396 	switch (prof_id) {
397 	case ICE_RXDID_FLEX_NIC:
398 	case ICE_RXDID_FLEX_NIC_2:
399 		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_LOW, 0);
400 		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_HIGH, 1);
401 		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_FLOW_ID_LOWER, 2);
402 
403 		mdid = (prof_id == ICE_RXDID_FLEX_NIC_2) ?
404 			ICE_RX_MDID_SRC_VSI : ICE_RX_MDID_FLOW_ID_HIGH;
405 
406 		ICE_PROG_FLEX_ENTRY(hw, prof_id, mdid, 3);
407 
408 		ice_init_flex_flags(hw, prof_id);
409 		break;
410 
411 	default:
412 		ice_debug(hw, ICE_DBG_INIT,
413 			  "Field init for profile ID %d not supported\n",
414 			  prof_id);
415 	}
416 }
417 
418 /**
419  * ice_init_fltr_mgmt_struct - initializes filter management list and locks
420  * @hw: pointer to the HW struct
421  */
422 static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
423 {
424 	struct ice_switch_info *sw;
425 
426 	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
427 				       sizeof(*hw->switch_info), GFP_KERNEL);
428 	sw = hw->switch_info;
429 
430 	if (!sw)
431 		return ICE_ERR_NO_MEMORY;
432 
433 	INIT_LIST_HEAD(&sw->vsi_list_map_head);
434 
435 	return ice_init_def_sw_recp(hw);
436 }
437 
438 /**
439  * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
440  * @hw: pointer to the HW struct
441  */
442 static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
443 {
444 	struct ice_switch_info *sw = hw->switch_info;
445 	struct ice_vsi_list_map_info *v_pos_map;
446 	struct ice_vsi_list_map_info *v_tmp_map;
447 	struct ice_sw_recipe *recps;
448 	u8 i;
449 
450 	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
451 				 list_entry) {
452 		list_del(&v_pos_map->list_entry);
453 		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
454 	}
455 	recps = hw->switch_info->recp_list;
456 	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
457 		struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
458 
459 		recps[i].root_rid = i;
460 		mutex_destroy(&recps[i].filt_rule_lock);
461 		list_for_each_entry_safe(lst_itr, tmp_entry,
462 					 &recps[i].filt_rules, list_entry) {
463 			list_del(&lst_itr->list_entry);
464 			devm_kfree(ice_hw_to_dev(hw), lst_itr);
465 		}
466 	}
467 	ice_rm_all_sw_replay_rule_info(hw);
468 	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
469 	devm_kfree(ice_hw_to_dev(hw), sw);
470 }
471 
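/* Size in bytes of a FW logging data buffer holding "n" module entries; the
 * structure itself already provides storage for one entry, hence the "(n) - 1".
 */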
472 #define ICE_FW_LOG_DESC_SIZE(n)	(sizeof(struct ice_aqc_fw_logging_data) + \
473 	(((n) - 1) * sizeof(((struct ice_aqc_fw_logging_data *)0)->entry)))
474 #define ICE_FW_LOG_DESC_SIZE_MAX	\
475 	ICE_FW_LOG_DESC_SIZE(ICE_AQC_FW_LOG_ID_MAX)
476 
477 /**
478  * ice_get_fw_log_cfg - get FW logging configuration
479  * @hw: pointer to the HW struct
480  */
481 static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
482 {
483 	struct ice_aqc_fw_logging_data *config;
484 	struct ice_aq_desc desc;
485 	enum ice_status status;
486 	u16 size;
487 
488 	size = ICE_FW_LOG_DESC_SIZE_MAX;
489 	config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
490 	if (!config)
491 		return ICE_ERR_NO_MEMORY;
492 
493 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);
494 
495 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
496 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
497 
498 	status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
499 	if (!status) {
500 		u16 i;
501 
502 		/* Save FW logging information into the HW structure */
503 		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
504 			u16 v, m, flgs;
505 
506 			v = le16_to_cpu(config->entry[i]);
507 			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
508 			flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;
509 
510 			if (m < ICE_AQC_FW_LOG_ID_MAX)
511 				hw->fw_log.evnts[m].cur = flgs;
512 		}
513 	}
514 
515 	devm_kfree(ice_hw_to_dev(hw), config);
516 
517 	return status;
518 }
519 
520 /**
521  * ice_cfg_fw_log - configure FW logging
522  * @hw: pointer to the HW struct
523  * @enable: enable certain FW logging events if true, disable all if false
524  *
 * This function enables/disables FW logging via Rx CQ events and a UART
 * port based on predetermined configurations. FW logging via the Rx CQ can be
 * enabled/disabled for individual PFs. However, FW logging via the UART can
528  * only be enabled/disabled for all PFs on the same device.
529  *
530  * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
531  * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
532  * before initializing the device.
533  *
534  * When re/configuring FW logging, callers need to update the "cfg" elements of
535  * the hw->fw_log.evnts array with the desired logging event configurations for
536  * modules of interest. When disabling FW logging completely, the callers can
537  * just pass false in the "enable" parameter. On completion, the function will
538  * update the "cur" element of the hw->fw_log.evnts array with the resulting
539  * logging event configurations of the modules that are being re/configured. FW
540  * logging modules that are not part of a reconfiguration operation retain their
541  * previous states.
542  *
543  * Before resetting the device, it is recommended that the driver disables FW
544  * logging before shutting down the control queue. When disabling FW logging
545  * ("enable" = false), the latest configurations of FW logging events stored in
546  * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
547  * a device reset.
548  *
549  * When enabling FW logging to emit log messages via the Rx CQ during the
550  * device's initialization phase, a mechanism alternative to interrupt handlers
551  * needs to be used to extract FW log messages from the Rx CQ periodically and
552  * to prevent the Rx CQ from being full and stalling other types of control
553  * messages from FW to SW. Interrupts are typically disabled during the device's
554  * initialization phase.
555  */
556 static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
557 {
558 	struct ice_aqc_fw_logging_data *data = NULL;
559 	struct ice_aqc_fw_logging *cmd;
560 	enum ice_status status = 0;
561 	u16 i, chgs = 0, len = 0;
562 	struct ice_aq_desc desc;
563 	u8 actv_evnts = 0;
564 	void *buf = NULL;
565 
566 	if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
567 		return 0;
568 
569 	/* Disable FW logging only when the control queue is still responsive */
570 	if (!enable &&
571 	    (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
572 		return 0;
573 
574 	/* Get current FW log settings */
575 	status = ice_get_fw_log_cfg(hw);
576 	if (status)
577 		return status;
578 
579 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
580 	cmd = &desc.params.fw_logging;
581 
582 	/* Indicate which controls are valid */
583 	if (hw->fw_log.cq_en)
584 		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;
585 
586 	if (hw->fw_log.uart_en)
587 		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;
588 
589 	if (enable) {
590 		/* Fill in an array of entries with FW logging modules and
591 		 * logging events being reconfigured.
592 		 */
593 		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
594 			u16 val;
595 
596 			/* Keep track of enabled event types */
597 			actv_evnts |= hw->fw_log.evnts[i].cfg;
598 
599 			if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
600 				continue;
601 
602 			if (!data) {
603 				data = devm_kzalloc(ice_hw_to_dev(hw),
604 						    ICE_FW_LOG_DESC_SIZE_MAX,
605 						    GFP_KERNEL);
606 				if (!data)
607 					return ICE_ERR_NO_MEMORY;
608 			}
609 
610 			val = i << ICE_AQC_FW_LOG_ID_S;
611 			val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
612 			data->entry[chgs++] = cpu_to_le16(val);
613 		}
614 
		/* Only enable FW logging if at least one module is specified.
		 * If FW logging is currently enabled but no modules are
		 * enabled to emit log messages, disable FW logging altogether.
618 		 */
619 		if (actv_evnts) {
620 			/* Leave if there is effectively no change */
621 			if (!chgs)
622 				goto out;
623 
624 			if (hw->fw_log.cq_en)
625 				cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;
626 
627 			if (hw->fw_log.uart_en)
628 				cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;
629 
630 			buf = data;
631 			len = ICE_FW_LOG_DESC_SIZE(chgs);
632 			desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
633 		}
634 	}
635 
636 	status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
637 	if (!status) {
638 		/* Update the current configuration to reflect events enabled.
639 		 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
640 		 * logging mode is enabled for the device. They do not reflect
641 		 * actual modules being enabled to emit log messages. So, their
642 		 * values remain unchanged even when all modules are disabled.
643 		 */
644 		u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;
645 
646 		hw->fw_log.actv_evnts = actv_evnts;
647 		for (i = 0; i < cnt; i++) {
648 			u16 v, m;
649 
650 			if (!enable) {
651 				/* When disabling all FW logging events as part
652 				 * of device's de-initialization, the original
653 				 * configurations are retained, and can be used
654 				 * to reconfigure FW logging later if the device
655 				 * is re-initialized.
656 				 */
657 				hw->fw_log.evnts[i].cur = 0;
658 				continue;
659 			}
660 
661 			v = le16_to_cpu(data->entry[i]);
662 			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
663 			hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
664 		}
665 	}
666 
667 out:
668 	if (data)
669 		devm_kfree(ice_hw_to_dev(hw), data);
670 
671 	return status;
672 }
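
/* Illustrative caller flow for ice_cfg_fw_log() (a sketch only; "module" and
 * "desired_event_flags" below are placeholders, not values defined here):
 *
 *	hw->fw_log.cq_en = true;
 *	hw->fw_log.evnts[module].cfg = desired_event_flags;
 *	status = ice_cfg_fw_log(hw, true);	// failure here is not fatal
 *	...
 *	ice_cfg_fw_log(hw, false);	// before shutting down the control queues
 */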
673 
674 /**
675  * ice_output_fw_log
676  * @hw: pointer to the HW struct
677  * @desc: pointer to the AQ message descriptor
678  * @buf: pointer to the buffer accompanying the AQ message
679  *
680  * Formats a FW Log message and outputs it via the standard driver logs.
681  */
682 void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
683 {
684 	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
685 	ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
686 			le16_to_cpu(desc->datalen));
687 	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
688 }
689 
690 /**
 * ice_get_itr_intrl_gran - determine ITR/INTRL granularity
692  * @hw: pointer to the HW struct
693  *
694  * Determines the ITR/intrl granularities based on the maximum aggregate
695  * bandwidth according to the device's configuration during power-on.
696  */
697 static void ice_get_itr_intrl_gran(struct ice_hw *hw)
698 {
699 	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
700 			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
701 			GL_PWR_MODE_CTL_CAR_MAX_BW_S;
702 
703 	switch (max_agg_bw) {
704 	case ICE_MAX_AGG_BW_200G:
705 	case ICE_MAX_AGG_BW_100G:
706 	case ICE_MAX_AGG_BW_50G:
707 		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
708 		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
709 		break;
710 	case ICE_MAX_AGG_BW_25G:
711 		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
712 		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
713 		break;
714 	}
715 }
716 
717 /**
718  * ice_init_hw - main hardware initialization routine
719  * @hw: pointer to the hardware structure
720  */
721 enum ice_status ice_init_hw(struct ice_hw *hw)
722 {
723 	struct ice_aqc_get_phy_caps_data *pcaps;
724 	enum ice_status status;
725 	u16 mac_buf_len;
726 	void *mac_buf;
727 
728 	/* Set MAC type based on DeviceID */
729 	status = ice_set_mac_type(hw);
730 	if (status)
731 		return status;
732 
733 	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
734 			 PF_FUNC_RID_FUNC_NUM_M) >>
735 		PF_FUNC_RID_FUNC_NUM_S;
736 
737 	status = ice_reset(hw, ICE_RESET_PFR);
738 	if (status)
739 		return status;
740 
741 	ice_get_itr_intrl_gran(hw);
742 
743 	status = ice_init_all_ctrlq(hw);
744 	if (status)
745 		goto err_unroll_cqinit;
746 
747 	/* Enable FW logging. Not fatal if this fails. */
748 	status = ice_cfg_fw_log(hw, true);
749 	if (status)
750 		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");
751 
752 	status = ice_clear_pf_cfg(hw);
753 	if (status)
754 		goto err_unroll_cqinit;
755 
756 	ice_clear_pxe_mode(hw);
757 
758 	status = ice_init_nvm(hw);
759 	if (status)
760 		goto err_unroll_cqinit;
761 
762 	status = ice_get_caps(hw);
763 	if (status)
764 		goto err_unroll_cqinit;
765 
766 	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
767 				     sizeof(*hw->port_info), GFP_KERNEL);
768 	if (!hw->port_info) {
769 		status = ICE_ERR_NO_MEMORY;
770 		goto err_unroll_cqinit;
771 	}
772 
773 	/* set the back pointer to HW */
774 	hw->port_info->hw = hw;
775 
776 	/* Initialize port_info struct with switch configuration data */
777 	status = ice_get_initial_sw_cfg(hw);
778 	if (status)
779 		goto err_unroll_alloc;
780 
781 	hw->evb_veb = true;
782 
783 	/* Query the allocated resources for Tx scheduler */
784 	status = ice_sched_query_res_alloc(hw);
785 	if (status) {
786 		ice_debug(hw, ICE_DBG_SCHED,
787 			  "Failed to get scheduler allocated resources\n");
788 		goto err_unroll_alloc;
789 	}
790 
791 	/* Initialize port_info struct with scheduler data */
792 	status = ice_sched_init_port(hw->port_info);
793 	if (status)
794 		goto err_unroll_sched;
795 
796 	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
797 	if (!pcaps) {
798 		status = ICE_ERR_NO_MEMORY;
799 		goto err_unroll_sched;
800 	}
801 
802 	/* Initialize port_info struct with PHY capabilities */
803 	status = ice_aq_get_phy_caps(hw->port_info, false,
804 				     ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
805 	devm_kfree(ice_hw_to_dev(hw), pcaps);
806 	if (status)
807 		goto err_unroll_sched;
808 
809 	/* Initialize port_info struct with link information */
810 	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
811 	if (status)
812 		goto err_unroll_sched;
813 
814 	/* need a valid SW entry point to build a Tx tree */
815 	if (!hw->sw_entry_point_layer) {
816 		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
817 		status = ICE_ERR_CFG;
818 		goto err_unroll_sched;
819 	}
820 	INIT_LIST_HEAD(&hw->agg_list);
821 
822 	status = ice_init_fltr_mgmt_struct(hw);
823 	if (status)
824 		goto err_unroll_sched;
825 
826 	ice_dev_onetime_setup(hw);
827 
828 	/* Get MAC information */
829 	/* A single port can report up to two (LAN and WoL) addresses */
830 	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
831 			       sizeof(struct ice_aqc_manage_mac_read_resp),
832 			       GFP_KERNEL);
833 	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
834 
835 	if (!mac_buf) {
836 		status = ICE_ERR_NO_MEMORY;
837 		goto err_unroll_fltr_mgmt_struct;
838 	}
839 
840 	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
841 	devm_kfree(ice_hw_to_dev(hw), mac_buf);
842 
843 	if (status)
844 		goto err_unroll_fltr_mgmt_struct;
845 
846 	ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC);
847 	ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC_2);
848 
849 	return 0;
850 
851 err_unroll_fltr_mgmt_struct:
852 	ice_cleanup_fltr_mgmt_struct(hw);
853 err_unroll_sched:
854 	ice_sched_cleanup_all(hw);
855 err_unroll_alloc:
856 	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
857 err_unroll_cqinit:
858 	ice_shutdown_all_ctrlq(hw);
859 	return status;
860 }
861 
862 /**
863  * ice_deinit_hw - unroll initialization operations done by ice_init_hw
864  * @hw: pointer to the hardware structure
865  *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing, since ice_init_hw() already takes care of unrolling
 * the applicable initializations if it fails for any reason.
869  */
870 void ice_deinit_hw(struct ice_hw *hw)
871 {
872 	ice_cleanup_fltr_mgmt_struct(hw);
873 
874 	ice_sched_cleanup_all(hw);
875 	ice_sched_clear_agg(hw);
876 
877 	if (hw->port_info) {
878 		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
879 		hw->port_info = NULL;
880 	}
881 
882 	/* Attempt to disable FW logging before shutting down control queues */
883 	ice_cfg_fw_log(hw, false);
884 	ice_shutdown_all_ctrlq(hw);
885 
886 	/* Clear VSI contexts if not already cleared */
887 	ice_clear_all_vsi_ctx(hw);
888 }
889 
890 /**
891  * ice_check_reset - Check to see if a global reset is complete
892  * @hw: pointer to the hardware structure
893  */
894 enum ice_status ice_check_reset(struct ice_hw *hw)
895 {
896 	u32 cnt, reg = 0, grst_delay;
897 
898 	/* Poll for Device Active state in case a recent CORER, GLOBR,
899 	 * or EMPR has occurred. The grst delay value is in 100ms units.
900 	 * Add 1sec for outstanding AQ commands that can take a long time.
901 	 */
902 	grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
903 		      GLGEN_RSTCTL_GRSTDEL_S) + 10;
904 
905 	for (cnt = 0; cnt < grst_delay; cnt++) {
906 		mdelay(100);
907 		reg = rd32(hw, GLGEN_RSTAT);
908 		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
909 			break;
910 	}
911 
912 	if (cnt == grst_delay) {
913 		ice_debug(hw, ICE_DBG_INIT,
914 			  "Global reset polling failed to complete.\n");
915 		return ICE_ERR_RESET_FAILED;
916 	}
917 
918 #define ICE_RESET_DONE_MASK	(GLNVM_ULD_CORER_DONE_M | \
919 				 GLNVM_ULD_GLOBR_DONE_M)
920 
921 	/* Device is Active; check Global Reset processes are done */
922 	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
923 		reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK;
924 		if (reg == ICE_RESET_DONE_MASK) {
925 			ice_debug(hw, ICE_DBG_INIT,
926 				  "Global reset processes done. %d\n", cnt);
927 			break;
928 		}
929 		mdelay(10);
930 	}
931 
932 	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
933 		ice_debug(hw, ICE_DBG_INIT,
934 			  "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
935 			  reg);
936 		return ICE_ERR_RESET_FAILED;
937 	}
938 
939 	return 0;
940 }
941 
942 /**
943  * ice_pf_reset - Reset the PF
944  * @hw: pointer to the hardware structure
945  *
946  * If a global reset has been triggered, this function checks
947  * for its completion and then issues the PF reset
948  */
949 static enum ice_status ice_pf_reset(struct ice_hw *hw)
950 {
951 	u32 cnt, reg;
952 
953 	/* If at function entry a global reset was already in progress, i.e.
954 	 * state is not 'device active' or any of the reset done bits are not
955 	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
956 	 * global reset is done.
957 	 */
958 	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
959 	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
960 		/* poll on global reset currently in progress until done */
961 		if (ice_check_reset(hw))
962 			return ICE_ERR_RESET_FAILED;
963 
964 		return 0;
965 	}
966 
967 	/* Reset the PF */
968 	reg = rd32(hw, PFGEN_CTRL);
969 
970 	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
971 
972 	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
973 		reg = rd32(hw, PFGEN_CTRL);
974 		if (!(reg & PFGEN_CTRL_PFSWR_M))
975 			break;
976 
977 		mdelay(1);
978 	}
979 
980 	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
981 		ice_debug(hw, ICE_DBG_INIT,
982 			  "PF reset polling failed to complete.\n");
983 		return ICE_ERR_RESET_FAILED;
984 	}
985 
986 	return 0;
987 }
988 
989 /**
990  * ice_reset - Perform different types of reset
991  * @hw: pointer to the hardware structure
992  * @req: reset request
993  *
994  * This function triggers a reset as specified by the req parameter.
995  *
996  * Note:
997  * If anything other than a PF reset is triggered, PXE mode is restored.
998  * This has to be cleared using ice_clear_pxe_mode again, once the AQ
999  * interface has been restored in the rebuild flow.
1000  */
1001 enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
1002 {
1003 	u32 val = 0;
1004 
1005 	switch (req) {
1006 	case ICE_RESET_PFR:
1007 		return ice_pf_reset(hw);
1008 	case ICE_RESET_CORER:
1009 		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
1010 		val = GLGEN_RTRIG_CORER_M;
1011 		break;
1012 	case ICE_RESET_GLOBR:
1013 		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
1014 		val = GLGEN_RTRIG_GLOBR_M;
1015 		break;
1016 	default:
1017 		return ICE_ERR_PARAM;
1018 	}
1019 
1020 	val |= rd32(hw, GLGEN_RTRIG);
1021 	wr32(hw, GLGEN_RTRIG, val);
1022 	ice_flush(hw);
1023 
1024 	/* wait for the FW to be ready */
1025 	return ice_check_reset(hw);
1026 }
1027 
1028 /**
1029  * ice_copy_rxq_ctx_to_hw
1030  * @hw: pointer to the hardware structure
1031  * @ice_rxq_ctx: pointer to the rxq context
1032  * @rxq_index: the index of the Rx queue
1033  *
1034  * Copies rxq context from dense structure to HW register space
1035  */
1036 static enum ice_status
1037 ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
1038 {
1039 	u8 i;
1040 
1041 	if (!ice_rxq_ctx)
1042 		return ICE_ERR_BAD_PTR;
1043 
1044 	if (rxq_index > QRX_CTRL_MAX_INDEX)
1045 		return ICE_ERR_PARAM;
1046 
1047 	/* Copy each dword separately to HW */
1048 	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
1049 		wr32(hw, QRX_CONTEXT(i, rxq_index),
1050 		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1051 
1052 		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
1053 			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1054 	}
1055 
1056 	return 0;
1057 }
1058 
1059 /* LAN Rx Queue Context */
1060 static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
1061 	/* Field		Width	LSB */
1062 	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
1063 	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
1064 	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
1065 	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
1066 	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
1067 	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
1068 	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
1069 	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
1070 	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
1071 	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
1072 	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
1073 	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
1074 	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
1075 	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
1076 	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
1077 	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
1078 	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
1079 	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
1080 	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
1081 	{ 0 }
1082 };
1083 
1084 /**
1085  * ice_write_rxq_ctx
1086  * @hw: pointer to the hardware structure
1087  * @rlan_ctx: pointer to the rxq context
1088  * @rxq_index: the index of the Rx queue
1089  *
1090  * Converts rxq context from sparse to dense structure and then writes
1091  * it to HW register space
1092  */
1093 enum ice_status
1094 ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
1095 		  u32 rxq_index)
1096 {
1097 	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
1098 
1099 	ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
1100 	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
1101 }
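
/* Illustrative use of ice_write_rxq_ctx() (a sketch only; the variable names
 * and field values below are placeholders, not recommended settings):
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = ring_base;	// in the units the hardware expects
 *	rlan_ctx.qlen = ring_count;
 *	rlan_ctx.rxmax = max_frame_size;
 *	if (ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index))
 *		return -EIO;	// or other error handling
 */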
1102 
1103 /* LAN Tx Queue Context */
1104 const struct ice_ctx_ele ice_tlan_ctx_info[] = {
1105 				    /* Field			Width	LSB */
1106 	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
1107 	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
1108 	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
1109 	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
1110 	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
1111 	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
1112 	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
1113 	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
1114 	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
1115 	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
1116 	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
1117 	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
1118 	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
1119 	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
1120 	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
1121 	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
1122 	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
1123 	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
1124 	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
1125 	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
1126 	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
1127 	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
1128 	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
1129 	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
1130 	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
1131 	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
1132 	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		110,	171),
1133 	{ 0 }
1134 };
1135 
1136 /**
1137  * ice_debug_cq
1138  * @hw: pointer to the hardware structure
1139  * @mask: debug mask
1140  * @desc: pointer to control queue descriptor
1141  * @buf: pointer to command buffer
1142  * @buf_len: max length of buf
1143  *
 * Dumps a debug log of the control queue command and its descriptor contents.
1145  */
1146 void
1147 ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc, void *buf,
1148 	     u16 buf_len)
1149 {
1150 	struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
1151 	u16 len;
1152 
1153 #ifndef CONFIG_DYNAMIC_DEBUG
1154 	if (!(mask & hw->debug_mask))
1155 		return;
1156 #endif
1157 
1158 	if (!desc)
1159 		return;
1160 
1161 	len = le16_to_cpu(cq_desc->datalen);
1162 
1163 	ice_debug(hw, mask,
1164 		  "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
1165 		  le16_to_cpu(cq_desc->opcode),
1166 		  le16_to_cpu(cq_desc->flags),
1167 		  le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
1168 	ice_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
1169 		  le32_to_cpu(cq_desc->cookie_high),
1170 		  le32_to_cpu(cq_desc->cookie_low));
1171 	ice_debug(hw, mask, "\tparam (0,1)  0x%08X 0x%08X\n",
1172 		  le32_to_cpu(cq_desc->params.generic.param0),
1173 		  le32_to_cpu(cq_desc->params.generic.param1));
1174 	ice_debug(hw, mask, "\taddr (h,l)   0x%08X 0x%08X\n",
1175 		  le32_to_cpu(cq_desc->params.generic.addr_high),
1176 		  le32_to_cpu(cq_desc->params.generic.addr_low));
1177 	if (buf && cq_desc->datalen != 0) {
1178 		ice_debug(hw, mask, "Buffer:\n");
1179 		if (buf_len < len)
1180 			len = buf_len;
1181 
1182 		ice_debug_array(hw, mask, 16, 1, (u8 *)buf, len);
1183 	}
1184 }
1185 
1186 /* FW Admin Queue command wrappers */
1187 
1188 /**
1189  * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
1190  * @hw: pointer to the HW struct
1191  * @desc: descriptor describing the command
1192  * @buf: buffer to use for indirect commands (NULL for direct commands)
1193  * @buf_size: size of buffer for indirect commands (0 for direct commands)
1194  * @cd: pointer to command details structure
1195  *
1196  * Helper function to send FW Admin Queue commands to the FW Admin Queue.
1197  */
1198 enum ice_status
1199 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1200 		u16 buf_size, struct ice_sq_cd *cd)
1201 {
1202 	return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
1203 }
1204 
1205 /**
1206  * ice_aq_get_fw_ver
1207  * @hw: pointer to the HW struct
1208  * @cd: pointer to command details structure or NULL
1209  *
1210  * Get the firmware version (0x0001) from the admin queue commands
1211  */
1212 enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1213 {
1214 	struct ice_aqc_get_ver *resp;
1215 	struct ice_aq_desc desc;
1216 	enum ice_status status;
1217 
1218 	resp = &desc.params.get_ver;
1219 
1220 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1221 
1222 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1223 
1224 	if (!status) {
1225 		hw->fw_branch = resp->fw_branch;
1226 		hw->fw_maj_ver = resp->fw_major;
1227 		hw->fw_min_ver = resp->fw_minor;
1228 		hw->fw_patch = resp->fw_patch;
1229 		hw->fw_build = le32_to_cpu(resp->fw_build);
1230 		hw->api_branch = resp->api_branch;
1231 		hw->api_maj_ver = resp->api_major;
1232 		hw->api_min_ver = resp->api_minor;
1233 		hw->api_patch = resp->api_patch;
1234 	}
1235 
1236 	return status;
1237 }
1238 
1239 /**
1240  * ice_aq_q_shutdown
1241  * @hw: pointer to the HW struct
1242  * @unloading: is the driver unloading itself
1243  *
1244  * Tell the Firmware that we're shutting down the AdminQ and whether
1245  * or not the driver is unloading as well (0x0003).
1246  */
1247 enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
1248 {
1249 	struct ice_aqc_q_shutdown *cmd;
1250 	struct ice_aq_desc desc;
1251 
1252 	cmd = &desc.params.q_shutdown;
1253 
1254 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1255 
1256 	if (unloading)
1257 		cmd->driver_unloading = cpu_to_le32(ICE_AQC_DRIVER_UNLOADING);
1258 
1259 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1260 }
1261 
1262 /**
1263  * ice_aq_req_res
1264  * @hw: pointer to the HW struct
1265  * @res: resource ID
1266  * @access: access type
1267  * @sdp_number: resource number
1268  * @timeout: the maximum time in ms that the driver may hold the resource
1269  * @cd: pointer to command details structure or NULL
1270  *
1271  * Requests common resource using the admin queue commands (0x0008).
1272  * When attempting to acquire the Global Config Lock, the driver can
1273  * learn of three states:
1274  *  1) ICE_SUCCESS -        acquired lock, and can perform download package
1275  *  2) ICE_ERR_AQ_ERROR -   did not get lock, driver should fail to load
1276  *  3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
1277  *                          successfully downloaded the package; the driver does
1278  *                          not have to download the package and can continue
1279  *                          loading
1280  *
 * Note that if the caller is in an acquire-lock, perform-action, release-lock
 * phase of operation, the FW may detect a timeout and issue a CORER. In this
 * case, the driver will receive a CORER interrupt and will
1284  * have to determine its cause. The calling thread that is handling this flow
1285  * will likely get an error propagated back to it indicating the Download
1286  * Package, Update Package or the Release Resource AQ commands timed out.
1287  */
1288 static enum ice_status
1289 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1290 	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
1291 	       struct ice_sq_cd *cd)
1292 {
1293 	struct ice_aqc_req_res *cmd_resp;
1294 	struct ice_aq_desc desc;
1295 	enum ice_status status;
1296 
1297 	cmd_resp = &desc.params.res_owner;
1298 
1299 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
1300 
1301 	cmd_resp->res_id = cpu_to_le16(res);
1302 	cmd_resp->access_type = cpu_to_le16(access);
1303 	cmd_resp->res_number = cpu_to_le32(sdp_number);
1304 	cmd_resp->timeout = cpu_to_le32(*timeout);
1305 	*timeout = 0;
1306 
1307 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1308 
	/* The Timeout field of the completion specifies the maximum time in
	 * ms that the driver may hold the resource.
	 */
1312 
1313 	/* Global config lock response utilizes an additional status field.
1314 	 *
1315 	 * If the Global config lock resource is held by some other driver, the
1316 	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
1317 	 * and the timeout field indicates the maximum time the current owner
1318 	 * of the resource has to free it.
1319 	 */
1320 	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1321 		if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1322 			*timeout = le32_to_cpu(cmd_resp->timeout);
1323 			return 0;
1324 		} else if (le16_to_cpu(cmd_resp->status) ==
1325 			   ICE_AQ_RES_GLBL_IN_PROG) {
1326 			*timeout = le32_to_cpu(cmd_resp->timeout);
1327 			return ICE_ERR_AQ_ERROR;
1328 		} else if (le16_to_cpu(cmd_resp->status) ==
1329 			   ICE_AQ_RES_GLBL_DONE) {
1330 			return ICE_ERR_AQ_NO_WORK;
1331 		}
1332 
1333 		/* invalid FW response, force a timeout immediately */
1334 		*timeout = 0;
1335 		return ICE_ERR_AQ_ERROR;
1336 	}
1337 
1338 	/* If the resource is held by some other driver, the command completes
1339 	 * with a busy return value and the timeout field indicates the maximum
1340 	 * time the current owner of the resource has to free it.
1341 	 */
1342 	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1343 		*timeout = le32_to_cpu(cmd_resp->timeout);
1344 
1345 	return status;
1346 }
1347 
1348 /**
1349  * ice_aq_release_res
1350  * @hw: pointer to the HW struct
1351  * @res: resource ID
1352  * @sdp_number: resource number
1353  * @cd: pointer to command details structure or NULL
1354  *
1355  * release common resource using the admin queue commands (0x0009)
1356  */
1357 static enum ice_status
1358 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1359 		   struct ice_sq_cd *cd)
1360 {
1361 	struct ice_aqc_req_res *cmd;
1362 	struct ice_aq_desc desc;
1363 
1364 	cmd = &desc.params.res_owner;
1365 
1366 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1367 
1368 	cmd->res_id = cpu_to_le16(res);
1369 	cmd->res_number = cpu_to_le32(sdp_number);
1370 
1371 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1372 }
1373 
1374 /**
1375  * ice_acquire_res
1376  * @hw: pointer to the HW structure
1377  * @res: resource ID
1378  * @access: access type (read or write)
1379  * @timeout: timeout in milliseconds
1380  *
1381  * This function will attempt to acquire the ownership of a resource.
1382  */
1383 enum ice_status
1384 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1385 		enum ice_aq_res_access_type access, u32 timeout)
1386 {
1387 #define ICE_RES_POLLING_DELAY_MS	10
1388 	u32 delay = ICE_RES_POLLING_DELAY_MS;
1389 	u32 time_left = timeout;
1390 	enum ice_status status;
1391 
1392 	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1393 
1394 	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
1395 	 * previously acquired the resource and performed any necessary updates;
1396 	 * in this case the caller does not obtain the resource and has no
1397 	 * further work to do.
1398 	 */
1399 	if (status == ICE_ERR_AQ_NO_WORK)
1400 		goto ice_acquire_res_exit;
1401 
1402 	if (status)
1403 		ice_debug(hw, ICE_DBG_RES,
1404 			  "resource %d acquire type %d failed.\n", res, access);
1405 
	/* If necessary, poll until the current lock owner times out */
1407 	timeout = time_left;
1408 	while (status && timeout && time_left) {
1409 		mdelay(delay);
1410 		timeout = (timeout > delay) ? timeout - delay : 0;
1411 		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1412 
1413 		if (status == ICE_ERR_AQ_NO_WORK)
1414 			/* lock free, but no work to do */
1415 			break;
1416 
1417 		if (!status)
1418 			/* lock acquired */
1419 			break;
1420 	}
1421 	if (status && status != ICE_ERR_AQ_NO_WORK)
1422 		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
1423 
1424 ice_acquire_res_exit:
1425 	if (status == ICE_ERR_AQ_NO_WORK) {
1426 		if (access == ICE_RES_WRITE)
1427 			ice_debug(hw, ICE_DBG_RES,
1428 				  "resource indicates no work to do.\n");
1429 		else
1430 			ice_debug(hw, ICE_DBG_RES,
1431 				  "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
1432 	}
1433 	return status;
1434 }
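
/* Typical acquire/release flow (a sketch only; the resource ID, access type
 * and timeout below are examples, not a recommendation):
 *
 *	status = ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ, timeout_ms);
 *	if (!status) {
 *		... access the shared resource ...
 *		ice_release_res(hw, ICE_NVM_RES_ID);
 *	}
 */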
1435 
1436 /**
1437  * ice_release_res
1438  * @hw: pointer to the HW structure
1439  * @res: resource ID
1440  *
1441  * This function will release a resource using the proper Admin Command.
1442  */
1443 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
1444 {
1445 	enum ice_status status;
1446 	u32 total_delay = 0;
1447 
1448 	status = ice_aq_release_res(hw, res, 0, NULL);
1449 
1450 	/* there are some rare cases when trying to release the resource
1451 	 * results in an admin queue timeout, so handle them correctly
1452 	 */
1453 	while ((status == ICE_ERR_AQ_TIMEOUT) &&
1454 	       (total_delay < hw->adminq.sq_cmd_timeout)) {
1455 		mdelay(1);
1456 		status = ice_aq_release_res(hw, res, 0, NULL);
1457 		total_delay++;
1458 	}
1459 }
1460 
1461 /**
1462  * ice_get_num_per_func - determine number of resources per PF
1463  * @hw: pointer to the HW structure
1464  * @max: value to be evenly split between each PF
1465  *
1466  * Determine the number of valid functions by going through the bitmap returned
1467  * from parsing capabilities and use this to calculate the number of resources
1468  * per PF based on the max value passed in.
1469  */
1470 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
1471 {
1472 	u8 funcs;
1473 
1474 #define ICE_CAPS_VALID_FUNCS_M	0xFF
1475 	funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
1476 			 ICE_CAPS_VALID_FUNCS_M);
1477 
1478 	if (!funcs)
1479 		return 0;
1480 
1481 	return max / funcs;
1482 }
1483 
1484 /**
1485  * ice_parse_caps - parse function/device capabilities
1486  * @hw: pointer to the HW struct
1487  * @buf: pointer to a buffer containing function/device capability records
1488  * @cap_count: number of capability records in the list
1489  * @opc: type of capabilities list to parse
1490  *
1491  * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
1492  */
1493 static void
1494 ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
1495 	       enum ice_adminq_opc opc)
1496 {
1497 	struct ice_aqc_list_caps_elem *cap_resp;
1498 	struct ice_hw_func_caps *func_p = NULL;
1499 	struct ice_hw_dev_caps *dev_p = NULL;
1500 	struct ice_hw_common_caps *caps;
1501 	char const *prefix;
1502 	u32 i;
1503 
1504 	if (!buf)
1505 		return;
1506 
1507 	cap_resp = (struct ice_aqc_list_caps_elem *)buf;
1508 
1509 	if (opc == ice_aqc_opc_list_dev_caps) {
1510 		dev_p = &hw->dev_caps;
1511 		caps = &dev_p->common_cap;
1512 		prefix = "dev cap";
1513 	} else if (opc == ice_aqc_opc_list_func_caps) {
1514 		func_p = &hw->func_caps;
1515 		caps = &func_p->common_cap;
1516 		prefix = "func cap";
1517 	} else {
1518 		ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
1519 		return;
1520 	}
1521 
1522 	for (i = 0; caps && i < cap_count; i++, cap_resp++) {
1523 		u32 logical_id = le32_to_cpu(cap_resp->logical_id);
1524 		u32 phys_id = le32_to_cpu(cap_resp->phys_id);
1525 		u32 number = le32_to_cpu(cap_resp->number);
1526 		u16 cap = le16_to_cpu(cap_resp->cap);
1527 
1528 		switch (cap) {
1529 		case ICE_AQC_CAPS_VALID_FUNCTIONS:
1530 			caps->valid_functions = number;
1531 			ice_debug(hw, ICE_DBG_INIT,
1532 				  "%s: valid functions = %d\n", prefix,
1533 				  caps->valid_functions);
1534 			break;
1535 		case ICE_AQC_CAPS_SRIOV:
1536 			caps->sr_iov_1_1 = (number == 1);
1537 			ice_debug(hw, ICE_DBG_INIT,
1538 				  "%s: SR-IOV = %d\n", prefix,
1539 				  caps->sr_iov_1_1);
1540 			break;
1541 		case ICE_AQC_CAPS_VF:
1542 			if (dev_p) {
1543 				dev_p->num_vfs_exposed = number;
1544 				ice_debug(hw, ICE_DBG_INIT,
1545 					  "%s: VFs exposed = %d\n", prefix,
1546 					  dev_p->num_vfs_exposed);
1547 			} else if (func_p) {
1548 				func_p->num_allocd_vfs = number;
1549 				func_p->vf_base_id = logical_id;
1550 				ice_debug(hw, ICE_DBG_INIT,
1551 					  "%s: VFs allocated = %d\n", prefix,
1552 					  func_p->num_allocd_vfs);
1553 				ice_debug(hw, ICE_DBG_INIT,
1554 					  "%s: VF base_id = %d\n", prefix,
1555 					  func_p->vf_base_id);
1556 			}
1557 			break;
1558 		case ICE_AQC_CAPS_VSI:
1559 			if (dev_p) {
1560 				dev_p->num_vsi_allocd_to_host = number;
1561 				ice_debug(hw, ICE_DBG_INIT,
1562 					  "%s: num VSI alloc to host = %d\n",
1563 					  prefix,
1564 					  dev_p->num_vsi_allocd_to_host);
1565 			} else if (func_p) {
1566 				func_p->guar_num_vsi =
1567 					ice_get_num_per_func(hw, ICE_MAX_VSI);
1568 				ice_debug(hw, ICE_DBG_INIT,
1569 					  "%s: num guaranteed VSI (fw) = %d\n",
1570 					  prefix, number);
1571 				ice_debug(hw, ICE_DBG_INIT,
1572 					  "%s: num guaranteed VSI = %d\n",
1573 					  prefix, func_p->guar_num_vsi);
1574 			}
1575 			break;
1576 		case ICE_AQC_CAPS_RSS:
1577 			caps->rss_table_size = number;
1578 			caps->rss_table_entry_width = logical_id;
1579 			ice_debug(hw, ICE_DBG_INIT,
1580 				  "%s: RSS table size = %d\n", prefix,
1581 				  caps->rss_table_size);
1582 			ice_debug(hw, ICE_DBG_INIT,
1583 				  "%s: RSS table width = %d\n", prefix,
1584 				  caps->rss_table_entry_width);
1585 			break;
1586 		case ICE_AQC_CAPS_RXQS:
1587 			caps->num_rxq = number;
1588 			caps->rxq_first_id = phys_id;
1589 			ice_debug(hw, ICE_DBG_INIT,
1590 				  "%s: num Rx queues = %d\n", prefix,
1591 				  caps->num_rxq);
1592 			ice_debug(hw, ICE_DBG_INIT,
1593 				  "%s: Rx first queue ID = %d\n", prefix,
1594 				  caps->rxq_first_id);
1595 			break;
1596 		case ICE_AQC_CAPS_TXQS:
1597 			caps->num_txq = number;
1598 			caps->txq_first_id = phys_id;
1599 			ice_debug(hw, ICE_DBG_INIT,
1600 				  "%s: num Tx queues = %d\n", prefix,
1601 				  caps->num_txq);
1602 			ice_debug(hw, ICE_DBG_INIT,
1603 				  "%s: Tx first queue ID = %d\n", prefix,
1604 				  caps->txq_first_id);
1605 			break;
1606 		case ICE_AQC_CAPS_MSIX:
1607 			caps->num_msix_vectors = number;
1608 			caps->msix_vector_first_id = phys_id;
1609 			ice_debug(hw, ICE_DBG_INIT,
1610 				  "%s: MSIX vector count = %d\n", prefix,
1611 				  caps->num_msix_vectors);
1612 			ice_debug(hw, ICE_DBG_INIT,
1613 				  "%s: MSIX first vector index = %d\n", prefix,
1614 				  caps->msix_vector_first_id);
1615 			break;
1616 		case ICE_AQC_CAPS_MAX_MTU:
1617 			caps->max_mtu = number;
1618 			ice_debug(hw, ICE_DBG_INIT, "%s: max MTU = %d\n",
1619 				  prefix, caps->max_mtu);
1620 			break;
1621 		default:
1622 			ice_debug(hw, ICE_DBG_INIT,
1623 				  "%s: unknown capability[%d]: 0x%x\n", prefix,
1624 				  i, cap);
1625 			break;
1626 		}
1627 	}
1628 }
1629 
1630 /**
1631  * ice_aq_discover_caps - query function/device capabilities
1632  * @hw: pointer to the HW struct
1633  * @buf: a virtual buffer to hold the capabilities
1634  * @buf_size: Size of the virtual buffer
 * @cap_count: on an AQ ENOMEM error, updated with the required capability count
1636  * @opc: capabilities type to discover - pass in the command opcode
1637  * @cd: pointer to command details structure or NULL
1638  *
1639  * Get the function(0x000a)/device(0x000b) capabilities description from
1640  * the firmware.
1641  */
1642 static enum ice_status
1643 ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
1644 		     enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1645 {
1646 	struct ice_aqc_list_caps *cmd;
1647 	struct ice_aq_desc desc;
1648 	enum ice_status status;
1649 
1650 	cmd = &desc.params.get_cap;
1651 
1652 	if (opc != ice_aqc_opc_list_func_caps &&
1653 	    opc != ice_aqc_opc_list_dev_caps)
1654 		return ICE_ERR_PARAM;
1655 
1656 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
1657 
1658 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1659 	if (!status)
1660 		ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc);
1661 	else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM)
1662 		*cap_count = le32_to_cpu(cmd->count);
1663 	return status;
1664 }
1665 
1666 /**
1667  * ice_discover_caps - get info about the HW
1668  * @hw: pointer to the hardware structure
1669  * @opc: capabilities type to discover - pass in the command opcode
1670  */
1671 static enum ice_status
1672 ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc)
1673 {
1674 	enum ice_status status;
1675 	u32 cap_count;
1676 	u16 cbuf_len;
1677 	u8 retries;
1678 
1679 	/* The driver doesn't know how many capabilities the device will return
1680 	 * so the buffer size required isn't known ahead of time. The driver
1681 	 * starts with cbuf_len and if this turns out to be insufficient, the
1682 	 * device returns ICE_AQ_RC_ENOMEM and also the cap_count it needs.
1683 	 * The driver then allocates the buffer based on the count and retries
1684 	 * the operation. So it follows that the retry count is 2.
1685 	 */
1686 #define ICE_GET_CAP_BUF_COUNT	40
1687 #define ICE_GET_CAP_RETRY_COUNT	2
1688 
1689 	cap_count = ICE_GET_CAP_BUF_COUNT;
1690 	retries = ICE_GET_CAP_RETRY_COUNT;
1691 
1692 	do {
1693 		void *cbuf;
1694 
1695 		cbuf_len = (u16)(cap_count *
1696 				 sizeof(struct ice_aqc_list_caps_elem));
1697 		cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL);
1698 		if (!cbuf)
1699 			return ICE_ERR_NO_MEMORY;
1700 
1701 		status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &cap_count,
1702 					      opc, NULL);
1703 		devm_kfree(ice_hw_to_dev(hw), cbuf);
1704 
1705 		if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
1706 			break;
1707 
1708 		/* If ENOMEM is returned, try again with bigger buffer */
1709 	} while (--retries);
1710 
1711 	return status;
1712 }
1713 
1714 /**
1715  * ice_get_caps - get info about the HW
1716  * @hw: pointer to the hardware structure
1717  */
1718 enum ice_status ice_get_caps(struct ice_hw *hw)
1719 {
1720 	enum ice_status status;
1721 
1722 	status = ice_discover_caps(hw, ice_aqc_opc_list_dev_caps);
1723 	if (!status)
1724 		status = ice_discover_caps(hw, ice_aqc_opc_list_func_caps);
1725 
1726 	return status;
1727 }
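
/* Usage sketch (illustrative, not part of this file): a caller holding an
 * otherwise initialized struct ice_hw could discover both capability sets
 * and then rely on the parsed values stored in hw, e.g. hw->func_caps.
 *
 *	enum ice_status status;
 *
 *	status = ice_get_caps(hw);
 *	if (status)
 *		return status;
 */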
1728 
1729 /**
1730  * ice_aq_manage_mac_write - manage MAC address write command
1731  * @hw: pointer to the HW struct
1732  * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
1733  * @flags: flags to control write behavior
1734  * @cd: pointer to command details structure or NULL
1735  *
1736  * This function is used to write MAC address to the NVM (0x0108).
1737  */
1738 enum ice_status
1739 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
1740 			struct ice_sq_cd *cd)
1741 {
1742 	struct ice_aqc_manage_mac_write *cmd;
1743 	struct ice_aq_desc desc;
1744 
1745 	cmd = &desc.params.mac_write;
1746 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
1747 
1748 	cmd->flags = flags;
1749 
1750 	/* Prep values for flags, sah, sal */
1751 	cmd->sah = htons(*((const u16 *)mac_addr));
1752 	cmd->sal = htonl(*((const u32 *)(mac_addr + 2)));
1753 
1754 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1755 }
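
/* Usage sketch (hypothetical values, for illustration only): writing a
 * locally administered address could look like the following; the address
 * bytes and the flags value are assumptions, not defaults of this driver.
 *
 *	u8 laa[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
 *
 *	status = ice_aq_manage_mac_write(hw, laa, flags, NULL);
 */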
1756 
1757 /**
1758  * ice_aq_clear_pxe_mode
1759  * @hw: pointer to the HW struct
1760  *
1761  * Tell the firmware that the driver is taking over from PXE (0x0110).
1762  */
1763 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
1764 {
1765 	struct ice_aq_desc desc;
1766 
1767 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
1768 	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
1769 
1770 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1771 }
1772 
1773 /**
1774  * ice_clear_pxe_mode - clear PXE operations mode
1775  * @hw: pointer to the HW struct
1776  *
1777  * Make sure all PXE mode settings are cleared, including things
1778  * like descriptor fetch/write-back mode.
1779  */
1780 void ice_clear_pxe_mode(struct ice_hw *hw)
1781 {
1782 	if (ice_check_sq_alive(hw, &hw->adminq))
1783 		ice_aq_clear_pxe_mode(hw);
1784 }
1785 
1786 /**
1787  * ice_get_link_speed_based_on_phy_type - returns link speed
1788  * @phy_type_low: lower part of phy_type
1789  * @phy_type_high: higher part of phy_type
1790  *
1791  * This helper function will convert an entry in PHY type structure
1792  * [phy_type_low, phy_type_high] to its corresponding link speed.
1793  * Note: In the [phy_type_low, phy_type_high] structure, exactly one bit
1794  * should be set, as this function converts a single PHY type to its
1795  * speed.
1796  * If no bit is set, ICE_AQ_LINK_SPEED_UNKNOWN is returned.
1797  * If more than one bit is set, ICE_AQ_LINK_SPEED_UNKNOWN is returned.
1798  */
1799 static u16
1800 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
1801 {
1802 	u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
1803 	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
1804 
1805 	switch (phy_type_low) {
1806 	case ICE_PHY_TYPE_LOW_100BASE_TX:
1807 	case ICE_PHY_TYPE_LOW_100M_SGMII:
1808 		speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
1809 		break;
1810 	case ICE_PHY_TYPE_LOW_1000BASE_T:
1811 	case ICE_PHY_TYPE_LOW_1000BASE_SX:
1812 	case ICE_PHY_TYPE_LOW_1000BASE_LX:
1813 	case ICE_PHY_TYPE_LOW_1000BASE_KX:
1814 	case ICE_PHY_TYPE_LOW_1G_SGMII:
1815 		speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
1816 		break;
1817 	case ICE_PHY_TYPE_LOW_2500BASE_T:
1818 	case ICE_PHY_TYPE_LOW_2500BASE_X:
1819 	case ICE_PHY_TYPE_LOW_2500BASE_KX:
1820 		speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
1821 		break;
1822 	case ICE_PHY_TYPE_LOW_5GBASE_T:
1823 	case ICE_PHY_TYPE_LOW_5GBASE_KR:
1824 		speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
1825 		break;
1826 	case ICE_PHY_TYPE_LOW_10GBASE_T:
1827 	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
1828 	case ICE_PHY_TYPE_LOW_10GBASE_SR:
1829 	case ICE_PHY_TYPE_LOW_10GBASE_LR:
1830 	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
1831 	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
1832 	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
1833 		speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
1834 		break;
1835 	case ICE_PHY_TYPE_LOW_25GBASE_T:
1836 	case ICE_PHY_TYPE_LOW_25GBASE_CR:
1837 	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
1838 	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
1839 	case ICE_PHY_TYPE_LOW_25GBASE_SR:
1840 	case ICE_PHY_TYPE_LOW_25GBASE_LR:
1841 	case ICE_PHY_TYPE_LOW_25GBASE_KR:
1842 	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
1843 	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
1844 	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
1845 	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
1846 		speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
1847 		break;
1848 	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
1849 	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
1850 	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
1851 	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
1852 	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
1853 	case ICE_PHY_TYPE_LOW_40G_XLAUI:
1854 		speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
1855 		break;
1856 	case ICE_PHY_TYPE_LOW_50GBASE_CR2:
1857 	case ICE_PHY_TYPE_LOW_50GBASE_SR2:
1858 	case ICE_PHY_TYPE_LOW_50GBASE_LR2:
1859 	case ICE_PHY_TYPE_LOW_50GBASE_KR2:
1860 	case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
1861 	case ICE_PHY_TYPE_LOW_50G_LAUI2:
1862 	case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
1863 	case ICE_PHY_TYPE_LOW_50G_AUI2:
1864 	case ICE_PHY_TYPE_LOW_50GBASE_CP:
1865 	case ICE_PHY_TYPE_LOW_50GBASE_SR:
1866 	case ICE_PHY_TYPE_LOW_50GBASE_FR:
1867 	case ICE_PHY_TYPE_LOW_50GBASE_LR:
1868 	case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
1869 	case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
1870 	case ICE_PHY_TYPE_LOW_50G_AUI1:
1871 		speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
1872 		break;
1873 	case ICE_PHY_TYPE_LOW_100GBASE_CR4:
1874 	case ICE_PHY_TYPE_LOW_100GBASE_SR4:
1875 	case ICE_PHY_TYPE_LOW_100GBASE_LR4:
1876 	case ICE_PHY_TYPE_LOW_100GBASE_KR4:
1877 	case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
1878 	case ICE_PHY_TYPE_LOW_100G_CAUI4:
1879 	case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
1880 	case ICE_PHY_TYPE_LOW_100G_AUI4:
1881 	case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
1882 	case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
1883 	case ICE_PHY_TYPE_LOW_100GBASE_CP2:
1884 	case ICE_PHY_TYPE_LOW_100GBASE_SR2:
1885 	case ICE_PHY_TYPE_LOW_100GBASE_DR:
1886 		speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
1887 		break;
1888 	default:
1889 		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
1890 		break;
1891 	}
1892 
1893 	switch (phy_type_high) {
1894 	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
1895 	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
1896 	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
1897 	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
1898 	case ICE_PHY_TYPE_HIGH_100G_AUI2:
1899 		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
1900 		break;
1901 	default:
1902 		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
1903 		break;
1904 	}
1905 
1906 	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
1907 	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
1908 		return ICE_AQ_LINK_SPEED_UNKNOWN;
1909 	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
1910 		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
1911 		return ICE_AQ_LINK_SPEED_UNKNOWN;
1912 	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
1913 		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
1914 		return speed_phy_type_low;
1915 	else
1916 		return speed_phy_type_high;
1917 }
1918 
1919 /**
1920  * ice_update_phy_type
1921  * @phy_type_low: pointer to the lower part of phy_type
1922  * @phy_type_high: pointer to the higher part of phy_type
1923  * @link_speeds_bitmap: targeted link speeds bitmap
1924  *
1925  * Note: For the link_speeds_bitmap structure, you can check it at
1926  * [ice_aqc_get_link_status->link_speed]. The caller can pass in a
1927  * link_speeds_bitmap that includes multiple speeds.
1928  *
1929  * Each entry in the [phy_type_low, phy_type_high] structure represents
1930  * a certain link speed. This helper function turns on bits in the
1931  * [phy_type_low, phy_type_high] structure based on the value of the
1932  * link_speeds_bitmap input parameter.
1933  */
1934 void
1935 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
1936 		    u16 link_speeds_bitmap)
1937 {
1938 	u64 pt_high;
1939 	u64 pt_low;
1940 	int index;
1941 	u16 speed;
1942 
1943 	/* We first check with low part of phy_type */
1944 	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
1945 		pt_low = BIT_ULL(index);
1946 		speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
1947 
1948 		if (link_speeds_bitmap & speed)
1949 			*phy_type_low |= BIT_ULL(index);
1950 	}
1951 
1952 	/* We then check with high part of phy_type */
1953 	for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
1954 		pt_high = BIT_ULL(index);
1955 		speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
1956 
1957 		if (link_speeds_bitmap & speed)
1958 			*phy_type_high |= BIT_ULL(index);
1959 	}
1960 }
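
/* Usage sketch (illustrative): rebuilding the PHY type masks from a speed
 * bitmap before a set PHY config request; the chosen speeds and the cfg
 * destination are assumptions.
 *
 *	u64 phy_low = 0, phy_high = 0;
 *
 *	ice_update_phy_type(&phy_low, &phy_high,
 *			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);
 *	cfg.phy_type_low = cpu_to_le64(phy_low);
 *	cfg.phy_type_high = cpu_to_le64(phy_high);
 */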
1961 
1962 /**
1963  * ice_aq_set_phy_cfg
1964  * @hw: pointer to the HW struct
1965  * @lport: logical port number
1966  * @cfg: structure with PHY configuration data to be set
1967  * @cd: pointer to command details structure or NULL
1968  *
1969  * Set the various PHY configuration parameters supported on the Port.
1970  * One or more of the Set PHY config parameters may be ignored in an MFP
1971  * mode as the PF may not have the privilege to set some of the PHY Config
1972  * parameters. This status will be indicated by the command response (0x0601).
1973  */
1974 enum ice_status
1975 ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
1976 		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
1977 {
1978 	struct ice_aq_desc desc;
1979 
1980 	if (!cfg)
1981 		return ICE_ERR_PARAM;
1982 
1983 	/* Ensure that only valid bits of cfg->caps can be turned on. */
1984 	if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
1985 		ice_debug(hw, ICE_DBG_PHY,
1986 			  "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
1987 			  cfg->caps);
1988 
1989 		cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
1990 	}
1991 
1992 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
1993 	desc.params.set_phy.lport_num = lport;
1994 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1995 
1996 	return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
1997 }
1998 
1999 /**
2000  * ice_update_link_info - update status of the HW network link
2001  * @pi: port info structure of the interested logical port
2002  */
2003 enum ice_status ice_update_link_info(struct ice_port_info *pi)
2004 {
2005 	struct ice_link_status *li;
2006 	enum ice_status status;
2007 
2008 	if (!pi)
2009 		return ICE_ERR_PARAM;
2010 
2011 	li = &pi->phy.link_info;
2012 
2013 	status = ice_aq_get_link_info(pi, true, NULL, NULL);
2014 	if (status)
2015 		return status;
2016 
2017 	if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
2018 		struct ice_aqc_get_phy_caps_data *pcaps;
2019 		struct ice_hw *hw;
2020 
2021 		hw = pi->hw;
2022 		pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
2023 				     GFP_KERNEL);
2024 		if (!pcaps)
2025 			return ICE_ERR_NO_MEMORY;
2026 
2027 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
2028 					     pcaps, NULL);
2029 		if (!status)
2030 			memcpy(li->module_type, &pcaps->module_type,
2031 			       sizeof(li->module_type));
2032 
2033 		devm_kfree(ice_hw_to_dev(hw), pcaps);
2034 	}
2035 
2036 	return status;
2037 }
2038 
2039 /**
2040  * ice_set_fc
2041  * @pi: port information structure
2042  * @aq_failures: pointer to status code, specific to ice_set_fc routine
2043  * @ena_auto_link_update: enable automatic link update
2044  *
2045  * Set the requested flow control mode.
2046  */
2047 enum ice_status
2048 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
2049 {
2050 	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
2051 	struct ice_aqc_get_phy_caps_data *pcaps;
2052 	enum ice_status status;
2053 	u8 pause_mask = 0x0;
2054 	struct ice_hw *hw;
2055 
2056 	if (!pi)
2057 		return ICE_ERR_PARAM;
2058 	hw = pi->hw;
2059 	*aq_failures = ICE_SET_FC_AQ_FAIL_NONE;
2060 
2061 	switch (pi->fc.req_mode) {
2062 	case ICE_FC_FULL:
2063 		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2064 		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2065 		break;
2066 	case ICE_FC_RX_PAUSE:
2067 		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2068 		break;
2069 	case ICE_FC_TX_PAUSE:
2070 		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2071 		break;
2072 	default:
2073 		break;
2074 	}
2075 
2076 	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
2077 	if (!pcaps)
2078 		return ICE_ERR_NO_MEMORY;
2079 
2080 	/* Get the current PHY config */
2081 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
2082 				     NULL);
2083 	if (status) {
2084 		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
2085 		goto out;
2086 	}
2087 
2088 	/* clear the old pause settings */
2089 	cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
2090 				   ICE_AQC_PHY_EN_RX_LINK_PAUSE);
2091 
2092 	/* set the new capabilities */
2093 	cfg.caps |= pause_mask;
2094 
2095 	/* If the capabilities have changed, then set the new config */
2096 	if (cfg.caps != pcaps->caps) {
2097 		int retry_count, retry_max = 10;
2098 
2099 		/* Auto restart link so settings take effect */
2100 		if (ena_auto_link_update)
2101 			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2102 		/* Copy over all the old settings */
2103 		cfg.phy_type_high = pcaps->phy_type_high;
2104 		cfg.phy_type_low = pcaps->phy_type_low;
2105 		cfg.low_power_ctrl = pcaps->low_power_ctrl;
2106 		cfg.eee_cap = pcaps->eee_cap;
2107 		cfg.eeer_value = pcaps->eeer_value;
2108 		cfg.link_fec_opt = pcaps->link_fec_options;
2109 
2110 		status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL);
2111 		if (status) {
2112 			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
2113 			goto out;
2114 		}
2115 
2116 		/* Update the link info.
2117 		 * It sometimes takes a really long time for link to
2118 		 * come back from the atomic reset. Thus, we wait a
2119 		 * little bit.
2120 		 */
2121 		for (retry_count = 0; retry_count < retry_max; retry_count++) {
2122 			status = ice_update_link_info(pi);
2123 
2124 			if (!status)
2125 				break;
2126 
2127 			mdelay(100);
2128 		}
2129 
2130 		if (status)
2131 			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
2132 	}
2133 
2134 out:
2135 	devm_kfree(ice_hw_to_dev(hw), pcaps);
2136 	return status;
2137 }
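
/* Usage sketch (illustrative, not part of this driver): requesting full
 * flow control with an automatic link restart; how the caller reports the
 * aq_failures value is an assumption.
 *
 *	u8 aq_failures;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	status = ice_set_fc(pi, &aq_failures, true);
 *	if (status)
 *		ice_debug(pi->hw, ICE_DBG_PHY,
 *			  "set fc failed %d, aq_failures %d\n",
 *			  status, aq_failures);
 */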
2138 
2139 /**
2140  * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
2141  * @caps: PHY ability structure to copy data from
2142  * @cfg: PHY configuration structure to copy data to
2143  *
2144  * Helper function to copy AQC PHY get ability data to PHY set configuration
2145  * data structure
2146  */
2147 void
2148 ice_copy_phy_caps_to_cfg(struct ice_aqc_get_phy_caps_data *caps,
2149 			 struct ice_aqc_set_phy_cfg_data *cfg)
2150 {
2151 	if (!caps || !cfg)
2152 		return;
2153 
2154 	cfg->phy_type_low = caps->phy_type_low;
2155 	cfg->phy_type_high = caps->phy_type_high;
2156 	cfg->caps = caps->caps;
2157 	cfg->low_power_ctrl = caps->low_power_ctrl;
2158 	cfg->eee_cap = caps->eee_cap;
2159 	cfg->eeer_value = caps->eeer_value;
2160 	cfg->link_fec_opt = caps->link_fec_options;
2161 }
2162 
2163 /**
2164  * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
2165  * @cfg: PHY configuration data to set FEC mode
2166  * @fec: FEC mode to configure
2167  *
2168  * Caller should copy ice_aqc_get_phy_caps_data.caps ICE_AQC_PHY_EN_AUTO_FEC
2169  * (bit 7) and ice_aqc_get_phy_caps_data.link_fec_options to cfg.caps
2170  * ICE_AQ_PHY_ENA_AUTO_FEC (bit 7) and cfg.link_fec_options before calling.
2171  */
2172 void
2173 ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec)
2174 {
2175 	switch (fec) {
2176 	case ICE_FEC_BASER:
2177 		/* Clear auto FEC and RS bits, and AND BASE-R ability
2178 		 * bits and OR request bits.
2179 		 */
2180 		cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC;
2181 		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
2182 				     ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
2183 		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
2184 				     ICE_AQC_PHY_FEC_25G_KR_REQ;
2185 		break;
2186 	case ICE_FEC_RS:
2187 		/* Clear auto FEC and BASE-R bits, and AND RS ability
2188 		 * bits and OR request bits.
2189 		 */
2190 		cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC;
2191 		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
2192 		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
2193 				     ICE_AQC_PHY_FEC_25G_RS_544_REQ;
2194 		break;
2195 	case ICE_FEC_NONE:
2196 		/* Clear auto FEC and all FEC option bits. */
2197 		cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC;
2198 		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
2199 		break;
2200 	case ICE_FEC_AUTO:
2201 		/* AND auto FEC bit, and all caps bits. */
2202 		cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
2203 		break;
2204 	}
2205 }
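
/* Usage sketch (illustrative): the two helpers above are normally used back
 * to back on a caps buffer obtained from ice_aq_get_phy_caps(); the report
 * mode and the FEC choice below are assumptions.
 *
 *	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
 *				     pcaps, NULL);
 *	if (!status) {
 *		ice_copy_phy_caps_to_cfg(pcaps, &cfg);
 *		ice_cfg_phy_fec(&cfg, ICE_FEC_RS);
 *		status = ice_aq_set_phy_cfg(pi->hw, pi->lport, &cfg, NULL);
 *	}
 */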
2206 
2207 /**
2208  * ice_get_link_status - get status of the HW network link
2209  * @pi: port information structure
2210  * @link_up: pointer to bool (true/false = linkup/linkdown)
2211  *
2212  * Variable link_up is true if link is up, false if link is down.
2213  * The variable link_up is invalid if status is non-zero. As a
2214  * result of this call, link status reporting becomes enabled.
2215  */
2216 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
2217 {
2218 	struct ice_phy_info *phy_info;
2219 	enum ice_status status = 0;
2220 
2221 	if (!pi || !link_up)
2222 		return ICE_ERR_PARAM;
2223 
2224 	phy_info = &pi->phy;
2225 
2226 	if (phy_info->get_link_info) {
2227 		status = ice_update_link_info(pi);
2228 
2229 		if (status)
2230 			ice_debug(pi->hw, ICE_DBG_LINK,
2231 				  "get link status error, status = %d\n",
2232 				  status);
2233 	}
2234 
2235 	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
2236 
2237 	return status;
2238 }
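
/* Usage sketch (illustrative): a watchdog-style link poll; what the caller
 * does once link_up is known, and handle_link_up() itself, are assumptions.
 *
 *	bool link_up;
 *
 *	if (!ice_get_link_status(pi, &link_up) && link_up)
 *		handle_link_up();
 */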
2239 
2240 /**
2241  * ice_aq_set_link_restart_an
2242  * @pi: pointer to the port information structure
2243  * @ena_link: if true: enable link, if false: disable link
2244  * @cd: pointer to command details structure or NULL
2245  *
2246  * Sets up the link and restarts the Auto-Negotiation over the link.
2247  */
2248 enum ice_status
2249 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
2250 			   struct ice_sq_cd *cd)
2251 {
2252 	struct ice_aqc_restart_an *cmd;
2253 	struct ice_aq_desc desc;
2254 
2255 	cmd = &desc.params.restart_an;
2256 
2257 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
2258 
2259 	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
2260 	cmd->lport_num = pi->lport;
2261 	if (ena_link)
2262 		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
2263 	else
2264 		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
2265 
2266 	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
2267 }
2268 
2269 /**
2270  * ice_aq_set_event_mask
2271  * @hw: pointer to the HW struct
2272  * @port_num: port number of the physical function
2273  * @mask: event mask to be set
2274  * @cd: pointer to command details structure or NULL
2275  *
2276  * Set event mask (0x0613)
2277  */
2278 enum ice_status
2279 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
2280 		      struct ice_sq_cd *cd)
2281 {
2282 	struct ice_aqc_set_event_mask *cmd;
2283 	struct ice_aq_desc desc;
2284 
2285 	cmd = &desc.params.set_event_mask;
2286 
2287 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
2288 
2289 	cmd->lport_num = port_num;
2290 
2291 	cmd->event_mask = cpu_to_le16(mask);
2292 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2293 }
2294 
2295 /**
2296  * ice_aq_set_mac_loopback
2297  * @hw: pointer to the HW struct
2298  * @ena_lpbk: Enable or Disable loopback
2299  * @cd: pointer to command details structure or NULL
2300  *
2301  * Enable/disable loopback on a given port
2302  */
2303 enum ice_status
2304 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
2305 {
2306 	struct ice_aqc_set_mac_lb *cmd;
2307 	struct ice_aq_desc desc;
2308 
2309 	cmd = &desc.params.set_mac_lb;
2310 
2311 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
2312 	if (ena_lpbk)
2313 		cmd->lb_mode = ICE_AQ_MAC_LB_EN;
2314 
2315 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2316 }
2317 
2318 /**
2319  * ice_aq_set_port_id_led
2320  * @pi: pointer to the port information
2321  * @is_orig_mode: is this LED set to original mode (by the net-list)
2322  * @cd: pointer to command details structure or NULL
2323  *
2324  * Set LED value for the given port (0x06e9)
2325  */
2326 enum ice_status
2327 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
2328 		       struct ice_sq_cd *cd)
2329 {
2330 	struct ice_aqc_set_port_id_led *cmd;
2331 	struct ice_hw *hw = pi->hw;
2332 	struct ice_aq_desc desc;
2333 
2334 	cmd = &desc.params.set_port_id_led;
2335 
2336 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
2337 
2338 	if (is_orig_mode)
2339 		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
2340 	else
2341 		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
2342 
2343 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2344 }
2345 
2346 /**
2347  * __ice_aq_get_set_rss_lut
2348  * @hw: pointer to the hardware structure
2349  * @vsi_id: VSI FW index
2350  * @lut_type: LUT table type
2351  * @lut: pointer to the LUT buffer provided by the caller
2352  * @lut_size: size of the LUT buffer
2353  * @glob_lut_idx: global LUT index
2354  * @set: set true to set the table, false to get the table
2355  *
2356  * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
2357  */
2358 static enum ice_status
2359 __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
2360 			 u16 lut_size, u8 glob_lut_idx, bool set)
2361 {
2362 	struct ice_aqc_get_set_rss_lut *cmd_resp;
2363 	struct ice_aq_desc desc;
2364 	enum ice_status status;
2365 	u16 flags = 0;
2366 
2367 	cmd_resp = &desc.params.get_set_rss_lut;
2368 
2369 	if (set) {
2370 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
2371 		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2372 	} else {
2373 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
2374 	}
2375 
2376 	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
2377 					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
2378 					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
2379 				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);
2380 
2381 	switch (lut_type) {
2382 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
2383 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
2384 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
2385 		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
2386 			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
2387 		break;
2388 	default:
2389 		status = ICE_ERR_PARAM;
2390 		goto ice_aq_get_set_rss_lut_exit;
2391 	}
2392 
2393 	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
2394 		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
2395 			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
2396 
2397 		if (!set)
2398 			goto ice_aq_get_set_rss_lut_send;
2399 	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
2400 		if (!set)
2401 			goto ice_aq_get_set_rss_lut_send;
2402 	} else {
2403 		goto ice_aq_get_set_rss_lut_send;
2404 	}
2405 
2406 	/* LUT size is only valid for Global and PF table types */
2407 	switch (lut_size) {
2408 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
2409 		break;
2410 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
2411 		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
2412 			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
2413 			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
2414 		break;
2415 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
2416 		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
2417 			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
2418 				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
2419 				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
2420 			break;
2421 		}
2422 		/* fall-through */
2423 	default:
2424 		status = ICE_ERR_PARAM;
2425 		goto ice_aq_get_set_rss_lut_exit;
2426 	}
2427 
2428 ice_aq_get_set_rss_lut_send:
2429 	cmd_resp->flags = cpu_to_le16(flags);
2430 	status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
2431 
2432 ice_aq_get_set_rss_lut_exit:
2433 	return status;
2434 }
2435 
2436 /**
2437  * ice_aq_get_rss_lut
2438  * @hw: pointer to the hardware structure
2439  * @vsi_handle: software VSI handle
2440  * @lut_type: LUT table type
2441  * @lut: pointer to the LUT buffer provided by the caller
2442  * @lut_size: size of the LUT buffer
2443  *
2444  * get the RSS lookup table, PF or VSI type
2445  */
2446 enum ice_status
2447 ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
2448 		   u8 *lut, u16 lut_size)
2449 {
2450 	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
2451 		return ICE_ERR_PARAM;
2452 
2453 	return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2454 					lut_type, lut, lut_size, 0, false);
2455 }
2456 
2457 /**
2458  * ice_aq_set_rss_lut
2459  * @hw: pointer to the hardware structure
2460  * @vsi_handle: software VSI handle
2461  * @lut_type: LUT table type
2462  * @lut: pointer to the LUT buffer provided by the caller
2463  * @lut_size: size of the LUT buffer
2464  *
2465  * set the RSS lookup table, PF or VSI type
2466  */
2467 enum ice_status
2468 ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
2469 		   u8 *lut, u16 lut_size)
2470 {
2471 	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
2472 		return ICE_ERR_PARAM;
2473 
2474 	return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2475 					lut_type, lut, lut_size, 0, true);
2476 }
2477 
2478 /**
2479  * __ice_aq_get_set_rss_key
2480  * @hw: pointer to the HW struct
2481  * @vsi_id: VSI FW index
2482  * @key: pointer to key info struct
2483  * @set: set true to set the key, false to get the key
2484  *
2485  * get (0x0B04) or set (0x0B02) the RSS key per VSI
2486  */
2487 static enum ice_status
2488 __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
2489 			 struct ice_aqc_get_set_rss_keys *key,
2490 			 bool set)
2491 {
2492 	struct ice_aqc_get_set_rss_key *cmd_resp;
2493 	u16 key_size = sizeof(*key);
2494 	struct ice_aq_desc desc;
2495 
2496 	cmd_resp = &desc.params.get_set_rss_key;
2497 
2498 	if (set) {
2499 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
2500 		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2501 	} else {
2502 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
2503 	}
2504 
2505 	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
2506 					 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
2507 					ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
2508 				       ICE_AQC_GSET_RSS_KEY_VSI_VALID);
2509 
2510 	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
2511 }
2512 
2513 /**
2514  * ice_aq_get_rss_key
2515  * @hw: pointer to the HW struct
2516  * @vsi_handle: software VSI handle
2517  * @key: pointer to key info struct
2518  *
2519  * get the RSS key per VSI
2520  */
2521 enum ice_status
2522 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
2523 		   struct ice_aqc_get_set_rss_keys *key)
2524 {
2525 	if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
2526 		return ICE_ERR_PARAM;
2527 
2528 	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2529 					key, false);
2530 }
2531 
2532 /**
2533  * ice_aq_set_rss_key
2534  * @hw: pointer to the HW struct
2535  * @vsi_handle: software VSI handle
2536  * @keys: pointer to key info struct
2537  *
2538  * set the RSS key per VSI
2539  */
2540 enum ice_status
2541 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
2542 		   struct ice_aqc_get_set_rss_keys *keys)
2543 {
2544 	if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
2545 		return ICE_ERR_PARAM;
2546 
2547 	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2548 					keys, true);
2549 }
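
/* Usage sketch (illustrative): RSS programming usually pairs the key and
 * LUT writes for a VSI; keys, lut and lut_size are caller-owned buffers
 * and the LUT type below is an assumption.
 *
 *	status = ice_aq_set_rss_key(hw, vsi_handle, keys);
 *	if (!status)
 *		status = ice_aq_set_rss_lut(hw, vsi_handle,
 *					    ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI,
 *					    lut, lut_size);
 */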
2550 
2551 /**
2552  * ice_aq_add_lan_txq
2553  * @hw: pointer to the hardware structure
2554  * @num_qgrps: Number of added queue groups
2555  * @qg_list: list of queue groups to be added
2556  * @buf_size: size of buffer for indirect command
2557  * @cd: pointer to command details structure or NULL
2558  *
2559  * Add Tx LAN queue (0x0C30)
2560  *
2561  * NOTE:
2562  * Prior to calling add Tx LAN queue:
2563  * Initialize the following as part of the Tx queue context:
2564  * Completion queue ID if the queue uses Completion queue, Quanta profile,
2565  * Cache profile and Packet shaper profile.
2566  *
2567  * After add Tx LAN queue AQ command is completed:
2568  * Interrupts should be associated with specific queues,
2569  * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
2570  * flow.
2571  */
2572 static enum ice_status
2573 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
2574 		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
2575 		   struct ice_sq_cd *cd)
2576 {
2577 	u16 i, sum_header_size, sum_q_size = 0;
2578 	struct ice_aqc_add_tx_qgrp *list;
2579 	struct ice_aqc_add_txqs *cmd;
2580 	struct ice_aq_desc desc;
2581 
2582 	cmd = &desc.params.add_txqs;
2583 
2584 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
2585 
2586 	if (!qg_list)
2587 		return ICE_ERR_PARAM;
2588 
2589 	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
2590 		return ICE_ERR_PARAM;
2591 
2592 	sum_header_size = num_qgrps *
2593 		(sizeof(*qg_list) - sizeof(*qg_list->txqs));
2594 
2595 	list = qg_list;
2596 	for (i = 0; i < num_qgrps; i++) {
2597 		struct ice_aqc_add_txqs_perq *q = list->txqs;
2598 
2599 		sum_q_size += list->num_txqs * sizeof(*q);
2600 		list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);
2601 	}
2602 
2603 	if (buf_size != (sum_header_size + sum_q_size))
2604 		return ICE_ERR_PARAM;
2605 
2606 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2607 
2608 	cmd->num_qgrps = num_qgrps;
2609 
2610 	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
2611 }
2612 
2613 /**
2614  * ice_aq_dis_lan_txq
2615  * @hw: pointer to the hardware structure
2616  * @num_qgrps: number of groups in the list
2617  * @qg_list: the list of groups to disable
2618  * @buf_size: the total size of the qg_list buffer in bytes
2619  * @rst_src: if called due to reset, specifies the reset source
2620  * @vmvf_num: the relative VM or VF number that is undergoing the reset
2621  * @cd: pointer to command details structure or NULL
2622  *
2623  * Disable LAN Tx queue (0x0C31)
2624  */
2625 static enum ice_status
2626 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
2627 		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
2628 		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
2629 		   struct ice_sq_cd *cd)
2630 {
2631 	struct ice_aqc_dis_txqs *cmd;
2632 	struct ice_aq_desc desc;
2633 	enum ice_status status;
2634 	u16 i, sz = 0;
2635 
2636 	cmd = &desc.params.dis_txqs;
2637 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
2638 
2639 	/* qg_list can be NULL only in VM/VF reset flow */
2640 	if (!qg_list && !rst_src)
2641 		return ICE_ERR_PARAM;
2642 
2643 	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
2644 		return ICE_ERR_PARAM;
2645 
2646 	cmd->num_entries = num_qgrps;
2647 
2648 	cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
2649 					    ICE_AQC_Q_DIS_TIMEOUT_M);
2650 
2651 	switch (rst_src) {
2652 	case ICE_VM_RESET:
2653 		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
2654 		cmd->vmvf_and_timeout |=
2655 			cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
2656 		break;
2657 	case ICE_VF_RESET:
2658 		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
2659 		/* In this case, FW expects vmvf_num to be absolute VF ID */
2660 		cmd->vmvf_and_timeout |=
2661 			cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
2662 				    ICE_AQC_Q_DIS_VMVF_NUM_M);
2663 		break;
2664 	case ICE_NO_RESET:
2665 	default:
2666 		break;
2667 	}
2668 
2669 	/* flush pipe on time out */
2670 	cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
2671 	/* If no queue group info, we are in a reset flow. Issue the AQ */
2672 	if (!qg_list)
2673 		goto do_aq;
2674 
2675 	/* set RD bit to indicate that command buffer is provided by the driver
2676 	 * and it needs to be read by the firmware
2677 	 */
2678 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2679 
2680 	for (i = 0; i < num_qgrps; ++i) {
2681 		/* Calculate the size taken up by the queue IDs in this group */
2682 		sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);
2683 
2684 		/* Add the size of the group header */
2685 		sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);
2686 
2687 		/* If the num of queues is even, add 2 bytes of padding */
2688 		if ((qg_list[i].num_qs % 2) == 0)
2689 			sz += 2;
2690 	}
2691 
2692 	if (buf_size != sz)
2693 		return ICE_ERR_PARAM;
2694 
2695 do_aq:
2696 	status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
2697 	if (status) {
2698 		if (!qg_list)
2699 			ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
2700 				  vmvf_num, hw->adminq.sq_last_status);
2701 		else
2702 			ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
2703 				  le16_to_cpu(qg_list[0].q_id[0]),
2704 				  hw->adminq.sq_last_status);
2705 	}
2706 	return status;
2707 }
2708 
2709 /* End of FW Admin Queue command wrappers */
2710 
2711 /**
2712  * ice_write_byte - write a byte to a packed context structure
2713  * @src_ctx:  the context structure to read from
2714  * @dest_ctx: the context to be written to
2715  * @ce_info:  a description of the struct to be filled
2716  */
2717 static void
2718 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
2719 {
2720 	u8 src_byte, dest_byte, mask;
2721 	u8 *from, *dest;
2722 	u16 shift_width;
2723 
2724 	/* copy from the next struct field */
2725 	from = src_ctx + ce_info->offset;
2726 
2727 	/* prepare the bits and mask */
2728 	shift_width = ce_info->lsb % 8;
2729 	mask = (u8)(BIT(ce_info->width) - 1);
2730 
2731 	src_byte = *from;
2732 	src_byte &= mask;
2733 
2734 	/* shift to correct alignment */
2735 	mask <<= shift_width;
2736 	src_byte <<= shift_width;
2737 
2738 	/* get the current bits from the target bit string */
2739 	dest = dest_ctx + (ce_info->lsb / 8);
2740 
2741 	memcpy(&dest_byte, dest, sizeof(dest_byte));
2742 
2743 	dest_byte &= ~mask;	/* get the bits not changing */
2744 	dest_byte |= src_byte;	/* add in the new bits */
2745 
2746 	/* put it all back */
2747 	memcpy(dest, &dest_byte, sizeof(dest_byte));
2748 }
2749 
2750 /**
2751  * ice_write_word - write a word to a packed context structure
2752  * @src_ctx:  the context structure to read from
2753  * @dest_ctx: the context to be written to
2754  * @ce_info:  a description of the struct to be filled
2755  */
2756 static void
2757 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
2758 {
2759 	u16 src_word, mask;
2760 	__le16 dest_word;
2761 	u8 *from, *dest;
2762 	u16 shift_width;
2763 
2764 	/* copy from the next struct field */
2765 	from = src_ctx + ce_info->offset;
2766 
2767 	/* prepare the bits and mask */
2768 	shift_width = ce_info->lsb % 8;
2769 	mask = BIT(ce_info->width) - 1;
2770 
2771 	/* don't swizzle the bits until after the mask because the mask bits
2772 	 * will be in a different bit position on big endian machines
2773 	 */
2774 	src_word = *(u16 *)from;
2775 	src_word &= mask;
2776 
2777 	/* shift to correct alignment */
2778 	mask <<= shift_width;
2779 	src_word <<= shift_width;
2780 
2781 	/* get the current bits from the target bit string */
2782 	dest = dest_ctx + (ce_info->lsb / 8);
2783 
2784 	memcpy(&dest_word, dest, sizeof(dest_word));
2785 
2786 	dest_word &= ~(cpu_to_le16(mask));	/* get the bits not changing */
2787 	dest_word |= cpu_to_le16(src_word);	/* add in the new bits */
2788 
2789 	/* put it all back */
2790 	memcpy(dest, &dest_word, sizeof(dest_word));
2791 }
2792 
2793 /**
2794  * ice_write_dword - write a dword to a packed context structure
2795  * @src_ctx:  the context structure to read from
2796  * @dest_ctx: the context to be written to
2797  * @ce_info:  a description of the struct to be filled
2798  */
2799 static void
2800 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
2801 {
2802 	u32 src_dword, mask;
2803 	__le32 dest_dword;
2804 	u8 *from, *dest;
2805 	u16 shift_width;
2806 
2807 	/* copy from the next struct field */
2808 	from = src_ctx + ce_info->offset;
2809 
2810 	/* prepare the bits and mask */
2811 	shift_width = ce_info->lsb % 8;
2812 
2813 	/* if the field width is exactly 32 on an x86 machine, then the shift
2814 	 * operation will not work because the SHL instruction's shift count is
2815 	 * masked to 5 bits so the shift will do nothing
2816 	 */
2817 	if (ce_info->width < 32)
2818 		mask = BIT(ce_info->width) - 1;
2819 	else
2820 		mask = (u32)~0;
2821 
2822 	/* don't swizzle the bits until after the mask because the mask bits
2823 	 * will be in a different bit position on big endian machines
2824 	 */
2825 	src_dword = *(u32 *)from;
2826 	src_dword &= mask;
2827 
2828 	/* shift to correct alignment */
2829 	mask <<= shift_width;
2830 	src_dword <<= shift_width;
2831 
2832 	/* get the current bits from the target bit string */
2833 	dest = dest_ctx + (ce_info->lsb / 8);
2834 
2835 	memcpy(&dest_dword, dest, sizeof(dest_dword));
2836 
2837 	dest_dword &= ~(cpu_to_le32(mask));	/* get the bits not changing */
2838 	dest_dword |= cpu_to_le32(src_dword);	/* add in the new bits */
2839 
2840 	/* put it all back */
2841 	memcpy(dest, &dest_dword, sizeof(dest_dword));
2842 }
2843 
2844 /**
2845  * ice_write_qword - write a qword to a packed context structure
2846  * @src_ctx:  the context structure to read from
2847  * @dest_ctx: the context to be written to
2848  * @ce_info:  a description of the struct to be filled
2849  */
2850 static void
2851 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
2852 {
2853 	u64 src_qword, mask;
2854 	__le64 dest_qword;
2855 	u8 *from, *dest;
2856 	u16 shift_width;
2857 
2858 	/* copy from the next struct field */
2859 	from = src_ctx + ce_info->offset;
2860 
2861 	/* prepare the bits and mask */
2862 	shift_width = ce_info->lsb % 8;
2863 
2864 	/* if the field width is exactly 64 on an x86 machine, then the shift
2865 	 * operation will not work because the SHL instruction's shift count is
2866 	 * masked to 6 bits so the shift will do nothing
2867 	 */
2868 	if (ce_info->width < 64)
2869 		mask = BIT_ULL(ce_info->width) - 1;
2870 	else
2871 		mask = (u64)~0;
2872 
2873 	/* don't swizzle the bits until after the mask because the mask bits
2874 	 * will be in a different bit position on big endian machines
2875 	 */
2876 	src_qword = *(u64 *)from;
2877 	src_qword &= mask;
2878 
2879 	/* shift to correct alignment */
2880 	mask <<= shift_width;
2881 	src_qword <<= shift_width;
2882 
2883 	/* get the current bits from the target bit string */
2884 	dest = dest_ctx + (ce_info->lsb / 8);
2885 
2886 	memcpy(&dest_qword, dest, sizeof(dest_qword));
2887 
2888 	dest_qword &= ~(cpu_to_le64(mask));	/* get the bits not changing */
2889 	dest_qword |= cpu_to_le64(src_qword);	/* add in the new bits */
2890 
2891 	/* put it all back */
2892 	memcpy(dest, &dest_qword, sizeof(dest_qword));
2893 }
2894 
2895 /**
2896  * ice_set_ctx - set context bits in packed structure
2897  * @src_ctx:  pointer to a generic non-packed context structure
2898  * @dest_ctx: pointer to memory for the packed structure
2899  * @ce_info:  a description of the structure to be transformed
2900  */
2901 enum ice_status
2902 ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
2903 {
2904 	int f;
2905 
2906 	for (f = 0; ce_info[f].width; f++) {
2907 		/* We have to deal with each element of the FW response
2908 		 * using the correct size so that we are correct regardless
2909 		 * of the endianness of the machine.
2910 		 */
2911 		switch (ce_info[f].size_of) {
2912 		case sizeof(u8):
2913 			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
2914 			break;
2915 		case sizeof(u16):
2916 			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
2917 			break;
2918 		case sizeof(u32):
2919 			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
2920 			break;
2921 		case sizeof(u64):
2922 			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
2923 			break;
2924 		default:
2925 			return ICE_ERR_INVAL_SIZE;
2926 		}
2927 	}
2928 
2929 	return 0;
2930 }
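
/* Usage sketch (hypothetical layout, for illustration only): the ce_info
 * array describes one field per entry and must end with an all-zero
 * terminator, since a width of 0 is what stops the loop above. The struct
 * and field names here are invented for the example.
 *
 *	static const struct ice_ctx_ele my_ctx_info[] = {
 *		{ .offset = offsetof(struct my_ctx, head),
 *		  .size_of = sizeof(u16), .width = 13, .lsb = 0 },
 *		{ 0 }
 *	};
 *
 *	status = ice_set_ctx((u8 *)&my_ctx, packed_buf, my_ctx_info);
 */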
2931 
2932 /**
2933  * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
2934  * @hw: pointer to the HW struct
2935  * @vsi_handle: software VSI handle
2936  * @tc: TC number
2937  * @q_handle: software queue handle
2938  */
2939 static struct ice_q_ctx *
2940 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
2941 {
2942 	struct ice_vsi_ctx *vsi;
2943 	struct ice_q_ctx *q_ctx;
2944 
2945 	vsi = ice_get_vsi_ctx(hw, vsi_handle);
2946 	if (!vsi)
2947 		return NULL;
2948 	if (q_handle >= vsi->num_lan_q_entries[tc])
2949 		return NULL;
2950 	if (!vsi->lan_q_ctx[tc])
2951 		return NULL;
2952 	q_ctx = vsi->lan_q_ctx[tc];
2953 	return &q_ctx[q_handle];
2954 }
2955 
2956 /**
2957  * ice_ena_vsi_txq
2958  * @pi: port information structure
2959  * @vsi_handle: software VSI handle
2960  * @tc: TC number
2961  * @q_handle: software queue handle
2962  * @num_qgrps: Number of added queue groups
2963  * @buf: list of queue groups to be added
2964  * @buf_size: size of buffer for indirect command
2965  * @cd: pointer to command details structure or NULL
2966  *
2967  * This function adds one LAN queue
2968  */
2969 enum ice_status
2970 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
2971 		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
2972 		struct ice_sq_cd *cd)
2973 {
2974 	struct ice_aqc_txsched_elem_data node = { 0 };
2975 	struct ice_sched_node *parent;
2976 	struct ice_q_ctx *q_ctx;
2977 	enum ice_status status;
2978 	struct ice_hw *hw;
2979 
2980 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
2981 		return ICE_ERR_CFG;
2982 
2983 	if (num_qgrps > 1 || buf->num_txqs > 1)
2984 		return ICE_ERR_MAX_LIMIT;
2985 
2986 	hw = pi->hw;
2987 
2988 	if (!ice_is_vsi_valid(hw, vsi_handle))
2989 		return ICE_ERR_PARAM;
2990 
2991 	mutex_lock(&pi->sched_lock);
2992 
2993 	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
2994 	if (!q_ctx) {
2995 		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
2996 			  q_handle);
2997 		status = ICE_ERR_PARAM;
2998 		goto ena_txq_exit;
2999 	}
3000 
3001 	/* find a parent node */
3002 	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
3003 					    ICE_SCHED_NODE_OWNER_LAN);
3004 	if (!parent) {
3005 		status = ICE_ERR_PARAM;
3006 		goto ena_txq_exit;
3007 	}
3008 
3009 	buf->parent_teid = parent->info.node_teid;
3010 	node.parent_teid = parent->info.node_teid;
3011 	/* Mark the values in the "generic" section as valid. The default
3012 	 * value in the "generic" section is zero. This means that:
3013 	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
3014 	 * - 0 priority among siblings, indicated by Bit 1-3.
3015 	 * - WFQ, indicated by Bit 4.
3016 	 * - 0 Adjustment value is used in PSM credit update flow, indicated by
3017 	 * Bit 5-6.
3018 	 * - Bit 7 is reserved.
3019 	 * Without setting the generic section as valid in valid_sections, the
3020 	 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
3021 	 */
3022 	buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;
3023 
3024 	/* add the LAN queue */
3025 	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
3026 	if (status) {
3027 		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
3028 			  le16_to_cpu(buf->txqs[0].txq_id),
3029 			  hw->adminq.sq_last_status);
3030 		goto ena_txq_exit;
3031 	}
3032 
3033 	node.node_teid = buf->txqs[0].q_teid;
3034 	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
3035 	q_ctx->q_handle = q_handle;
3036 
3037 	/* add a leaf node into the scheduler tree queue layer */
3038 	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
3039 
3040 ena_txq_exit:
3041 	mutex_unlock(&pi->sched_lock);
3042 	return status;
3043 }
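
/* Usage sketch (illustrative): enabling a single Tx queue; qg_buf is a
 * caller-built struct ice_aqc_add_tx_qgrp describing exactly one queue,
 * which is the limit enforced above, and its contents are assumptions here.
 *
 *	status = ice_ena_vsi_txq(pi, vsi_handle, tc, q_handle, 1, qg_buf,
 *				 qg_buf_size, NULL);
 */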
3044 
3045 /**
3046  * ice_dis_vsi_txq
3047  * @pi: port information structure
3048  * @vsi_handle: software VSI handle
3049  * @tc: TC number
3050  * @num_queues: number of queues
3051  * @q_handles: pointer to software queue handle array
3052  * @q_ids: pointer to the q_id array
3053  * @q_teids: pointer to queue node teids
3054  * @rst_src: if called due to reset, specifies the reset source
3055  * @vmvf_num: the relative VM or VF number that is undergoing the reset
3056  * @cd: pointer to command details structure or NULL
3057  *
3058  * This function removes queues and their corresponding nodes in SW DB
3059  */
3060 enum ice_status
3061 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
3062 		u16 *q_handles, u16 *q_ids, u32 *q_teids,
3063 		enum ice_disq_rst_src rst_src, u16 vmvf_num,
3064 		struct ice_sq_cd *cd)
3065 {
3066 	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
3067 	struct ice_aqc_dis_txq_item qg_list;
3068 	struct ice_q_ctx *q_ctx;
3069 	u16 i;
3070 
3071 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
3072 		return ICE_ERR_CFG;
3073 
3074 	if (!num_queues) {
3075 		/* if the queues are already disabled but the disable queue
3076 		 * command still has to be sent to complete the VF reset,
3077 		 * then call ice_aq_dis_lan_txq without any queue information
3078 		 */
3079 		if (rst_src)
3080 			return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src,
3081 						  vmvf_num, NULL);
3082 		return ICE_ERR_CFG;
3083 	}
3084 
3085 	mutex_lock(&pi->sched_lock);
3086 
3087 	for (i = 0; i < num_queues; i++) {
3088 		struct ice_sched_node *node;
3089 
3090 		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
3091 		if (!node)
3092 			continue;
3093 		q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handles[i]);
3094 		if (!q_ctx) {
3095 			ice_debug(pi->hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
3096 				  q_handles[i]);
3097 			continue;
3098 		}
3099 		if (q_ctx->q_handle != q_handles[i]) {
3100 			ice_debug(pi->hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
3101 				  q_ctx->q_handle, q_handles[i]);
3102 			continue;
3103 		}
3104 		qg_list.parent_teid = node->info.parent_teid;
3105 		qg_list.num_qs = 1;
3106 		qg_list.q_id[0] = cpu_to_le16(q_ids[i]);
3107 		status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
3108 					    sizeof(qg_list), rst_src, vmvf_num,
3109 					    cd);
3110 
3111 		if (status)
3112 			break;
3113 		ice_free_sched_node(pi, node);
3114 		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
3115 	}
3116 	mutex_unlock(&pi->sched_lock);
3117 	return status;
3118 }
3119 
3120 /**
3121  * ice_cfg_vsi_qs - configure the new/existing VSI queues
3122  * @pi: port information structure
3123  * @vsi_handle: software VSI handle
3124  * @tc_bitmap: TC bitmap
3125  * @maxqs: max queues array per TC
3126  * @owner: LAN or RDMA
3127  *
3128  * This function adds/updates the VSI queues per TC.
3129  */
3130 static enum ice_status
3131 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
3132 	       u16 *maxqs, u8 owner)
3133 {
3134 	enum ice_status status = 0;
3135 	u8 i;
3136 
3137 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
3138 		return ICE_ERR_CFG;
3139 
3140 	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
3141 		return ICE_ERR_PARAM;
3142 
3143 	mutex_lock(&pi->sched_lock);
3144 
3145 	ice_for_each_traffic_class(i) {
3146 		/* configuration is possible only if TC node is present */
3147 		if (!ice_sched_get_tc_node(pi, i))
3148 			continue;
3149 
3150 		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
3151 					   ice_is_tc_ena(tc_bitmap, i));
3152 		if (status)
3153 			break;
3154 	}
3155 
3156 	mutex_unlock(&pi->sched_lock);
3157 	return status;
3158 }
3159 
3160 /**
3161  * ice_cfg_vsi_lan - configure VSI LAN queues
3162  * @pi: port information structure
3163  * @vsi_handle: software VSI handle
3164  * @tc_bitmap: TC bitmap
3165  * @max_lanqs: max LAN queues array per TC
3166  *
3167  * This function adds/updates the VSI LAN queues per TC.
3168  */
3169 enum ice_status
3170 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
3171 		u16 *max_lanqs)
3172 {
3173 	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
3174 			      ICE_SCHED_NODE_OWNER_LAN);
3175 }
3176 
3177 /**
3178  * ice_replay_pre_init - replay pre initialization
3179  * @hw: pointer to the HW struct
3180  *
3181  * Initializes required config data for VSI, FD, ACL, and RSS before replay.
3182  */
3183 static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
3184 {
3185 	struct ice_switch_info *sw = hw->switch_info;
3186 	u8 i;
3187 
3188 	/* Delete old entries from replay filter list head if there is any */
3189 	ice_rm_all_sw_replay_rule_info(hw);
3190 	/* At the start of replay, move entries into the replay_rules list;
3191 	 * this allows rule entries to be added back to the filt_rules list,
3192 	 * which is the operational list.
3193 	 */
3194 	for (i = 0; i < ICE_SW_LKUP_LAST; i++)
3195 		list_replace_init(&sw->recp_list[i].filt_rules,
3196 				  &sw->recp_list[i].filt_replay_rules);
3197 
3198 	return 0;
3199 }
3200 
3201 /**
3202  * ice_replay_vsi - replay VSI configuration
3203  * @hw: pointer to the HW struct
3204  * @vsi_handle: driver VSI handle
3205  *
3206  * Restore all VSI configuration after reset. It is required to call this
3207  * function with main VSI first.
3208  */
3209 enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
3210 {
3211 	enum ice_status status;
3212 
3213 	if (!ice_is_vsi_valid(hw, vsi_handle))
3214 		return ICE_ERR_PARAM;
3215 
3216 	/* Replay pre-initialization if there is any */
3217 	if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
3218 		status = ice_replay_pre_init(hw);
3219 		if (status)
3220 			return status;
3221 	}
3222 
3223 	/* Replay per VSI all filters */
3224 	status = ice_replay_vsi_all_fltr(hw, vsi_handle);
3225 	return status;
3226 }
3227 
3228 /**
3229  * ice_replay_post - post replay configuration cleanup
3230  * @hw: pointer to the HW struct
3231  *
3232  * Post replay cleanup.
3233  */
3234 void ice_replay_post(struct ice_hw *hw)
3235 {
3236 	/* Delete old entries from replay filter list head */
3237 	ice_rm_all_sw_replay_rule_info(hw);
3238 }
3239 
3240 /**
3241  * ice_stat_update40 - read 40 bit stat from the chip and update stat values
3242  * @hw: ptr to the hardware info
3243  * @hireg: high 32 bit HW register to read from
3244  * @loreg: low 32 bit HW register to read from
3245  * @prev_stat_loaded: bool to specify if previous stats are loaded
3246  * @prev_stat: ptr to previous loaded stat value
3247  * @cur_stat: ptr to current stat value
3248  */
3249 void
3250 ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
3251 		  bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat)
3252 {
3253 	u64 new_data;
3254 
3255 	new_data = rd32(hw, loreg);
3256 	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
3257 
3258 	/* device stats are not reset at PFR, they likely will not be zeroed
3259 	 * when the driver starts. So save the first values read and use them as
3260 	 * offsets to be subtracted from the raw values in order to report stats
3261 	 * that count from zero.
3262 	 */
3263 	if (!prev_stat_loaded)
3264 		*prev_stat = new_data;
3265 	if (new_data >= *prev_stat)
3266 		*cur_stat = new_data - *prev_stat;
3267 	else
3268 		/* to manage the potential roll-over */
3269 		*cur_stat = (new_data + BIT_ULL(40)) - *prev_stat;
3270 	*cur_stat &= 0xFFFFFFFFFFULL;
3271 }
3272 
3273 /**
3274  * ice_stat_update32 - read 32 bit stat from the chip and update stat values
3275  * @hw: ptr to the hardware info
3276  * @reg: HW register to read from
3277  * @prev_stat_loaded: bool to specify if previous stats are loaded
3278  * @prev_stat: ptr to previous loaded stat value
3279  * @cur_stat: ptr to current stat value
3280  */
3281 void
3282 ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
3283 		  u64 *prev_stat, u64 *cur_stat)
3284 {
3285 	u32 new_data;
3286 
3287 	new_data = rd32(hw, reg);
3288 
3289 	/* device stats are not reset at PFR, they likely will not be zeroed
3290 	 * when the driver starts. So save the first values read and use them as
3291 	 * offsets to be subtracted from the raw values in order to report stats
3292 	 * that count from zero.
3293 	 */
3294 	if (!prev_stat_loaded)
3295 		*prev_stat = new_data;
3296 	if (new_data >= *prev_stat)
3297 		*cur_stat = new_data - *prev_stat;
3298 	else
3299 		/* to manage the potential roll-over */
3300 		*cur_stat = (new_data + BIT_ULL(32)) - *prev_stat;
3301 }
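
/* Usage sketch (illustrative): a periodic stats refresh keeps a previous
 * snapshot per counter; the register arguments and the stat fields below
 * are hypothetical placeholders, not names from this driver.
 *
 *	ice_stat_update40(hw, hi_reg, lo_reg, prev_loaded,
 *			  &prev->rx_bytes, &cur->rx_bytes);
 *	ice_stat_update32(hw, err_reg, prev_loaded,
 *			  &prev->rx_errors, &cur->rx_errors);
 */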
3302 
3303 /**
3304  * ice_sched_query_elem - query element information from HW
3305  * @hw: pointer to the HW struct
3306  * @node_teid: node TEID to be queried
3307  * @buf: buffer to element information
3308  *
3309  * This function queries HW element information
3310  */
3311 enum ice_status
3312 ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
3313 		     struct ice_aqc_get_elem *buf)
3314 {
3315 	u16 buf_size, num_elem_ret = 0;
3316 	enum ice_status status;
3317 
3318 	buf_size = sizeof(*buf);
3319 	memset(buf, 0, buf_size);
3320 	buf->generic[0].node_teid = cpu_to_le32(node_teid);
3321 	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
3322 					  NULL);
3323 	if (status || num_elem_ret != 1)
3324 		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
3325 	return status;
3326 }
3327