xref: /openbmc/linux/drivers/net/ethernet/intel/ice/ice_common.c (revision 458a445deb9c9fb13cec46fe9b179a84d2ff514f)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 #include "ice_common.h"
5 #include "ice_sched.h"
6 #include "ice_adminq_cmd.h"
7 
8 #define ICE_PF_RESET_WAIT_COUNT	200
9 
10 #define ICE_PROG_FLEX_ENTRY(hw, rxdid, mdid, idx) \
11 	wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(rxdid), \
12 	     ((ICE_RX_OPC_MDID << \
13 	       GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_S) & \
14 	      GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_M) | \
15 	     (((mdid) << GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_S) & \
16 	      GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_M))
17 
18 #define ICE_PROG_FLG_ENTRY(hw, rxdid, flg_0, flg_1, flg_2, flg_3, idx) \
19 	wr32((hw), GLFLXP_RXDID_FLAGS(rxdid, idx), \
20 	     (((flg_0) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & \
21 	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) | \
22 	     (((flg_1) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) & \
23 	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M) | \
24 	     (((flg_2) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S) & \
25 	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M) | \
26 	     (((flg_3) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S) & \
27 	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M))
28 
29 /**
30  * ice_set_mac_type - Sets MAC type
31  * @hw: pointer to the HW structure
32  *
33  * This function sets the MAC type of the adapter based on the
34  * vendor ID and device ID stored in the hw structure.
35  */
36 static enum ice_status ice_set_mac_type(struct ice_hw *hw)
37 {
38 	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
39 		return ICE_ERR_DEVICE_NOT_SUPPORTED;
40 
41 	hw->mac_type = ICE_MAC_GENERIC;
42 	return 0;
43 }
44 
45 /**
46  * ice_dev_onetime_setup - Temporary HW/FW workarounds
47  * @hw: pointer to the HW structure
48  *
49  * This function provides temporary workarounds for certain issues
50  * that are expected to be fixed in the HW/FW.
51  */
52 void ice_dev_onetime_setup(struct ice_hw *hw)
53 {
54 	/* configure Rx - set non pxe mode */
55 	wr32(hw, GLLAN_RCTL_0, 0x1);
56 
57 #define MBX_PF_VT_PFALLOC	0x00231E80
58 	/* set VFs per PF */
59 	wr32(hw, MBX_PF_VT_PFALLOC, rd32(hw, PF_VT_PFALLOC_HIF));
60 }
61 
62 /**
63  * ice_clear_pf_cfg - Clear PF configuration
64  * @hw: pointer to the hardware structure
65  *
66  * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
67  * configuration, flow director filters, etc.).
68  */
69 enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
70 {
71 	struct ice_aq_desc desc;
72 
73 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
74 
75 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
76 }
77 
78 /**
79  * ice_aq_manage_mac_read - manage MAC address read command
80  * @hw: pointer to the hw struct
81  * @buf: a virtual buffer to hold the manage MAC read response
82  * @buf_size: Size of the virtual buffer
83  * @cd: pointer to command details structure or NULL
84  *
85  * This function is used to return per PF station MAC address (0x0107).
86  * NOTE: Upon successful completion of this command, MAC address information
87  * is returned in user specified buffer. Please interpret user specified
88  * buffer as "manage_mac_read" response.
89  * Response such as various MAC addresses are stored in HW struct (port.mac)
90  * ice_aq_discover_caps is expected to be called before this function is called.
91  */
92 static enum ice_status
93 ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
94 		       struct ice_sq_cd *cd)
95 {
96 	struct ice_aqc_manage_mac_read_resp *resp;
97 	struct ice_aqc_manage_mac_read *cmd;
98 	struct ice_aq_desc desc;
99 	enum ice_status status;
100 	u16 flags;
101 	u8 i;
102 
103 	cmd = &desc.params.mac_read;
104 
105 	if (buf_size < sizeof(*resp))
106 		return ICE_ERR_BUF_TOO_SHORT;
107 
108 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
109 
110 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
111 	if (status)
112 		return status;
113 
114 	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
115 	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
116 
117 	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
118 		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
119 		return ICE_ERR_CFG;
120 	}
121 
122 	/* A single port can report up to two (LAN and WoL) addresses */
123 	for (i = 0; i < cmd->num_addr; i++)
124 		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
125 			ether_addr_copy(hw->port_info->mac.lan_addr,
126 					resp[i].mac_addr);
127 			ether_addr_copy(hw->port_info->mac.perm_addr,
128 					resp[i].mac_addr);
129 			break;
130 		}
131 
132 	return 0;
133 }
134 
135 /**
136  * ice_aq_get_phy_caps - returns PHY capabilities
137  * @pi: port information structure
138  * @qual_mods: report qualified modules
139  * @report_mode: report mode capabilities
140  * @pcaps: structure for PHY capabilities to be filled
141  * @cd: pointer to command details structure or NULL
142  *
143  * Returns the various PHY capabilities supported on the Port (0x0600)
144  */
145 enum ice_status
146 ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
147 		    struct ice_aqc_get_phy_caps_data *pcaps,
148 		    struct ice_sq_cd *cd)
149 {
150 	struct ice_aqc_get_phy_caps *cmd;
151 	u16 pcaps_size = sizeof(*pcaps);
152 	struct ice_aq_desc desc;
153 	enum ice_status status;
154 
155 	cmd = &desc.params.get_phy;
156 
157 	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
158 		return ICE_ERR_PARAM;
159 
160 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
161 
162 	if (qual_mods)
163 		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);
164 
165 	cmd->param0 |= cpu_to_le16(report_mode);
166 	status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);
167 
168 	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP)
169 		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
170 
171 	return status;
172 }
173 
174 /**
175  * ice_get_media_type - Gets media type
176  * @pi: port information structure
177  */
178 static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
179 {
180 	struct ice_link_status *hw_link_info;
181 
182 	if (!pi)
183 		return ICE_MEDIA_UNKNOWN;
184 
185 	hw_link_info = &pi->phy.link_info;
186 
187 	if (hw_link_info->phy_type_low) {
188 		switch (hw_link_info->phy_type_low) {
189 		case ICE_PHY_TYPE_LOW_1000BASE_SX:
190 		case ICE_PHY_TYPE_LOW_1000BASE_LX:
191 		case ICE_PHY_TYPE_LOW_10GBASE_SR:
192 		case ICE_PHY_TYPE_LOW_10GBASE_LR:
193 		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
194 		case ICE_PHY_TYPE_LOW_25GBASE_SR:
195 		case ICE_PHY_TYPE_LOW_25GBASE_LR:
196 		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
197 		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
198 		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
199 			return ICE_MEDIA_FIBER;
200 		case ICE_PHY_TYPE_LOW_100BASE_TX:
201 		case ICE_PHY_TYPE_LOW_1000BASE_T:
202 		case ICE_PHY_TYPE_LOW_2500BASE_T:
203 		case ICE_PHY_TYPE_LOW_5GBASE_T:
204 		case ICE_PHY_TYPE_LOW_10GBASE_T:
205 		case ICE_PHY_TYPE_LOW_25GBASE_T:
206 			return ICE_MEDIA_BASET;
207 		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
208 		case ICE_PHY_TYPE_LOW_25GBASE_CR:
209 		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
210 		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
211 		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
212 			return ICE_MEDIA_DA;
213 		case ICE_PHY_TYPE_LOW_1000BASE_KX:
214 		case ICE_PHY_TYPE_LOW_2500BASE_KX:
215 		case ICE_PHY_TYPE_LOW_2500BASE_X:
216 		case ICE_PHY_TYPE_LOW_5GBASE_KR:
217 		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
218 		case ICE_PHY_TYPE_LOW_25GBASE_KR:
219 		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
220 		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
221 		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
222 			return ICE_MEDIA_BACKPLANE;
223 		}
224 	}
225 
226 	return ICE_MEDIA_UNKNOWN;
227 }
228 
229 /**
230  * ice_aq_get_link_info
231  * @pi: port information structure
232  * @ena_lse: enable/disable LinkStatusEvent reporting
233  * @link: pointer to link status structure - optional
234  * @cd: pointer to command details structure or NULL
235  *
236  * Get Link Status (0x607). Returns the link status of the adapter.
237  */
238 static enum ice_status
239 ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
240 		     struct ice_link_status *link, struct ice_sq_cd *cd)
241 {
242 	struct ice_link_status *hw_link_info_old, *hw_link_info;
243 	struct ice_aqc_get_link_status_data link_data = { 0 };
244 	struct ice_aqc_get_link_status *resp;
245 	enum ice_media_type *hw_media_type;
246 	struct ice_fc_info *hw_fc_info;
247 	bool tx_pause, rx_pause;
248 	struct ice_aq_desc desc;
249 	enum ice_status status;
250 	u16 cmd_flags;
251 
252 	if (!pi)
253 		return ICE_ERR_PARAM;
254 	hw_link_info_old = &pi->phy.link_info_old;
255 	hw_media_type = &pi->phy.media_type;
256 	hw_link_info = &pi->phy.link_info;
257 	hw_fc_info = &pi->fc;
258 
259 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
260 	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
261 	resp = &desc.params.get_link_status;
262 	resp->cmd_flags = cpu_to_le16(cmd_flags);
263 	resp->lport_num = pi->lport;
264 
265 	status = ice_aq_send_cmd(pi->hw, &desc, &link_data, sizeof(link_data),
266 				 cd);
267 
268 	if (status)
269 		return status;
270 
271 	/* save off old link status information */
272 	*hw_link_info_old = *hw_link_info;
273 
274 	/* update current link status information */
275 	hw_link_info->link_speed = le16_to_cpu(link_data.link_speed);
276 	hw_link_info->phy_type_low = le64_to_cpu(link_data.phy_type_low);
277 	*hw_media_type = ice_get_media_type(pi);
278 	hw_link_info->link_info = link_data.link_info;
279 	hw_link_info->an_info = link_data.an_info;
280 	hw_link_info->ext_info = link_data.ext_info;
281 	hw_link_info->max_frame_size = le16_to_cpu(link_data.max_frame_size);
282 	hw_link_info->pacing = link_data.cfg & ICE_AQ_CFG_PACING_M;
283 
284 	/* update fc info */
285 	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
286 	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
287 	if (tx_pause && rx_pause)
288 		hw_fc_info->current_mode = ICE_FC_FULL;
289 	else if (tx_pause)
290 		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
291 	else if (rx_pause)
292 		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
293 	else
294 		hw_fc_info->current_mode = ICE_FC_NONE;
295 
296 	hw_link_info->lse_ena =
297 		!!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));
298 
299 	/* save link status information */
300 	if (link)
301 		*link = *hw_link_info;
302 
303 	/* flag cleared so calling functions don't call AQ again */
304 	pi->phy.get_link_info = false;
305 
306 	return status;
307 }
308 
309 /**
310  * ice_init_flex_flags
311  * @hw: pointer to the hardware structure
312  * @prof_id: Rx Descriptor Builder profile ID
313  *
314  * Function to initialize Rx flex flags
315  */
316 static void ice_init_flex_flags(struct ice_hw *hw, enum ice_rxdid prof_id)
317 {
318 	u8 idx = 0;
319 
320 	/* Flex-flag fields (0-2) are programmed with FLG64 bits with layout:
321 	 * flexiflags0[5:0] - TCP flags, is_packet_fragmented, is_packet_UDP_GRE
322 	 * flexiflags1[3:0] - Not used for flag programming
323 	 * flexiflags2[7:0] - Tunnel and VLAN types
324 	 * 2 invalid fields in last index
325 	 */
326 	switch (prof_id) {
327 	/* Rx flex flags are currently programmed for the NIC profiles only.
328 	 * Different flag bit programming configurations can be added per
329 	 * profile as needed.
330 	 */
331 	case ICE_RXDID_FLEX_NIC:
332 	case ICE_RXDID_FLEX_NIC_2:
333 		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_FRG,
334 				   ICE_RXFLG_UDP_GRE, ICE_RXFLG_PKT_DSI,
335 				   ICE_RXFLG_FIN, idx++);
336 		/* flex flag 1 is not used for flexi-flag programming, skipping
337 		 * these four FLG64 bits.
338 		 */
339 		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_SYN, ICE_RXFLG_RST,
340 				   ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++);
341 		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_DSI,
342 				   ICE_RXFLG_PKT_DSI, ICE_RXFLG_EVLAN_x8100,
343 				   ICE_RXFLG_EVLAN_x9100, idx++);
344 		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_VLAN_x8100,
345 				   ICE_RXFLG_TNL_VLAN, ICE_RXFLG_TNL_MAC,
346 				   ICE_RXFLG_TNL0, idx++);
347 		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2,
348 				   ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx);
349 		break;
350 
351 	default:
352 		ice_debug(hw, ICE_DBG_INIT,
353 			  "Flag programming for profile ID %d not supported\n",
354 			  prof_id);
355 	}
356 }
357 
358 /**
359  * ice_init_flex_flds
360  * @hw: pointer to the hardware structure
361  * @prof_id: Rx Descriptor Builder profile ID
362  *
363  * Function to initialize flex descriptors
364  */
365 static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id)
366 {
367 	enum ice_flex_rx_mdid mdid;
368 
369 	switch (prof_id) {
370 	case ICE_RXDID_FLEX_NIC:
371 	case ICE_RXDID_FLEX_NIC_2:
372 		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_LOW, 0);
373 		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_HIGH, 1);
374 		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_FLOW_ID_LOWER, 2);
375 
376 		mdid = (prof_id == ICE_RXDID_FLEX_NIC_2) ?
377 			ICE_RX_MDID_SRC_VSI : ICE_RX_MDID_FLOW_ID_HIGH;
378 
379 		ICE_PROG_FLEX_ENTRY(hw, prof_id, mdid, 3);
380 
381 		ice_init_flex_flags(hw, prof_id);
382 		break;
383 
384 	default:
385 		ice_debug(hw, ICE_DBG_INIT,
386 			  "Field init for profile ID %d not supported\n",
387 			  prof_id);
388 	}
389 }
390 
391 /**
392  * ice_init_fltr_mgmt_struct - initializes filter management list and locks
393  * @hw: pointer to the hw struct
394  */
395 static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
396 {
397 	struct ice_switch_info *sw;
398 
399 	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
400 				       sizeof(*hw->switch_info), GFP_KERNEL);
401 	sw = hw->switch_info;
402 
403 	if (!sw)
404 		return ICE_ERR_NO_MEMORY;
405 
406 	INIT_LIST_HEAD(&sw->vsi_list_map_head);
407 
408 	ice_init_def_sw_recp(hw);
409 
410 	return 0;
411 }
412 
413 /**
414  * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
415  * @hw: pointer to the hw struct
416  */
417 static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
418 {
419 	struct ice_switch_info *sw = hw->switch_info;
420 	struct ice_vsi_list_map_info *v_pos_map;
421 	struct ice_vsi_list_map_info *v_tmp_map;
422 	struct ice_sw_recipe *recps;
423 	u8 i;
424 
425 	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
426 				 list_entry) {
427 		list_del(&v_pos_map->list_entry);
428 		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
429 	}
430 	recps = hw->switch_info->recp_list;
431 	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
432 		struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
433 
434 		recps[i].root_rid = i;
435 		mutex_destroy(&recps[i].filt_rule_lock);
436 		list_for_each_entry_safe(lst_itr, tmp_entry,
437 					 &recps[i].filt_rules, list_entry) {
438 			list_del(&lst_itr->list_entry);
439 			devm_kfree(ice_hw_to_dev(hw), lst_itr);
440 		}
441 	}
442 	ice_rm_all_sw_replay_rule_info(hw);
443 	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
444 	devm_kfree(ice_hw_to_dev(hw), sw);
445 }
446 
447 #define ICE_FW_LOG_DESC_SIZE(n)	(sizeof(struct ice_aqc_fw_logging_data) + \
448 	(((n) - 1) * sizeof(((struct ice_aqc_fw_logging_data *)0)->entry)))
449 #define ICE_FW_LOG_DESC_SIZE_MAX	\
450 	ICE_FW_LOG_DESC_SIZE(ICE_AQC_FW_LOG_ID_MAX)
451 
452 /**
453  * ice_cfg_fw_log - configure FW logging
454  * @hw: pointer to the hw struct
455  * @enable: enable certain FW logging events if true, disable all if false
456  *
457  * This function enables/disables the FW logging via Rx CQ events and a UART
458  * port based on predetermined configurations. FW logging via the Rx CQ can be
459  * enabled/disabled for individual PF's. However, FW logging via the UART can
460  * only be enabled/disabled for all PFs on the same device.
461  *
462  * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
463  * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
464  * before initializing the device.
465  *
466  * When re/configuring FW logging, callers need to update the "cfg" elements of
467  * the hw->fw_log.evnts array with the desired logging event configurations for
468  * modules of interest. When disabling FW logging completely, the callers can
469  * just pass false in the "enable" parameter. On completion, the function will
470  * update the "cur" element of the hw->fw_log.evnts array with the resulting
471  * logging event configurations of the modules that are being re/configured. FW
472  * logging modules that are not part of a reconfiguration operation retain their
473  * previous states.
474  *
475  * Before resetting the device, it is recommended that the driver disables FW
476  * logging before shutting down the control queue. When disabling FW logging
477  * ("enable" = false), the latest configurations of FW logging events stored in
478  * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
479  * a device reset.
480  *
481  * When enabling FW logging to emit log messages via the Rx CQ during the
482  * device's initialization phase, a mechanism alternative to interrupt handlers
483  * needs to be used to extract FW log messages from the Rx CQ periodically and
484  * to prevent the Rx CQ from being full and stalling other types of control
485  * messages from FW to SW. Interrupts are typically disabled during the device's
486  * initialization phase.
487  */
488 static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
489 {
490 	struct ice_aqc_fw_logging_data *data = NULL;
491 	struct ice_aqc_fw_logging *cmd;
492 	enum ice_status status = 0;
493 	u16 i, chgs = 0, len = 0;
494 	struct ice_aq_desc desc;
495 	u8 actv_evnts = 0;
496 	void *buf = NULL;
497 
498 	if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
499 		return 0;
500 
501 	/* Disable FW logging only when the control queue is still responsive */
502 	if (!enable &&
503 	    (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
504 		return 0;
505 
506 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
507 	cmd = &desc.params.fw_logging;
508 
509 	/* Indicate which controls are valid */
510 	if (hw->fw_log.cq_en)
511 		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;
512 
513 	if (hw->fw_log.uart_en)
514 		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;
515 
516 	if (enable) {
517 		/* Fill in an array of entries with FW logging modules and
518 		 * logging events being reconfigured.
519 		 */
520 		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
521 			u16 val;
522 
523 			/* Keep track of enabled event types */
524 			actv_evnts |= hw->fw_log.evnts[i].cfg;
525 
526 			if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
527 				continue;
528 
529 			if (!data) {
530 				data = devm_kzalloc(ice_hw_to_dev(hw),
531 						    ICE_FW_LOG_DESC_SIZE_MAX,
532 						    GFP_KERNEL);
533 				if (!data)
534 					return ICE_ERR_NO_MEMORY;
535 			}
536 
537 			val = i << ICE_AQC_FW_LOG_ID_S;
538 			val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
539 			data->entry[chgs++] = cpu_to_le16(val);
540 		}
541 
542 		/* Only enable FW logging if at least one module is specified.
543 		 * If FW logging is currently enabled but all modules are not
544 		 * enabled to emit log messages, disable FW logging altogether.
545 		 */
546 		if (actv_evnts) {
547 			/* Leave if there is effectively no change */
548 			if (!chgs)
549 				goto out;
550 
551 			if (hw->fw_log.cq_en)
552 				cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;
553 
554 			if (hw->fw_log.uart_en)
555 				cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;
556 
557 			buf = data;
558 			len = ICE_FW_LOG_DESC_SIZE(chgs);
559 			desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
560 		}
561 	}
562 
563 	status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
564 	if (!status) {
565 		/* Update the current configuration to reflect events enabled.
566 		 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
567 		 * logging mode is enabled for the device. They do not reflect
568 		 * actual modules being enabled to emit log messages. So, their
569 		 * values remain unchanged even when all modules are disabled.
570 		 */
571 		u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;
572 
573 		hw->fw_log.actv_evnts = actv_evnts;
574 		for (i = 0; i < cnt; i++) {
575 			u16 v, m;
576 
577 			if (!enable) {
578 				/* When disabling all FW logging events as part
579 				 * of device's de-initialization, the original
580 				 * configurations are retained, and can be used
581 				 * to reconfigure FW logging later if the device
582 				 * is re-initialized.
583 				 */
584 				hw->fw_log.evnts[i].cur = 0;
585 				continue;
586 			}
587 
588 			v = le16_to_cpu(data->entry[i]);
589 			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
590 			hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
591 		}
592 	}
593 
594 out:
595 	if (data)
596 		devm_kfree(ice_hw_to_dev(hw), data);
597 
598 	return status;
599 }
600 
601 /**
602  * ice_output_fw_log
603  * @hw: pointer to the hw struct
604  * @desc: pointer to the AQ message descriptor
605  * @buf: pointer to the buffer accompanying the AQ message
606  *
607  * Formats a FW Log message and outputs it via the standard driver logs.
608  */
609 void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
610 {
611 	ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg Start ]\n");
612 	ice_debug_array(hw, ICE_DBG_AQ_MSG, 16, 1, (u8 *)buf,
613 			le16_to_cpu(desc->datalen));
614 	ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg End ]\n");
615 }
616 
617 /**
618  * ice_get_itr_intrl_gran - determine int/intrl granularity
619  * @hw: pointer to the hw struct
620  *
621  * Determines the itr/intrl granularities based on the maximum aggregate
622  * bandwidth according to the device's configuration during power-on.
623  */
624 static enum ice_status ice_get_itr_intrl_gran(struct ice_hw *hw)
625 {
626 	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
627 			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
628 			GL_PWR_MODE_CTL_CAR_MAX_BW_S;
629 
630 	switch (max_agg_bw) {
631 	case ICE_MAX_AGG_BW_200G:
632 	case ICE_MAX_AGG_BW_100G:
633 	case ICE_MAX_AGG_BW_50G:
634 		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
635 		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
636 		break;
637 	case ICE_MAX_AGG_BW_25G:
638 		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
639 		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
640 		break;
641 	default:
642 		ice_debug(hw, ICE_DBG_INIT,
643 			  "Failed to determine itr/intrl granularity\n");
644 		return ICE_ERR_CFG;
645 	}
646 
647 	return 0;
648 }
649 
650 /**
651  * ice_init_hw - main hardware initialization routine
652  * @hw: pointer to the hardware structure
653  */
654 enum ice_status ice_init_hw(struct ice_hw *hw)
655 {
656 	struct ice_aqc_get_phy_caps_data *pcaps;
657 	enum ice_status status;
658 	u16 mac_buf_len;
659 	void *mac_buf;
660 
661 	/* Set MAC type based on DeviceID */
662 	status = ice_set_mac_type(hw);
663 	if (status)
664 		return status;
665 
666 	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
667 			 PF_FUNC_RID_FUNC_NUM_M) >>
668 		PF_FUNC_RID_FUNC_NUM_S;
669 
670 	status = ice_reset(hw, ICE_RESET_PFR);
671 	if (status)
672 		return status;
673 
674 	status = ice_get_itr_intrl_gran(hw);
675 	if (status)
676 		return status;
677 
678 	status = ice_init_all_ctrlq(hw);
679 	if (status)
680 		goto err_unroll_cqinit;
681 
682 	/* Enable FW logging. Not fatal if this fails. */
683 	status = ice_cfg_fw_log(hw, true);
684 	if (status)
685 		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");
686 
687 	status = ice_clear_pf_cfg(hw);
688 	if (status)
689 		goto err_unroll_cqinit;
690 
691 	ice_clear_pxe_mode(hw);
692 
693 	status = ice_init_nvm(hw);
694 	if (status)
695 		goto err_unroll_cqinit;
696 
697 	status = ice_get_caps(hw);
698 	if (status)
699 		goto err_unroll_cqinit;
700 
701 	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
702 				     sizeof(*hw->port_info), GFP_KERNEL);
703 	if (!hw->port_info) {
704 		status = ICE_ERR_NO_MEMORY;
705 		goto err_unroll_cqinit;
706 	}
707 
708 	/* set the back pointer to hw */
709 	hw->port_info->hw = hw;
710 
711 	/* Initialize port_info struct with switch configuration data */
712 	status = ice_get_initial_sw_cfg(hw);
713 	if (status)
714 		goto err_unroll_alloc;
715 
716 	hw->evb_veb = true;
717 
718 	/* Query the allocated resources for tx scheduler */
719 	status = ice_sched_query_res_alloc(hw);
720 	if (status) {
721 		ice_debug(hw, ICE_DBG_SCHED,
722 			  "Failed to get scheduler allocated resources\n");
723 		goto err_unroll_alloc;
724 	}
725 
726 	/* Initialize port_info struct with scheduler data */
727 	status = ice_sched_init_port(hw->port_info);
728 	if (status)
729 		goto err_unroll_sched;
730 
731 	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
732 	if (!pcaps) {
733 		status = ICE_ERR_NO_MEMORY;
734 		goto err_unroll_sched;
735 	}
736 
737 	/* Initialize port_info struct with PHY capabilities */
738 	status = ice_aq_get_phy_caps(hw->port_info, false,
739 				     ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
740 	devm_kfree(ice_hw_to_dev(hw), pcaps);
741 	if (status)
742 		goto err_unroll_sched;
743 
744 	/* Initialize port_info struct with link information */
745 	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
746 	if (status)
747 		goto err_unroll_sched;
748 
749 	/* need a valid SW entry point to build a Tx tree */
750 	if (!hw->sw_entry_point_layer) {
751 		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
752 		status = ICE_ERR_CFG;
753 		goto err_unroll_sched;
754 	}
755 
756 	status = ice_init_fltr_mgmt_struct(hw);
757 	if (status)
758 		goto err_unroll_sched;
759 
760 	ice_dev_onetime_setup(hw);
761 
762 	/* Get MAC information */
763 	/* A single port can report up to two (LAN and WoL) addresses */
764 	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
765 			       sizeof(struct ice_aqc_manage_mac_read_resp),
766 			       GFP_KERNEL);
767 	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
768 
769 	if (!mac_buf) {
770 		status = ICE_ERR_NO_MEMORY;
771 		goto err_unroll_fltr_mgmt_struct;
772 	}
773 
774 	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
775 	devm_kfree(ice_hw_to_dev(hw), mac_buf);
776 
777 	if (status)
778 		goto err_unroll_fltr_mgmt_struct;
779 
780 	ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC);
781 	ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC_2);
782 
783 	return 0;
784 
785 err_unroll_fltr_mgmt_struct:
786 	ice_cleanup_fltr_mgmt_struct(hw);
787 err_unroll_sched:
788 	ice_sched_cleanup_all(hw);
789 err_unroll_alloc:
790 	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
791 err_unroll_cqinit:
792 	ice_shutdown_all_ctrlq(hw);
793 	return status;
794 }
795 
796 /**
797  * ice_deinit_hw - unroll initialization operations done by ice_init_hw
798  * @hw: pointer to the hardware structure
799  */
800 void ice_deinit_hw(struct ice_hw *hw)
801 {
802 	ice_cleanup_fltr_mgmt_struct(hw);
803 
804 	ice_sched_cleanup_all(hw);
805 
806 	if (hw->port_info) {
807 		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
808 		hw->port_info = NULL;
809 	}
810 
811 	/* Attempt to disable FW logging before shutting down control queues */
812 	ice_cfg_fw_log(hw, false);
813 	ice_shutdown_all_ctrlq(hw);
814 
815 	/* Clear VSI contexts if not already cleared */
816 	ice_clear_all_vsi_ctx(hw);
817 }
818 
819 /**
820  * ice_check_reset - Check to see if a global reset is complete
821  * @hw: pointer to the hardware structure
822  */
823 enum ice_status ice_check_reset(struct ice_hw *hw)
824 {
825 	u32 cnt, reg = 0, grst_delay;
826 
827 	/* Poll for Device Active state in case a recent CORER, GLOBR,
828 	 * or EMPR has occurred. The grst delay value is in 100ms units.
829 	 * Add 1sec for outstanding AQ commands that can take a long time.
830 	 */
831 	grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
832 		      GLGEN_RSTCTL_GRSTDEL_S) + 10;
833 
834 	for (cnt = 0; cnt < grst_delay; cnt++) {
835 		mdelay(100);
836 		reg = rd32(hw, GLGEN_RSTAT);
837 		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
838 			break;
839 	}
840 
841 	if (cnt == grst_delay) {
842 		ice_debug(hw, ICE_DBG_INIT,
843 			  "Global reset polling failed to complete.\n");
844 		return ICE_ERR_RESET_FAILED;
845 	}
846 
847 #define ICE_RESET_DONE_MASK	(GLNVM_ULD_CORER_DONE_M | \
848 				 GLNVM_ULD_GLOBR_DONE_M)
849 
850 	/* Device is Active; check Global Reset processes are done */
851 	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
852 		reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK;
853 		if (reg == ICE_RESET_DONE_MASK) {
854 			ice_debug(hw, ICE_DBG_INIT,
855 				  "Global reset processes done. %d\n", cnt);
856 			break;
857 		}
858 		mdelay(10);
859 	}
860 
861 	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
862 		ice_debug(hw, ICE_DBG_INIT,
863 			  "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
864 			  reg);
865 		return ICE_ERR_RESET_FAILED;
866 	}
867 
868 	return 0;
869 }
870 
871 /**
872  * ice_pf_reset - Reset the PF
873  * @hw: pointer to the hardware structure
874  *
875  * If a global reset has been triggered, this function checks
876  * for its completion and then issues the PF reset
877  */
878 static enum ice_status ice_pf_reset(struct ice_hw *hw)
879 {
880 	u32 cnt, reg;
881 
882 	/* If at function entry a global reset was already in progress, i.e.
883 	 * state is not 'device active' or any of the reset done bits are not
884 	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
885 	 * global reset is done.
886 	 */
887 	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
888 	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
889 		/* poll on global reset currently in progress until done */
890 		if (ice_check_reset(hw))
891 			return ICE_ERR_RESET_FAILED;
892 
893 		return 0;
894 	}
895 
896 	/* Reset the PF */
897 	reg = rd32(hw, PFGEN_CTRL);
898 
899 	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
900 
901 	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
902 		reg = rd32(hw, PFGEN_CTRL);
903 		if (!(reg & PFGEN_CTRL_PFSWR_M))
904 			break;
905 
906 		mdelay(1);
907 	}
908 
909 	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
910 		ice_debug(hw, ICE_DBG_INIT,
911 			  "PF reset polling failed to complete.\n");
912 		return ICE_ERR_RESET_FAILED;
913 	}
914 
915 	return 0;
916 }
917 
918 /**
919  * ice_reset - Perform different types of reset
920  * @hw: pointer to the hardware structure
921  * @req: reset request
922  *
923  * This function triggers a reset as specified by the req parameter.
924  *
925  * Note:
926  * If anything other than a PF reset is triggered, PXE mode is restored.
927  * This has to be cleared using ice_clear_pxe_mode again, once the AQ
928  * interface has been restored in the rebuild flow.
929  */
930 enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
931 {
932 	u32 val = 0;
933 
934 	switch (req) {
935 	case ICE_RESET_PFR:
936 		return ice_pf_reset(hw);
937 	case ICE_RESET_CORER:
938 		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
939 		val = GLGEN_RTRIG_CORER_M;
940 		break;
941 	case ICE_RESET_GLOBR:
942 		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
943 		val = GLGEN_RTRIG_GLOBR_M;
944 		break;
945 	default:
946 		return ICE_ERR_PARAM;
947 	}
948 
949 	val |= rd32(hw, GLGEN_RTRIG);
950 	wr32(hw, GLGEN_RTRIG, val);
951 	ice_flush(hw);
952 
953 	/* wait for the FW to be ready */
954 	return ice_check_reset(hw);
955 }
956 
957 /**
958  * ice_copy_rxq_ctx_to_hw
959  * @hw: pointer to the hardware structure
960  * @ice_rxq_ctx: pointer to the rxq context
961  * @rxq_index: the index of the rx queue
962  *
963  * Copies rxq context from dense structure to hw register space
964  */
965 static enum ice_status
966 ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
967 {
968 	u8 i;
969 
970 	if (!ice_rxq_ctx)
971 		return ICE_ERR_BAD_PTR;
972 
973 	if (rxq_index > QRX_CTRL_MAX_INDEX)
974 		return ICE_ERR_PARAM;
975 
976 	/* Copy each dword separately to hw */
977 	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
978 		wr32(hw, QRX_CONTEXT(i, rxq_index),
979 		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
980 
981 		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
982 			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
983 	}
984 
985 	return 0;
986 }
987 
988 /* LAN Rx Queue Context */
989 static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
990 	/* Field		Width	LSB */
991 	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
992 	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
993 	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
994 	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
995 	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
996 	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
997 	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
998 	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
999 	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
1000 	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
1001 	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
1002 	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
1003 	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
1004 	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
1005 	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
1006 	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
1007 	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
1008 	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
1009 	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
1010 	{ 0 }
1011 };
1012 
1013 /**
1014  * ice_write_rxq_ctx
1015  * @hw: pointer to the hardware structure
1016  * @rlan_ctx: pointer to the rxq context
1017  * @rxq_index: the index of the rx queue
1018  *
1019  * Converts rxq context from sparse to dense structure and then writes
1020  * it to hw register space
1021  */
1022 enum ice_status
1023 ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
1024 		  u32 rxq_index)
1025 {
1026 	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
1027 
1028 	ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
1029 	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
1030 }
1031 
1032 /* LAN Tx Queue Context */
1033 const struct ice_ctx_ele ice_tlan_ctx_info[] = {
1034 				    /* Field			Width	LSB */
1035 	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
1036 	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
1037 	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
1038 	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
1039 	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
1040 	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
1041 	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
1042 	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
1043 	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
1044 	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
1045 	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
1046 	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
1047 	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
1048 	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
1049 	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
1050 	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
1051 	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
1052 	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
1053 	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
1054 	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
1055 	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
1056 	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
1057 	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
1058 	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
1059 	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
1060 	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
1061 	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		110,	171),
1062 	{ 0 }
1063 };
1064 
1065 /**
1066  * ice_debug_cq
1067  * @hw: pointer to the hardware structure
1068  * @mask: debug mask
1069  * @desc: pointer to control queue descriptor
1070  * @buf: pointer to command buffer
1071  * @buf_len: max length of buf
1072  *
1073  * Dumps debug log about control command with descriptor contents.
1074  */
1075 void ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc,
1076 		  void *buf, u16 buf_len)
1077 {
1078 	struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
1079 	u16 len;
1080 
1081 #ifndef CONFIG_DYNAMIC_DEBUG
1082 	if (!(mask & hw->debug_mask))
1083 		return;
1084 #endif
1085 
1086 	if (!desc)
1087 		return;
1088 
1089 	len = le16_to_cpu(cq_desc->datalen);
1090 
1091 	ice_debug(hw, mask,
1092 		  "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
1093 		  le16_to_cpu(cq_desc->opcode),
1094 		  le16_to_cpu(cq_desc->flags),
1095 		  le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
1096 	ice_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
1097 		  le32_to_cpu(cq_desc->cookie_high),
1098 		  le32_to_cpu(cq_desc->cookie_low));
1099 	ice_debug(hw, mask, "\tparam (0,1)  0x%08X 0x%08X\n",
1100 		  le32_to_cpu(cq_desc->params.generic.param0),
1101 		  le32_to_cpu(cq_desc->params.generic.param1));
1102 	ice_debug(hw, mask, "\taddr (h,l)   0x%08X 0x%08X\n",
1103 		  le32_to_cpu(cq_desc->params.generic.addr_high),
1104 		  le32_to_cpu(cq_desc->params.generic.addr_low));
1105 	if (buf && cq_desc->datalen != 0) {
1106 		ice_debug(hw, mask, "Buffer:\n");
1107 		if (buf_len < len)
1108 			len = buf_len;
1109 
1110 		ice_debug_array(hw, mask, 16, 1, (u8 *)buf, len);
1111 	}
1112 }
1113 
1114 /* FW Admin Queue command wrappers */
1115 
1116 /**
1117  * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
1118  * @hw: pointer to the hw struct
1119  * @desc: descriptor describing the command
1120  * @buf: buffer to use for indirect commands (NULL for direct commands)
1121  * @buf_size: size of buffer for indirect commands (0 for direct commands)
1122  * @cd: pointer to command details structure
1123  *
1124  * Helper function to send FW Admin Queue commands to the FW Admin Queue.
1125  */
1126 enum ice_status
1127 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1128 		u16 buf_size, struct ice_sq_cd *cd)
1129 {
1130 	return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
1131 }
1132 
1133 /**
1134  * ice_aq_get_fw_ver
1135  * @hw: pointer to the hw struct
1136  * @cd: pointer to command details structure or NULL
1137  *
1138  * Get the firmware version (0x0001) from the admin queue commands
1139  */
1140 enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1141 {
1142 	struct ice_aqc_get_ver *resp;
1143 	struct ice_aq_desc desc;
1144 	enum ice_status status;
1145 
1146 	resp = &desc.params.get_ver;
1147 
1148 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1149 
1150 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1151 
1152 	if (!status) {
1153 		hw->fw_branch = resp->fw_branch;
1154 		hw->fw_maj_ver = resp->fw_major;
1155 		hw->fw_min_ver = resp->fw_minor;
1156 		hw->fw_patch = resp->fw_patch;
1157 		hw->fw_build = le32_to_cpu(resp->fw_build);
1158 		hw->api_branch = resp->api_branch;
1159 		hw->api_maj_ver = resp->api_major;
1160 		hw->api_min_ver = resp->api_minor;
1161 		hw->api_patch = resp->api_patch;
1162 	}
1163 
1164 	return status;
1165 }
1166 
1167 /**
1168  * ice_aq_q_shutdown
1169  * @hw: pointer to the hw struct
1170  * @unloading: is the driver unloading itself
1171  *
1172  * Tell the Firmware that we're shutting down the AdminQ and whether
1173  * or not the driver is unloading as well (0x0003).
1174  */
1175 enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
1176 {
1177 	struct ice_aqc_q_shutdown *cmd;
1178 	struct ice_aq_desc desc;
1179 
1180 	cmd = &desc.params.q_shutdown;
1181 
1182 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1183 
1184 	if (unloading)
1185 		cmd->driver_unloading = cpu_to_le32(ICE_AQC_DRIVER_UNLOADING);
1186 
1187 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1188 }
1189 
1190 /**
1191  * ice_aq_req_res
1192  * @hw: pointer to the hw struct
1193  * @res: resource id
1194  * @access: access type
1195  * @sdp_number: resource number
1196  * @timeout: the maximum time in ms that the driver may hold the resource
1197  * @cd: pointer to command details structure or NULL
1198  *
1199  * Requests common resource using the admin queue commands (0x0008).
1200  * When attempting to acquire the Global Config Lock, the driver can
1201  * learn of three states:
1202  *  1) ICE_SUCCESS -        acquired lock, and can perform download package
1203  *  2) ICE_ERR_AQ_ERROR -   did not get lock, driver should fail to load
1204  *  3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
1205  *                          successfully downloaded the package; the driver does
1206  *                          not have to download the package and can continue
1207  *                          loading
1208  *
1209  * Note that if the caller is in an acquire lock, perform action, release lock
1210  * phase of operation, it is possible that the FW may detect a timeout and issue
1211  * a CORER. In this case, the driver will receive a CORER interrupt and will
1212  * have to determine its cause. The calling thread that is handling this flow
1213  * will likely get an error propagated back to it indicating the Download
1214  * Package, Update Package or the Release Resource AQ commands timed out.
1215  */
1216 static enum ice_status
1217 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1218 	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
1219 	       struct ice_sq_cd *cd)
1220 {
1221 	struct ice_aqc_req_res *cmd_resp;
1222 	struct ice_aq_desc desc;
1223 	enum ice_status status;
1224 
1225 	cmd_resp = &desc.params.res_owner;
1226 
1227 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
1228 
1229 	cmd_resp->res_id = cpu_to_le16(res);
1230 	cmd_resp->access_type = cpu_to_le16(access);
1231 	cmd_resp->res_number = cpu_to_le32(sdp_number);
1232 	cmd_resp->timeout = cpu_to_le32(*timeout);
1233 	*timeout = 0;
1234 
1235 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1236 
1237 	/* The completion specifies the maximum time in ms that the driver
1238 	 * may hold the resource in the Timeout field.
1239 	 */
1240 
1241 	/* Global config lock response utilizes an additional status field.
1242 	 *
1243 	 * If the Global config lock resource is held by some other driver, the
1244 	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
1245 	 * and the timeout field indicates the maximum time the current owner
1246 	 * of the resource has to free it.
1247 	 */
1248 	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1249 		if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1250 			*timeout = le32_to_cpu(cmd_resp->timeout);
1251 			return 0;
1252 		} else if (le16_to_cpu(cmd_resp->status) ==
1253 			   ICE_AQ_RES_GLBL_IN_PROG) {
1254 			*timeout = le32_to_cpu(cmd_resp->timeout);
1255 			return ICE_ERR_AQ_ERROR;
1256 		} else if (le16_to_cpu(cmd_resp->status) ==
1257 			   ICE_AQ_RES_GLBL_DONE) {
1258 			return ICE_ERR_AQ_NO_WORK;
1259 		}
1260 
1261 		/* invalid FW response, force a timeout immediately */
1262 		*timeout = 0;
1263 		return ICE_ERR_AQ_ERROR;
1264 	}
1265 
1266 	/* If the resource is held by some other driver, the command completes
1267 	 * with a busy return value and the timeout field indicates the maximum
1268 	 * time the current owner of the resource has to free it.
1269 	 */
1270 	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1271 		*timeout = le32_to_cpu(cmd_resp->timeout);
1272 
1273 	return status;
1274 }
1275 
1276 /**
1277  * ice_aq_release_res
1278  * @hw: pointer to the hw struct
1279  * @res: resource id
1280  * @sdp_number: resource number
1281  * @cd: pointer to command details structure or NULL
1282  *
1283  * release common resource using the admin queue commands (0x0009)
1284  */
1285 static enum ice_status
1286 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1287 		   struct ice_sq_cd *cd)
1288 {
1289 	struct ice_aqc_req_res *cmd;
1290 	struct ice_aq_desc desc;
1291 
1292 	cmd = &desc.params.res_owner;
1293 
1294 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1295 
1296 	cmd->res_id = cpu_to_le16(res);
1297 	cmd->res_number = cpu_to_le32(sdp_number);
1298 
1299 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1300 }
1301 
1302 /**
1303  * ice_acquire_res
1304  * @hw: pointer to the HW structure
1305  * @res: resource id
1306  * @access: access type (read or write)
1307  * @timeout: timeout in milliseconds
1308  *
1309  * This function will attempt to acquire the ownership of a resource.
1310  */
1311 enum ice_status
1312 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1313 		enum ice_aq_res_access_type access, u32 timeout)
1314 {
1315 #define ICE_RES_POLLING_DELAY_MS	10
1316 	u32 delay = ICE_RES_POLLING_DELAY_MS;
1317 	u32 time_left = timeout;
1318 	enum ice_status status;
1319 
1320 	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1321 
1322 	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
1323 	 * previously acquired the resource and performed any necessary updates;
1324 	 * in this case the caller does not obtain the resource and has no
1325 	 * further work to do.
1326 	 */
1327 	if (status == ICE_ERR_AQ_NO_WORK)
1328 		goto ice_acquire_res_exit;
1329 
1330 	if (status)
1331 		ice_debug(hw, ICE_DBG_RES,
1332 			  "resource %d acquire type %d failed.\n", res, access);
1333 
1334 	/* If necessary, poll until the current lock owner timeouts */
1335 	timeout = time_left;
1336 	while (status && timeout && time_left) {
1337 		mdelay(delay);
1338 		timeout = (timeout > delay) ? timeout - delay : 0;
1339 		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1340 
1341 		if (status == ICE_ERR_AQ_NO_WORK)
1342 			/* lock free, but no work to do */
1343 			break;
1344 
1345 		if (!status)
1346 			/* lock acquired */
1347 			break;
1348 	}
1349 	if (status && status != ICE_ERR_AQ_NO_WORK)
1350 		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
1351 
1352 ice_acquire_res_exit:
1353 	if (status == ICE_ERR_AQ_NO_WORK) {
1354 		if (access == ICE_RES_WRITE)
1355 			ice_debug(hw, ICE_DBG_RES,
1356 				  "resource indicates no work to do.\n");
1357 		else
1358 			ice_debug(hw, ICE_DBG_RES,
1359 				  "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
1360 	}
1361 	return status;
1362 }
1363 
1364 /**
1365  * ice_release_res
1366  * @hw: pointer to the HW structure
1367  * @res: resource id
1368  *
1369  * This function will release a resource using the proper Admin Command.
1370  */
1371 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
1372 {
1373 	enum ice_status status;
1374 	u32 total_delay = 0;
1375 
1376 	status = ice_aq_release_res(hw, res, 0, NULL);
1377 
1378 	/* there are some rare cases when trying to release the resource
1379 	 * results in an admin Q timeout, so handle them correctly
1380 	 */
1381 	while ((status == ICE_ERR_AQ_TIMEOUT) &&
1382 	       (total_delay < hw->adminq.sq_cmd_timeout)) {
1383 		mdelay(1);
1384 		status = ice_aq_release_res(hw, res, 0, NULL);
1385 		total_delay++;
1386 	}
1387 }
1388 
1389 /**
1390  * ice_parse_caps - parse function/device capabilities
1391  * @hw: pointer to the hw struct
1392  * @buf: pointer to a buffer containing function/device capability records
1393  * @cap_count: number of capability records in the list
1394  * @opc: type of capabilities list to parse
1395  *
1396  * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
1397  */
1398 static void
1399 ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
1400 	       enum ice_adminq_opc opc)
1401 {
1402 	struct ice_aqc_list_caps_elem *cap_resp;
1403 	struct ice_hw_func_caps *func_p = NULL;
1404 	struct ice_hw_dev_caps *dev_p = NULL;
1405 	struct ice_hw_common_caps *caps;
1406 	u32 i;
1407 
1408 	if (!buf)
1409 		return;
1410 
1411 	cap_resp = (struct ice_aqc_list_caps_elem *)buf;
1412 
1413 	if (opc == ice_aqc_opc_list_dev_caps) {
1414 		dev_p = &hw->dev_caps;
1415 		caps = &dev_p->common_cap;
1416 	} else if (opc == ice_aqc_opc_list_func_caps) {
1417 		func_p = &hw->func_caps;
1418 		caps = &func_p->common_cap;
1419 	} else {
1420 		ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
1421 		return;
1422 	}
1423 
1424 	for (i = 0; caps && i < cap_count; i++, cap_resp++) {
1425 		u32 logical_id = le32_to_cpu(cap_resp->logical_id);
1426 		u32 phys_id = le32_to_cpu(cap_resp->phys_id);
1427 		u32 number = le32_to_cpu(cap_resp->number);
1428 		u16 cap = le16_to_cpu(cap_resp->cap);
1429 
1430 		switch (cap) {
1431 		case ICE_AQC_CAPS_SRIOV:
1432 			caps->sr_iov_1_1 = (number == 1);
1433 			ice_debug(hw, ICE_DBG_INIT,
1434 				  "HW caps: SR-IOV = %d\n", caps->sr_iov_1_1);
1435 			break;
1436 		case ICE_AQC_CAPS_VF:
1437 			if (dev_p) {
1438 				dev_p->num_vfs_exposed = number;
1439 				ice_debug(hw, ICE_DBG_INIT,
1440 					  "HW caps: VFs exposed = %d\n",
1441 					  dev_p->num_vfs_exposed);
1442 			} else if (func_p) {
1443 				func_p->num_allocd_vfs = number;
1444 				func_p->vf_base_id = logical_id;
1445 				ice_debug(hw, ICE_DBG_INIT,
1446 					  "HW caps: VFs allocated = %d\n",
1447 					  func_p->num_allocd_vfs);
1448 				ice_debug(hw, ICE_DBG_INIT,
1449 					  "HW caps: VF base_id = %d\n",
1450 					  func_p->vf_base_id);
1451 			}
1452 			break;
1453 		case ICE_AQC_CAPS_VSI:
1454 			if (dev_p) {
1455 				dev_p->num_vsi_allocd_to_host = number;
1456 				ice_debug(hw, ICE_DBG_INIT,
1457 					  "HW caps: Dev.VSI cnt = %d\n",
1458 					  dev_p->num_vsi_allocd_to_host);
1459 			} else if (func_p) {
1460 				func_p->guaranteed_num_vsi = number;
1461 				ice_debug(hw, ICE_DBG_INIT,
1462 					  "HW caps: Func.VSI cnt = %d\n",
1463 					  func_p->guaranteed_num_vsi);
1464 			}
1465 			break;
1466 		case ICE_AQC_CAPS_RSS:
1467 			caps->rss_table_size = number;
1468 			caps->rss_table_entry_width = logical_id;
1469 			ice_debug(hw, ICE_DBG_INIT,
1470 				  "HW caps: RSS table size = %d\n",
1471 				  caps->rss_table_size);
1472 			ice_debug(hw, ICE_DBG_INIT,
1473 				  "HW caps: RSS table width = %d\n",
1474 				  caps->rss_table_entry_width);
1475 			break;
1476 		case ICE_AQC_CAPS_RXQS:
1477 			caps->num_rxq = number;
1478 			caps->rxq_first_id = phys_id;
1479 			ice_debug(hw, ICE_DBG_INIT,
1480 				  "HW caps: Num Rx Qs = %d\n", caps->num_rxq);
1481 			ice_debug(hw, ICE_DBG_INIT,
1482 				  "HW caps: Rx first queue ID = %d\n",
1483 				  caps->rxq_first_id);
1484 			break;
1485 		case ICE_AQC_CAPS_TXQS:
1486 			caps->num_txq = number;
1487 			caps->txq_first_id = phys_id;
1488 			ice_debug(hw, ICE_DBG_INIT,
1489 				  "HW caps: Num Tx Qs = %d\n", caps->num_txq);
1490 			ice_debug(hw, ICE_DBG_INIT,
1491 				  "HW caps: Tx first queue ID = %d\n",
1492 				  caps->txq_first_id);
1493 			break;
1494 		case ICE_AQC_CAPS_MSIX:
1495 			caps->num_msix_vectors = number;
1496 			caps->msix_vector_first_id = phys_id;
1497 			ice_debug(hw, ICE_DBG_INIT,
1498 				  "HW caps: MSIX vector count = %d\n",
1499 				  caps->num_msix_vectors);
1500 			ice_debug(hw, ICE_DBG_INIT,
1501 				  "HW caps: MSIX first vector index = %d\n",
1502 				  caps->msix_vector_first_id);
1503 			break;
1504 		case ICE_AQC_CAPS_MAX_MTU:
1505 			caps->max_mtu = number;
1506 			if (dev_p)
1507 				ice_debug(hw, ICE_DBG_INIT,
1508 					  "HW caps: Dev.MaxMTU = %d\n",
1509 					  caps->max_mtu);
1510 			else if (func_p)
1511 				ice_debug(hw, ICE_DBG_INIT,
1512 					  "HW caps: func.MaxMTU = %d\n",
1513 					  caps->max_mtu);
1514 			break;
1515 		default:
1516 			ice_debug(hw, ICE_DBG_INIT,
1517 				  "HW caps: Unknown capability[%d]: 0x%x\n", i,
1518 				  cap);
1519 			break;
1520 		}
1521 	}
1522 }
1523 
1524 /**
1525  * ice_aq_discover_caps - query function/device capabilities
1526  * @hw: pointer to the hw struct
1527  * @buf: a virtual buffer to hold the capabilities
1528  * @buf_size: Size of the virtual buffer
1529  * @cap_count: cap count needed if AQ err==ENOMEM
1530  * @opc: capabilities type to discover - pass in the command opcode
1531  * @cd: pointer to command details structure or NULL
1532  *
1533  * Get the function(0x000a)/device(0x000b) capabilities description from
1534  * the firmware.
1535  */
1536 static enum ice_status
1537 ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
1538 		     enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1539 {
1540 	struct ice_aqc_list_caps *cmd;
1541 	struct ice_aq_desc desc;
1542 	enum ice_status status;
1543 
1544 	cmd = &desc.params.get_cap;
1545 
1546 	if (opc != ice_aqc_opc_list_func_caps &&
1547 	    opc != ice_aqc_opc_list_dev_caps)
1548 		return ICE_ERR_PARAM;
1549 
1550 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
1551 
1552 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1553 	if (!status)
1554 		ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc);
1555 	else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM)
1556 		*cap_count = le32_to_cpu(cmd->count);
1557 	return status;
1558 }
1559 
1560 /**
1561  * ice_discover_caps - get info about the HW
1562  * @hw: pointer to the hardware structure
1563  * @opc: capabilities type to discover - pass in the command opcode
1564  */
1565 static enum ice_status ice_discover_caps(struct ice_hw *hw,
1566 					 enum ice_adminq_opc opc)
1567 {
1568 	enum ice_status status;
1569 	u32 cap_count;
1570 	u16 cbuf_len;
1571 	u8 retries;
1572 
1573 	/* The driver doesn't know how many capabilities the device will return
1574 	 * so the buffer size required isn't known ahead of time. The driver
1575 	 * starts with cbuf_len and if this turns out to be insufficient, the
1576 	 * device returns ICE_AQ_RC_ENOMEM and also the cap_count it needs.
1577 	 * The driver then allocates the buffer based on the count and retries
1578 	 * the operation. So it follows that the retry count is 2.
1579 	 */
1580 #define ICE_GET_CAP_BUF_COUNT	40
1581 #define ICE_GET_CAP_RETRY_COUNT	2
1582 
1583 	cap_count = ICE_GET_CAP_BUF_COUNT;
1584 	retries = ICE_GET_CAP_RETRY_COUNT;
1585 
1586 	do {
1587 		void *cbuf;
1588 
1589 		cbuf_len = (u16)(cap_count *
1590 				 sizeof(struct ice_aqc_list_caps_elem));
1591 		cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL);
1592 		if (!cbuf)
1593 			return ICE_ERR_NO_MEMORY;
1594 
1595 		status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &cap_count,
1596 					      opc, NULL);
1597 		devm_kfree(ice_hw_to_dev(hw), cbuf);
1598 
1599 		if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
1600 			break;
1601 
1602 		/* If ENOMEM is returned, try again with bigger buffer */
1603 	} while (--retries);
1604 
1605 	return status;
1606 }
1607 
1608 /**
1609  * ice_get_caps - get info about the HW
1610  * @hw: pointer to the hardware structure
1611  */
1612 enum ice_status ice_get_caps(struct ice_hw *hw)
1613 {
1614 	enum ice_status status;
1615 
1616 	status = ice_discover_caps(hw, ice_aqc_opc_list_dev_caps);
1617 	if (!status)
1618 		status = ice_discover_caps(hw, ice_aqc_opc_list_func_caps);
1619 
1620 	return status;
1621 }
1622 
1623 /**
1624  * ice_aq_manage_mac_write - manage MAC address write command
1625  * @hw: pointer to the hw struct
1626  * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
1627  * @flags: flags to control write behavior
1628  * @cd: pointer to command details structure or NULL
1629  *
1630  * This function is used to write MAC address to the NVM (0x0108).
1631  */
1632 enum ice_status
1633 ice_aq_manage_mac_write(struct ice_hw *hw, u8 *mac_addr, u8 flags,
1634 			struct ice_sq_cd *cd)
1635 {
1636 	struct ice_aqc_manage_mac_write *cmd;
1637 	struct ice_aq_desc desc;
1638 
1639 	cmd = &desc.params.mac_write;
1640 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
1641 
1642 	cmd->flags = flags;
1643 
1644 	/* Prep values for flags, sah, sal */
1645 	cmd->sah = htons(*((u16 *)mac_addr));
1646 	cmd->sal = htonl(*((u32 *)(mac_addr + 2)));
1647 
1648 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1649 }
1650 
1651 /**
1652  * ice_aq_clear_pxe_mode
1653  * @hw: pointer to the hw struct
1654  *
1655  * Tell the firmware that the driver is taking over from PXE (0x0110).
1656  */
1657 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
1658 {
1659 	struct ice_aq_desc desc;
1660 
1661 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
1662 	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
1663 
1664 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1665 }
1666 
1667 /**
1668  * ice_clear_pxe_mode - clear pxe operations mode
1669  * @hw: pointer to the hw struct
1670  *
1671  * Make sure all PXE mode settings are cleared, including things
1672  * like descriptor fetch/write-back mode.
1673  */
1674 void ice_clear_pxe_mode(struct ice_hw *hw)
1675 {
1676 	if (ice_check_sq_alive(hw, &hw->adminq))
1677 		ice_aq_clear_pxe_mode(hw);
1678 }
1679 
1680 /**
1681  * ice_get_link_speed_based_on_phy_type - returns link speed
1682  * @phy_type_low: lower part of phy_type
1683  *
1684  * This helper function will convert a phy_type_low to its corresponding link
1685  * speed.
1686  * Note: In the structure of phy_type_low, there should be one bit set, as
1687  * this function will convert one phy type to its speed.
1688  * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
1689  * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
1690  */
1691 static u16
1692 ice_get_link_speed_based_on_phy_type(u64 phy_type_low)
1693 {
1694 	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
1695 
1696 	switch (phy_type_low) {
1697 	case ICE_PHY_TYPE_LOW_100BASE_TX:
1698 	case ICE_PHY_TYPE_LOW_100M_SGMII:
1699 		speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
1700 		break;
1701 	case ICE_PHY_TYPE_LOW_1000BASE_T:
1702 	case ICE_PHY_TYPE_LOW_1000BASE_SX:
1703 	case ICE_PHY_TYPE_LOW_1000BASE_LX:
1704 	case ICE_PHY_TYPE_LOW_1000BASE_KX:
1705 	case ICE_PHY_TYPE_LOW_1G_SGMII:
1706 		speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
1707 		break;
1708 	case ICE_PHY_TYPE_LOW_2500BASE_T:
1709 	case ICE_PHY_TYPE_LOW_2500BASE_X:
1710 	case ICE_PHY_TYPE_LOW_2500BASE_KX:
1711 		speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
1712 		break;
1713 	case ICE_PHY_TYPE_LOW_5GBASE_T:
1714 	case ICE_PHY_TYPE_LOW_5GBASE_KR:
1715 		speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
1716 		break;
1717 	case ICE_PHY_TYPE_LOW_10GBASE_T:
1718 	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
1719 	case ICE_PHY_TYPE_LOW_10GBASE_SR:
1720 	case ICE_PHY_TYPE_LOW_10GBASE_LR:
1721 	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
1722 	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
1723 	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
1724 		speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
1725 		break;
1726 	case ICE_PHY_TYPE_LOW_25GBASE_T:
1727 	case ICE_PHY_TYPE_LOW_25GBASE_CR:
1728 	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
1729 	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
1730 	case ICE_PHY_TYPE_LOW_25GBASE_SR:
1731 	case ICE_PHY_TYPE_LOW_25GBASE_LR:
1732 	case ICE_PHY_TYPE_LOW_25GBASE_KR:
1733 	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
1734 	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
1735 	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
1736 	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
1737 		speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
1738 		break;
1739 	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
1740 	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
1741 	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
1742 	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
1743 	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
1744 	case ICE_PHY_TYPE_LOW_40G_XLAUI:
1745 		speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
1746 		break;
1747 	default:
1748 		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
1749 		break;
1750 	}
1751 
1752 	return speed_phy_type_low;
1753 }
1754 
1755 /**
1756  * ice_update_phy_type
1757  * @phy_type_low: pointer to the lower part of phy_type
1758  * @link_speeds_bitmap: targeted link speeds bitmap
1759  *
1760  * Note: For the link_speeds_bitmap structure, you can check it at
1761  * [ice_aqc_get_link_status->link_speed]. Caller can pass in
1762  * link_speeds_bitmap include multiple speeds.
1763  *
1764  * The value of phy_type_low will present a certain link speed. This helper
1765  * function will turn on bits in the phy_type_low based on the value of
1766  * link_speeds_bitmap input parameter.
1767  */
1768 void ice_update_phy_type(u64 *phy_type_low, u16 link_speeds_bitmap)
1769 {
1770 	u16 speed = ICE_AQ_LINK_SPEED_UNKNOWN;
1771 	u64 pt_low;
1772 	int index;
1773 
1774 	/* We first check with low part of phy_type */
1775 	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
1776 		pt_low = BIT_ULL(index);
1777 		speed = ice_get_link_speed_based_on_phy_type(pt_low);
1778 
1779 		if (link_speeds_bitmap & speed)
1780 			*phy_type_low |= BIT_ULL(index);
1781 	}
1782 }
1783 
1784 /**
1785  * ice_aq_set_phy_cfg
1786  * @hw: pointer to the hw struct
1787  * @lport: logical port number
1788  * @cfg: structure with PHY configuration data to be set
1789  * @cd: pointer to command details structure or NULL
1790  *
1791  * Set the various PHY configuration parameters supported on the Port.
1792  * One or more of the Set PHY config parameters may be ignored in an MFP
1793  * mode as the PF may not have the privilege to set some of the PHY Config
1794  * parameters. This status will be indicated by the command response (0x0601).
1795  */
1796 enum ice_status
1797 ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
1798 		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
1799 {
1800 	struct ice_aq_desc desc;
1801 
1802 	if (!cfg)
1803 		return ICE_ERR_PARAM;
1804 
1805 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
1806 	desc.params.set_phy.lport_num = lport;
1807 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1808 
1809 	return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
1810 }
1811 
1812 /**
1813  * ice_update_link_info - update status of the HW network link
1814  * @pi: port info structure of the interested logical port
1815  */
1816 enum ice_status ice_update_link_info(struct ice_port_info *pi)
1817 {
1818 	struct ice_aqc_get_phy_caps_data *pcaps;
1819 	struct ice_phy_info *phy_info;
1820 	enum ice_status status;
1821 	struct ice_hw *hw;
1822 
1823 	if (!pi)
1824 		return ICE_ERR_PARAM;
1825 
1826 	hw = pi->hw;
1827 
1828 	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
1829 	if (!pcaps)
1830 		return ICE_ERR_NO_MEMORY;
1831 
1832 	phy_info = &pi->phy;
1833 	status = ice_aq_get_link_info(pi, true, NULL, NULL);
1834 	if (status)
1835 		goto out;
1836 
1837 	if (phy_info->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
1838 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
1839 					     pcaps, NULL);
1840 		if (status)
1841 			goto out;
1842 
1843 		memcpy(phy_info->link_info.module_type, &pcaps->module_type,
1844 		       sizeof(phy_info->link_info.module_type));
1845 	}
1846 out:
1847 	devm_kfree(ice_hw_to_dev(hw), pcaps);
1848 	return status;
1849 }
1850 
1851 /**
1852  * ice_set_fc
1853  * @pi: port information structure
1854  * @aq_failures: pointer to status code, specific to ice_set_fc routine
1855  * @ena_auto_link_update: enable automatic link update
1856  *
1857  * Set the requested flow control mode.
1858  */
1859 enum ice_status
1860 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
1861 {
1862 	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
1863 	struct ice_aqc_get_phy_caps_data *pcaps;
1864 	enum ice_status status;
1865 	u8 pause_mask = 0x0;
1866 	struct ice_hw *hw;
1867 
1868 	if (!pi)
1869 		return ICE_ERR_PARAM;
1870 	hw = pi->hw;
1871 	*aq_failures = ICE_SET_FC_AQ_FAIL_NONE;
1872 
1873 	switch (pi->fc.req_mode) {
1874 	case ICE_FC_FULL:
1875 		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
1876 		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
1877 		break;
1878 	case ICE_FC_RX_PAUSE:
1879 		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
1880 		break;
1881 	case ICE_FC_TX_PAUSE:
1882 		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
1883 		break;
1884 	default:
1885 		break;
1886 	}
1887 
1888 	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
1889 	if (!pcaps)
1890 		return ICE_ERR_NO_MEMORY;
1891 
1892 	/* Get the current phy config */
1893 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
1894 				     NULL);
1895 	if (status) {
1896 		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
1897 		goto out;
1898 	}
1899 
1900 	/* clear the old pause settings */
1901 	cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
1902 				   ICE_AQC_PHY_EN_RX_LINK_PAUSE);
1903 	/* set the new capabilities */
1904 	cfg.caps |= pause_mask;
1905 	/* If the capabilities have changed, then set the new config */
1906 	if (cfg.caps != pcaps->caps) {
1907 		int retry_count, retry_max = 10;
1908 
1909 		/* Auto restart link so settings take effect */
1910 		if (ena_auto_link_update)
1911 			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
1912 		/* Copy over all the old settings */
1913 		cfg.phy_type_low = pcaps->phy_type_low;
1914 		cfg.low_power_ctrl = pcaps->low_power_ctrl;
1915 		cfg.eee_cap = pcaps->eee_cap;
1916 		cfg.eeer_value = pcaps->eeer_value;
1917 		cfg.link_fec_opt = pcaps->link_fec_options;
1918 
1919 		status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL);
1920 		if (status) {
1921 			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
1922 			goto out;
1923 		}
1924 
1925 		/* Update the link info
1926 		 * It sometimes takes a really long time for link to
1927 		 * come back from the atomic reset. Thus, we wait a
1928 		 * little bit.
1929 		 */
1930 		for (retry_count = 0; retry_count < retry_max; retry_count++) {
1931 			status = ice_update_link_info(pi);
1932 
1933 			if (!status)
1934 				break;
1935 
1936 			mdelay(100);
1937 		}
1938 
1939 		if (status)
1940 			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
1941 	}
1942 
1943 out:
1944 	devm_kfree(ice_hw_to_dev(hw), pcaps);
1945 	return status;
1946 }
1947 
1948 /**
1949  * ice_get_link_status - get status of the HW network link
1950  * @pi: port information structure
1951  * @link_up: pointer to bool (true/false = linkup/linkdown)
1952  *
1953  * Variable link_up is true if link is up, false if link is down.
1954  * The variable link_up is invalid if status is non zero. As a
1955  * result of this call, link status reporting becomes enabled
1956  */
1957 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
1958 {
1959 	struct ice_phy_info *phy_info;
1960 	enum ice_status status = 0;
1961 
1962 	if (!pi || !link_up)
1963 		return ICE_ERR_PARAM;
1964 
1965 	phy_info = &pi->phy;
1966 
1967 	if (phy_info->get_link_info) {
1968 		status = ice_update_link_info(pi);
1969 
1970 		if (status)
1971 			ice_debug(pi->hw, ICE_DBG_LINK,
1972 				  "get link status error, status = %d\n",
1973 				  status);
1974 	}
1975 
1976 	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
1977 
1978 	return status;
1979 }
1980 
1981 /**
1982  * ice_aq_set_link_restart_an
1983  * @pi: pointer to the port information structure
1984  * @ena_link: if true: enable link, if false: disable link
1985  * @cd: pointer to command details structure or NULL
1986  *
1987  * Sets up the link and restarts the Auto-Negotiation over the link.
1988  */
1989 enum ice_status
1990 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
1991 			   struct ice_sq_cd *cd)
1992 {
1993 	struct ice_aqc_restart_an *cmd;
1994 	struct ice_aq_desc desc;
1995 
1996 	cmd = &desc.params.restart_an;
1997 
1998 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
1999 
2000 	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
2001 	cmd->lport_num = pi->lport;
2002 	if (ena_link)
2003 		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
2004 	else
2005 		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
2006 
2007 	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
2008 }
2009 
2010 /**
2011  * __ice_aq_get_set_rss_lut
2012  * @hw: pointer to the hardware structure
2013  * @vsi_id: VSI FW index
2014  * @lut_type: LUT table type
2015  * @lut: pointer to the LUT buffer provided by the caller
2016  * @lut_size: size of the LUT buffer
2017  * @glob_lut_idx: global LUT index
2018  * @set: set true to set the table, false to get the table
2019  *
2020  * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
2021  */
2022 static enum ice_status
2023 __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
2024 			 u16 lut_size, u8 glob_lut_idx, bool set)
2025 {
2026 	struct ice_aqc_get_set_rss_lut *cmd_resp;
2027 	struct ice_aq_desc desc;
2028 	enum ice_status status;
2029 	u16 flags = 0;
2030 
2031 	cmd_resp = &desc.params.get_set_rss_lut;
2032 
2033 	if (set) {
2034 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
2035 		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2036 	} else {
2037 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
2038 	}
2039 
2040 	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
2041 					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
2042 					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
2043 				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);
2044 
2045 	switch (lut_type) {
2046 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
2047 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
2048 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
2049 		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
2050 			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
2051 		break;
2052 	default:
2053 		status = ICE_ERR_PARAM;
2054 		goto ice_aq_get_set_rss_lut_exit;
2055 	}
2056 
2057 	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
2058 		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
2059 			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
2060 
2061 		if (!set)
2062 			goto ice_aq_get_set_rss_lut_send;
2063 	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
2064 		if (!set)
2065 			goto ice_aq_get_set_rss_lut_send;
2066 	} else {
2067 		goto ice_aq_get_set_rss_lut_send;
2068 	}
2069 
2070 	/* LUT size is only valid for Global and PF table types */
2071 	switch (lut_size) {
2072 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
2073 		break;
2074 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
2075 		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
2076 			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
2077 			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
2078 		break;
2079 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
2080 		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
2081 			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
2082 				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
2083 				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
2084 			break;
2085 		}
2086 		/* fall-through */
2087 	default:
2088 		status = ICE_ERR_PARAM;
2089 		goto ice_aq_get_set_rss_lut_exit;
2090 	}
2091 
2092 ice_aq_get_set_rss_lut_send:
2093 	cmd_resp->flags = cpu_to_le16(flags);
2094 	status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
2095 
2096 ice_aq_get_set_rss_lut_exit:
2097 	return status;
2098 }
2099 
2100 /**
2101  * ice_aq_get_rss_lut
2102  * @hw: pointer to the hardware structure
2103  * @vsi_handle: software VSI handle
2104  * @lut_type: LUT table type
2105  * @lut: pointer to the LUT buffer provided by the caller
2106  * @lut_size: size of the LUT buffer
2107  *
2108  * get the RSS lookup table, PF or VSI type
2109  */
2110 enum ice_status
2111 ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
2112 		   u8 *lut, u16 lut_size)
2113 {
2114 	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
2115 		return ICE_ERR_PARAM;
2116 
2117 	return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2118 					lut_type, lut, lut_size, 0, false);
2119 }
2120 
2121 /**
2122  * ice_aq_set_rss_lut
2123  * @hw: pointer to the hardware structure
2124  * @vsi_handle: software VSI handle
2125  * @lut_type: LUT table type
2126  * @lut: pointer to the LUT buffer provided by the caller
2127  * @lut_size: size of the LUT buffer
2128  *
2129  * set the RSS lookup table, PF or VSI type
2130  */
2131 enum ice_status
2132 ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
2133 		   u8 *lut, u16 lut_size)
2134 {
2135 	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
2136 		return ICE_ERR_PARAM;
2137 
2138 	return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2139 					lut_type, lut, lut_size, 0, true);
2140 }
2141 
2142 /**
2143  * __ice_aq_get_set_rss_key
2144  * @hw: pointer to the hw struct
2145  * @vsi_id: VSI FW index
2146  * @key: pointer to key info struct
2147  * @set: set true to set the key, false to get the key
2148  *
2149  * get (0x0B04) or set (0x0B02) the RSS key per VSI
2150  */
2151 static enum
2152 ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
2153 				    struct ice_aqc_get_set_rss_keys *key,
2154 				    bool set)
2155 {
2156 	struct ice_aqc_get_set_rss_key *cmd_resp;
2157 	u16 key_size = sizeof(*key);
2158 	struct ice_aq_desc desc;
2159 
2160 	cmd_resp = &desc.params.get_set_rss_key;
2161 
2162 	if (set) {
2163 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
2164 		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2165 	} else {
2166 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
2167 	}
2168 
2169 	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
2170 					 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
2171 					ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
2172 				       ICE_AQC_GSET_RSS_KEY_VSI_VALID);
2173 
2174 	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
2175 }
2176 
2177 /**
2178  * ice_aq_get_rss_key
2179  * @hw: pointer to the hw struct
2180  * @vsi_handle: software VSI handle
2181  * @key: pointer to key info struct
2182  *
2183  * get the RSS key per VSI
2184  */
2185 enum ice_status
2186 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
2187 		   struct ice_aqc_get_set_rss_keys *key)
2188 {
2189 	if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
2190 		return ICE_ERR_PARAM;
2191 
2192 	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2193 					key, false);
2194 }
2195 
2196 /**
2197  * ice_aq_set_rss_key
2198  * @hw: pointer to the hw struct
2199  * @vsi_handle: software VSI handle
2200  * @keys: pointer to key info struct
2201  *
2202  * set the RSS key per VSI
2203  */
2204 enum ice_status
2205 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
2206 		   struct ice_aqc_get_set_rss_keys *keys)
2207 {
2208 	if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
2209 		return ICE_ERR_PARAM;
2210 
2211 	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2212 					keys, true);
2213 }
2214 
2215 /**
2216  * ice_aq_add_lan_txq
2217  * @hw: pointer to the hardware structure
2218  * @num_qgrps: Number of added queue groups
2219  * @qg_list: list of queue groups to be added
2220  * @buf_size: size of buffer for indirect command
2221  * @cd: pointer to command details structure or NULL
2222  *
2223  * Add Tx LAN queue (0x0C30)
2224  *
2225  * NOTE:
2226  * Prior to calling add Tx LAN queue:
2227  * Initialize the following as part of the Tx queue context:
2228  * Completion queue ID if the queue uses Completion queue, Quanta profile,
2229  * Cache profile and Packet shaper profile.
2230  *
2231  * After add Tx LAN queue AQ command is completed:
2232  * Interrupts should be associated with specific queues,
2233  * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
2234  * flow.
2235  */
2236 static enum ice_status
2237 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
2238 		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
2239 		   struct ice_sq_cd *cd)
2240 {
2241 	u16 i, sum_header_size, sum_q_size = 0;
2242 	struct ice_aqc_add_tx_qgrp *list;
2243 	struct ice_aqc_add_txqs *cmd;
2244 	struct ice_aq_desc desc;
2245 
2246 	cmd = &desc.params.add_txqs;
2247 
2248 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
2249 
2250 	if (!qg_list)
2251 		return ICE_ERR_PARAM;
2252 
2253 	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
2254 		return ICE_ERR_PARAM;
2255 
2256 	sum_header_size = num_qgrps *
2257 		(sizeof(*qg_list) - sizeof(*qg_list->txqs));
2258 
2259 	list = qg_list;
2260 	for (i = 0; i < num_qgrps; i++) {
2261 		struct ice_aqc_add_txqs_perq *q = list->txqs;
2262 
2263 		sum_q_size += list->num_txqs * sizeof(*q);
2264 		list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);
2265 	}
2266 
2267 	if (buf_size != (sum_header_size + sum_q_size))
2268 		return ICE_ERR_PARAM;
2269 
2270 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2271 
2272 	cmd->num_qgrps = num_qgrps;
2273 
2274 	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
2275 }
2276 
2277 /**
2278  * ice_aq_dis_lan_txq
2279  * @hw: pointer to the hardware structure
2280  * @num_qgrps: number of groups in the list
2281  * @qg_list: the list of groups to disable
2282  * @buf_size: the total size of the qg_list buffer in bytes
2283  * @rst_src: if called due to reset, specifies the RST source
2284  * @vmvf_num: the relative VM or VF number that is undergoing the reset
2285  * @cd: pointer to command details structure or NULL
2286  *
2287  * Disable LAN Tx queue (0x0C31)
2288  */
2289 static enum ice_status
2290 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
2291 		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
2292 		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
2293 		   struct ice_sq_cd *cd)
2294 {
2295 	struct ice_aqc_dis_txqs *cmd;
2296 	struct ice_aq_desc desc;
2297 	u16 i, sz = 0;
2298 
2299 	cmd = &desc.params.dis_txqs;
2300 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
2301 
2302 	/* qg_list can be NULL only in VM/VF reset flow */
2303 	if (!qg_list && !rst_src)
2304 		return ICE_ERR_PARAM;
2305 
2306 	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
2307 		return ICE_ERR_PARAM;
2308 
2309 	cmd->num_entries = num_qgrps;
2310 
2311 	cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
2312 					    ICE_AQC_Q_DIS_TIMEOUT_M);
2313 
2314 	switch (rst_src) {
2315 	case ICE_VM_RESET:
2316 		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
2317 		cmd->vmvf_and_timeout |=
2318 			cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
2319 		break;
2320 	case ICE_VF_RESET:
2321 		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
2322 		/* In this case, FW expects vmvf_num to be absolute VF id */
2323 		cmd->vmvf_and_timeout |=
2324 			cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
2325 				    ICE_AQC_Q_DIS_VMVF_NUM_M);
2326 		break;
2327 	case ICE_NO_RESET:
2328 	default:
2329 		break;
2330 	}
2331 
2332 	/* If no queue group info, we are in a reset flow. Issue the AQ */
2333 	if (!qg_list)
2334 		goto do_aq;
2335 
2336 	/* set RD bit to indicate that command buffer is provided by the driver
2337 	 * and it needs to be read by the firmware
2338 	 */
2339 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2340 
2341 	for (i = 0; i < num_qgrps; ++i) {
2342 		/* Calculate the size taken up by the queue IDs in this group */
2343 		sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);
2344 
2345 		/* Add the size of the group header */
2346 		sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);
2347 
2348 		/* If the num of queues is even, add 2 bytes of padding */
2349 		if ((qg_list[i].num_qs % 2) == 0)
2350 			sz += 2;
2351 	}
2352 
2353 	if (buf_size != sz)
2354 		return ICE_ERR_PARAM;
2355 
2356 do_aq:
2357 	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
2358 }
2359 
2360 /* End of FW Admin Queue command wrappers */
2361 
2362 /**
2363  * ice_write_byte - write a byte to a packed context structure
2364  * @src_ctx:  the context structure to read from
2365  * @dest_ctx: the context to be written to
2366  * @ce_info:  a description of the struct to be filled
2367  */
2368 static void ice_write_byte(u8 *src_ctx, u8 *dest_ctx,
2369 			   const struct ice_ctx_ele *ce_info)
2370 {
2371 	u8 src_byte, dest_byte, mask;
2372 	u8 *from, *dest;
2373 	u16 shift_width;
2374 
2375 	/* copy from the next struct field */
2376 	from = src_ctx + ce_info->offset;
2377 
2378 	/* prepare the bits and mask */
2379 	shift_width = ce_info->lsb % 8;
2380 	mask = (u8)(BIT(ce_info->width) - 1);
2381 
2382 	src_byte = *from;
2383 	src_byte &= mask;
2384 
2385 	/* shift to correct alignment */
2386 	mask <<= shift_width;
2387 	src_byte <<= shift_width;
2388 
2389 	/* get the current bits from the target bit string */
2390 	dest = dest_ctx + (ce_info->lsb / 8);
2391 
2392 	memcpy(&dest_byte, dest, sizeof(dest_byte));
2393 
2394 	dest_byte &= ~mask;	/* get the bits not changing */
2395 	dest_byte |= src_byte;	/* add in the new bits */
2396 
2397 	/* put it all back */
2398 	memcpy(dest, &dest_byte, sizeof(dest_byte));
2399 }
2400 
2401 /**
2402  * ice_write_word - write a word to a packed context structure
2403  * @src_ctx:  the context structure to read from
2404  * @dest_ctx: the context to be written to
2405  * @ce_info:  a description of the struct to be filled
2406  */
2407 static void ice_write_word(u8 *src_ctx, u8 *dest_ctx,
2408 			   const struct ice_ctx_ele *ce_info)
2409 {
2410 	u16 src_word, mask;
2411 	__le16 dest_word;
2412 	u8 *from, *dest;
2413 	u16 shift_width;
2414 
2415 	/* copy from the next struct field */
2416 	from = src_ctx + ce_info->offset;
2417 
2418 	/* prepare the bits and mask */
2419 	shift_width = ce_info->lsb % 8;
2420 	mask = BIT(ce_info->width) - 1;
2421 
2422 	/* don't swizzle the bits until after the mask because the mask bits
2423 	 * will be in a different bit position on big endian machines
2424 	 */
2425 	src_word = *(u16 *)from;
2426 	src_word &= mask;
2427 
2428 	/* shift to correct alignment */
2429 	mask <<= shift_width;
2430 	src_word <<= shift_width;
2431 
2432 	/* get the current bits from the target bit string */
2433 	dest = dest_ctx + (ce_info->lsb / 8);
2434 
2435 	memcpy(&dest_word, dest, sizeof(dest_word));
2436 
2437 	dest_word &= ~(cpu_to_le16(mask));	/* get the bits not changing */
2438 	dest_word |= cpu_to_le16(src_word);	/* add in the new bits */
2439 
2440 	/* put it all back */
2441 	memcpy(dest, &dest_word, sizeof(dest_word));
2442 }
2443 
2444 /**
2445  * ice_write_dword - write a dword to a packed context structure
2446  * @src_ctx:  the context structure to read from
2447  * @dest_ctx: the context to be written to
2448  * @ce_info:  a description of the struct to be filled
2449  */
2450 static void ice_write_dword(u8 *src_ctx, u8 *dest_ctx,
2451 			    const struct ice_ctx_ele *ce_info)
2452 {
2453 	u32 src_dword, mask;
2454 	__le32 dest_dword;
2455 	u8 *from, *dest;
2456 	u16 shift_width;
2457 
2458 	/* copy from the next struct field */
2459 	from = src_ctx + ce_info->offset;
2460 
2461 	/* prepare the bits and mask */
2462 	shift_width = ce_info->lsb % 8;
2463 
2464 	/* if the field width is exactly 32 on an x86 machine, then the shift
2465 	 * operation will not work because the SHL instructions count is masked
2466 	 * to 5 bits so the shift will do nothing
2467 	 */
2468 	if (ce_info->width < 32)
2469 		mask = BIT(ce_info->width) - 1;
2470 	else
2471 		mask = (u32)~0;
2472 
2473 	/* don't swizzle the bits until after the mask because the mask bits
2474 	 * will be in a different bit position on big endian machines
2475 	 */
2476 	src_dword = *(u32 *)from;
2477 	src_dword &= mask;
2478 
2479 	/* shift to correct alignment */
2480 	mask <<= shift_width;
2481 	src_dword <<= shift_width;
2482 
2483 	/* get the current bits from the target bit string */
2484 	dest = dest_ctx + (ce_info->lsb / 8);
2485 
2486 	memcpy(&dest_dword, dest, sizeof(dest_dword));
2487 
2488 	dest_dword &= ~(cpu_to_le32(mask));	/* get the bits not changing */
2489 	dest_dword |= cpu_to_le32(src_dword);	/* add in the new bits */
2490 
2491 	/* put it all back */
2492 	memcpy(dest, &dest_dword, sizeof(dest_dword));
2493 }
2494 
2495 /**
2496  * ice_write_qword - write a qword to a packed context structure
2497  * @src_ctx:  the context structure to read from
2498  * @dest_ctx: the context to be written to
2499  * @ce_info:  a description of the struct to be filled
2500  */
2501 static void ice_write_qword(u8 *src_ctx, u8 *dest_ctx,
2502 			    const struct ice_ctx_ele *ce_info)
2503 {
2504 	u64 src_qword, mask;
2505 	__le64 dest_qword;
2506 	u8 *from, *dest;
2507 	u16 shift_width;
2508 
2509 	/* copy from the next struct field */
2510 	from = src_ctx + ce_info->offset;
2511 
2512 	/* prepare the bits and mask */
2513 	shift_width = ce_info->lsb % 8;
2514 
2515 	/* if the field width is exactly 64 on an x86 machine, then the shift
2516 	 * operation will not work because the SHL instructions count is masked
2517 	 * to 6 bits so the shift will do nothing
2518 	 */
2519 	if (ce_info->width < 64)
2520 		mask = BIT_ULL(ce_info->width) - 1;
2521 	else
2522 		mask = (u64)~0;
2523 
2524 	/* don't swizzle the bits until after the mask because the mask bits
2525 	 * will be in a different bit position on big endian machines
2526 	 */
2527 	src_qword = *(u64 *)from;
2528 	src_qword &= mask;
2529 
2530 	/* shift to correct alignment */
2531 	mask <<= shift_width;
2532 	src_qword <<= shift_width;
2533 
2534 	/* get the current bits from the target bit string */
2535 	dest = dest_ctx + (ce_info->lsb / 8);
2536 
2537 	memcpy(&dest_qword, dest, sizeof(dest_qword));
2538 
2539 	dest_qword &= ~(cpu_to_le64(mask));	/* get the bits not changing */
2540 	dest_qword |= cpu_to_le64(src_qword);	/* add in the new bits */
2541 
2542 	/* put it all back */
2543 	memcpy(dest, &dest_qword, sizeof(dest_qword));
2544 }
2545 
2546 /**
2547  * ice_set_ctx - set context bits in packed structure
2548  * @src_ctx:  pointer to a generic non-packed context structure
2549  * @dest_ctx: pointer to memory for the packed structure
2550  * @ce_info:  a description of the structure to be transformed
2551  */
2552 enum ice_status
2553 ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
2554 {
2555 	int f;
2556 
2557 	for (f = 0; ce_info[f].width; f++) {
2558 		/* We have to deal with each element of the FW response
2559 		 * using the correct size so that we are correct regardless
2560 		 * of the endianness of the machine.
2561 		 */
2562 		switch (ce_info[f].size_of) {
2563 		case sizeof(u8):
2564 			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
2565 			break;
2566 		case sizeof(u16):
2567 			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
2568 			break;
2569 		case sizeof(u32):
2570 			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
2571 			break;
2572 		case sizeof(u64):
2573 			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
2574 			break;
2575 		default:
2576 			return ICE_ERR_INVAL_SIZE;
2577 		}
2578 	}
2579 
2580 	return 0;
2581 }
2582 
2583 /**
2584  * ice_ena_vsi_txq
2585  * @pi: port information structure
2586  * @vsi_handle: software VSI handle
2587  * @tc: tc number
2588  * @num_qgrps: Number of added queue groups
2589  * @buf: list of queue groups to be added
2590  * @buf_size: size of buffer for indirect command
2591  * @cd: pointer to command details structure or NULL
2592  *
2593  * This function adds one lan q
2594  */
2595 enum ice_status
2596 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
2597 		struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
2598 		struct ice_sq_cd *cd)
2599 {
2600 	struct ice_aqc_txsched_elem_data node = { 0 };
2601 	struct ice_sched_node *parent;
2602 	enum ice_status status;
2603 	struct ice_hw *hw;
2604 
2605 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
2606 		return ICE_ERR_CFG;
2607 
2608 	if (num_qgrps > 1 || buf->num_txqs > 1)
2609 		return ICE_ERR_MAX_LIMIT;
2610 
2611 	hw = pi->hw;
2612 
2613 	if (!ice_is_vsi_valid(hw, vsi_handle))
2614 		return ICE_ERR_PARAM;
2615 
2616 	mutex_lock(&pi->sched_lock);
2617 
2618 	/* find a parent node */
2619 	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
2620 					    ICE_SCHED_NODE_OWNER_LAN);
2621 	if (!parent) {
2622 		status = ICE_ERR_PARAM;
2623 		goto ena_txq_exit;
2624 	}
2625 
2626 	buf->parent_teid = parent->info.node_teid;
2627 	node.parent_teid = parent->info.node_teid;
2628 	/* Mark that the values in the "generic" section as valid. The default
2629 	 * value in the "generic" section is zero. This means that :
2630 	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
2631 	 * - 0 priority among siblings, indicated by Bit 1-3.
2632 	 * - WFQ, indicated by Bit 4.
2633 	 * - 0 Adjustment value is used in PSM credit update flow, indicated by
2634 	 * Bit 5-6.
2635 	 * - Bit 7 is reserved.
2636 	 * Without setting the generic section as valid in valid_sections, the
2637 	 * Admin Q command will fail with error code ICE_AQ_RC_EINVAL.
2638 	 */
2639 	buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;
2640 
2641 	/* add the lan q */
2642 	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
2643 	if (status)
2644 		goto ena_txq_exit;
2645 
2646 	node.node_teid = buf->txqs[0].q_teid;
2647 	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
2648 
2649 	/* add a leaf node into schduler tree q layer */
2650 	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
2651 
2652 ena_txq_exit:
2653 	mutex_unlock(&pi->sched_lock);
2654 	return status;
2655 }
2656 
2657 /**
2658  * ice_dis_vsi_txq
2659  * @pi: port information structure
2660  * @num_queues: number of queues
2661  * @q_ids: pointer to the q_id array
2662  * @q_teids: pointer to queue node teids
2663  * @rst_src: if called due to reset, specifies the RST source
2664  * @vmvf_num: the relative VM or VF number that is undergoing the reset
2665  * @cd: pointer to command details structure or NULL
2666  *
2667  * This function removes queues and their corresponding nodes in SW DB
2668  */
2669 enum ice_status
2670 ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
2671 		u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num,
2672 		struct ice_sq_cd *cd)
2673 {
2674 	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
2675 	struct ice_aqc_dis_txq_item qg_list;
2676 	u16 i;
2677 
2678 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
2679 		return ICE_ERR_CFG;
2680 
2681 	/* if queue is disabled already yet the disable queue command has to be
2682 	 * sent to complete the VF reset, then call ice_aq_dis_lan_txq without
2683 	 * any queue information
2684 	 */
2685 
2686 	if (!num_queues && rst_src)
2687 		return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src, vmvf_num,
2688 					  NULL);
2689 
2690 	mutex_lock(&pi->sched_lock);
2691 
2692 	for (i = 0; i < num_queues; i++) {
2693 		struct ice_sched_node *node;
2694 
2695 		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
2696 		if (!node)
2697 			continue;
2698 		qg_list.parent_teid = node->info.parent_teid;
2699 		qg_list.num_qs = 1;
2700 		qg_list.q_id[0] = cpu_to_le16(q_ids[i]);
2701 		status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
2702 					    sizeof(qg_list), rst_src, vmvf_num,
2703 					    cd);
2704 
2705 		if (status)
2706 			break;
2707 		ice_free_sched_node(pi, node);
2708 	}
2709 	mutex_unlock(&pi->sched_lock);
2710 	return status;
2711 }
2712 
2713 /**
2714  * ice_cfg_vsi_qs - configure the new/exisiting VSI queues
2715  * @pi: port information structure
2716  * @vsi_handle: software VSI handle
2717  * @tc_bitmap: TC bitmap
2718  * @maxqs: max queues array per TC
2719  * @owner: lan or rdma
2720  *
2721  * This function adds/updates the VSI queues per TC.
2722  */
2723 static enum ice_status
2724 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
2725 	       u16 *maxqs, u8 owner)
2726 {
2727 	enum ice_status status = 0;
2728 	u8 i;
2729 
2730 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
2731 		return ICE_ERR_CFG;
2732 
2733 	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
2734 		return ICE_ERR_PARAM;
2735 
2736 	mutex_lock(&pi->sched_lock);
2737 
2738 	for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
2739 		/* configuration is possible only if TC node is present */
2740 		if (!ice_sched_get_tc_node(pi, i))
2741 			continue;
2742 
2743 		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
2744 					   ice_is_tc_ena(tc_bitmap, i));
2745 		if (status)
2746 			break;
2747 	}
2748 
2749 	mutex_unlock(&pi->sched_lock);
2750 	return status;
2751 }
2752 
2753 /**
2754  * ice_cfg_vsi_lan - configure VSI lan queues
2755  * @pi: port information structure
2756  * @vsi_handle: software VSI handle
2757  * @tc_bitmap: TC bitmap
2758  * @max_lanqs: max lan queues array per TC
2759  *
2760  * This function adds/updates the VSI lan queues per TC.
2761  */
2762 enum ice_status
2763 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
2764 		u16 *max_lanqs)
2765 {
2766 	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
2767 			      ICE_SCHED_NODE_OWNER_LAN);
2768 }
2769 
2770 /**
2771  * ice_replay_pre_init - replay pre initialization
2772  * @hw: pointer to the hw struct
2773  *
2774  * Initializes required config data for VSI, FD, ACL, and RSS before replay.
2775  */
2776 static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
2777 {
2778 	struct ice_switch_info *sw = hw->switch_info;
2779 	u8 i;
2780 
2781 	/* Delete old entries from replay filter list head if there is any */
2782 	ice_rm_all_sw_replay_rule_info(hw);
2783 	/* In start of replay, move entries into replay_rules list, it
2784 	 * will allow adding rules entries back to filt_rules list,
2785 	 * which is operational list.
2786 	 */
2787 	for (i = 0; i < ICE_SW_LKUP_LAST; i++)
2788 		list_replace_init(&sw->recp_list[i].filt_rules,
2789 				  &sw->recp_list[i].filt_replay_rules);
2790 
2791 	return 0;
2792 }
2793 
2794 /**
2795  * ice_replay_vsi - replay VSI configuration
2796  * @hw: pointer to the hw struct
2797  * @vsi_handle: driver VSI handle
2798  *
2799  * Restore all VSI configuration after reset. It is required to call this
2800  * function with main VSI first.
2801  */
2802 enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
2803 {
2804 	enum ice_status status;
2805 
2806 	if (!ice_is_vsi_valid(hw, vsi_handle))
2807 		return ICE_ERR_PARAM;
2808 
2809 	/* Replay pre-initialization if there is any */
2810 	if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
2811 		status = ice_replay_pre_init(hw);
2812 		if (status)
2813 			return status;
2814 	}
2815 
2816 	/* Replay per VSI all filters */
2817 	status = ice_replay_vsi_all_fltr(hw, vsi_handle);
2818 	return status;
2819 }
2820 
2821 /**
2822  * ice_replay_post - post replay configuration cleanup
2823  * @hw: pointer to the hw struct
2824  *
2825  * Post replay cleanup.
2826  */
2827 void ice_replay_post(struct ice_hw *hw)
2828 {
2829 	/* Delete old entries from replay filter list head */
2830 	ice_rm_all_sw_replay_rule_info(hw);
2831 }
2832 
2833 /**
2834  * ice_stat_update40 - read 40 bit stat from the chip and update stat values
2835  * @hw: ptr to the hardware info
2836  * @hireg: high 32 bit HW register to read from
2837  * @loreg: low 32 bit HW register to read from
2838  * @prev_stat_loaded: bool to specify if previous stats are loaded
2839  * @prev_stat: ptr to previous loaded stat value
2840  * @cur_stat: ptr to current stat value
2841  */
2842 void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
2843 		       bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat)
2844 {
2845 	u64 new_data;
2846 
2847 	new_data = rd32(hw, loreg);
2848 	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
2849 
2850 	/* device stats are not reset at PFR, they likely will not be zeroed
2851 	 * when the driver starts. So save the first values read and use them as
2852 	 * offsets to be subtracted from the raw values in order to report stats
2853 	 * that count from zero.
2854 	 */
2855 	if (!prev_stat_loaded)
2856 		*prev_stat = new_data;
2857 	if (new_data >= *prev_stat)
2858 		*cur_stat = new_data - *prev_stat;
2859 	else
2860 		/* to manage the potential roll-over */
2861 		*cur_stat = (new_data + BIT_ULL(40)) - *prev_stat;
2862 	*cur_stat &= 0xFFFFFFFFFFULL;
2863 }
2864 
2865 /**
2866  * ice_stat_update32 - read 32 bit stat from the chip and update stat values
2867  * @hw: ptr to the hardware info
2868  * @reg: HW register to read from
2869  * @prev_stat_loaded: bool to specify if previous stats are loaded
2870  * @prev_stat: ptr to previous loaded stat value
2871  * @cur_stat: ptr to current stat value
2872  */
2873 void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
2874 		       u64 *prev_stat, u64 *cur_stat)
2875 {
2876 	u32 new_data;
2877 
2878 	new_data = rd32(hw, reg);
2879 
2880 	/* device stats are not reset at PFR, they likely will not be zeroed
2881 	 * when the driver starts. So save the first values read and use them as
2882 	 * offsets to be subtracted from the raw values in order to report stats
2883 	 * that count from zero.
2884 	 */
2885 	if (!prev_stat_loaded)
2886 		*prev_stat = new_data;
2887 	if (new_data >= *prev_stat)
2888 		*cur_stat = new_data - *prev_stat;
2889 	else
2890 		/* to manage the potential roll-over */
2891 		*cur_stat = (new_data + BIT_ULL(32)) - *prev_stat;
2892 }
2893