1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 #include "ice_common.h"
5 #include "ice_sched.h"
6 #include "ice_adminq_cmd.h"
7 
8 #define ICE_PF_RESET_WAIT_COUNT	200
9 
10 #define ICE_PROG_FLEX_ENTRY(hw, rxdid, mdid, idx) \
11 	wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(rxdid), \
12 	     ((ICE_RX_OPC_MDID << \
13 	       GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_S) & \
14 	      GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_M) | \
15 	     (((mdid) << GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_S) & \
16 	      GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_M))
17 
18 #define ICE_PROG_FLG_ENTRY(hw, rxdid, flg_0, flg_1, flg_2, flg_3, idx) \
19 	wr32((hw), GLFLXP_RXDID_FLAGS(rxdid, idx), \
20 	     (((flg_0) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & \
21 	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) | \
22 	     (((flg_1) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) & \
23 	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M) | \
24 	     (((flg_2) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S) & \
25 	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M) | \
26 	     (((flg_3) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S) & \
27 	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M))
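
/* For illustration (a sketch derived from the macros above, not extra
 * functionality): with rxdid = ICE_RXDID_FLEX_NIC and idx = 0, the flex-entry
 * macro pastes the index into the register and field names and expands
 * roughly to
 *
 *	wr32(hw, GLFLXP_RXDID_FLX_WRD_0(ICE_RXDID_FLEX_NIC),
 *	     ((ICE_RX_OPC_MDID << GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_S) &
 *	      GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_M) |
 *	     (((mdid) << GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S) &
 *	      GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_M));
 *
 * i.e. each invocation programs one metadata ID into one flex word of the
 * given Rx descriptor profile.
 */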
28 
29 /**
30  * ice_set_mac_type - Sets MAC type
31  * @hw: pointer to the HW structure
32  *
33  * This function sets the MAC type of the adapter based on the
34  * vendor ID and device ID stored in the hw structure.
35  */
36 static enum ice_status ice_set_mac_type(struct ice_hw *hw)
37 {
38 	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
39 		return ICE_ERR_DEVICE_NOT_SUPPORTED;
40 
41 	hw->mac_type = ICE_MAC_GENERIC;
42 	return 0;
43 }
44 
45 /**
46  * ice_dev_onetime_setup - Temporary HW/FW workarounds
47  * @hw: pointer to the HW structure
48  *
49  * This function provides temporary workarounds for certain issues
50  * that are expected to be fixed in the HW/FW.
51  */
52 void ice_dev_onetime_setup(struct ice_hw *hw)
53 {
54 	/* configure Rx - set non pxe mode */
55 	wr32(hw, GLLAN_RCTL_0, 0x1);
56 
57 #define MBX_PF_VT_PFALLOC	0x00231E80
58 	/* set VFs per PF */
59 	wr32(hw, MBX_PF_VT_PFALLOC, rd32(hw, PF_VT_PFALLOC_HIF));
60 }
61 
62 /**
63  * ice_clear_pf_cfg - Clear PF configuration
64  * @hw: pointer to the hardware structure
65  *
66  * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
67  * configuration, flow director filters, etc.).
68  */
69 enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
70 {
71 	struct ice_aq_desc desc;
72 
73 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
74 
75 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
76 }
77 
78 /**
79  * ice_aq_manage_mac_read - manage MAC address read command
80  * @hw: pointer to the hw struct
81  * @buf: a virtual buffer to hold the manage MAC read response
82  * @buf_size: Size of the virtual buffer
83  * @cd: pointer to command details structure or NULL
84  *
85  * This function is used to return per-PF station MAC address (0x0107).
86  * NOTE: Upon successful completion of this command, MAC address information
87  * is returned in the user-specified buffer, which should be interpreted as a
88  * "manage_mac_read" response.
89  * The returned MAC addresses are also stored in the HW struct (port.mac).
90  * ice_aq_discover_caps is expected to be called before this function.
91  */
92 static enum ice_status
93 ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
94 		       struct ice_sq_cd *cd)
95 {
96 	struct ice_aqc_manage_mac_read_resp *resp;
97 	struct ice_aqc_manage_mac_read *cmd;
98 	struct ice_aq_desc desc;
99 	enum ice_status status;
100 	u16 flags;
101 	u8 i;
102 
103 	cmd = &desc.params.mac_read;
104 
105 	if (buf_size < sizeof(*resp))
106 		return ICE_ERR_BUF_TOO_SHORT;
107 
108 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
109 
110 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
111 	if (status)
112 		return status;
113 
114 	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
115 	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
116 
117 	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
118 		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
119 		return ICE_ERR_CFG;
120 	}
121 
122 	/* A single port can report up to two (LAN and WoL) addresses */
123 	for (i = 0; i < cmd->num_addr; i++)
124 		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
125 			ether_addr_copy(hw->port_info->mac.lan_addr,
126 					resp[i].mac_addr);
127 			ether_addr_copy(hw->port_info->mac.perm_addr,
128 					resp[i].mac_addr);
129 			break;
130 		}
131 
132 	return 0;
133 }
134 
135 /**
136  * ice_aq_get_phy_caps - returns PHY capabilities
137  * @pi: port information structure
138  * @qual_mods: report qualified modules
139  * @report_mode: report mode capabilities
140  * @pcaps: structure for PHY capabilities to be filled
141  * @cd: pointer to command details structure or NULL
142  *
143  * Returns the various PHY capabilities supported on the Port (0x0600)
144  */
145 enum ice_status
146 ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
147 		    struct ice_aqc_get_phy_caps_data *pcaps,
148 		    struct ice_sq_cd *cd)
149 {
150 	struct ice_aqc_get_phy_caps *cmd;
151 	u16 pcaps_size = sizeof(*pcaps);
152 	struct ice_aq_desc desc;
153 	enum ice_status status;
154 
155 	cmd = &desc.params.get_phy;
156 
157 	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
158 		return ICE_ERR_PARAM;
159 
160 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
161 
162 	if (qual_mods)
163 		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);
164 
165 	cmd->param0 |= cpu_to_le16(report_mode);
166 	status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);
167 
168 	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP)
169 		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
170 
171 	return status;
172 }
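
/* Usage sketch (illustrative only, mirroring the call made in ice_init_hw()
 * below): callers allocate a struct ice_aqc_get_phy_caps_data and query the
 * port, e.g.
 *
 *	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
 *	if (pcaps)
 *		status = ice_aq_get_phy_caps(hw->port_info, false,
 *					     ICE_AQC_REPORT_TOPO_CAP,
 *					     pcaps, NULL);
 *
 * Note that ICE_AQC_REPORT_TOPO_CAP also refreshes pi->phy.phy_type_low as a
 * side effect (see above); other report modes leave it untouched.
 */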
173 
174 /**
175  * ice_get_media_type - Gets media type
176  * @pi: port information structure
177  */
178 static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
179 {
180 	struct ice_link_status *hw_link_info;
181 
182 	if (!pi)
183 		return ICE_MEDIA_UNKNOWN;
184 
185 	hw_link_info = &pi->phy.link_info;
186 
187 	if (hw_link_info->phy_type_low) {
188 		switch (hw_link_info->phy_type_low) {
189 		case ICE_PHY_TYPE_LOW_1000BASE_SX:
190 		case ICE_PHY_TYPE_LOW_1000BASE_LX:
191 		case ICE_PHY_TYPE_LOW_10GBASE_SR:
192 		case ICE_PHY_TYPE_LOW_10GBASE_LR:
193 		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
194 		case ICE_PHY_TYPE_LOW_25GBASE_SR:
195 		case ICE_PHY_TYPE_LOW_25GBASE_LR:
196 		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
197 		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
198 		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
199 			return ICE_MEDIA_FIBER;
200 		case ICE_PHY_TYPE_LOW_100BASE_TX:
201 		case ICE_PHY_TYPE_LOW_1000BASE_T:
202 		case ICE_PHY_TYPE_LOW_2500BASE_T:
203 		case ICE_PHY_TYPE_LOW_5GBASE_T:
204 		case ICE_PHY_TYPE_LOW_10GBASE_T:
205 		case ICE_PHY_TYPE_LOW_25GBASE_T:
206 			return ICE_MEDIA_BASET;
207 		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
208 		case ICE_PHY_TYPE_LOW_25GBASE_CR:
209 		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
210 		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
211 		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
212 			return ICE_MEDIA_DA;
213 		case ICE_PHY_TYPE_LOW_1000BASE_KX:
214 		case ICE_PHY_TYPE_LOW_2500BASE_KX:
215 		case ICE_PHY_TYPE_LOW_2500BASE_X:
216 		case ICE_PHY_TYPE_LOW_5GBASE_KR:
217 		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
218 		case ICE_PHY_TYPE_LOW_25GBASE_KR:
219 		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
220 		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
221 		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
222 			return ICE_MEDIA_BACKPLANE;
223 		}
224 	}
225 
226 	return ICE_MEDIA_UNKNOWN;
227 }
228 
229 /**
230  * ice_aq_get_link_info
231  * @pi: port information structure
232  * @ena_lse: enable/disable LinkStatusEvent reporting
233  * @link: pointer to link status structure - optional
234  * @cd: pointer to command details structure or NULL
235  *
236  * Get Link Status (0x0607). Returns the link status of the adapter.
237  */
238 static enum ice_status
239 ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
240 		     struct ice_link_status *link, struct ice_sq_cd *cd)
241 {
242 	struct ice_link_status *hw_link_info_old, *hw_link_info;
243 	struct ice_aqc_get_link_status_data link_data = { 0 };
244 	struct ice_aqc_get_link_status *resp;
245 	enum ice_media_type *hw_media_type;
246 	struct ice_fc_info *hw_fc_info;
247 	bool tx_pause, rx_pause;
248 	struct ice_aq_desc desc;
249 	enum ice_status status;
250 	u16 cmd_flags;
251 
252 	if (!pi)
253 		return ICE_ERR_PARAM;
254 	hw_link_info_old = &pi->phy.link_info_old;
255 	hw_media_type = &pi->phy.media_type;
256 	hw_link_info = &pi->phy.link_info;
257 	hw_fc_info = &pi->fc;
258 
259 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
260 	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
261 	resp = &desc.params.get_link_status;
262 	resp->cmd_flags = cpu_to_le16(cmd_flags);
263 	resp->lport_num = pi->lport;
264 
265 	status = ice_aq_send_cmd(pi->hw, &desc, &link_data, sizeof(link_data),
266 				 cd);
267 
268 	if (status)
269 		return status;
270 
271 	/* save off old link status information */
272 	*hw_link_info_old = *hw_link_info;
273 
274 	/* update current link status information */
275 	hw_link_info->link_speed = le16_to_cpu(link_data.link_speed);
276 	hw_link_info->phy_type_low = le64_to_cpu(link_data.phy_type_low);
277 	*hw_media_type = ice_get_media_type(pi);
278 	hw_link_info->link_info = link_data.link_info;
279 	hw_link_info->an_info = link_data.an_info;
280 	hw_link_info->ext_info = link_data.ext_info;
281 	hw_link_info->max_frame_size = le16_to_cpu(link_data.max_frame_size);
282 	hw_link_info->pacing = link_data.cfg & ICE_AQ_CFG_PACING_M;
283 
284 	/* update fc info */
285 	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
286 	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
287 	if (tx_pause && rx_pause)
288 		hw_fc_info->current_mode = ICE_FC_FULL;
289 	else if (tx_pause)
290 		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
291 	else if (rx_pause)
292 		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
293 	else
294 		hw_fc_info->current_mode = ICE_FC_NONE;
295 
296 	hw_link_info->lse_ena =
297 		!!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));
298 
299 	/* save link status information */
300 	if (link)
301 		*link = *hw_link_info;
302 
303 	/* flag cleared so calling functions don't call AQ again */
304 	pi->phy.get_link_info = false;
305 
306 	return status;
307 }
308 
309 /**
310  * ice_init_flex_flags
311  * @hw: pointer to the hardware structure
312  * @prof_id: Rx Descriptor Builder profile ID
313  *
314  * Function to initialize Rx flex flags
315  */
316 static void ice_init_flex_flags(struct ice_hw *hw, enum ice_rxdid prof_id)
317 {
318 	u8 idx = 0;
319 
320 	/* Flex-flag fields (0-2) are programmed with FLG64 bits with layout:
321 	 * flexiflags0[5:0] - TCP flags, is_packet_fragmented, is_packet_UDP_GRE
322 	 * flexiflags1[3:0] - Not used for flag programming
323 	 * flexiflags2[7:0] - Tunnel and VLAN types
324 	 * 2 invalid fields in last index
325 	 */
326 	switch (prof_id) {
327 	/* Rx flex flags are currently programmed for the NIC profiles only.
328 	 * Different flag bit programming configurations can be added per
329 	 * profile as needed.
330 	 */
331 	case ICE_RXDID_FLEX_NIC:
332 	case ICE_RXDID_FLEX_NIC_2:
333 		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_FRG,
334 				   ICE_RXFLG_UDP_GRE, ICE_RXFLG_PKT_DSI,
335 				   ICE_RXFLG_FIN, idx++);
336 		/* flex flag 1 is not used for flexi-flag programming, skipping
337 		 * these four FLG64 bits.
338 		 */
339 		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_SYN, ICE_RXFLG_RST,
340 				   ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++);
341 		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_DSI,
342 				   ICE_RXFLG_PKT_DSI, ICE_RXFLG_EVLAN_x8100,
343 				   ICE_RXFLG_EVLAN_x9100, idx++);
344 		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_VLAN_x8100,
345 				   ICE_RXFLG_TNL_VLAN, ICE_RXFLG_TNL_MAC,
346 				   ICE_RXFLG_TNL0, idx++);
347 		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2,
348 				   ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx);
349 		break;
350 
351 	default:
352 		ice_debug(hw, ICE_DBG_INIT,
353 			  "Flag programming for profile ID %d not supported\n",
354 			  prof_id);
355 	}
356 }
357 
358 /**
359  * ice_init_flex_flds
360  * @hw: pointer to the hardware structure
361  * @prof_id: Rx Descriptor Builder profile ID
362  *
363  * Function to initialize flex descriptors
364  */
365 static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id)
366 {
367 	enum ice_flex_rx_mdid mdid;
368 
369 	switch (prof_id) {
370 	case ICE_RXDID_FLEX_NIC:
371 	case ICE_RXDID_FLEX_NIC_2:
372 		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_LOW, 0);
373 		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_HIGH, 1);
374 		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_FLOW_ID_LOWER, 2);
375 
376 		mdid = (prof_id == ICE_RXDID_FLEX_NIC_2) ?
377 			ICE_RX_MDID_SRC_VSI : ICE_RX_MDID_FLOW_ID_HIGH;
378 
379 		ICE_PROG_FLEX_ENTRY(hw, prof_id, mdid, 3);
380 
381 		ice_init_flex_flags(hw, prof_id);
382 		break;
383 
384 	default:
385 		ice_debug(hw, ICE_DBG_INIT,
386 			  "Field init for profile ID %d not supported\n",
387 			  prof_id);
388 	}
389 }
390 
391 /**
392  * ice_init_fltr_mgmt_struct - initializes filter management list and locks
393  * @hw: pointer to the hw struct
394  */
395 static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
396 {
397 	struct ice_switch_info *sw;
398 
399 	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
400 				       sizeof(*hw->switch_info), GFP_KERNEL);
401 	sw = hw->switch_info;
402 
403 	if (!sw)
404 		return ICE_ERR_NO_MEMORY;
405 
406 	INIT_LIST_HEAD(&sw->vsi_list_map_head);
407 
408 	ice_init_def_sw_recp(hw);
409 
410 	return 0;
411 }
412 
413 /**
414  * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
415  * @hw: pointer to the hw struct
416  */
417 static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
418 {
419 	struct ice_switch_info *sw = hw->switch_info;
420 	struct ice_vsi_list_map_info *v_pos_map;
421 	struct ice_vsi_list_map_info *v_tmp_map;
422 	struct ice_sw_recipe *recps;
423 	u8 i;
424 
425 	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
426 				 list_entry) {
427 		list_del(&v_pos_map->list_entry);
428 		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
429 	}
430 	recps = hw->switch_info->recp_list;
431 	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
432 		struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
433 
434 		recps[i].root_rid = i;
435 		mutex_destroy(&recps[i].filt_rule_lock);
436 		list_for_each_entry_safe(lst_itr, tmp_entry,
437 					 &recps[i].filt_rules, list_entry) {
438 			list_del(&lst_itr->list_entry);
439 			devm_kfree(ice_hw_to_dev(hw), lst_itr);
440 		}
441 	}
442 	ice_rm_all_sw_replay_rule_info(hw);
443 	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
444 	devm_kfree(ice_hw_to_dev(hw), sw);
445 }
446 
447 #define ICE_FW_LOG_DESC_SIZE(n)	(sizeof(struct ice_aqc_fw_logging_data) + \
448 	(((n) - 1) * sizeof(((struct ice_aqc_fw_logging_data *)0)->entry)))
449 #define ICE_FW_LOG_DESC_SIZE_MAX	\
450 	ICE_FW_LOG_DESC_SIZE(ICE_AQC_FW_LOG_ID_MAX)
451 
452 /**
453  * ice_cfg_fw_log - configure FW logging
454  * @hw: pointer to the hw struct
455  * @enable: enable certain FW logging events if true, disable all if false
456  *
457  * This function enables/disables the FW logging via Rx CQ events and a UART
458  * port based on predetermined configurations. FW logging via the Rx CQ can be
459  * enabled/disabled for individual PFs. However, FW logging via the UART can
460  * only be enabled/disabled for all PFs on the same device.
461  *
462  * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
463  * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
464  * before initializing the device.
465  *
466  * When re/configuring FW logging, callers need to update the "cfg" elements of
467  * the hw->fw_log.evnts array with the desired logging event configurations for
468  * modules of interest. When disabling FW logging completely, the callers can
469  * just pass false in the "enable" parameter. On completion, the function will
470  * update the "cur" element of the hw->fw_log.evnts array with the resulting
471  * logging event configurations of the modules that are being re/configured. FW
472  * logging modules that are not part of a reconfiguration operation retain their
473  * previous states.
474  *
475  * Before resetting the device, it is recommended that the driver disable FW
476  * logging before shutting down the control queue. When disabling FW logging
477  * ("enable" = false), the latest configurations of FW logging events stored in
478  * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
479  * a device reset.
480  *
481  * When enabling FW logging to emit log messages via the Rx CQ during the
482  * device's initialization phase, a mechanism alternative to interrupt handlers
483  * needs to be used to extract FW log messages from the Rx CQ periodically and
484  * to prevent the Rx CQ from being full and stalling other types of control
485  * messages from FW to SW. Interrupts are typically disabled during the device's
486  * initialization phase.
487  */
488 static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
489 {
490 	struct ice_aqc_fw_logging_data *data = NULL;
491 	struct ice_aqc_fw_logging *cmd;
492 	enum ice_status status = 0;
493 	u16 i, chgs = 0, len = 0;
494 	struct ice_aq_desc desc;
495 	u8 actv_evnts = 0;
496 	void *buf = NULL;
497 
498 	if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
499 		return 0;
500 
501 	/* Disable FW logging only when the control queue is still responsive */
502 	if (!enable &&
503 	    (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
504 		return 0;
505 
506 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
507 	cmd = &desc.params.fw_logging;
508 
509 	/* Indicate which controls are valid */
510 	if (hw->fw_log.cq_en)
511 		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;
512 
513 	if (hw->fw_log.uart_en)
514 		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;
515 
516 	if (enable) {
517 		/* Fill in an array of entries with FW logging modules and
518 		 * logging events being reconfigured.
519 		 */
520 		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
521 			u16 val;
522 
523 			/* Keep track of enabled event types */
524 			actv_evnts |= hw->fw_log.evnts[i].cfg;
525 
526 			if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
527 				continue;
528 
529 			if (!data) {
530 				data = devm_kzalloc(ice_hw_to_dev(hw),
531 						    ICE_FW_LOG_DESC_SIZE_MAX,
532 						    GFP_KERNEL);
533 				if (!data)
534 					return ICE_ERR_NO_MEMORY;
535 			}
536 
537 			val = i << ICE_AQC_FW_LOG_ID_S;
538 			val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
539 			data->entry[chgs++] = cpu_to_le16(val);
540 		}
541 
542 		/* Only enable FW logging if at least one module is specified.
543 		 * If FW logging is currently enabled but all modules are not
544 		 * enabled to emit log messages, disable FW logging altogether.
545 		 */
546 		if (actv_evnts) {
547 			/* Leave if there is effectively no change */
548 			if (!chgs)
549 				goto out;
550 
551 			if (hw->fw_log.cq_en)
552 				cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;
553 
554 			if (hw->fw_log.uart_en)
555 				cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;
556 
557 			buf = data;
558 			len = ICE_FW_LOG_DESC_SIZE(chgs);
559 			desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
560 		}
561 	}
562 
563 	status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
564 	if (!status) {
565 		/* Update the current configuration to reflect events enabled.
566 		 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
567 		 * logging mode is enabled for the device. They do not reflect
568 		 * actual modules being enabled to emit log messages. So, their
569 		 * values remain unchanged even when all modules are disabled.
570 		 */
571 		u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;
572 
573 		hw->fw_log.actv_evnts = actv_evnts;
574 		for (i = 0; i < cnt; i++) {
575 			u16 v, m;
576 
577 			if (!enable) {
578 				/* When disabling all FW logging events as part
579 				 * of device's de-initialization, the original
580 				 * configurations are retained, and can be used
581 				 * to reconfigure FW logging later if the device
582 				 * is re-initialized.
583 				 */
584 				hw->fw_log.evnts[i].cur = 0;
585 				continue;
586 			}
587 
588 			v = le16_to_cpu(data->entry[i]);
589 			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
590 			hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
591 		}
592 	}
593 
594 out:
595 	if (data)
596 		devm_kfree(ice_hw_to_dev(hw), data);
597 
598 	return status;
599 }
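
/* Caller sketch (illustrative only, not part of the driver): per the
 * description above, FW logging is enabled by seeding hw->fw_log before
 * device init and then calling this function once the admin queue is up.
 * The per-module event mask below is a placeholder, not a recommended
 * setting.
 *
 *	hw->fw_log.cq_en = true;
 *	for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++)
 *		hw->fw_log.evnts[i].cfg = <desired event mask for module i>;
 *	...
 *	status = ice_cfg_fw_log(hw, true);
 */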
600 
601 /**
602  * ice_output_fw_log
603  * @hw: pointer to the hw struct
604  * @desc: pointer to the AQ message descriptor
605  * @buf: pointer to the buffer accompanying the AQ message
606  *
607  * Formats a FW Log message and outputs it via the standard driver logs.
608  */
609 void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
610 {
611 	ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg Start ]\n");
612 	ice_debug_array(hw, ICE_DBG_AQ_MSG, 16, 1, (u8 *)buf,
613 			le16_to_cpu(desc->datalen));
614 	ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg End ]\n");
615 }
616 
617 /**
618  * ice_get_itr_intrl_gran - determine itr/intrl granularity
619  * @hw: pointer to the hw struct
620  *
621  * Determines the itr/intrl granularities based on the maximum aggregate
622  * bandwidth according to the device's configuration during power-on.
623  */
624 static enum ice_status ice_get_itr_intrl_gran(struct ice_hw *hw)
625 {
626 	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
627 			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
628 			GL_PWR_MODE_CTL_CAR_MAX_BW_S;
629 
630 	switch (max_agg_bw) {
631 	case ICE_MAX_AGG_BW_200G:
632 	case ICE_MAX_AGG_BW_100G:
633 	case ICE_MAX_AGG_BW_50G:
634 		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
635 		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
636 		break;
637 	case ICE_MAX_AGG_BW_25G:
638 		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
639 		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
640 		break;
641 	default:
642 		ice_debug(hw, ICE_DBG_INIT,
643 			  "Failed to determine itr/intrl granularity\n");
644 		return ICE_ERR_CFG;
645 	}
646 
647 	return 0;
648 }
649 
650 /**
651  * ice_init_hw - main hardware initialization routine
652  * @hw: pointer to the hardware structure
653  */
654 enum ice_status ice_init_hw(struct ice_hw *hw)
655 {
656 	struct ice_aqc_get_phy_caps_data *pcaps;
657 	enum ice_status status;
658 	u16 mac_buf_len;
659 	void *mac_buf;
660 
661 	/* Set MAC type based on DeviceID */
662 	status = ice_set_mac_type(hw);
663 	if (status)
664 		return status;
665 
666 	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
667 			 PF_FUNC_RID_FUNC_NUM_M) >>
668 		PF_FUNC_RID_FUNC_NUM_S;
669 
670 	status = ice_reset(hw, ICE_RESET_PFR);
671 	if (status)
672 		return status;
673 
674 	status = ice_get_itr_intrl_gran(hw);
675 	if (status)
676 		return status;
677 
678 	status = ice_init_all_ctrlq(hw);
679 	if (status)
680 		goto err_unroll_cqinit;
681 
682 	/* Enable FW logging. Not fatal if this fails. */
683 	status = ice_cfg_fw_log(hw, true);
684 	if (status)
685 		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");
686 
687 	status = ice_clear_pf_cfg(hw);
688 	if (status)
689 		goto err_unroll_cqinit;
690 
691 	ice_clear_pxe_mode(hw);
692 
693 	status = ice_init_nvm(hw);
694 	if (status)
695 		goto err_unroll_cqinit;
696 
697 	status = ice_get_caps(hw);
698 	if (status)
699 		goto err_unroll_cqinit;
700 
701 	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
702 				     sizeof(*hw->port_info), GFP_KERNEL);
703 	if (!hw->port_info) {
704 		status = ICE_ERR_NO_MEMORY;
705 		goto err_unroll_cqinit;
706 	}
707 
708 	/* set the back pointer to hw */
709 	hw->port_info->hw = hw;
710 
711 	/* Initialize port_info struct with switch configuration data */
712 	status = ice_get_initial_sw_cfg(hw);
713 	if (status)
714 		goto err_unroll_alloc;
715 
716 	hw->evb_veb = true;
717 
718 	/* Query the allocated resources for tx scheduler */
719 	status = ice_sched_query_res_alloc(hw);
720 	if (status) {
721 		ice_debug(hw, ICE_DBG_SCHED,
722 			  "Failed to get scheduler allocated resources\n");
723 		goto err_unroll_alloc;
724 	}
725 
726 	/* Initialize port_info struct with scheduler data */
727 	status = ice_sched_init_port(hw->port_info);
728 	if (status)
729 		goto err_unroll_sched;
730 
731 	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
732 	if (!pcaps) {
733 		status = ICE_ERR_NO_MEMORY;
734 		goto err_unroll_sched;
735 	}
736 
737 	/* Initialize port_info struct with PHY capabilities */
738 	status = ice_aq_get_phy_caps(hw->port_info, false,
739 				     ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
740 	devm_kfree(ice_hw_to_dev(hw), pcaps);
741 	if (status)
742 		goto err_unroll_sched;
743 
744 	/* Initialize port_info struct with link information */
745 	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
746 	if (status)
747 		goto err_unroll_sched;
748 
749 	/* need a valid SW entry point to build a Tx tree */
750 	if (!hw->sw_entry_point_layer) {
751 		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
752 		status = ICE_ERR_CFG;
753 		goto err_unroll_sched;
754 	}
755 
756 	status = ice_init_fltr_mgmt_struct(hw);
757 	if (status)
758 		goto err_unroll_sched;
759 
760 	ice_dev_onetime_setup(hw);
761 
762 	/* Get MAC information */
763 	/* A single port can report up to two (LAN and WoL) addresses */
764 	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
765 			       sizeof(struct ice_aqc_manage_mac_read_resp),
766 			       GFP_KERNEL);
767 	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
768 
769 	if (!mac_buf) {
770 		status = ICE_ERR_NO_MEMORY;
771 		goto err_unroll_fltr_mgmt_struct;
772 	}
773 
774 	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
775 	devm_kfree(ice_hw_to_dev(hw), mac_buf);
776 
777 	if (status)
778 		goto err_unroll_fltr_mgmt_struct;
779 
780 	ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC);
781 	ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC_2);
782 
783 	return 0;
784 
785 err_unroll_fltr_mgmt_struct:
786 	ice_cleanup_fltr_mgmt_struct(hw);
787 err_unroll_sched:
788 	ice_sched_cleanup_all(hw);
789 err_unroll_alloc:
790 	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
791 err_unroll_cqinit:
792 	ice_shutdown_all_ctrlq(hw);
793 	return status;
794 }
795 
796 /**
797  * ice_deinit_hw - unroll initialization operations done by ice_init_hw
798  * @hw: pointer to the hardware structure
799  */
800 void ice_deinit_hw(struct ice_hw *hw)
801 {
802 	ice_cleanup_fltr_mgmt_struct(hw);
803 
804 	ice_sched_cleanup_all(hw);
805 
806 	if (hw->port_info) {
807 		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
808 		hw->port_info = NULL;
809 	}
810 
811 	/* Attempt to disable FW logging before shutting down control queues */
812 	ice_cfg_fw_log(hw, false);
813 	ice_shutdown_all_ctrlq(hw);
814 }
815 
816 /**
817  * ice_check_reset - Check to see if a global reset is complete
818  * @hw: pointer to the hardware structure
819  */
820 enum ice_status ice_check_reset(struct ice_hw *hw)
821 {
822 	u32 cnt, reg = 0, grst_delay;
823 
824 	/* Poll for Device Active state in case a recent CORER, GLOBR,
825 	 * or EMPR has occurred. The grst delay value is in 100ms units.
826 	 * Add 1sec for outstanding AQ commands that can take a long time.
827 	 */
828 	grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
829 		      GLGEN_RSTCTL_GRSTDEL_S) + 10;
830 
831 	for (cnt = 0; cnt < grst_delay; cnt++) {
832 		mdelay(100);
833 		reg = rd32(hw, GLGEN_RSTAT);
834 		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
835 			break;
836 	}
837 
838 	if (cnt == grst_delay) {
839 		ice_debug(hw, ICE_DBG_INIT,
840 			  "Global reset polling failed to complete.\n");
841 		return ICE_ERR_RESET_FAILED;
842 	}
843 
844 #define ICE_RESET_DONE_MASK	(GLNVM_ULD_CORER_DONE_M | \
845 				 GLNVM_ULD_GLOBR_DONE_M)
846 
847 	/* Device is Active; check Global Reset processes are done */
848 	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
849 		reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK;
850 		if (reg == ICE_RESET_DONE_MASK) {
851 			ice_debug(hw, ICE_DBG_INIT,
852 				  "Global reset processes done. %d\n", cnt);
853 			break;
854 		}
855 		mdelay(10);
856 	}
857 
858 	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
859 		ice_debug(hw, ICE_DBG_INIT,
860 			  "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
861 			  reg);
862 		return ICE_ERR_RESET_FAILED;
863 	}
864 
865 	return 0;
866 }
867 
868 /**
869  * ice_pf_reset - Reset the PF
870  * @hw: pointer to the hardware structure
871  *
872  * If a global reset has been triggered, this function checks
873  * for its completion and then issues the PF reset
874  */
875 static enum ice_status ice_pf_reset(struct ice_hw *hw)
876 {
877 	u32 cnt, reg;
878 
879 	/* If at function entry a global reset was already in progress, i.e.
880 	 * state is not 'device active' or any of the reset done bits are not
881 	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
882 	 * global reset is done.
883 	 */
884 	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
885 	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
886 		/* poll on global reset currently in progress until done */
887 		if (ice_check_reset(hw))
888 			return ICE_ERR_RESET_FAILED;
889 
890 		return 0;
891 	}
892 
893 	/* Reset the PF */
894 	reg = rd32(hw, PFGEN_CTRL);
895 
896 	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
897 
898 	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
899 		reg = rd32(hw, PFGEN_CTRL);
900 		if (!(reg & PFGEN_CTRL_PFSWR_M))
901 			break;
902 
903 		mdelay(1);
904 	}
905 
906 	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
907 		ice_debug(hw, ICE_DBG_INIT,
908 			  "PF reset polling failed to complete.\n");
909 		return ICE_ERR_RESET_FAILED;
910 	}
911 
912 	return 0;
913 }
914 
915 /**
916  * ice_reset - Perform different types of reset
917  * @hw: pointer to the hardware structure
918  * @req: reset request
919  *
920  * This function triggers a reset as specified by the req parameter.
921  *
922  * Note:
923  * If anything other than a PF reset is triggered, PXE mode is restored.
924  * This has to be cleared using ice_clear_pxe_mode again, once the AQ
925  * interface has been restored in the rebuild flow.
926  */
927 enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
928 {
929 	u32 val = 0;
930 
931 	switch (req) {
932 	case ICE_RESET_PFR:
933 		return ice_pf_reset(hw);
934 	case ICE_RESET_CORER:
935 		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
936 		val = GLGEN_RTRIG_CORER_M;
937 		break;
938 	case ICE_RESET_GLOBR:
939 		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
940 		val = GLGEN_RTRIG_GLOBR_M;
941 		break;
942 	default:
943 		return ICE_ERR_PARAM;
944 	}
945 
946 	val |= rd32(hw, GLGEN_RTRIG);
947 	wr32(hw, GLGEN_RTRIG, val);
948 	ice_flush(hw);
949 
950 	/* wait for the FW to be ready */
951 	return ice_check_reset(hw);
952 }
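
/* Rebuild-flow sketch (illustrative only): per the note above, anything other
 * than a PF reset leaves the device in PXE mode, so a hypothetical rebuild
 * path would look roughly like
 *
 *	ice_reset(hw, ICE_RESET_CORER);
 *	ice_init_all_ctrlq(hw);		...restore the AQ interface...
 *	ice_clear_pxe_mode(hw);
 */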
953 
954 /**
955  * ice_copy_rxq_ctx_to_hw
956  * @hw: pointer to the hardware structure
957  * @ice_rxq_ctx: pointer to the rxq context
958  * @rxq_index: the index of the rx queue
959  *
960  * Copies rxq context from dense structure to hw register space
961  */
962 static enum ice_status
963 ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
964 {
965 	u8 i;
966 
967 	if (!ice_rxq_ctx)
968 		return ICE_ERR_BAD_PTR;
969 
970 	if (rxq_index > QRX_CTRL_MAX_INDEX)
971 		return ICE_ERR_PARAM;
972 
973 	/* Copy each dword separately to hw */
974 	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
975 		wr32(hw, QRX_CONTEXT(i, rxq_index),
976 		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
977 
978 		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
979 			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
980 	}
981 
982 	return 0;
983 }
984 
985 /* LAN Rx Queue Context */
986 static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
987 	/* Field		Width	LSB */
988 	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
989 	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
990 	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
991 	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
992 	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
993 	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
994 	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
995 	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
996 	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
997 	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
998 	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
999 	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
1000 	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
1001 	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
1002 	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
1003 	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
1004 	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
1005 	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
1006 	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
1007 	{ 0 }
1008 };
1009 
1010 /**
1011  * ice_write_rxq_ctx
1012  * @hw: pointer to the hardware structure
1013  * @rlan_ctx: pointer to the rxq context
1014  * @rxq_index: the index of the rx queue
1015  *
1016  * Converts rxq context from sparse to dense structure and then writes
1017  * it to hw register space
1018  */
1019 enum ice_status
1020 ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
1021 		  u32 rxq_index)
1022 {
1023 	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
1024 
1025 	ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
1026 	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
1027 }
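
/* Usage sketch (illustrative only): a caller fills the sparse context and
 * lets ice_write_rxq_ctx() pack it into a dense buffer and program the
 * registers. The field values and the base shift below are assumptions used
 * for illustration, not recommended settings.
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = ring_dma >> 7;		...base in 128-byte units...
 *	rlan_ctx.qlen = ring_count;
 *	rlan_ctx.rxmax = max_frame_size;
 *	status = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 */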
1028 
1029 /* LAN Tx Queue Context */
1030 const struct ice_ctx_ele ice_tlan_ctx_info[] = {
1031 				    /* Field			Width	LSB */
1032 	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
1033 	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
1034 	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
1035 	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
1036 	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
1037 	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
1038 	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
1039 	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
1040 	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
1041 	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
1042 	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
1043 	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
1044 	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
1045 	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
1046 	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
1047 	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
1048 	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
1049 	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
1050 	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
1051 	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
1052 	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
1053 	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
1054 	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
1055 	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
1056 	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
1057 	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
1058 	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		110,	171),
1059 	{ 0 }
1060 };
1061 
1062 /**
1063  * ice_debug_cq
1064  * @hw: pointer to the hardware structure
1065  * @mask: debug mask
1066  * @desc: pointer to control queue descriptor
1067  * @buf: pointer to command buffer
1068  * @buf_len: max length of buf
1069  *
1070  * Dumps debug log about control command with descriptor contents.
1071  */
1072 void ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc,
1073 		  void *buf, u16 buf_len)
1074 {
1075 	struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
1076 	u16 len;
1077 
1078 #ifndef CONFIG_DYNAMIC_DEBUG
1079 	if (!(mask & hw->debug_mask))
1080 		return;
1081 #endif
1082 
1083 	if (!desc)
1084 		return;
1085 
1086 	len = le16_to_cpu(cq_desc->datalen);
1087 
1088 	ice_debug(hw, mask,
1089 		  "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
1090 		  le16_to_cpu(cq_desc->opcode),
1091 		  le16_to_cpu(cq_desc->flags),
1092 		  le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
1093 	ice_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
1094 		  le32_to_cpu(cq_desc->cookie_high),
1095 		  le32_to_cpu(cq_desc->cookie_low));
1096 	ice_debug(hw, mask, "\tparam (0,1)  0x%08X 0x%08X\n",
1097 		  le32_to_cpu(cq_desc->params.generic.param0),
1098 		  le32_to_cpu(cq_desc->params.generic.param1));
1099 	ice_debug(hw, mask, "\taddr (h,l)   0x%08X 0x%08X\n",
1100 		  le32_to_cpu(cq_desc->params.generic.addr_high),
1101 		  le32_to_cpu(cq_desc->params.generic.addr_low));
1102 	if (buf && cq_desc->datalen != 0) {
1103 		ice_debug(hw, mask, "Buffer:\n");
1104 		if (buf_len < len)
1105 			len = buf_len;
1106 
1107 		ice_debug_array(hw, mask, 16, 1, (u8 *)buf, len);
1108 	}
1109 }
1110 
1111 /* FW Admin Queue command wrappers */
1112 
1113 /**
1114  * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
1115  * @hw: pointer to the hw struct
1116  * @desc: descriptor describing the command
1117  * @buf: buffer to use for indirect commands (NULL for direct commands)
1118  * @buf_size: size of buffer for indirect commands (0 for direct commands)
1119  * @cd: pointer to command details structure
1120  *
1121  * Helper function to send FW Admin Queue commands to the FW Admin Queue.
1122  */
1123 enum ice_status
1124 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1125 		u16 buf_size, struct ice_sq_cd *cd)
1126 {
1127 	return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
1128 }
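
/* Usage sketch: the AQ wrappers in this file all follow the same pattern --
 * fill a default descriptor with an opcode, optionally set params/flags, and
 * send it (NULL buffer and zero length for direct commands), e.g. as in
 * ice_clear_pf_cfg() above:
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
 *	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
 */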
1129 
1130 /**
1131  * ice_aq_get_fw_ver
1132  * @hw: pointer to the hw struct
1133  * @cd: pointer to command details structure or NULL
1134  *
1135  * Get the firmware version (0x0001) from the admin queue commands
1136  */
1137 enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1138 {
1139 	struct ice_aqc_get_ver *resp;
1140 	struct ice_aq_desc desc;
1141 	enum ice_status status;
1142 
1143 	resp = &desc.params.get_ver;
1144 
1145 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1146 
1147 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1148 
1149 	if (!status) {
1150 		hw->fw_branch = resp->fw_branch;
1151 		hw->fw_maj_ver = resp->fw_major;
1152 		hw->fw_min_ver = resp->fw_minor;
1153 		hw->fw_patch = resp->fw_patch;
1154 		hw->fw_build = le32_to_cpu(resp->fw_build);
1155 		hw->api_branch = resp->api_branch;
1156 		hw->api_maj_ver = resp->api_major;
1157 		hw->api_min_ver = resp->api_minor;
1158 		hw->api_patch = resp->api_patch;
1159 	}
1160 
1161 	return status;
1162 }
1163 
1164 /**
1165  * ice_aq_q_shutdown
1166  * @hw: pointer to the hw struct
1167  * @unloading: is the driver unloading itself
1168  *
1169  * Tell the Firmware that we're shutting down the AdminQ and whether
1170  * or not the driver is unloading as well (0x0003).
1171  */
1172 enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
1173 {
1174 	struct ice_aqc_q_shutdown *cmd;
1175 	struct ice_aq_desc desc;
1176 
1177 	cmd = &desc.params.q_shutdown;
1178 
1179 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1180 
1181 	if (unloading)
1182 		cmd->driver_unloading = cpu_to_le32(ICE_AQC_DRIVER_UNLOADING);
1183 
1184 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1185 }
1186 
1187 /**
1188  * ice_aq_req_res
1189  * @hw: pointer to the hw struct
1190  * @res: resource id
1191  * @access: access type
1192  * @sdp_number: resource number
1193  * @timeout: the maximum time in ms that the driver may hold the resource
1194  * @cd: pointer to command details structure or NULL
1195  *
1196  * Requests common resource using the admin queue commands (0x0008).
1197  * When attempting to acquire the Global Config Lock, the driver can
1198  * learn of three states:
1199  *  1) ICE_SUCCESS -        acquired lock, and can perform download package
1200  *  2) ICE_ERR_AQ_ERROR -   did not get lock, driver should fail to load
1201  *  3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
1202  *                          successfully downloaded the package; the driver does
1203  *                          not have to download the package and can continue
1204  *                          loading
1205  *
1206  * Note that if the caller is in an acquire-lock, perform-action, release-lock
1207  * phase of operation, it is possible that the FW may detect a timeout and issue
1208  * a CORER. In this case, the driver will receive a CORER interrupt and will
1209  * have to determine its cause. The calling thread that is handling this flow
1210  * will likely get an error propagated back to it indicating the Download
1211  * Package, Update Package or the Release Resource AQ commands timed out.
1212  */
1213 static enum ice_status
1214 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1215 	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
1216 	       struct ice_sq_cd *cd)
1217 {
1218 	struct ice_aqc_req_res *cmd_resp;
1219 	struct ice_aq_desc desc;
1220 	enum ice_status status;
1221 
1222 	cmd_resp = &desc.params.res_owner;
1223 
1224 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
1225 
1226 	cmd_resp->res_id = cpu_to_le16(res);
1227 	cmd_resp->access_type = cpu_to_le16(access);
1228 	cmd_resp->res_number = cpu_to_le32(sdp_number);
1229 	cmd_resp->timeout = cpu_to_le32(*timeout);
1230 	*timeout = 0;
1231 
1232 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1233 
1234 	/* The completion specifies the maximum time in ms that the driver
1235 	 * may hold the resource in the Timeout field.
1236 	 */
1237 
1238 	/* Global config lock response utilizes an additional status field.
1239 	 *
1240 	 * If the Global config lock resource is held by some other driver, the
1241 	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
1242 	 * and the timeout field indicates the maximum time the current owner
1243 	 * of the resource has to free it.
1244 	 */
1245 	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1246 		if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1247 			*timeout = le32_to_cpu(cmd_resp->timeout);
1248 			return 0;
1249 		} else if (le16_to_cpu(cmd_resp->status) ==
1250 			   ICE_AQ_RES_GLBL_IN_PROG) {
1251 			*timeout = le32_to_cpu(cmd_resp->timeout);
1252 			return ICE_ERR_AQ_ERROR;
1253 		} else if (le16_to_cpu(cmd_resp->status) ==
1254 			   ICE_AQ_RES_GLBL_DONE) {
1255 			return ICE_ERR_AQ_NO_WORK;
1256 		}
1257 
1258 		/* invalid FW response, force a timeout immediately */
1259 		*timeout = 0;
1260 		return ICE_ERR_AQ_ERROR;
1261 	}
1262 
1263 	/* If the resource is held by some other driver, the command completes
1264 	 * with a busy return value and the timeout field indicates the maximum
1265 	 * time the current owner of the resource has to free it.
1266 	 */
1267 	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1268 		*timeout = le32_to_cpu(cmd_resp->timeout);
1269 
1270 	return status;
1271 }
1272 
1273 /**
1274  * ice_aq_release_res
1275  * @hw: pointer to the hw struct
1276  * @res: resource id
1277  * @sdp_number: resource number
1278  * @cd: pointer to command details structure or NULL
1279  *
1280  * release common resource using the admin queue commands (0x0009)
1281  */
1282 static enum ice_status
1283 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1284 		   struct ice_sq_cd *cd)
1285 {
1286 	struct ice_aqc_req_res *cmd;
1287 	struct ice_aq_desc desc;
1288 
1289 	cmd = &desc.params.res_owner;
1290 
1291 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1292 
1293 	cmd->res_id = cpu_to_le16(res);
1294 	cmd->res_number = cpu_to_le32(sdp_number);
1295 
1296 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1297 }
1298 
1299 /**
1300  * ice_acquire_res
1301  * @hw: pointer to the HW structure
1302  * @res: resource id
1303  * @access: access type (read or write)
1304  * @timeout: timeout in milliseconds
1305  *
1306  * This function will attempt to acquire the ownership of a resource.
1307  */
1308 enum ice_status
1309 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1310 		enum ice_aq_res_access_type access, u32 timeout)
1311 {
1312 #define ICE_RES_POLLING_DELAY_MS	10
1313 	u32 delay = ICE_RES_POLLING_DELAY_MS;
1314 	u32 time_left = timeout;
1315 	enum ice_status status;
1316 
1317 	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1318 
1319 	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
1320 	 * previously acquired the resource and performed any necessary updates;
1321 	 * in this case the caller does not obtain the resource and has no
1322 	 * further work to do.
1323 	 */
1324 	if (status == ICE_ERR_AQ_NO_WORK)
1325 		goto ice_acquire_res_exit;
1326 
1327 	if (status)
1328 		ice_debug(hw, ICE_DBG_RES,
1329 			  "resource %d acquire type %d failed.\n", res, access);
1330 
1331 	/* If necessary, poll until the current lock owner times out */
1332 	timeout = time_left;
1333 	while (status && timeout && time_left) {
1334 		mdelay(delay);
1335 		timeout = (timeout > delay) ? timeout - delay : 0;
1336 		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1337 
1338 		if (status == ICE_ERR_AQ_NO_WORK)
1339 			/* lock free, but no work to do */
1340 			break;
1341 
1342 		if (!status)
1343 			/* lock acquired */
1344 			break;
1345 	}
1346 	if (status && status != ICE_ERR_AQ_NO_WORK)
1347 		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
1348 
1349 ice_acquire_res_exit:
1350 	if (status == ICE_ERR_AQ_NO_WORK) {
1351 		if (access == ICE_RES_WRITE)
1352 			ice_debug(hw, ICE_DBG_RES,
1353 				  "resource indicates no work to do.\n");
1354 		else
1355 			ice_debug(hw, ICE_DBG_RES,
1356 				  "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
1357 	}
1358 	return status;
1359 }
1360 
1361 /**
1362  * ice_release_res
1363  * @hw: pointer to the HW structure
1364  * @res: resource id
1365  *
1366  * This function will release a resource using the proper Admin Command.
1367  */
1368 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
1369 {
1370 	enum ice_status status;
1371 	u32 total_delay = 0;
1372 
1373 	status = ice_aq_release_res(hw, res, 0, NULL);
1374 
1375 	/* there are some rare cases when trying to release the resource
1376 	 * results in an admin Q timeout, so handle them correctly
1377 	 */
1378 	while ((status == ICE_ERR_AQ_TIMEOUT) &&
1379 	       (total_delay < hw->adminq.sq_cmd_timeout)) {
1380 		mdelay(1);
1381 		status = ice_aq_release_res(hw, res, 0, NULL);
1382 		total_delay++;
1383 	}
1384 }
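
/* Usage sketch (illustrative only): ownership of a shared resource follows an
 * acquire/use/release pattern. ICE_NVM_RES_ID and ICE_RES_READ are assumed
 * from the resource ID and access-type enums; the timeout is a placeholder.
 *
 *	status = ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ, timeout_ms);
 *	if (status == ICE_ERR_AQ_NO_WORK)
 *		return 0;	...another driver already did the work...
 *	if (status)
 *		return status;
 *	...access the shared resource...
 *	ice_release_res(hw, ICE_NVM_RES_ID);
 */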
1385 
1386 /**
1387  * ice_parse_caps - parse function/device capabilities
1388  * @hw: pointer to the hw struct
1389  * @buf: pointer to a buffer containing function/device capability records
1390  * @cap_count: number of capability records in the list
1391  * @opc: type of capabilities list to parse
1392  *
1393  * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
1394  */
1395 static void
1396 ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
1397 	       enum ice_adminq_opc opc)
1398 {
1399 	struct ice_aqc_list_caps_elem *cap_resp;
1400 	struct ice_hw_func_caps *func_p = NULL;
1401 	struct ice_hw_dev_caps *dev_p = NULL;
1402 	struct ice_hw_common_caps *caps;
1403 	u32 i;
1404 
1405 	if (!buf)
1406 		return;
1407 
1408 	cap_resp = (struct ice_aqc_list_caps_elem *)buf;
1409 
1410 	if (opc == ice_aqc_opc_list_dev_caps) {
1411 		dev_p = &hw->dev_caps;
1412 		caps = &dev_p->common_cap;
1413 	} else if (opc == ice_aqc_opc_list_func_caps) {
1414 		func_p = &hw->func_caps;
1415 		caps = &func_p->common_cap;
1416 	} else {
1417 		ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
1418 		return;
1419 	}
1420 
1421 	for (i = 0; caps && i < cap_count; i++, cap_resp++) {
1422 		u32 logical_id = le32_to_cpu(cap_resp->logical_id);
1423 		u32 phys_id = le32_to_cpu(cap_resp->phys_id);
1424 		u32 number = le32_to_cpu(cap_resp->number);
1425 		u16 cap = le16_to_cpu(cap_resp->cap);
1426 
1427 		switch (cap) {
1428 		case ICE_AQC_CAPS_SRIOV:
1429 			caps->sr_iov_1_1 = (number == 1);
1430 			ice_debug(hw, ICE_DBG_INIT,
1431 				  "HW caps: SR-IOV = %d\n", caps->sr_iov_1_1);
1432 			break;
1433 		case ICE_AQC_CAPS_VF:
1434 			if (dev_p) {
1435 				dev_p->num_vfs_exposed = number;
1436 				ice_debug(hw, ICE_DBG_INIT,
1437 					  "HW caps: VFs exposed = %d\n",
1438 					  dev_p->num_vfs_exposed);
1439 			} else if (func_p) {
1440 				func_p->num_allocd_vfs = number;
1441 				func_p->vf_base_id = logical_id;
1442 				ice_debug(hw, ICE_DBG_INIT,
1443 					  "HW caps: VFs allocated = %d\n",
1444 					  func_p->num_allocd_vfs);
1445 				ice_debug(hw, ICE_DBG_INIT,
1446 					  "HW caps: VF base_id = %d\n",
1447 					  func_p->vf_base_id);
1448 			}
1449 			break;
1450 		case ICE_AQC_CAPS_VSI:
1451 			if (dev_p) {
1452 				dev_p->num_vsi_allocd_to_host = number;
1453 				ice_debug(hw, ICE_DBG_INIT,
1454 					  "HW caps: Dev.VSI cnt = %d\n",
1455 					  dev_p->num_vsi_allocd_to_host);
1456 			} else if (func_p) {
1457 				func_p->guaranteed_num_vsi = number;
1458 				ice_debug(hw, ICE_DBG_INIT,
1459 					  "HW caps: Func.VSI cnt = %d\n",
1460 					  func_p->guaranteed_num_vsi);
1461 			}
1462 			break;
1463 		case ICE_AQC_CAPS_RSS:
1464 			caps->rss_table_size = number;
1465 			caps->rss_table_entry_width = logical_id;
1466 			ice_debug(hw, ICE_DBG_INIT,
1467 				  "HW caps: RSS table size = %d\n",
1468 				  caps->rss_table_size);
1469 			ice_debug(hw, ICE_DBG_INIT,
1470 				  "HW caps: RSS table width = %d\n",
1471 				  caps->rss_table_entry_width);
1472 			break;
1473 		case ICE_AQC_CAPS_RXQS:
1474 			caps->num_rxq = number;
1475 			caps->rxq_first_id = phys_id;
1476 			ice_debug(hw, ICE_DBG_INIT,
1477 				  "HW caps: Num Rx Qs = %d\n", caps->num_rxq);
1478 			ice_debug(hw, ICE_DBG_INIT,
1479 				  "HW caps: Rx first queue ID = %d\n",
1480 				  caps->rxq_first_id);
1481 			break;
1482 		case ICE_AQC_CAPS_TXQS:
1483 			caps->num_txq = number;
1484 			caps->txq_first_id = phys_id;
1485 			ice_debug(hw, ICE_DBG_INIT,
1486 				  "HW caps: Num Tx Qs = %d\n", caps->num_txq);
1487 			ice_debug(hw, ICE_DBG_INIT,
1488 				  "HW caps: Tx first queue ID = %d\n",
1489 				  caps->txq_first_id);
1490 			break;
1491 		case ICE_AQC_CAPS_MSIX:
1492 			caps->num_msix_vectors = number;
1493 			caps->msix_vector_first_id = phys_id;
1494 			ice_debug(hw, ICE_DBG_INIT,
1495 				  "HW caps: MSIX vector count = %d\n",
1496 				  caps->num_msix_vectors);
1497 			ice_debug(hw, ICE_DBG_INIT,
1498 				  "HW caps: MSIX first vector index = %d\n",
1499 				  caps->msix_vector_first_id);
1500 			break;
1501 		case ICE_AQC_CAPS_MAX_MTU:
1502 			caps->max_mtu = number;
1503 			if (dev_p)
1504 				ice_debug(hw, ICE_DBG_INIT,
1505 					  "HW caps: Dev.MaxMTU = %d\n",
1506 					  caps->max_mtu);
1507 			else if (func_p)
1508 				ice_debug(hw, ICE_DBG_INIT,
1509 					  "HW caps: func.MaxMTU = %d\n",
1510 					  caps->max_mtu);
1511 			break;
1512 		default:
1513 			ice_debug(hw, ICE_DBG_INIT,
1514 				  "HW caps: Unknown capability[%d]: 0x%x\n", i,
1515 				  cap);
1516 			break;
1517 		}
1518 	}
1519 }
1520 
1521 /**
1522  * ice_aq_discover_caps - query function/device capabilities
1523  * @hw: pointer to the hw struct
1524  * @buf: a virtual buffer to hold the capabilities
1525  * @buf_size: Size of the virtual buffer
1526  * @cap_count: cap count needed if AQ err==ENOMEM
1527  * @opc: capabilities type to discover - pass in the command opcode
1528  * @cd: pointer to command details structure or NULL
1529  *
1530  * Get the function(0x000a)/device(0x000b) capabilities description from
1531  * the firmware.
1532  */
1533 static enum ice_status
1534 ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
1535 		     enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1536 {
1537 	struct ice_aqc_list_caps *cmd;
1538 	struct ice_aq_desc desc;
1539 	enum ice_status status;
1540 
1541 	cmd = &desc.params.get_cap;
1542 
1543 	if (opc != ice_aqc_opc_list_func_caps &&
1544 	    opc != ice_aqc_opc_list_dev_caps)
1545 		return ICE_ERR_PARAM;
1546 
1547 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
1548 
1549 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1550 	if (!status)
1551 		ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc);
1552 	else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM)
1553 		*cap_count = le32_to_cpu(cmd->count);
1554 	return status;
1555 }
1556 
1557 /**
1558  * ice_discover_caps - get info about the HW
1559  * @hw: pointer to the hardware structure
1560  * @opc: capabilities type to discover - pass in the command opcode
1561  */
1562 static enum ice_status ice_discover_caps(struct ice_hw *hw,
1563 					 enum ice_adminq_opc opc)
1564 {
1565 	enum ice_status status;
1566 	u32 cap_count;
1567 	u16 cbuf_len;
1568 	u8 retries;
1569 
1570 	/* The driver doesn't know how many capabilities the device will return
1571 	 * so the buffer size required isn't known ahead of time. The driver
1572 	 * starts with cbuf_len and if this turns out to be insufficient, the
1573 	 * device returns ICE_AQ_RC_ENOMEM and also the cap_count it needs.
1574 	 * The driver then allocates the buffer based on the count and retries
1575 	 * the operation. So it follows that the retry count is 2.
1576 	 */
1577 #define ICE_GET_CAP_BUF_COUNT	40
1578 #define ICE_GET_CAP_RETRY_COUNT	2
1579 
1580 	cap_count = ICE_GET_CAP_BUF_COUNT;
1581 	retries = ICE_GET_CAP_RETRY_COUNT;
1582 
1583 	do {
1584 		void *cbuf;
1585 
1586 		cbuf_len = (u16)(cap_count *
1587 				 sizeof(struct ice_aqc_list_caps_elem));
1588 		cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL);
1589 		if (!cbuf)
1590 			return ICE_ERR_NO_MEMORY;
1591 
1592 		status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &cap_count,
1593 					      opc, NULL);
1594 		devm_kfree(ice_hw_to_dev(hw), cbuf);
1595 
1596 		if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
1597 			break;
1598 
1599 		/* If ENOMEM is returned, try again with a bigger buffer */
1600 	} while (--retries);
1601 
1602 	return status;
1603 }
1604 
1605 /**
1606  * ice_get_caps - get info about the HW
1607  * @hw: pointer to the hardware structure
1608  */
1609 enum ice_status ice_get_caps(struct ice_hw *hw)
1610 {
1611 	enum ice_status status;
1612 
1613 	status = ice_discover_caps(hw, ice_aqc_opc_list_dev_caps);
1614 	if (!status)
1615 		status = ice_discover_caps(hw, ice_aqc_opc_list_func_caps);
1616 
1617 	return status;
1618 }
1619 
1620 /**
1621  * ice_aq_manage_mac_write - manage MAC address write command
1622  * @hw: pointer to the hw struct
1623  * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
1624  * @flags: flags to control write behavior
1625  * @cd: pointer to command details structure or NULL
1626  *
1627  * This function is used to write MAC address to the NVM (0x0108).
1628  */
1629 enum ice_status
1630 ice_aq_manage_mac_write(struct ice_hw *hw, u8 *mac_addr, u8 flags,
1631 			struct ice_sq_cd *cd)
1632 {
1633 	struct ice_aqc_manage_mac_write *cmd;
1634 	struct ice_aq_desc desc;
1635 
1636 	cmd = &desc.params.mac_write;
1637 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
1638 
1639 	cmd->flags = flags;
1640 
1641 	/* Prep values for flags, sah, sal */
1642 	cmd->sah = htons(*((u16 *)mac_addr));
1643 	cmd->sal = htonl(*((u32 *)(mac_addr + 2)));
1644 
1645 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1646 }
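
/* Usage sketch (illustrative only): a hypothetical caller updating the port's
 * LAA would pass the new unicast address plus a write-behavior flag; the flag
 * name below is assumed from ice_adminq_cmd.h.
 *
 *	status = ice_aq_manage_mac_write(hw, new_mac,
 *					 ICE_AQC_MAN_MAC_UPDATE_LAA_WOL, NULL);
 */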
1647 
1648 /**
1649  * ice_aq_clear_pxe_mode
1650  * @hw: pointer to the hw struct
1651  *
1652  * Tell the firmware that the driver is taking over from PXE (0x0110).
1653  */
1654 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
1655 {
1656 	struct ice_aq_desc desc;
1657 
1658 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
1659 	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
1660 
1661 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1662 }
1663 
1664 /**
1665  * ice_clear_pxe_mode - clear pxe operations mode
1666  * @hw: pointer to the hw struct
1667  *
1668  * Make sure all PXE mode settings are cleared, including things
1669  * like descriptor fetch/write-back mode.
1670  */
1671 void ice_clear_pxe_mode(struct ice_hw *hw)
1672 {
1673 	if (ice_check_sq_alive(hw, &hw->adminq))
1674 		ice_aq_clear_pxe_mode(hw);
1675 }
1676 
1677 /**
1678  * ice_get_link_speed_based_on_phy_type - returns link speed
1679  * @phy_type_low: lower part of phy_type
1680  *
1681  * This helper function will convert a phy_type_low to its corresponding link
1682  * speed.
1683  * Note: In the structure of phy_type_low, there should be a single bit set,
1684  * as this function will convert one PHY type to its speed.
1685  * If no bit is set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
1686  * If more than one bit is set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
1687  */
1688 static u16
1689 ice_get_link_speed_based_on_phy_type(u64 phy_type_low)
1690 {
1691 	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
1692 
1693 	switch (phy_type_low) {
1694 	case ICE_PHY_TYPE_LOW_100BASE_TX:
1695 	case ICE_PHY_TYPE_LOW_100M_SGMII:
1696 		speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
1697 		break;
1698 	case ICE_PHY_TYPE_LOW_1000BASE_T:
1699 	case ICE_PHY_TYPE_LOW_1000BASE_SX:
1700 	case ICE_PHY_TYPE_LOW_1000BASE_LX:
1701 	case ICE_PHY_TYPE_LOW_1000BASE_KX:
1702 	case ICE_PHY_TYPE_LOW_1G_SGMII:
1703 		speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
1704 		break;
1705 	case ICE_PHY_TYPE_LOW_2500BASE_T:
1706 	case ICE_PHY_TYPE_LOW_2500BASE_X:
1707 	case ICE_PHY_TYPE_LOW_2500BASE_KX:
1708 		speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
1709 		break;
1710 	case ICE_PHY_TYPE_LOW_5GBASE_T:
1711 	case ICE_PHY_TYPE_LOW_5GBASE_KR:
1712 		speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
1713 		break;
1714 	case ICE_PHY_TYPE_LOW_10GBASE_T:
1715 	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
1716 	case ICE_PHY_TYPE_LOW_10GBASE_SR:
1717 	case ICE_PHY_TYPE_LOW_10GBASE_LR:
1718 	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
1719 	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
1720 	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
1721 		speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
1722 		break;
1723 	case ICE_PHY_TYPE_LOW_25GBASE_T:
1724 	case ICE_PHY_TYPE_LOW_25GBASE_CR:
1725 	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
1726 	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
1727 	case ICE_PHY_TYPE_LOW_25GBASE_SR:
1728 	case ICE_PHY_TYPE_LOW_25GBASE_LR:
1729 	case ICE_PHY_TYPE_LOW_25GBASE_KR:
1730 	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
1731 	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
1732 	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
1733 	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
1734 		speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
1735 		break;
1736 	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
1737 	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
1738 	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
1739 	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
1740 	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
1741 	case ICE_PHY_TYPE_LOW_40G_XLAUI:
1742 		speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
1743 		break;
1744 	default:
1745 		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
1746 		break;
1747 	}
1748 
1749 	return speed_phy_type_low;
1750 }
1751 
1752 /**
1753  * ice_update_phy_type
1754  * @phy_type_low: pointer to the lower part of phy_type
1755  * @link_speeds_bitmap: targeted link speeds bitmap
1756  *
1757  * Note: For the link_speeds_bitmap format, see the link_speed field of
1758  * struct ice_aqc_get_link_status. The caller may pass in a
1759  * link_speeds_bitmap that includes multiple speeds.
1760  *
1761  * Each bit of phy_type_low represents a PHY type with an associated link
1762  * speed. This helper function turns on bits in phy_type_low based on the
1763  * value of the link_speeds_bitmap input parameter.
1764  */
1765 void ice_update_phy_type(u64 *phy_type_low, u16 link_speeds_bitmap)
1766 {
1767 	u16 speed = ICE_AQ_LINK_SPEED_UNKNOWN;
1768 	u64 pt_low;
1769 	int index;
1770 
1771 	/* We first check with low part of phy_type */
1772 	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
1773 		pt_low = BIT_ULL(index);
1774 		speed = ice_get_link_speed_based_on_phy_type(pt_low);
1775 
1776 		if (link_speeds_bitmap & speed)
1777 			*phy_type_low |= BIT_ULL(index);
1778 	}
1779 }
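
/* Editor's usage sketch for ice_update_phy_type (illustration only): a
 * caller that wants to advertise both 10G and 25G might do
 *
 *	u64 phy_type_low = 0;
 *	u16 speeds = ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB;
 *
 *	ice_update_phy_type(&phy_type_low, speeds);
 *
 * which sets every ICE_PHY_TYPE_LOW_10G* and ICE_PHY_TYPE_LOW_25G* bit in
 * phy_type_low.
 */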
1780 
1781 /**
1782  * ice_aq_set_phy_cfg
1783  * @hw: pointer to the hw struct
1784  * @lport: logical port number
1785  * @cfg: structure with PHY configuration data to be set
1786  * @cd: pointer to command details structure or NULL
1787  *
1788  * Set the various PHY configuration parameters supported on the Port.
1789  * One or more of the Set PHY config parameters may be ignored in an MFP
1790  * mode as the PF may not have the privilege to set some of the PHY Config
1791  * parameters. This status will be indicated by the command response (0x0601).
1792  */
1793 enum ice_status
1794 ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
1795 		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
1796 {
1797 	struct ice_aq_desc desc;
1798 
1799 	if (!cfg)
1800 		return ICE_ERR_PARAM;
1801 
1802 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
1803 	desc.params.set_phy.lport_num = lport;
1804 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1805 
1806 	return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
1807 }
1808 
1809 /**
1810  * ice_update_link_info - update status of the HW network link
1811  * @pi: port info structure of the interested logical port
1812  */
1813 enum ice_status ice_update_link_info(struct ice_port_info *pi)
1814 {
1815 	struct ice_aqc_get_phy_caps_data *pcaps;
1816 	struct ice_phy_info *phy_info;
1817 	enum ice_status status;
1818 	struct ice_hw *hw;
1819 
1820 	if (!pi)
1821 		return ICE_ERR_PARAM;
1822 
1823 	hw = pi->hw;
1824 
1825 	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
1826 	if (!pcaps)
1827 		return ICE_ERR_NO_MEMORY;
1828 
1829 	phy_info = &pi->phy;
1830 	status = ice_aq_get_link_info(pi, true, NULL, NULL);
1831 	if (status)
1832 		goto out;
1833 
1834 	if (phy_info->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
1835 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
1836 					     pcaps, NULL);
1837 		if (status)
1838 			goto out;
1839 
1840 		memcpy(phy_info->link_info.module_type, &pcaps->module_type,
1841 		       sizeof(phy_info->link_info.module_type));
1842 	}
1843 out:
1844 	devm_kfree(ice_hw_to_dev(hw), pcaps);
1845 	return status;
1846 }
1847 
1848 /**
1849  * ice_set_fc
1850  * @pi: port information structure
1851  * @aq_failures: pointer to status code, specific to ice_set_fc routine
1852  * @ena_auto_link_update: enable automatic link update
1853  *
1854  * Set the requested flow control mode.
1855  */
1856 enum ice_status
1857 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
1858 {
1859 	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
1860 	struct ice_aqc_get_phy_caps_data *pcaps;
1861 	enum ice_status status;
1862 	u8 pause_mask = 0x0;
1863 	struct ice_hw *hw;
1864 
1865 	if (!pi)
1866 		return ICE_ERR_PARAM;
1867 	hw = pi->hw;
1868 	*aq_failures = ICE_SET_FC_AQ_FAIL_NONE;
1869 
1870 	switch (pi->fc.req_mode) {
1871 	case ICE_FC_FULL:
1872 		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
1873 		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
1874 		break;
1875 	case ICE_FC_RX_PAUSE:
1876 		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
1877 		break;
1878 	case ICE_FC_TX_PAUSE:
1879 		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
1880 		break;
1881 	default:
1882 		break;
1883 	}
1884 
1885 	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
1886 	if (!pcaps)
1887 		return ICE_ERR_NO_MEMORY;
1888 
1889 	/* Get the current phy config */
1890 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
1891 				     NULL);
1892 	if (status) {
1893 		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
1894 		goto out;
1895 	}
1896 
1897 	/* clear the old pause settings */
1898 	cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
1899 				   ICE_AQC_PHY_EN_RX_LINK_PAUSE);
1900 	/* set the new capabilities */
1901 	cfg.caps |= pause_mask;
1902 	/* If the capabilities have changed, then set the new config */
1903 	if (cfg.caps != pcaps->caps) {
1904 		int retry_count, retry_max = 10;
1905 
1906 		/* Auto restart link so settings take effect */
1907 		if (ena_auto_link_update)
1908 			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
1909 		/* Copy over all the old settings */
1910 		cfg.phy_type_low = pcaps->phy_type_low;
1911 		cfg.low_power_ctrl = pcaps->low_power_ctrl;
1912 		cfg.eee_cap = pcaps->eee_cap;
1913 		cfg.eeer_value = pcaps->eeer_value;
1914 		cfg.link_fec_opt = pcaps->link_fec_options;
1915 
1916 		status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL);
1917 		if (status) {
1918 			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
1919 			goto out;
1920 		}
1921 
1922 		/* Update the link info
1923 		 * It sometimes takes a really long time for link to
1924 		 * come back from the atomic reset. Thus, we wait a
1925 		 * little bit.
1926 		 */
1927 		for (retry_count = 0; retry_count < retry_max; retry_count++) {
1928 			status = ice_update_link_info(pi);
1929 
1930 			if (!status)
1931 				break;
1932 
1933 			mdelay(100);
1934 		}
1935 
1936 		if (status)
1937 			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
1938 	}
1939 
1940 out:
1941 	devm_kfree(ice_hw_to_dev(hw), pcaps);
1942 	return status;
1943 }
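
/* Editor's usage sketch for ice_set_fc (illustration only): a caller
 * records the requested mode in the port info and then lets this routine
 * reconcile it with the current PHY configuration, e.g.
 *
 *	u8 aq_failures;
 *
 *	pi->fc.req_mode = ICE_FC_RX_PAUSE;
 *	status = ice_set_fc(pi, &aq_failures, true);
 *
 * On failure, aq_failures indicates whether the get-caps, set-cfg or the
 * link-update step was the one that failed.
 */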
1944 
1945 /**
1946  * ice_get_link_status - get status of the HW network link
1947  * @pi: port information structure
1948  * @link_up: pointer to bool (true/false = linkup/linkdown)
1949  *
1950  * Variable link_up is true if link is up, false if link is down.
1951  * The variable link_up is invalid if status is non-zero. As a
1952  * result of this call, link status reporting becomes enabled.
1953  */
1954 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
1955 {
1956 	struct ice_phy_info *phy_info;
1957 	enum ice_status status = 0;
1958 
1959 	if (!pi || !link_up)
1960 		return ICE_ERR_PARAM;
1961 
1962 	phy_info = &pi->phy;
1963 
1964 	if (phy_info->get_link_info) {
1965 		status = ice_update_link_info(pi);
1966 
1967 		if (status)
1968 			ice_debug(pi->hw, ICE_DBG_LINK,
1969 				  "get link status error, status = %d\n",
1970 				  status);
1971 	}
1972 
1973 	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
1974 
1975 	return status;
1976 }
1977 
1978 /**
1979  * ice_aq_set_link_restart_an
1980  * @pi: pointer to the port information structure
1981  * @ena_link: if true: enable link, if false: disable link
1982  * @cd: pointer to command details structure or NULL
1983  *
1984  * Sets up the link and restarts the Auto-Negotiation over the link.
1985  */
1986 enum ice_status
1987 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
1988 			   struct ice_sq_cd *cd)
1989 {
1990 	struct ice_aqc_restart_an *cmd;
1991 	struct ice_aq_desc desc;
1992 
1993 	cmd = &desc.params.restart_an;
1994 
1995 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
1996 
1997 	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
1998 	cmd->lport_num = pi->lport;
1999 	if (ena_link)
2000 		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
2001 	else
2002 		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
2003 
2004 	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
2005 }
2006 
2007 /**
2008  * __ice_aq_get_set_rss_lut
2009  * @hw: pointer to the hardware structure
2010  * @vsi_id: VSI FW index
2011  * @lut_type: LUT table type
2012  * @lut: pointer to the LUT buffer provided by the caller
2013  * @lut_size: size of the LUT buffer
2014  * @glob_lut_idx: global LUT index
2015  * @set: set true to set the table, false to get the table
2016  *
2017  * Internal function to get (0x0B05) or set (0x0B03) the RSS lookup table
2018  */
2019 static enum ice_status
2020 __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
2021 			 u16 lut_size, u8 glob_lut_idx, bool set)
2022 {
2023 	struct ice_aqc_get_set_rss_lut *cmd_resp;
2024 	struct ice_aq_desc desc;
2025 	enum ice_status status;
2026 	u16 flags = 0;
2027 
2028 	cmd_resp = &desc.params.get_set_rss_lut;
2029 
2030 	if (set) {
2031 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
2032 		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2033 	} else {
2034 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
2035 	}
2036 
2037 	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
2038 					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
2039 					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
2040 				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);
2041 
2042 	switch (lut_type) {
2043 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
2044 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
2045 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
2046 		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
2047 			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
2048 		break;
2049 	default:
2050 		status = ICE_ERR_PARAM;
2051 		goto ice_aq_get_set_rss_lut_exit;
2052 	}
2053 
2054 	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
2055 		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
2056 			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
2057 
2058 		if (!set)
2059 			goto ice_aq_get_set_rss_lut_send;
2060 	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
2061 		if (!set)
2062 			goto ice_aq_get_set_rss_lut_send;
2063 	} else {
2064 		goto ice_aq_get_set_rss_lut_send;
2065 	}
2066 
2067 	/* LUT size is only valid for Global and PF table types */
2068 	switch (lut_size) {
2069 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
2070 		break;
2071 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
2072 		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
2073 			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
2074 			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
2075 		break;
2076 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
2077 		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
2078 			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
2079 				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
2080 				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
2081 			break;
2082 		}
2083 		/* fall-through */
2084 	default:
2085 		status = ICE_ERR_PARAM;
2086 		goto ice_aq_get_set_rss_lut_exit;
2087 	}
2088 
2089 ice_aq_get_set_rss_lut_send:
2090 	cmd_resp->flags = cpu_to_le16(flags);
2091 	status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
2092 
2093 ice_aq_get_set_rss_lut_exit:
2094 	return status;
2095 }
2096 
2097 /**
2098  * ice_aq_get_rss_lut
2099  * @hw: pointer to the hardware structure
2100  * @vsi_handle: software VSI handle
2101  * @lut_type: LUT table type
2102  * @lut: pointer to the LUT buffer provided by the caller
2103  * @lut_size: size of the LUT buffer
2104  *
2105  * get the RSS lookup table, PF or VSI type
2106  */
2107 enum ice_status
2108 ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
2109 		   u8 *lut, u16 lut_size)
2110 {
2111 	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
2112 		return ICE_ERR_PARAM;
2113 
2114 	return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2115 					lut_type, lut, lut_size, 0, false);
2116 }
2117 
2118 /**
2119  * ice_aq_set_rss_lut
2120  * @hw: pointer to the hardware structure
2121  * @vsi_handle: software VSI handle
2122  * @lut_type: LUT table type
2123  * @lut: pointer to the LUT buffer provided by the caller
2124  * @lut_size: size of the LUT buffer
2125  *
2126  * set the RSS lookup table, PF or VSI type
2127  */
2128 enum ice_status
2129 ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
2130 		   u8 *lut, u16 lut_size)
2131 {
2132 	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
2133 		return ICE_ERR_PARAM;
2134 
2135 	return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2136 					lut_type, lut, lut_size, 0, true);
2137 }
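
/* Editor's usage sketch for the RSS LUT wrappers above (illustration only,
 * assuming a 512-entry PF table):
 *
 *	u8 lut[ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512];
 *
 *	... fill lut[] with Rx queue indices ...
 *	status = ice_aq_set_rss_lut(hw, vsi_handle,
 *				    ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
 *				    lut, sizeof(lut));
 */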
2138 
2139 /**
2140  * __ice_aq_get_set_rss_key
2141  * @hw: pointer to the hw struct
2142  * @vsi_id: VSI FW index
2143  * @key: pointer to key info struct
2144  * @set: set true to set the key, false to get the key
2145  *
2146  * get (0x0B04) or set (0x0B02) the RSS key per VSI
2147  */
2148 static enum ice_status
2149 __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
2150 			 struct ice_aqc_get_set_rss_keys *key,
2151 			 bool set)
2152 {
2153 	struct ice_aqc_get_set_rss_key *cmd_resp;
2154 	u16 key_size = sizeof(*key);
2155 	struct ice_aq_desc desc;
2156 
2157 	cmd_resp = &desc.params.get_set_rss_key;
2158 
2159 	if (set) {
2160 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
2161 		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2162 	} else {
2163 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
2164 	}
2165 
2166 	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
2167 					 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
2168 					ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
2169 				       ICE_AQC_GSET_RSS_KEY_VSI_VALID);
2170 
2171 	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
2172 }
2173 
2174 /**
2175  * ice_aq_get_rss_key
2176  * @hw: pointer to the hw struct
2177  * @vsi_handle: software VSI handle
2178  * @key: pointer to key info struct
2179  *
2180  * get the RSS key per VSI
2181  */
2182 enum ice_status
2183 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
2184 		   struct ice_aqc_get_set_rss_keys *key)
2185 {
2186 	if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
2187 		return ICE_ERR_PARAM;
2188 
2189 	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2190 					key, false);
2191 }
2192 
2193 /**
2194  * ice_aq_set_rss_key
2195  * @hw: pointer to the hw struct
2196  * @vsi_handle: software VSI handle
2197  * @keys: pointer to key info struct
2198  *
2199  * set the RSS key per VSI
2200  */
2201 enum ice_status
2202 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
2203 		   struct ice_aqc_get_set_rss_keys *keys)
2204 {
2205 	if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
2206 		return ICE_ERR_PARAM;
2207 
2208 	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2209 					keys, true);
2210 }
2211 
2212 /**
2213  * ice_aq_add_lan_txq
2214  * @hw: pointer to the hardware structure
2215  * @num_qgrps: Number of added queue groups
2216  * @qg_list: list of queue groups to be added
2217  * @buf_size: size of buffer for indirect command
2218  * @cd: pointer to command details structure or NULL
2219  *
2220  * Add Tx LAN queue (0x0C30)
2221  *
2222  * NOTE:
2223  * Prior to calling add Tx LAN queue:
2224  * Initialize the following as part of the Tx queue context:
2225  * Completion queue ID (if the queue uses a completion queue), Quanta profile,
2226  * Cache profile and Packet shaper profile.
2227  *
2228  * After add Tx LAN queue AQ command is completed:
2229  * Interrupts should be associated with specific queues,
2230  * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
2231  * flow.
2232  */
2233 static enum ice_status
2234 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
2235 		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
2236 		   struct ice_sq_cd *cd)
2237 {
2238 	u16 i, sum_header_size, sum_q_size = 0;
2239 	struct ice_aqc_add_tx_qgrp *list;
2240 	struct ice_aqc_add_txqs *cmd;
2241 	struct ice_aq_desc desc;
2242 
2243 	cmd = &desc.params.add_txqs;
2244 
2245 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
2246 
2247 	if (!qg_list)
2248 		return ICE_ERR_PARAM;
2249 
2250 	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
2251 		return ICE_ERR_PARAM;
2252 
2253 	sum_header_size = num_qgrps *
2254 		(sizeof(*qg_list) - sizeof(*qg_list->txqs));
2255 
2256 	list = qg_list;
2257 	for (i = 0; i < num_qgrps; i++) {
2258 		struct ice_aqc_add_txqs_perq *q = list->txqs;
2259 
2260 		sum_q_size += list->num_txqs * sizeof(*q);
2261 		list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);
2262 	}
2263 
2264 	if (buf_size != (sum_header_size + sum_q_size))
2265 		return ICE_ERR_PARAM;
2266 
2267 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2268 
2269 	cmd->num_qgrps = num_qgrps;
2270 
2271 	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
2272 }
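
/* Editor's buffer sizing example for ice_aq_add_lan_txq (illustration
 * only): for one queue group containing one queue, the buf_size check
 * above reduces to the size of a single group header plus one per-queue
 * entry, i.e. sizeof(struct ice_aqc_add_tx_qgrp), since that structure
 * already embeds one struct ice_aqc_add_txqs_perq:
 *
 *	sum_header_size = 1 * (sizeof(*qg_list) - sizeof(*qg_list->txqs));
 *	sum_q_size      = 1 * sizeof(struct ice_aqc_add_txqs_perq);
 *	buf_size        = sum_header_size + sum_q_size;
 */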
2273 
2274 /**
2275  * ice_aq_dis_lan_txq
2276  * @hw: pointer to the hardware structure
2277  * @num_qgrps: number of groups in the list
2278  * @qg_list: the list of groups to disable
2279  * @buf_size: the total size of the qg_list buffer in bytes
2280  * @rst_src: if called due to reset, specifies the RST source
2281  * @vmvf_num: the relative VM or VF number that is undergoing the reset
2282  * @cd: pointer to command details structure or NULL
2283  *
2284  * Disable LAN Tx queue (0x0C31)
2285  */
2286 static enum ice_status
2287 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
2288 		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
2289 		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
2290 		   struct ice_sq_cd *cd)
2291 {
2292 	struct ice_aqc_dis_txqs *cmd;
2293 	struct ice_aq_desc desc;
2294 	u16 i, sz = 0;
2295 
2296 	cmd = &desc.params.dis_txqs;
2297 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
2298 
2299 	/* qg_list can be NULL only in VM/VF reset flow */
2300 	if (!qg_list && !rst_src)
2301 		return ICE_ERR_PARAM;
2302 
2303 	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
2304 		return ICE_ERR_PARAM;
2305 
2306 	cmd->num_entries = num_qgrps;
2307 
2308 	cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
2309 					    ICE_AQC_Q_DIS_TIMEOUT_M);
2310 
2311 	switch (rst_src) {
2312 	case ICE_VM_RESET:
2313 		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
2314 		cmd->vmvf_and_timeout |=
2315 			cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
2316 		break;
2317 	case ICE_VF_RESET:
2318 		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
2319 		/* In this case, FW expects vmvf_num to be absolute VF id */
2320 		cmd->vmvf_and_timeout |=
2321 			cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
2322 				    ICE_AQC_Q_DIS_VMVF_NUM_M);
2323 		break;
2324 	case ICE_NO_RESET:
2325 	default:
2326 		break;
2327 	}
2328 
2329 	/* If no queue group info, we are in a reset flow. Issue the AQ */
2330 	if (!qg_list)
2331 		goto do_aq;
2332 
2333 	/* set RD bit to indicate that command buffer is provided by the driver
2334 	 * and it needs to be read by the firmware
2335 	 */
2336 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2337 
2338 	for (i = 0; i < num_qgrps; ++i) {
2339 		/* Calculate the size taken up by the queue IDs in this group */
2340 		sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);
2341 
2342 		/* Add the size of the group header */
2343 		sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);
2344 
2345 		/* If the num of queues is even, add 2 bytes of padding */
2346 		if ((qg_list[i].num_qs % 2) == 0)
2347 			sz += 2;
2348 	}
2349 
2350 	if (buf_size != sz)
2351 		return ICE_ERR_PARAM;
2352 
2353 do_aq:
2354 	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
2355 }
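
/* Editor's buffer sizing example for ice_aq_dis_lan_txq (illustration
 * only): each group contributes 2 bytes per queue ID, its group header,
 * and 2 padding bytes when the queue count is even. For one group with a
 * single queue:
 *
 *	sz  = 1 * sizeof(qg_list[0].q_id);                  queue IDs
 *	sz += sizeof(qg_list[0]) - sizeof(qg_list[0].q_id); group header
 *	(num_qs is odd, so no padding bytes are added)
 */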
2356 
2357 /* End of FW Admin Queue command wrappers */
2358 
2359 /**
2360  * ice_write_byte - write a byte to a packed context structure
2361  * @src_ctx:  the context structure to read from
2362  * @dest_ctx: the context to be written to
2363  * @ce_info:  a description of the struct to be filled
2364  */
2365 static void ice_write_byte(u8 *src_ctx, u8 *dest_ctx,
2366 			   const struct ice_ctx_ele *ce_info)
2367 {
2368 	u8 src_byte, dest_byte, mask;
2369 	u8 *from, *dest;
2370 	u16 shift_width;
2371 
2372 	/* copy from the next struct field */
2373 	from = src_ctx + ce_info->offset;
2374 
2375 	/* prepare the bits and mask */
2376 	shift_width = ce_info->lsb % 8;
2377 	mask = (u8)(BIT(ce_info->width) - 1);
2378 
2379 	src_byte = *from;
2380 	src_byte &= mask;
2381 
2382 	/* shift to correct alignment */
2383 	mask <<= shift_width;
2384 	src_byte <<= shift_width;
2385 
2386 	/* get the current bits from the target bit string */
2387 	dest = dest_ctx + (ce_info->lsb / 8);
2388 
2389 	memcpy(&dest_byte, dest, sizeof(dest_byte));
2390 
2391 	dest_byte &= ~mask;	/* get the bits not changing */
2392 	dest_byte |= src_byte;	/* add in the new bits */
2393 
2394 	/* put it all back */
2395 	memcpy(dest, &dest_byte, sizeof(dest_byte));
2396 }
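
/* Editor's worked example of the byte packing above (illustration only):
 * for a field with width = 3 and lsb = 5, a source value of 0x5 is masked
 * with 0x07, shifted left by 5, and merged into the destination byte while
 * preserving the bits outside the shifted mask:
 *
 *	mask     = 0b00000111  ->  0b11100000 after the shift
 *	src_byte = 0b00000101  ->  0b10100000 after the shift
 *	dest_byte = (dest_byte & ~0b11100000) | 0b10100000
 */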
2397 
2398 /**
2399  * ice_write_word - write a word to a packed context structure
2400  * @src_ctx:  the context structure to read from
2401  * @dest_ctx: the context to be written to
2402  * @ce_info:  a description of the struct to be filled
2403  */
2404 static void ice_write_word(u8 *src_ctx, u8 *dest_ctx,
2405 			   const struct ice_ctx_ele *ce_info)
2406 {
2407 	u16 src_word, mask;
2408 	__le16 dest_word;
2409 	u8 *from, *dest;
2410 	u16 shift_width;
2411 
2412 	/* copy from the next struct field */
2413 	from = src_ctx + ce_info->offset;
2414 
2415 	/* prepare the bits and mask */
2416 	shift_width = ce_info->lsb % 8;
2417 	mask = BIT(ce_info->width) - 1;
2418 
2419 	/* don't swizzle the bits until after the mask because the mask bits
2420 	 * will be in a different bit position on big endian machines
2421 	 */
2422 	src_word = *(u16 *)from;
2423 	src_word &= mask;
2424 
2425 	/* shift to correct alignment */
2426 	mask <<= shift_width;
2427 	src_word <<= shift_width;
2428 
2429 	/* get the current bits from the target bit string */
2430 	dest = dest_ctx + (ce_info->lsb / 8);
2431 
2432 	memcpy(&dest_word, dest, sizeof(dest_word));
2433 
2434 	dest_word &= ~(cpu_to_le16(mask));	/* get the bits not changing */
2435 	dest_word |= cpu_to_le16(src_word);	/* add in the new bits */
2436 
2437 	/* put it all back */
2438 	memcpy(dest, &dest_word, sizeof(dest_word));
2439 }
2440 
2441 /**
2442  * ice_write_dword - write a dword to a packed context structure
2443  * @src_ctx:  the context structure to read from
2444  * @dest_ctx: the context to be written to
2445  * @ce_info:  a description of the struct to be filled
2446  */
2447 static void ice_write_dword(u8 *src_ctx, u8 *dest_ctx,
2448 			    const struct ice_ctx_ele *ce_info)
2449 {
2450 	u32 src_dword, mask;
2451 	__le32 dest_dword;
2452 	u8 *from, *dest;
2453 	u16 shift_width;
2454 
2455 	/* copy from the next struct field */
2456 	from = src_ctx + ce_info->offset;
2457 
2458 	/* prepare the bits and mask */
2459 	shift_width = ce_info->lsb % 8;
2460 
2461 	/* if the field width is exactly 32 on an x86 machine, then the shift
2462 	 * operation will not work because the SHL instruction's count is masked
2463 	 * to 5 bits so the shift will do nothing
2464 	 */
2465 	if (ce_info->width < 32)
2466 		mask = BIT(ce_info->width) - 1;
2467 	else
2468 		mask = (u32)~0;
2469 
2470 	/* don't swizzle the bits until after the mask because the mask bits
2471 	 * will be in a different bit position on big endian machines
2472 	 */
2473 	src_dword = *(u32 *)from;
2474 	src_dword &= mask;
2475 
2476 	/* shift to correct alignment */
2477 	mask <<= shift_width;
2478 	src_dword <<= shift_width;
2479 
2480 	/* get the current bits from the target bit string */
2481 	dest = dest_ctx + (ce_info->lsb / 8);
2482 
2483 	memcpy(&dest_dword, dest, sizeof(dest_dword));
2484 
2485 	dest_dword &= ~(cpu_to_le32(mask));	/* get the bits not changing */
2486 	dest_dword |= cpu_to_le32(src_dword);	/* add in the new bits */
2487 
2488 	/* put it all back */
2489 	memcpy(dest, &dest_dword, sizeof(dest_dword));
2490 }
2491 
2492 /**
2493  * ice_write_qword - write a qword to a packed context structure
2494  * @src_ctx:  the context structure to read from
2495  * @dest_ctx: the context to be written to
2496  * @ce_info:  a description of the struct to be filled
2497  */
2498 static void ice_write_qword(u8 *src_ctx, u8 *dest_ctx,
2499 			    const struct ice_ctx_ele *ce_info)
2500 {
2501 	u64 src_qword, mask;
2502 	__le64 dest_qword;
2503 	u8 *from, *dest;
2504 	u16 shift_width;
2505 
2506 	/* copy from the next struct field */
2507 	from = src_ctx + ce_info->offset;
2508 
2509 	/* prepare the bits and mask */
2510 	shift_width = ce_info->lsb % 8;
2511 
2512 	/* if the field width is exactly 64 on an x86 machine, then the shift
2513 	 * operation will not work because the SHL instruction's count is masked
2514 	 * to 6 bits so the shift will do nothing
2515 	 */
2516 	if (ce_info->width < 64)
2517 		mask = BIT_ULL(ce_info->width) - 1;
2518 	else
2519 		mask = (u64)~0;
2520 
2521 	/* don't swizzle the bits until after the mask because the mask bits
2522 	 * will be in a different bit position on big endian machines
2523 	 */
2524 	src_qword = *(u64 *)from;
2525 	src_qword &= mask;
2526 
2527 	/* shift to correct alignment */
2528 	mask <<= shift_width;
2529 	src_qword <<= shift_width;
2530 
2531 	/* get the current bits from the target bit string */
2532 	dest = dest_ctx + (ce_info->lsb / 8);
2533 
2534 	memcpy(&dest_qword, dest, sizeof(dest_qword));
2535 
2536 	dest_qword &= ~(cpu_to_le64(mask));	/* get the bits not changing */
2537 	dest_qword |= cpu_to_le64(src_qword);	/* add in the new bits */
2538 
2539 	/* put it all back */
2540 	memcpy(dest, &dest_qword, sizeof(dest_qword));
2541 }
2542 
2543 /**
2544  * ice_set_ctx - set context bits in packed structure
2545  * @src_ctx:  pointer to a generic non-packed context structure
2546  * @dest_ctx: pointer to memory for the packed structure
2547  * @ce_info:  a description of the structure to be transformed
2548  */
2549 enum ice_status
2550 ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
2551 {
2552 	int f;
2553 
2554 	for (f = 0; ce_info[f].width; f++) {
2555 		/* We have to deal with each element of the FW response
2556 		 * using the correct size so that we are correct regardless
2557 		 * of the endianness of the machine.
2558 		 */
2559 		switch (ce_info[f].size_of) {
2560 		case sizeof(u8):
2561 			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
2562 			break;
2563 		case sizeof(u16):
2564 			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
2565 			break;
2566 		case sizeof(u32):
2567 			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
2568 			break;
2569 		case sizeof(u64):
2570 			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
2571 			break;
2572 		default:
2573 			return ICE_ERR_INVAL_SIZE;
2574 		}
2575 	}
2576 
2577 	return 0;
2578 }
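
/* Editor's usage sketch for ice_set_ctx (illustration only, with a
 * hypothetical two-field descriptor table; the real tables are defined
 * elsewhere in the driver):
 *
 *	static const struct ice_ctx_ele example_ctx_info[] = {
 *		{ .offset = 0, .size_of = sizeof(u16), .width = 13, .lsb = 0 },
 *		{ .offset = 2, .size_of = sizeof(u8), .width = 3, .lsb = 13 },
 *		{ 0 },
 *	};
 *
 *	ice_set_ctx((u8 *)&unpacked_ctx, packed_buf, example_ctx_info);
 *
 * Each entry is dispatched to ice_write_byte/word/dword/qword based on its
 * size_of, and the zero-width terminator ends the loop.
 */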
2579 
2580 /**
2581  * ice_ena_vsi_txq
2582  * @pi: port information structure
2583  * @vsi_handle: software VSI handle
2584  * @tc: tc number
2585  * @num_qgrps: Number of added queue groups
2586  * @buf: list of queue groups to be added
2587  * @buf_size: size of buffer for indirect command
2588  * @cd: pointer to command details structure or NULL
2589  *
2590  * This function adds one LAN Tx queue.
2591  */
2592 enum ice_status
2593 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
2594 		struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
2595 		struct ice_sq_cd *cd)
2596 {
2597 	struct ice_aqc_txsched_elem_data node = { 0 };
2598 	struct ice_sched_node *parent;
2599 	enum ice_status status;
2600 	struct ice_hw *hw;
2601 
2602 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
2603 		return ICE_ERR_CFG;
2604 
2605 	if (num_qgrps > 1 || buf->num_txqs > 1)
2606 		return ICE_ERR_MAX_LIMIT;
2607 
2608 	hw = pi->hw;
2609 
2610 	if (!ice_is_vsi_valid(hw, vsi_handle))
2611 		return ICE_ERR_PARAM;
2612 
2613 	mutex_lock(&pi->sched_lock);
2614 
2615 	/* find a parent node */
2616 	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
2617 					    ICE_SCHED_NODE_OWNER_LAN);
2618 	if (!parent) {
2619 		status = ICE_ERR_PARAM;
2620 		goto ena_txq_exit;
2621 	}
2622 
2623 	buf->parent_teid = parent->info.node_teid;
2624 	node.parent_teid = parent->info.node_teid;
2625 	/* Mark the values in the "generic" section as valid. The default
2626 	 * value in the "generic" section is zero. This means that:
2627 	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
2628 	 * - 0 priority among siblings, indicated by Bit 1-3.
2629 	 * - WFQ, indicated by Bit 4.
2630 	 * - 0 Adjustment value is used in PSM credit update flow, indicated by
2631 	 * Bit 5-6.
2632 	 * - Bit 7 is reserved.
2633 	 * Without setting the generic section as valid in valid_sections, the
2634 	 * Admin Q command will fail with error code ICE_AQ_RC_EINVAL.
2635 	 */
2636 	buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;
2637 
2638 	/* add the LAN Tx queue */
2639 	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
2640 	if (status)
2641 		goto ena_txq_exit;
2642 
2643 	node.node_teid = buf->txqs[0].q_teid;
2644 	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
2645 
2646 	/* add a leaf node into the scheduler tree queue layer */
2647 	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
2648 
2649 ena_txq_exit:
2650 	mutex_unlock(&pi->sched_lock);
2651 	return status;
2652 }
2653 
2654 /**
2655  * ice_dis_vsi_txq
2656  * @pi: port information structure
2657  * @num_queues: number of queues
2658  * @q_ids: pointer to the q_id array
2659  * @q_teids: pointer to queue node teids
2660  * @rst_src: if called due to reset, specifies the RST source
2661  * @vmvf_num: the relative VM or VF number that is undergoing the reset
2662  * @cd: pointer to command details structure or NULL
2663  *
2664  * This function removes queues and their corresponding nodes in SW DB
2665  */
2666 enum ice_status
2667 ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
2668 		u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num,
2669 		struct ice_sq_cd *cd)
2670 {
2671 	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
2672 	struct ice_aqc_dis_txq_item qg_list;
2673 	u16 i;
2674 
2675 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
2676 		return ICE_ERR_CFG;
2677 
2678 	/* if the queues are already disabled but the disable queue command still
2679 	 * has to be sent to complete the VF reset, then call ice_aq_dis_lan_txq
2680 	 * without any queue information
2681 	 */
2682 
2683 	if (!num_queues && rst_src)
2684 		return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src, vmvf_num,
2685 					  NULL);
2686 
2687 	mutex_lock(&pi->sched_lock);
2688 
2689 	for (i = 0; i < num_queues; i++) {
2690 		struct ice_sched_node *node;
2691 
2692 		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
2693 		if (!node)
2694 			continue;
2695 		qg_list.parent_teid = node->info.parent_teid;
2696 		qg_list.num_qs = 1;
2697 		qg_list.q_id[0] = cpu_to_le16(q_ids[i]);
2698 		status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
2699 					    sizeof(qg_list), rst_src, vmvf_num,
2700 					    cd);
2701 
2702 		if (status)
2703 			break;
2704 		ice_free_sched_node(pi, node);
2705 	}
2706 	mutex_unlock(&pi->sched_lock);
2707 	return status;
2708 }
2709 
2710 /**
2711  * ice_cfg_vsi_qs - configure the new/existing VSI queues
2712  * @pi: port information structure
2713  * @vsi_handle: software VSI handle
2714  * @tc_bitmap: TC bitmap
2715  * @maxqs: max queues array per TC
2716  * @owner: lan or rdma
2717  *
2718  * This function adds/updates the VSI queues per TC.
2719  */
2720 static enum ice_status
2721 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
2722 	       u16 *maxqs, u8 owner)
2723 {
2724 	enum ice_status status = 0;
2725 	u8 i;
2726 
2727 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
2728 		return ICE_ERR_CFG;
2729 
2730 	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
2731 		return ICE_ERR_PARAM;
2732 
2733 	mutex_lock(&pi->sched_lock);
2734 
2735 	for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
2736 		/* configuration is possible only if TC node is present */
2737 		if (!ice_sched_get_tc_node(pi, i))
2738 			continue;
2739 
2740 		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
2741 					   ice_is_tc_ena(tc_bitmap, i));
2742 		if (status)
2743 			break;
2744 	}
2745 
2746 	mutex_unlock(&pi->sched_lock);
2747 	return status;
2748 }
2749 
2750 /**
2751  * ice_cfg_vsi_lan - configure VSI lan queues
2752  * @pi: port information structure
2753  * @vsi_handle: software VSI handle
2754  * @tc_bitmap: TC bitmap
2755  * @max_lanqs: max lan queues array per TC
2756  *
2757  * This function adds/updates the VSI lan queues per TC.
2758  */
2759 enum ice_status
2760 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
2761 		u16 *max_lanqs)
2762 {
2763 	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
2764 			      ICE_SCHED_NODE_OWNER_LAN);
2765 }
2766 
2767 /**
2768  * ice_replay_pre_init - replay pre initialization
2769  * @hw: pointer to the hw struct
2770  *
2771  * Initializes required config data for VSI, FD, ACL, and RSS before replay.
2772  */
2773 static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
2774 {
2775 	struct ice_switch_info *sw = hw->switch_info;
2776 	u8 i;
2777 
2778 	/* Delete old entries from replay filter list head if there is any */
2779 	ice_rm_all_sw_replay_rule_info(hw);
2780 	/* At the start of replay, move entries into the replay_rules list.
2781 	 * This allows rule entries to be added back to the filt_rules list,
2782 	 * which is the operational list.
2783 	 */
2784 	for (i = 0; i < ICE_SW_LKUP_LAST; i++)
2785 		list_replace_init(&sw->recp_list[i].filt_rules,
2786 				  &sw->recp_list[i].filt_replay_rules);
2787 
2788 	return 0;
2789 }
2790 
2791 /**
2792  * ice_replay_vsi - replay VSI configuration
2793  * @hw: pointer to the hw struct
2794  * @vsi_handle: driver VSI handle
2795  *
2796  * Restore all VSI configuration after reset. It is required to call this
2797  * function with the main VSI first.
2798  */
2799 enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
2800 {
2801 	enum ice_status status;
2802 
2803 	if (!ice_is_vsi_valid(hw, vsi_handle))
2804 		return ICE_ERR_PARAM;
2805 
2806 	/* Replay pre-initialization if there is any */
2807 	if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
2808 		status = ice_replay_pre_init(hw);
2809 		if (status)
2810 			return status;
2811 	}
2812 
2813 	/* Replay per VSI all filters */
2814 	status = ice_replay_vsi_all_fltr(hw, vsi_handle);
2815 	return status;
2816 }
2817 
2818 /**
2819  * ice_replay_post - post replay configuration cleanup
2820  * @hw: pointer to the hw struct
2821  *
2822  * Post replay cleanup.
2823  */
2824 void ice_replay_post(struct ice_hw *hw)
2825 {
2826 	/* Delete old entries from replay filter list head */
2827 	ice_rm_all_sw_replay_rule_info(hw);
2828 }
2829 
2830 /**
2831  * ice_stat_update40 - read 40 bit stat from the chip and update stat values
2832  * @hw: ptr to the hardware info
2833  * @hireg: high 32 bit HW register to read from
2834  * @loreg: low 32 bit HW register to read from
2835  * @prev_stat_loaded: bool to specify if previous stats are loaded
2836  * @prev_stat: ptr to previous loaded stat value
2837  * @cur_stat: ptr to current stat value
2838  */
2839 void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
2840 		       bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat)
2841 {
2842 	u64 new_data;
2843 
2844 	new_data = rd32(hw, loreg);
2845 	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
2846 
2847 	/* device stats are not reset at PFR; they likely will not be zeroed
2848 	 * when the driver starts, so save the first values read and use them
2849 	 * as offsets to be subtracted from the raw values in order to report
2850 	 * stats that count from zero.
2851 	 */
2852 	if (!prev_stat_loaded)
2853 		*prev_stat = new_data;
2854 	if (new_data >= *prev_stat)
2855 		*cur_stat = new_data - *prev_stat;
2856 	else
2857 		/* to manage the potential roll-over */
2858 		*cur_stat = (new_data + BIT_ULL(40)) - *prev_stat;
2859 	*cur_stat &= 0xFFFFFFFFFFULL;
2860 }
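
/* Editor's rollover example for the 40-bit statistics handling above
 * (illustration only): when the counter wraps, the raw value is smaller
 * than prev_stat, so BIT_ULL(40) is added back before subtracting:
 *
 *	prev_stat = 0xFFFFFFFF00  (near the 40-bit maximum)
 *	new_data  = 0x10          (counter has wrapped)
 *	cur_stat  = (0x10 + BIT_ULL(40)) - 0xFFFFFFFF00 = 0x110
 */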
2861 
2862 /**
2863  * ice_stat_update32 - read 32 bit stat from the chip and update stat values
2864  * @hw: ptr to the hardware info
2865  * @reg: HW register to read from
2866  * @prev_stat_loaded: bool to specify if previous stats are loaded
2867  * @prev_stat: ptr to previous loaded stat value
2868  * @cur_stat: ptr to current stat value
2869  */
2870 void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
2871 		       u64 *prev_stat, u64 *cur_stat)
2872 {
2873 	u32 new_data;
2874 
2875 	new_data = rd32(hw, reg);
2876 
2877 	/* device stats are not reset at PFR; they likely will not be zeroed
2878 	 * when the driver starts, so save the first values read and use them
2879 	 * as offsets to be subtracted from the raw values in order to report
2880 	 * stats that count from zero.
2881 	 */
2882 	if (!prev_stat_loaded)
2883 		*prev_stat = new_data;
2884 	if (new_data >= *prev_stat)
2885 		*cur_stat = new_data - *prev_stat;
2886 	else
2887 		/* to manage the potential roll-over */
2888 		*cur_stat = (new_data + BIT_ULL(32)) - *prev_stat;
2889 }
2890