xref: /openbmc/linux/drivers/net/ethernet/intel/ice/ice_common.c (revision 081c65360bd817672d0753fdf68ab34802d7a81d)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 #include "ice_common.h"
5 #include "ice_sched.h"
6 #include "ice_adminq_cmd.h"
7 
8 #define ICE_PF_RESET_WAIT_COUNT	200
9 
10 #define ICE_PROG_FLEX_ENTRY(hw, rxdid, mdid, idx) \
11 	wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(rxdid), \
12 	     ((ICE_RX_OPC_MDID << \
13 	       GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_S) & \
14 	      GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_M) | \
15 	     (((mdid) << GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_S) & \
16 	      GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_M))
17 
18 #define ICE_PROG_FLG_ENTRY(hw, rxdid, flg_0, flg_1, flg_2, flg_3, idx) \
19 	wr32((hw), GLFLXP_RXDID_FLAGS(rxdid, idx), \
20 	     (((flg_0) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & \
21 	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) | \
22 	     (((flg_1) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) & \
23 	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M) | \
24 	     (((flg_2) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S) & \
25 	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M) | \
26 	     (((flg_3) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S) & \
27 	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M))
28 
29 /**
30  * ice_set_mac_type - Sets MAC type
31  * @hw: pointer to the HW structure
32  *
33  * This function sets the MAC type of the adapter based on the
34  * vendor ID and device ID stored in the HW structure.
35  */
36 static enum ice_status ice_set_mac_type(struct ice_hw *hw)
37 {
38 	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
39 		return ICE_ERR_DEVICE_NOT_SUPPORTED;
40 
41 	hw->mac_type = ICE_MAC_GENERIC;
42 	return 0;
43 }
44 
45 /**
46  * ice_dev_onetime_setup - Temporary HW/FW workarounds
47  * @hw: pointer to the HW structure
48  *
49  * This function provides temporary workarounds for certain issues
50  * that are expected to be fixed in the HW/FW.
51  */
52 void ice_dev_onetime_setup(struct ice_hw *hw)
53 {
54 #define MBX_PF_VT_PFALLOC	0x00231E80
55 	/* set VFs per PF */
56 	wr32(hw, MBX_PF_VT_PFALLOC, rd32(hw, PF_VT_PFALLOC_HIF));
57 }
58 
59 /**
60  * ice_clear_pf_cfg - Clear PF configuration
61  * @hw: pointer to the hardware structure
62  *
63  * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
64  * configuration, flow director filters, etc.).
65  */
66 enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
67 {
68 	struct ice_aq_desc desc;
69 
70 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
71 
72 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
73 }
74 
75 /**
76  * ice_aq_manage_mac_read - manage MAC address read command
77  * @hw: pointer to the HW struct
78  * @buf: a virtual buffer to hold the manage MAC read response
79  * @buf_size: Size of the virtual buffer
80  * @cd: pointer to command details structure or NULL
81  *
82  * This function is used to return per PF station MAC address (0x0107).
83  * NOTE: Upon successful completion of this command, MAC address information
84  * is returned in user specified buffer. Please interpret user specified
85  * is returned in the user-specified buffer. Please interpret the
86  * user-specified buffer as a "manage_mac_read" response.
87  * Responses such as various MAC addresses are stored in the HW struct (port.mac).
88  */
89 static enum ice_status
90 ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
91 		       struct ice_sq_cd *cd)
92 {
93 	struct ice_aqc_manage_mac_read_resp *resp;
94 	struct ice_aqc_manage_mac_read *cmd;
95 	struct ice_aq_desc desc;
96 	enum ice_status status;
97 	u16 flags;
98 	u8 i;
99 
100 	cmd = &desc.params.mac_read;
101 
102 	if (buf_size < sizeof(*resp))
103 		return ICE_ERR_BUF_TOO_SHORT;
104 
105 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
106 
107 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
108 	if (status)
109 		return status;
110 
111 	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
112 	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
113 
114 	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
115 		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
116 		return ICE_ERR_CFG;
117 	}
118 
119 	/* A single port can report up to two (LAN and WoL) addresses */
120 	for (i = 0; i < cmd->num_addr; i++)
121 		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
122 			ether_addr_copy(hw->port_info->mac.lan_addr,
123 					resp[i].mac_addr);
124 			ether_addr_copy(hw->port_info->mac.perm_addr,
125 					resp[i].mac_addr);
126 			break;
127 		}
128 
129 	return 0;
130 }
131 
132 /**
133  * ice_aq_get_phy_caps - returns PHY capabilities
134  * @pi: port information structure
135  * @qual_mods: report qualified modules
136  * @report_mode: report mode capabilities
137  * @pcaps: structure for PHY capabilities to be filled
138  * @cd: pointer to command details structure or NULL
139  *
140  * Returns the various PHY capabilities supported on the Port (0x0600)
141  */
142 enum ice_status
143 ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
144 		    struct ice_aqc_get_phy_caps_data *pcaps,
145 		    struct ice_sq_cd *cd)
146 {
147 	struct ice_aqc_get_phy_caps *cmd;
148 	u16 pcaps_size = sizeof(*pcaps);
149 	struct ice_aq_desc desc;
150 	enum ice_status status;
151 
152 	cmd = &desc.params.get_phy;
153 
154 	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
155 		return ICE_ERR_PARAM;
156 
157 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
158 
159 	if (qual_mods)
160 		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);
161 
162 	cmd->param0 |= cpu_to_le16(report_mode);
163 	status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);
164 
165 	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
166 		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
167 		pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
168 	}
169 
170 	return status;
171 }
172 
173 /**
174  * ice_get_media_type - Gets media type
175  * @pi: port information structure
176  */
177 static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
178 {
179 	struct ice_link_status *hw_link_info;
180 
181 	if (!pi)
182 		return ICE_MEDIA_UNKNOWN;
183 
184 	hw_link_info = &pi->phy.link_info;
185 	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
186 		/* If more than one media type is selected, report unknown */
187 		return ICE_MEDIA_UNKNOWN;
188 
189 	if (hw_link_info->phy_type_low) {
190 		switch (hw_link_info->phy_type_low) {
191 		case ICE_PHY_TYPE_LOW_1000BASE_SX:
192 		case ICE_PHY_TYPE_LOW_1000BASE_LX:
193 		case ICE_PHY_TYPE_LOW_10GBASE_SR:
194 		case ICE_PHY_TYPE_LOW_10GBASE_LR:
195 		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
196 		case ICE_PHY_TYPE_LOW_25GBASE_SR:
197 		case ICE_PHY_TYPE_LOW_25GBASE_LR:
198 		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
199 		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
200 		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
201 		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
202 		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
203 		case ICE_PHY_TYPE_LOW_50GBASE_SR:
204 		case ICE_PHY_TYPE_LOW_50GBASE_FR:
205 		case ICE_PHY_TYPE_LOW_50GBASE_LR:
206 		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
207 		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
208 		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
209 		case ICE_PHY_TYPE_LOW_100GBASE_DR:
210 			return ICE_MEDIA_FIBER;
211 		case ICE_PHY_TYPE_LOW_100BASE_TX:
212 		case ICE_PHY_TYPE_LOW_1000BASE_T:
213 		case ICE_PHY_TYPE_LOW_2500BASE_T:
214 		case ICE_PHY_TYPE_LOW_5GBASE_T:
215 		case ICE_PHY_TYPE_LOW_10GBASE_T:
216 		case ICE_PHY_TYPE_LOW_25GBASE_T:
217 			return ICE_MEDIA_BASET;
218 		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
219 		case ICE_PHY_TYPE_LOW_25GBASE_CR:
220 		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
221 		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
222 		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
223 		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
224 		case ICE_PHY_TYPE_LOW_50GBASE_CP:
225 		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
226 		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
227 		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
228 			return ICE_MEDIA_DA;
229 		case ICE_PHY_TYPE_LOW_1000BASE_KX:
230 		case ICE_PHY_TYPE_LOW_2500BASE_KX:
231 		case ICE_PHY_TYPE_LOW_2500BASE_X:
232 		case ICE_PHY_TYPE_LOW_5GBASE_KR:
233 		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
234 		case ICE_PHY_TYPE_LOW_25GBASE_KR:
235 		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
236 		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
237 		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
238 		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
239 		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
240 		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
241 		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
242 			return ICE_MEDIA_BACKPLANE;
243 		}
244 	} else {
245 		switch (hw_link_info->phy_type_high) {
246 		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
247 			return ICE_MEDIA_BACKPLANE;
248 		}
249 	}
250 	return ICE_MEDIA_UNKNOWN;
251 }
252 
253 /**
254  * ice_aq_get_link_info
255  * @pi: port information structure
256  * @ena_lse: enable/disable LinkStatusEvent reporting
257  * @link: pointer to link status structure - optional
258  * @cd: pointer to command details structure or NULL
259  *
260  * Get Link Status (0x607). Returns the link status of the adapter.
261  */
262 enum ice_status
263 ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
264 		     struct ice_link_status *link, struct ice_sq_cd *cd)
265 {
266 	struct ice_aqc_get_link_status_data link_data = { 0 };
267 	struct ice_aqc_get_link_status *resp;
268 	struct ice_link_status *li_old, *li;
269 	enum ice_media_type *hw_media_type;
270 	struct ice_fc_info *hw_fc_info;
271 	bool tx_pause, rx_pause;
272 	struct ice_aq_desc desc;
273 	enum ice_status status;
274 	struct ice_hw *hw;
275 	u16 cmd_flags;
276 
277 	if (!pi)
278 		return ICE_ERR_PARAM;
279 	hw = pi->hw;
280 	li_old = &pi->phy.link_info_old;
281 	hw_media_type = &pi->phy.media_type;
282 	li = &pi->phy.link_info;
283 	hw_fc_info = &pi->fc;
284 
285 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
286 	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
287 	resp = &desc.params.get_link_status;
288 	resp->cmd_flags = cpu_to_le16(cmd_flags);
289 	resp->lport_num = pi->lport;
290 
291 	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
292 
293 	if (status)
294 		return status;
295 
296 	/* save off old link status information */
297 	*li_old = *li;
298 
299 	/* update current link status information */
300 	li->link_speed = le16_to_cpu(link_data.link_speed);
301 	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
302 	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
303 	*hw_media_type = ice_get_media_type(pi);
304 	li->link_info = link_data.link_info;
305 	li->an_info = link_data.an_info;
306 	li->ext_info = link_data.ext_info;
307 	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
308 	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
309 	li->topo_media_conflict = link_data.topo_media_conflict;
310 	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
311 				      ICE_AQ_CFG_PACING_TYPE_M);
312 
313 	/* update fc info */
314 	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
315 	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
316 	if (tx_pause && rx_pause)
317 		hw_fc_info->current_mode = ICE_FC_FULL;
318 	else if (tx_pause)
319 		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
320 	else if (rx_pause)
321 		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
322 	else
323 		hw_fc_info->current_mode = ICE_FC_NONE;
324 
325 	li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));
326 
327 	ice_debug(hw, ICE_DBG_LINK, "link_speed = 0x%x\n", li->link_speed);
328 	ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
329 		  (unsigned long long)li->phy_type_low);
330 	ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
331 		  (unsigned long long)li->phy_type_high);
332 	ice_debug(hw, ICE_DBG_LINK, "media_type = 0x%x\n", *hw_media_type);
333 	ice_debug(hw, ICE_DBG_LINK, "link_info = 0x%x\n", li->link_info);
334 	ice_debug(hw, ICE_DBG_LINK, "an_info = 0x%x\n", li->an_info);
335 	ice_debug(hw, ICE_DBG_LINK, "ext_info = 0x%x\n", li->ext_info);
336 	ice_debug(hw, ICE_DBG_LINK, "lse_ena = 0x%x\n", li->lse_ena);
337 	ice_debug(hw, ICE_DBG_LINK, "max_frame = 0x%x\n", li->max_frame_size);
338 	ice_debug(hw, ICE_DBG_LINK, "pacing = 0x%x\n", li->pacing);
339 
340 	/* save link status information */
341 	if (link)
342 		*link = *li;
343 
344 	/* flag cleared so calling functions don't call AQ again */
345 	pi->phy.get_link_info = false;
346 
347 	return 0;
348 }
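
/* Example (illustrative sketch, not part of the flow above): a caller could
 * poll the current link state with ice_aq_get_link_info() and test the
 * ICE_AQ_LINK_UP bit of the returned link_info field. The function name below
 * is hypothetical.
 */
static bool __maybe_unused ice_example_is_link_up(struct ice_port_info *pi)
{
	struct ice_link_status link;

	/* false: leave Link Status Event reporting unchanged */
	if (ice_aq_get_link_info(pi, false, &link, NULL))
		return false;

	return !!(link.link_info & ICE_AQ_LINK_UP);
}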
349 
350 /**
351  * ice_init_flex_flags
352  * @hw: pointer to the hardware structure
353  * @prof_id: Rx Descriptor Builder profile ID
354  *
355  * Function to initialize Rx flex flags
356  */
357 static void ice_init_flex_flags(struct ice_hw *hw, enum ice_rxdid prof_id)
358 {
359 	u8 idx = 0;
360 
361 	/* Flex-flag fields (0-2) are programmed with FLG64 bits with layout:
362 	 * flexiflags0[5:0] - TCP flags, is_packet_fragmented, is_packet_UDP_GRE
363 	 * flexiflags1[3:0] - Not used for flag programming
364 	 * flexiflags2[7:0] - Tunnel and VLAN types
365 	 * 2 invalid fields in last index
366 	 */
367 	switch (prof_id) {
368 	/* Rx flex flags are currently programmed for the NIC profiles only.
369 	 * Different flag bit programming configurations can be added per
370 	 * profile as needed.
371 	 */
372 	case ICE_RXDID_FLEX_NIC:
373 	case ICE_RXDID_FLEX_NIC_2:
374 		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_FRG,
375 				   ICE_FLG_UDP_GRE, ICE_FLG_PKT_DSI,
376 				   ICE_FLG_FIN, idx++);
377 		/* flex flag 1 is not used for flexi-flag programming, skipping
378 		 * these four FLG64 bits.
379 		 */
380 		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_SYN, ICE_FLG_RST,
381 				   ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx++);
382 		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_DSI,
383 				   ICE_FLG_PKT_DSI, ICE_FLG_EVLAN_x8100,
384 				   ICE_FLG_EVLAN_x9100, idx++);
385 		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_VLAN_x8100,
386 				   ICE_FLG_TNL_VLAN, ICE_FLG_TNL_MAC,
387 				   ICE_FLG_TNL0, idx++);
388 		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_TNL1, ICE_FLG_TNL2,
389 				   ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx);
390 		break;
391 
392 	default:
393 		ice_debug(hw, ICE_DBG_INIT,
394 			  "Flag programming for profile ID %d not supported\n",
395 			  prof_id);
396 	}
397 }
398 
399 /**
400  * ice_init_flex_flds
401  * @hw: pointer to the hardware structure
402  * @prof_id: Rx Descriptor Builder profile ID
403  *
404  * Function to initialize flex descriptors
405  */
406 static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id)
407 {
408 	enum ice_flex_rx_mdid mdid;
409 
410 	switch (prof_id) {
411 	case ICE_RXDID_FLEX_NIC:
412 	case ICE_RXDID_FLEX_NIC_2:
413 		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_LOW, 0);
414 		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_HIGH, 1);
415 		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_FLOW_ID_LOWER, 2);
416 
417 		mdid = (prof_id == ICE_RXDID_FLEX_NIC_2) ?
418 			ICE_RX_MDID_SRC_VSI : ICE_RX_MDID_FLOW_ID_HIGH;
419 
420 		ICE_PROG_FLEX_ENTRY(hw, prof_id, mdid, 3);
421 
422 		ice_init_flex_flags(hw, prof_id);
423 		break;
424 
425 	default:
426 		ice_debug(hw, ICE_DBG_INIT,
427 			  "Field init for profile ID %d not supported\n",
428 			  prof_id);
429 	}
430 }
431 
432 /**
433  * ice_init_fltr_mgmt_struct - initializes filter management list and locks
434  * @hw: pointer to the HW struct
435  */
436 static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
437 {
438 	struct ice_switch_info *sw;
439 
440 	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
441 				       sizeof(*hw->switch_info), GFP_KERNEL);
442 	sw = hw->switch_info;
443 
444 	if (!sw)
445 		return ICE_ERR_NO_MEMORY;
446 
447 	INIT_LIST_HEAD(&sw->vsi_list_map_head);
448 
449 	return ice_init_def_sw_recp(hw);
450 }
451 
452 /**
453  * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
454  * @hw: pointer to the HW struct
455  */
456 static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
457 {
458 	struct ice_switch_info *sw = hw->switch_info;
459 	struct ice_vsi_list_map_info *v_pos_map;
460 	struct ice_vsi_list_map_info *v_tmp_map;
461 	struct ice_sw_recipe *recps;
462 	u8 i;
463 
464 	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
465 				 list_entry) {
466 		list_del(&v_pos_map->list_entry);
467 		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
468 	}
469 	recps = hw->switch_info->recp_list;
470 	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
471 		struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
472 
473 		recps[i].root_rid = i;
474 		mutex_destroy(&recps[i].filt_rule_lock);
475 		list_for_each_entry_safe(lst_itr, tmp_entry,
476 					 &recps[i].filt_rules, list_entry) {
477 			list_del(&lst_itr->list_entry);
478 			devm_kfree(ice_hw_to_dev(hw), lst_itr);
479 		}
480 	}
481 	ice_rm_all_sw_replay_rule_info(hw);
482 	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
483 	devm_kfree(ice_hw_to_dev(hw), sw);
484 }
485 
486 #define ICE_FW_LOG_DESC_SIZE(n)	(sizeof(struct ice_aqc_fw_logging_data) + \
487 	(((n) - 1) * sizeof(((struct ice_aqc_fw_logging_data *)0)->entry)))
488 #define ICE_FW_LOG_DESC_SIZE_MAX	\
489 	ICE_FW_LOG_DESC_SIZE(ICE_AQC_FW_LOG_ID_MAX)
490 
491 /**
492  * ice_get_fw_log_cfg - get FW logging configuration
493  * @hw: pointer to the HW struct
494  */
495 static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
496 {
497 	struct ice_aqc_fw_logging_data *config;
498 	struct ice_aq_desc desc;
499 	enum ice_status status;
500 	u16 size;
501 
502 	size = ICE_FW_LOG_DESC_SIZE_MAX;
503 	config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
504 	if (!config)
505 		return ICE_ERR_NO_MEMORY;
506 
507 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);
508 
509 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
510 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
511 
512 	status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
513 	if (!status) {
514 		u16 i;
515 
516 		/* Save FW logging information into the HW structure */
517 		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
518 			u16 v, m, flgs;
519 
520 			v = le16_to_cpu(config->entry[i]);
521 			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
522 			flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;
523 
524 			if (m < ICE_AQC_FW_LOG_ID_MAX)
525 				hw->fw_log.evnts[m].cur = flgs;
526 		}
527 	}
528 
529 	devm_kfree(ice_hw_to_dev(hw), config);
530 
531 	return status;
532 }
533 
534 /**
535  * ice_cfg_fw_log - configure FW logging
536  * @hw: pointer to the HW struct
537  * @enable: enable certain FW logging events if true, disable all if false
538  *
539  * This function enables/disables the FW logging via Rx CQ events and a UART
540  * port based on predetermined configurations. FW logging via the Rx CQ can be
541  * enabled/disabled for individual PFs. However, FW logging via the UART can
542  * only be enabled/disabled for all PFs on the same device.
543  *
544  * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
545  * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
546  * before initializing the device.
547  *
548  * When re/configuring FW logging, callers need to update the "cfg" elements of
549  * the hw->fw_log.evnts array with the desired logging event configurations for
550  * modules of interest. When disabling FW logging completely, the callers can
551  * just pass false in the "enable" parameter. On completion, the function will
552  * update the "cur" element of the hw->fw_log.evnts array with the resulting
553  * logging event configurations of the modules that are being re/configured. FW
554  * logging modules that are not part of a reconfiguration operation retain their
555  * previous states.
556  *
557  * Before resetting the device, it is recommended that the driver disables FW
558  * logging before shutting down the control queue. When disabling FW logging
559  * ("enable" = false), the latest configurations of FW logging events stored in
560  * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
561  * a device reset.
562  *
563  * When enabling FW logging to emit log messages via the Rx CQ during the
564  * device's initialization phase, a mechanism alternative to interrupt handlers
565  * needs to be used to extract FW log messages from the Rx CQ periodically and
566  * to prevent the Rx CQ from being full and stalling other types of control
567  * messages from FW to SW. Interrupts are typically disabled during the device's
568  * initialization phase.
569  */
570 static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
571 {
572 	struct ice_aqc_fw_logging_data *data = NULL;
573 	struct ice_aqc_fw_logging *cmd;
574 	enum ice_status status = 0;
575 	u16 i, chgs = 0, len = 0;
576 	struct ice_aq_desc desc;
577 	u8 actv_evnts = 0;
578 	void *buf = NULL;
579 
580 	if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
581 		return 0;
582 
583 	/* Disable FW logging only when the control queue is still responsive */
584 	if (!enable &&
585 	    (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
586 		return 0;
587 
588 	/* Get current FW log settings */
589 	status = ice_get_fw_log_cfg(hw);
590 	if (status)
591 		return status;
592 
593 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
594 	cmd = &desc.params.fw_logging;
595 
596 	/* Indicate which controls are valid */
597 	if (hw->fw_log.cq_en)
598 		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;
599 
600 	if (hw->fw_log.uart_en)
601 		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;
602 
603 	if (enable) {
604 		/* Fill in an array of entries with FW logging modules and
605 		 * logging events being reconfigured.
606 		 */
607 		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
608 			u16 val;
609 
610 			/* Keep track of enabled event types */
611 			actv_evnts |= hw->fw_log.evnts[i].cfg;
612 
613 			if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
614 				continue;
615 
616 			if (!data) {
617 				data = devm_kzalloc(ice_hw_to_dev(hw),
618 						    ICE_FW_LOG_DESC_SIZE_MAX,
619 						    GFP_KERNEL);
620 				if (!data)
621 					return ICE_ERR_NO_MEMORY;
622 			}
623 
624 			val = i << ICE_AQC_FW_LOG_ID_S;
625 			val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
626 			data->entry[chgs++] = cpu_to_le16(val);
627 		}
628 
629 		/* Only enable FW logging if at least one module is specified.
630 		 * If FW logging is currently enabled but no modules are enabled
631 		 * to emit log messages, disable FW logging altogether.
632 		 */
633 		if (actv_evnts) {
634 			/* Leave if there is effectively no change */
635 			if (!chgs)
636 				goto out;
637 
638 			if (hw->fw_log.cq_en)
639 				cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;
640 
641 			if (hw->fw_log.uart_en)
642 				cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;
643 
644 			buf = data;
645 			len = ICE_FW_LOG_DESC_SIZE(chgs);
646 			desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
647 		}
648 	}
649 
650 	status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
651 	if (!status) {
652 		/* Update the current configuration to reflect events enabled.
653 		 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
654 		 * logging mode is enabled for the device. They do not reflect
655 		 * actual modules being enabled to emit log messages. So, their
656 		 * values remain unchanged even when all modules are disabled.
657 		 */
658 		u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;
659 
660 		hw->fw_log.actv_evnts = actv_evnts;
661 		for (i = 0; i < cnt; i++) {
662 			u16 v, m;
663 
664 			if (!enable) {
665 				/* When disabling all FW logging events as part
666 				 * of device's de-initialization, the original
667 				 * configurations are retained, and can be used
668 				 * to reconfigure FW logging later if the device
669 				 * is re-initialized.
670 				 */
671 				hw->fw_log.evnts[i].cur = 0;
672 				continue;
673 			}
674 
675 			v = le16_to_cpu(data->entry[i]);
676 			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
677 			hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
678 		}
679 	}
680 
681 out:
682 	if (data)
683 		devm_kfree(ice_hw_to_dev(hw), data);
684 
685 	return status;
686 }
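
/* Example (illustrative sketch): how a caller might request FW logging over
 * the Rx CQ before ice_init_hw() runs ice_cfg_fw_log(hw, true). The function
 * name is hypothetical, and enabling every severity bit for every module is
 * an assumption made purely for demonstration.
 */
static void __maybe_unused ice_example_request_fw_log(struct ice_hw *hw)
{
	u16 i;

	/* emit FW log messages via the admin receive queue only */
	hw->fw_log.cq_en = true;
	hw->fw_log.uart_en = false;

	/* ice_cfg_fw_log() only sends entries whose "cfg" differs from "cur" */
	for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++)
		hw->fw_log.evnts[i].cfg = ICE_AQC_FW_LOG_EN_M >> ICE_AQC_FW_LOG_EN_S;
}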
687 
688 /**
689  * ice_output_fw_log
690  * @hw: pointer to the HW struct
691  * @desc: pointer to the AQ message descriptor
692  * @buf: pointer to the buffer accompanying the AQ message
693  *
694  * Formats a FW Log message and outputs it via the standard driver logs.
695  */
696 void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
697 {
698 	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
699 	ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
700 			le16_to_cpu(desc->datalen));
701 	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
702 }
703 
704 /**
705  * ice_get_itr_intrl_gran - determine int/intrl granularity
706  * @hw: pointer to the HW struct
707  *
708  * Determines the ITR/intrl granularities based on the maximum aggregate
709  * bandwidth according to the device's configuration during power-on.
710  */
711 static void ice_get_itr_intrl_gran(struct ice_hw *hw)
712 {
713 	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
714 			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
715 			GL_PWR_MODE_CTL_CAR_MAX_BW_S;
716 
717 	switch (max_agg_bw) {
718 	case ICE_MAX_AGG_BW_200G:
719 	case ICE_MAX_AGG_BW_100G:
720 	case ICE_MAX_AGG_BW_50G:
721 		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
722 		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
723 		break;
724 	case ICE_MAX_AGG_BW_25G:
725 		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
726 		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
727 		break;
728 	}
729 }
730 
731 /**
732  * ice_get_nvm_version - get cached NVM version data
733  * @hw: pointer to the hardware structure
734  * @oem_ver: 8 bit NVM version
735  * @oem_build: 16 bit NVM build number
736  * @oem_patch: 8 bit NVM patch number
737  * @ver_hi: high 16 bits of the NVM version
738  * @ver_lo: low 16 bits of the NVM version
739  */
740 void
741 ice_get_nvm_version(struct ice_hw *hw, u8 *oem_ver, u16 *oem_build,
742 		    u8 *oem_patch, u8 *ver_hi, u8 *ver_lo)
743 {
744 	struct ice_nvm_info *nvm = &hw->nvm;
745 
746 	*oem_ver = (u8)((nvm->oem_ver & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT);
747 	*oem_patch = (u8)(nvm->oem_ver & ICE_OEM_VER_PATCH_MASK);
748 	*oem_build = (u16)((nvm->oem_ver & ICE_OEM_VER_BUILD_MASK) >>
749 			   ICE_OEM_VER_BUILD_SHIFT);
750 	*ver_hi = (nvm->ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
751 	*ver_lo = (nvm->ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;
752 }
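
/* Example (illustrative sketch): formatting the components returned by
 * ice_get_nvm_version() into a human-readable string. The function name,
 * buffer and format string are assumptions for demonstration only.
 */
static void __maybe_unused
ice_example_nvm_ver_str(struct ice_hw *hw, char *buf, size_t len)
{
	u8 oem_ver, oem_patch, ver_hi, ver_lo;
	u16 oem_build;

	ice_get_nvm_version(hw, &oem_ver, &oem_build, &oem_patch, &ver_hi,
			    &ver_lo);

	/* NVM map version first, then OEM version/build/patch */
	snprintf(buf, len, "%x.%02x %d.%d.%d", ver_hi, ver_lo, oem_ver,
		 oem_build, oem_patch);
}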
753 
754 /**
755  * ice_init_hw - main hardware initialization routine
756  * @hw: pointer to the hardware structure
757  */
758 enum ice_status ice_init_hw(struct ice_hw *hw)
759 {
760 	struct ice_aqc_get_phy_caps_data *pcaps;
761 	enum ice_status status;
762 	u16 mac_buf_len;
763 	void *mac_buf;
764 
765 	/* Set MAC type based on DeviceID */
766 	status = ice_set_mac_type(hw);
767 	if (status)
768 		return status;
769 
770 	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
771 			 PF_FUNC_RID_FUNC_NUM_M) >>
772 		PF_FUNC_RID_FUNC_NUM_S;
773 
774 	status = ice_reset(hw, ICE_RESET_PFR);
775 	if (status)
776 		return status;
777 
778 	ice_get_itr_intrl_gran(hw);
779 
780 	status = ice_create_all_ctrlq(hw);
781 	if (status)
782 		goto err_unroll_cqinit;
783 
784 	/* Enable FW logging. Not fatal if this fails. */
785 	status = ice_cfg_fw_log(hw, true);
786 	if (status)
787 		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");
788 
789 	status = ice_clear_pf_cfg(hw);
790 	if (status)
791 		goto err_unroll_cqinit;
792 
793 	ice_clear_pxe_mode(hw);
794 
795 	status = ice_init_nvm(hw);
796 	if (status)
797 		goto err_unroll_cqinit;
798 
799 	status = ice_get_caps(hw);
800 	if (status)
801 		goto err_unroll_cqinit;
802 
803 	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
804 				     sizeof(*hw->port_info), GFP_KERNEL);
805 	if (!hw->port_info) {
806 		status = ICE_ERR_NO_MEMORY;
807 		goto err_unroll_cqinit;
808 	}
809 
810 	/* set the back pointer to HW */
811 	hw->port_info->hw = hw;
812 
813 	/* Initialize port_info struct with switch configuration data */
814 	status = ice_get_initial_sw_cfg(hw);
815 	if (status)
816 		goto err_unroll_alloc;
817 
818 	hw->evb_veb = true;
819 
820 	/* Query the allocated resources for Tx scheduler */
821 	status = ice_sched_query_res_alloc(hw);
822 	if (status) {
823 		ice_debug(hw, ICE_DBG_SCHED,
824 			  "Failed to get scheduler allocated resources\n");
825 		goto err_unroll_alloc;
826 	}
827 
828 	/* Initialize port_info struct with scheduler data */
829 	status = ice_sched_init_port(hw->port_info);
830 	if (status)
831 		goto err_unroll_sched;
832 
833 	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
834 	if (!pcaps) {
835 		status = ICE_ERR_NO_MEMORY;
836 		goto err_unroll_sched;
837 	}
838 
839 	/* Initialize port_info struct with PHY capabilities */
840 	status = ice_aq_get_phy_caps(hw->port_info, false,
841 				     ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
842 	devm_kfree(ice_hw_to_dev(hw), pcaps);
843 	if (status)
844 		goto err_unroll_sched;
845 
846 	/* Initialize port_info struct with link information */
847 	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
848 	if (status)
849 		goto err_unroll_sched;
850 
851 	/* need a valid SW entry point to build a Tx tree */
852 	if (!hw->sw_entry_point_layer) {
853 		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
854 		status = ICE_ERR_CFG;
855 		goto err_unroll_sched;
856 	}
857 	INIT_LIST_HEAD(&hw->agg_list);
858 	/* Initialize max burst size */
859 	if (!hw->max_burst_size)
860 		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
861 
862 	status = ice_init_fltr_mgmt_struct(hw);
863 	if (status)
864 		goto err_unroll_sched;
865 
866 	ice_dev_onetime_setup(hw);
867 
868 	/* Get MAC information */
869 	/* A single port can report up to two (LAN and WoL) addresses */
870 	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
871 			       sizeof(struct ice_aqc_manage_mac_read_resp),
872 			       GFP_KERNEL);
873 	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
874 
875 	if (!mac_buf) {
876 		status = ICE_ERR_NO_MEMORY;
877 		goto err_unroll_fltr_mgmt_struct;
878 	}
879 
880 	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
881 	devm_kfree(ice_hw_to_dev(hw), mac_buf);
882 
883 	if (status)
884 		goto err_unroll_fltr_mgmt_struct;
885 
886 	ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC);
887 	ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC_2);
888 	status = ice_init_hw_tbls(hw);
889 	if (status)
890 		goto err_unroll_fltr_mgmt_struct;
891 	return 0;
892 
893 err_unroll_fltr_mgmt_struct:
894 	ice_cleanup_fltr_mgmt_struct(hw);
895 err_unroll_sched:
896 	ice_sched_cleanup_all(hw);
897 err_unroll_alloc:
898 	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
899 err_unroll_cqinit:
900 	ice_destroy_all_ctrlq(hw);
901 	return status;
902 }
903 
904 /**
905  * ice_deinit_hw - unroll initialization operations done by ice_init_hw
906  * @hw: pointer to the hardware structure
907  *
908  * This should be called only during nominal operation, not as a result of
909  * ice_init_hw() failing since ice_init_hw() will take care of unrolling
910  * applicable initializations if it fails for any reason.
911  */
912 void ice_deinit_hw(struct ice_hw *hw)
913 {
914 	ice_cleanup_fltr_mgmt_struct(hw);
915 
916 	ice_sched_cleanup_all(hw);
917 	ice_sched_clear_agg(hw);
918 	ice_free_seg(hw);
919 	ice_free_hw_tbls(hw);
920 
921 	if (hw->port_info) {
922 		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
923 		hw->port_info = NULL;
924 	}
925 
926 	/* Attempt to disable FW logging before shutting down control queues */
927 	ice_cfg_fw_log(hw, false);
928 	ice_destroy_all_ctrlq(hw);
929 
930 	/* Clear VSI contexts if not already cleared */
931 	ice_clear_all_vsi_ctx(hw);
932 }
933 
934 /**
935  * ice_check_reset - Check to see if a global reset is complete
936  * @hw: pointer to the hardware structure
937  */
938 enum ice_status ice_check_reset(struct ice_hw *hw)
939 {
940 	u32 cnt, reg = 0, grst_delay;
941 
942 	/* Poll for Device Active state in case a recent CORER, GLOBR,
943 	 * or EMPR has occurred. The grst delay value is in 100ms units.
944 	 * Add 1sec for outstanding AQ commands that can take a long time.
945 	 */
946 	grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
947 		      GLGEN_RSTCTL_GRSTDEL_S) + 10;
948 
949 	for (cnt = 0; cnt < grst_delay; cnt++) {
950 		mdelay(100);
951 		reg = rd32(hw, GLGEN_RSTAT);
952 		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
953 			break;
954 	}
955 
956 	if (cnt == grst_delay) {
957 		ice_debug(hw, ICE_DBG_INIT,
958 			  "Global reset polling failed to complete.\n");
959 		return ICE_ERR_RESET_FAILED;
960 	}
961 
962 #define ICE_RESET_DONE_MASK	(GLNVM_ULD_CORER_DONE_M | \
963 				 GLNVM_ULD_GLOBR_DONE_M)
964 
965 	/* Device is Active; check Global Reset processes are done */
966 	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
967 		reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK;
968 		if (reg == ICE_RESET_DONE_MASK) {
969 			ice_debug(hw, ICE_DBG_INIT,
970 				  "Global reset processes done. %d\n", cnt);
971 			break;
972 		}
973 		mdelay(10);
974 	}
975 
976 	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
977 		ice_debug(hw, ICE_DBG_INIT,
978 			  "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
979 			  reg);
980 		return ICE_ERR_RESET_FAILED;
981 	}
982 
983 	return 0;
984 }
985 
986 /**
987  * ice_pf_reset - Reset the PF
988  * @hw: pointer to the hardware structure
989  *
990  * If a global reset has been triggered, this function checks
991  * for its completion and then issues the PF reset
992  */
993 static enum ice_status ice_pf_reset(struct ice_hw *hw)
994 {
995 	u32 cnt, reg;
996 
997 	/* If at function entry a global reset was already in progress, i.e.
998 	 * state is not 'device active' or any of the reset done bits are not
999 	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
1000 	 * global reset is done.
1001 	 */
1002 	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
1003 	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
1004 		/* poll on global reset currently in progress until done */
1005 		if (ice_check_reset(hw))
1006 			return ICE_ERR_RESET_FAILED;
1007 
1008 		return 0;
1009 	}
1010 
1011 	/* Reset the PF */
1012 	reg = rd32(hw, PFGEN_CTRL);
1013 
1014 	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
1015 
1016 	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
1017 		reg = rd32(hw, PFGEN_CTRL);
1018 		if (!(reg & PFGEN_CTRL_PFSWR_M))
1019 			break;
1020 
1021 		mdelay(1);
1022 	}
1023 
1024 	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
1025 		ice_debug(hw, ICE_DBG_INIT,
1026 			  "PF reset polling failed to complete.\n");
1027 		return ICE_ERR_RESET_FAILED;
1028 	}
1029 
1030 	return 0;
1031 }
1032 
1033 /**
1034  * ice_reset - Perform different types of reset
1035  * @hw: pointer to the hardware structure
1036  * @req: reset request
1037  *
1038  * This function triggers a reset as specified by the req parameter.
1039  *
1040  * Note:
1041  * If anything other than a PF reset is triggered, PXE mode is restored.
1042  * This has to be cleared using ice_clear_pxe_mode again, once the AQ
1043  * interface has been restored in the rebuild flow.
1044  */
1045 enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
1046 {
1047 	u32 val = 0;
1048 
1049 	switch (req) {
1050 	case ICE_RESET_PFR:
1051 		return ice_pf_reset(hw);
1052 	case ICE_RESET_CORER:
1053 		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
1054 		val = GLGEN_RTRIG_CORER_M;
1055 		break;
1056 	case ICE_RESET_GLOBR:
1057 		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
1058 		val = GLGEN_RTRIG_GLOBR_M;
1059 		break;
1060 	default:
1061 		return ICE_ERR_PARAM;
1062 	}
1063 
1064 	val |= rd32(hw, GLGEN_RTRIG);
1065 	wr32(hw, GLGEN_RTRIG, val);
1066 	ice_flush(hw);
1067 
1068 	/* wait for the FW to be ready */
1069 	return ice_check_reset(hw);
1070 }
1071 
1072 /**
1073  * ice_get_pfa_module_tlv - Reads sub module TLV from NVM PFA
1074  * @hw: pointer to hardware structure
1075  * @module_tlv: pointer to module TLV to return
1076  * @module_tlv_len: pointer to module TLV length to return
1077  * @module_type: module type requested
1078  *
1079  * Finds the requested sub module TLV type from the Preserved Field
1080  * Area (PFA) and returns the TLV pointer and length. The caller can
1081  * use these to read the variable length TLV value.
1082  */
1083 enum ice_status
1084 ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
1085 		       u16 module_type)
1086 {
1087 	enum ice_status status;
1088 	u16 pfa_len, pfa_ptr;
1089 	u16 next_tlv;
1090 
1091 	status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
1092 	if (status) {
1093 		ice_debug(hw, ICE_DBG_INIT, "Failed to read Preserved Field Array pointer.\n");
1094 		return status;
1095 	}
1096 	status = ice_read_sr_word(hw, pfa_ptr, &pfa_len);
1097 	if (status) {
1098 		ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n");
1099 		return status;
1100 	}
1101 	/* Starting with the first TLV after the PFA length, iterate through the list
1102 	 * of TLVs to find the requested one.
1103 	 */
1104 	next_tlv = pfa_ptr + 1;
1105 	while (next_tlv < pfa_ptr + pfa_len) {
1106 		u16 tlv_sub_module_type;
1107 		u16 tlv_len;
1108 
1109 		/* Read TLV type */
1110 		status = ice_read_sr_word(hw, next_tlv, &tlv_sub_module_type);
1111 		if (status) {
1112 			ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n");
1113 			break;
1114 		}
1115 		/* Read TLV length */
1116 		status = ice_read_sr_word(hw, next_tlv + 1, &tlv_len);
1117 		if (status) {
1118 			ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n");
1119 			break;
1120 		}
1121 		if (tlv_sub_module_type == module_type) {
1122 			if (tlv_len) {
1123 				*module_tlv = next_tlv;
1124 				*module_tlv_len = tlv_len;
1125 				return 0;
1126 			}
1127 			return ICE_ERR_INVAL_SIZE;
1128 		}
1129 		/* Check next TLV, i.e. current TLV pointer + length + 2 words
1130 		 * (for current TLV's type and length)
1131 		 */
1132 		next_tlv = next_tlv + tlv_len + 2;
1133 	}
1134 	/* Module does not exist */
1135 	return ICE_ERR_DOES_NOT_EXIST;
1136 }
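
/* Example (illustrative sketch): locating a sub module TLV with
 * ice_get_pfa_module_tlv() and reading its value words via
 * ice_read_sr_word(). The function name is hypothetical; the caller supplies
 * the module type and a destination buffer.
 */
static enum ice_status __maybe_unused
ice_example_read_pfa_tlv(struct ice_hw *hw, u16 module_type, u16 *words,
			 u16 max_words)
{
	enum ice_status status;
	u16 tlv, tlv_len, i;

	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len, module_type);
	if (status)
		return status;

	/* value words follow the TLV's type and length words */
	for (i = 0; i < tlv_len && i < max_words; i++) {
		status = ice_read_sr_word(hw, tlv + 2 + i, &words[i]);
		if (status)
			return status;
	}

	return 0;
}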
1137 
1138 /**
1139  * ice_copy_rxq_ctx_to_hw
1140  * @hw: pointer to the hardware structure
1141  * @ice_rxq_ctx: pointer to the rxq context
1142  * @rxq_index: the index of the Rx queue
1143  *
1144  * Copies rxq context from dense structure to HW register space
1145  */
1146 static enum ice_status
1147 ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
1148 {
1149 	u8 i;
1150 
1151 	if (!ice_rxq_ctx)
1152 		return ICE_ERR_BAD_PTR;
1153 
1154 	if (rxq_index > QRX_CTRL_MAX_INDEX)
1155 		return ICE_ERR_PARAM;
1156 
1157 	/* Copy each dword separately to HW */
1158 	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
1159 		wr32(hw, QRX_CONTEXT(i, rxq_index),
1160 		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1161 
1162 		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
1163 			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1164 	}
1165 
1166 	return 0;
1167 }
1168 
1169 /* LAN Rx Queue Context */
1170 static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
1171 	/* Field		Width	LSB */
1172 	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
1173 	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
1174 	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
1175 	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
1176 	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
1177 	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
1178 	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
1179 	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
1180 	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
1181 	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
1182 	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
1183 	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
1184 	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
1185 	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
1186 	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
1187 	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
1188 	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
1189 	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
1190 	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
1191 	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
1192 	{ 0 }
1193 };
1194 
1195 /**
1196  * ice_write_rxq_ctx
1197  * @hw: pointer to the hardware structure
1198  * @rlan_ctx: pointer to the rxq context
1199  * @rxq_index: the index of the Rx queue
1200  *
1201  * Converts rxq context from sparse to dense structure and then writes
1202  * it to HW register space, enabling the hardware to prefetch descriptors
1203  * instead of fetching them only on demand.
1204  */
1205 enum ice_status
1206 ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
1207 		  u32 rxq_index)
1208 {
1209 	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
1210 
1211 	if (!rlan_ctx)
1212 		return ICE_ERR_BAD_PTR;
1213 
1214 	rlan_ctx->prefena = 1;
1215 
1216 	ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
1217 	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
1218 }
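
/* Example (illustrative sketch): a minimal LAN Rx queue context programmed
 * through ice_write_rxq_ctx(). The function name, the 128-byte shift applied
 * to the descriptor base and data buffer size, and the literal values below
 * are assumptions for demonstration; real values come from the ring setup
 * code.
 */
static enum ice_status __maybe_unused
ice_example_setup_rxq_ctx(struct ice_hw *hw, dma_addr_t ring_dma, u16 num_desc,
			  u32 rxq_index)
{
	struct ice_rlan_ctx rlan_ctx = { 0 };

	rlan_ctx.base = ring_dma >> 7;	/* descriptor base, assumed 128B units */
	rlan_ctx.qlen = num_desc;	/* number of descriptors in the ring */
	rlan_ctx.dbuf = 2048 >> 7;	/* 2 KB data buffers, assumed */
	rlan_ctx.dsize = 1;		/* 32-byte descriptor format */
	rlan_ctx.rxmax = 1522;		/* max frame size, assumed */

	return ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
}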
1219 
1220 /* LAN Tx Queue Context */
1221 const struct ice_ctx_ele ice_tlan_ctx_info[] = {
1222 				    /* Field			Width	LSB */
1223 	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
1224 	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
1225 	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
1226 	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
1227 	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
1228 	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
1229 	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
1230 	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
1231 	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
1232 	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
1233 	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
1234 	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
1235 	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
1236 	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
1237 	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
1238 	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
1239 	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
1240 	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
1241 	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
1242 	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
1243 	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
1244 	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
1245 	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
1246 	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
1247 	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
1248 	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
1249 	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
1250 	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
1251 	{ 0 }
1252 };
1253 
1254 /* FW Admin Queue command wrappers */
1255 
1256 /* Software lock/mutex that is meant to be held while the Global Config Lock
1257  * in firmware is acquired by the software to prevent most (but not all) types
1258  * of AQ commands from being sent to FW
1259  */
1260 DEFINE_MUTEX(ice_global_cfg_lock_sw);
1261 
1262 /**
1263  * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
1264  * @hw: pointer to the HW struct
1265  * @desc: descriptor describing the command
1266  * @buf: buffer to use for indirect commands (NULL for direct commands)
1267  * @buf_size: size of buffer for indirect commands (0 for direct commands)
1268  * @cd: pointer to command details structure
1269  *
1270  * Helper function to send FW Admin Queue commands to the FW Admin Queue.
1271  */
1272 enum ice_status
1273 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1274 		u16 buf_size, struct ice_sq_cd *cd)
1275 {
1276 	struct ice_aqc_req_res *cmd = &desc->params.res_owner;
1277 	bool lock_acquired = false;
1278 	enum ice_status status;
1279 
1280 	/* When a package download is in process (i.e. when the firmware's
1281 	 * Global Configuration Lock resource is held), only the Download
1282 	 * Package, Get Version, Get Package Info List and Release Resource
1283 	 * (with resource ID set to Global Config Lock) AdminQ commands are
1284 	 * allowed; all others must block until the package download completes
1285 	 * and the Global Config Lock is released.  See also
1286 	 * ice_acquire_global_cfg_lock().
1287 	 */
1288 	switch (le16_to_cpu(desc->opcode)) {
1289 	case ice_aqc_opc_download_pkg:
1290 	case ice_aqc_opc_get_pkg_info_list:
1291 	case ice_aqc_opc_get_ver:
1292 		break;
1293 	case ice_aqc_opc_release_res:
1294 		if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
1295 			break;
1296 		/* fall-through */
1297 	default:
1298 		mutex_lock(&ice_global_cfg_lock_sw);
1299 		lock_acquired = true;
1300 		break;
1301 	}
1302 
1303 	status = ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
1304 	if (lock_acquired)
1305 		mutex_unlock(&ice_global_cfg_lock_sw);
1306 
1307 	return status;
1308 }
1309 
1310 /**
1311  * ice_aq_get_fw_ver
1312  * @hw: pointer to the HW struct
1313  * @cd: pointer to command details structure or NULL
1314  *
1315  * Get the firmware version (0x0001) from the admin queue commands
1316  */
1317 enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1318 {
1319 	struct ice_aqc_get_ver *resp;
1320 	struct ice_aq_desc desc;
1321 	enum ice_status status;
1322 
1323 	resp = &desc.params.get_ver;
1324 
1325 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1326 
1327 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1328 
1329 	if (!status) {
1330 		hw->fw_branch = resp->fw_branch;
1331 		hw->fw_maj_ver = resp->fw_major;
1332 		hw->fw_min_ver = resp->fw_minor;
1333 		hw->fw_patch = resp->fw_patch;
1334 		hw->fw_build = le32_to_cpu(resp->fw_build);
1335 		hw->api_branch = resp->api_branch;
1336 		hw->api_maj_ver = resp->api_major;
1337 		hw->api_min_ver = resp->api_minor;
1338 		hw->api_patch = resp->api_patch;
1339 	}
1340 
1341 	return status;
1342 }
1343 
1344 /**
1345  * ice_aq_send_driver_ver
1346  * @hw: pointer to the HW struct
1347  * @dv: driver's major, minor version
1348  * @cd: pointer to command details structure or NULL
1349  *
1350  * Send the driver version (0x0002) to the firmware
1351  */
1352 enum ice_status
1353 ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
1354 		       struct ice_sq_cd *cd)
1355 {
1356 	struct ice_aqc_driver_ver *cmd;
1357 	struct ice_aq_desc desc;
1358 	u16 len;
1359 
1360 	cmd = &desc.params.driver_ver;
1361 
1362 	if (!dv)
1363 		return ICE_ERR_PARAM;
1364 
1365 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
1366 
1367 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1368 	cmd->major_ver = dv->major_ver;
1369 	cmd->minor_ver = dv->minor_ver;
1370 	cmd->build_ver = dv->build_ver;
1371 	cmd->subbuild_ver = dv->subbuild_ver;
1372 
1373 	len = 0;
1374 	while (len < sizeof(dv->driver_string) &&
1375 	       isascii(dv->driver_string[len]) && dv->driver_string[len])
1376 		len++;
1377 
1378 	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
1379 }
1380 
1381 /**
1382  * ice_aq_q_shutdown
1383  * @hw: pointer to the HW struct
1384  * @unloading: is the driver unloading itself
1385  *
1386  * Tell the Firmware that we're shutting down the AdminQ and whether
1387  * or not the driver is unloading as well (0x0003).
1388  */
1389 enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
1390 {
1391 	struct ice_aqc_q_shutdown *cmd;
1392 	struct ice_aq_desc desc;
1393 
1394 	cmd = &desc.params.q_shutdown;
1395 
1396 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1397 
1398 	if (unloading)
1399 		cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
1400 
1401 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1402 }
1403 
1404 /**
1405  * ice_aq_req_res
1406  * @hw: pointer to the HW struct
1407  * @res: resource ID
1408  * @access: access type
1409  * @sdp_number: resource number
1410  * @timeout: the maximum time in ms that the driver may hold the resource
1411  * @cd: pointer to command details structure or NULL
1412  *
1413  * Requests common resource using the admin queue commands (0x0008).
1414  * When attempting to acquire the Global Config Lock, the driver can
1415  * learn of three states:
1416  *  1) ICE_SUCCESS -        acquired lock, and can perform download package
1417  *  2) ICE_ERR_AQ_ERROR -   did not get lock, driver should fail to load
1418  *  3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
1419  *                          successfully downloaded the package; the driver does
1420  *                          not have to download the package and can continue
1421  *                          loading
1422  *
1423  * Note that if the caller is in an acquire lock, perform action, release lock
1424  * phase of operation, it is possible that the FW may detect a timeout and issue
1425  * a CORER. In this case, the driver will receive a CORER interrupt and will
1426  * have to determine its cause. The calling thread that is handling this flow
1427  * will likely get an error propagated back to it indicating the Download
1428  * Package, Update Package or the Release Resource AQ commands timed out.
1429  */
1430 static enum ice_status
1431 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1432 	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
1433 	       struct ice_sq_cd *cd)
1434 {
1435 	struct ice_aqc_req_res *cmd_resp;
1436 	struct ice_aq_desc desc;
1437 	enum ice_status status;
1438 
1439 	cmd_resp = &desc.params.res_owner;
1440 
1441 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
1442 
1443 	cmd_resp->res_id = cpu_to_le16(res);
1444 	cmd_resp->access_type = cpu_to_le16(access);
1445 	cmd_resp->res_number = cpu_to_le32(sdp_number);
1446 	cmd_resp->timeout = cpu_to_le32(*timeout);
1447 	*timeout = 0;
1448 
1449 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1450 
1451 	/* The completion specifies the maximum time in ms that the driver
1452 	 * may hold the resource in the Timeout field.
1453 	 */
1454 
1455 	/* Global config lock response utilizes an additional status field.
1456 	 *
1457 	 * If the Global config lock resource is held by some other driver, the
1458 	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
1459 	 * and the timeout field indicates the maximum time the current owner
1460 	 * of the resource has to free it.
1461 	 */
1462 	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1463 		if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1464 			*timeout = le32_to_cpu(cmd_resp->timeout);
1465 			return 0;
1466 		} else if (le16_to_cpu(cmd_resp->status) ==
1467 			   ICE_AQ_RES_GLBL_IN_PROG) {
1468 			*timeout = le32_to_cpu(cmd_resp->timeout);
1469 			return ICE_ERR_AQ_ERROR;
1470 		} else if (le16_to_cpu(cmd_resp->status) ==
1471 			   ICE_AQ_RES_GLBL_DONE) {
1472 			return ICE_ERR_AQ_NO_WORK;
1473 		}
1474 
1475 		/* invalid FW response, force a timeout immediately */
1476 		*timeout = 0;
1477 		return ICE_ERR_AQ_ERROR;
1478 	}
1479 
1480 	/* If the resource is held by some other driver, the command completes
1481 	 * with a busy return value and the timeout field indicates the maximum
1482 	 * time the current owner of the resource has to free it.
1483 	 */
1484 	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1485 		*timeout = le32_to_cpu(cmd_resp->timeout);
1486 
1487 	return status;
1488 }
1489 
1490 /**
1491  * ice_aq_release_res
1492  * @hw: pointer to the HW struct
1493  * @res: resource ID
1494  * @sdp_number: resource number
1495  * @cd: pointer to command details structure or NULL
1496  *
1497  * release common resource using the admin queue commands (0x0009)
1498  */
1499 static enum ice_status
1500 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1501 		   struct ice_sq_cd *cd)
1502 {
1503 	struct ice_aqc_req_res *cmd;
1504 	struct ice_aq_desc desc;
1505 
1506 	cmd = &desc.params.res_owner;
1507 
1508 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1509 
1510 	cmd->res_id = cpu_to_le16(res);
1511 	cmd->res_number = cpu_to_le32(sdp_number);
1512 
1513 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1514 }
1515 
1516 /**
1517  * ice_acquire_res
1518  * @hw: pointer to the HW structure
1519  * @res: resource ID
1520  * @access: access type (read or write)
1521  * @timeout: timeout in milliseconds
1522  *
1523  * This function will attempt to acquire the ownership of a resource.
1524  */
1525 enum ice_status
1526 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1527 		enum ice_aq_res_access_type access, u32 timeout)
1528 {
1529 #define ICE_RES_POLLING_DELAY_MS	10
1530 	u32 delay = ICE_RES_POLLING_DELAY_MS;
1531 	u32 time_left = timeout;
1532 	enum ice_status status;
1533 
1534 	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1535 
1536 	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
1537 	 * previously acquired the resource and performed any necessary updates;
1538 	 * in this case the caller does not obtain the resource and has no
1539 	 * further work to do.
1540 	 */
1541 	if (status == ICE_ERR_AQ_NO_WORK)
1542 		goto ice_acquire_res_exit;
1543 
1544 	if (status)
1545 		ice_debug(hw, ICE_DBG_RES,
1546 			  "resource %d acquire type %d failed.\n", res, access);
1547 
1548 	/* If necessary, poll until the current lock owner times out */
1549 	timeout = time_left;
1550 	while (status && timeout && time_left) {
1551 		mdelay(delay);
1552 		timeout = (timeout > delay) ? timeout - delay : 0;
1553 		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1554 
1555 		if (status == ICE_ERR_AQ_NO_WORK)
1556 			/* lock free, but no work to do */
1557 			break;
1558 
1559 		if (!status)
1560 			/* lock acquired */
1561 			break;
1562 	}
1563 	if (status && status != ICE_ERR_AQ_NO_WORK)
1564 		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
1565 
1566 ice_acquire_res_exit:
1567 	if (status == ICE_ERR_AQ_NO_WORK) {
1568 		if (access == ICE_RES_WRITE)
1569 			ice_debug(hw, ICE_DBG_RES,
1570 				  "resource indicates no work to do.\n");
1571 		else
1572 			ice_debug(hw, ICE_DBG_RES,
1573 				  "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
1574 	}
1575 	return status;
1576 }
1577 
1578 /**
1579  * ice_release_res
1580  * @hw: pointer to the HW structure
1581  * @res: resource ID
1582  *
1583  * This function will release a resource using the proper Admin Command.
1584  */
1585 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
1586 {
1587 	enum ice_status status;
1588 	u32 total_delay = 0;
1589 
1590 	status = ice_aq_release_res(hw, res, 0, NULL);
1591 
1592 	/* there are some rare cases where trying to release the resource
1593 	 * results in an admin queue timeout, so handle them correctly
1594 	 */
1595 	while ((status == ICE_ERR_AQ_TIMEOUT) &&
1596 	       (total_delay < hw->adminq.sq_cmd_timeout)) {
1597 		mdelay(1);
1598 		status = ice_aq_release_res(hw, res, 0, NULL);
1599 		total_delay++;
1600 	}
1601 }
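
/* Example (illustrative sketch): the acquire/use/release pattern built on
 * ice_acquire_res() and ice_release_res(). The function name and the 3000 ms
 * timeout are assumptions; ICE_NVM_RES_ID and ICE_RES_READ are used here only
 * as representative resource ID and access type values.
 */
static enum ice_status __maybe_unused
ice_example_use_shared_res(struct ice_hw *hw)
{
	enum ice_status status;

	status = ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ, 3000);
	if (status)
		return status;

	/* ... access the shared resource here ... */

	ice_release_res(hw, ICE_NVM_RES_ID);
	return 0;
}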
1602 
1603 /**
1604  * ice_get_num_per_func - determine number of resources per PF
1605  * @hw: pointer to the HW structure
1606  * @max: value to be evenly split between each PF
1607  *
1608  * Determine the number of valid functions by going through the bitmap returned
1609  * from parsing capabilities and use this to calculate the number of resources
1610  * per PF based on the max value passed in.
1611  */
1612 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
1613 {
1614 	u8 funcs;
1615 
1616 #define ICE_CAPS_VALID_FUNCS_M	0xFF
1617 	funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
1618 			 ICE_CAPS_VALID_FUNCS_M);
1619 
1620 	if (!funcs)
1621 		return 0;
1622 
1623 	return max / funcs;
1624 }
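
/* For example (illustrative): with a valid_functions bitmap of 0x3 (two PFs
 * enabled) and max = 768, ice_get_num_per_func() returns 768 / 2 = 384
 * resources per PF.
 */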
1625 
1626 /**
1627  * ice_parse_caps - parse function/device capabilities
1628  * @hw: pointer to the HW struct
1629  * @buf: pointer to a buffer containing function/device capability records
1630  * @cap_count: number of capability records in the list
1631  * @opc: type of capabilities list to parse
1632  *
1633  * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
1634  */
1635 static void
1636 ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
1637 	       enum ice_adminq_opc opc)
1638 {
1639 	struct ice_aqc_list_caps_elem *cap_resp;
1640 	struct ice_hw_func_caps *func_p = NULL;
1641 	struct ice_hw_dev_caps *dev_p = NULL;
1642 	struct ice_hw_common_caps *caps;
1643 	char const *prefix;
1644 	u32 i;
1645 
1646 	if (!buf)
1647 		return;
1648 
1649 	cap_resp = (struct ice_aqc_list_caps_elem *)buf;
1650 
1651 	if (opc == ice_aqc_opc_list_dev_caps) {
1652 		dev_p = &hw->dev_caps;
1653 		caps = &dev_p->common_cap;
1654 		prefix = "dev cap";
1655 	} else if (opc == ice_aqc_opc_list_func_caps) {
1656 		func_p = &hw->func_caps;
1657 		caps = &func_p->common_cap;
1658 		prefix = "func cap";
1659 	} else {
1660 		ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
1661 		return;
1662 	}
1663 
1664 	for (i = 0; caps && i < cap_count; i++, cap_resp++) {
1665 		u32 logical_id = le32_to_cpu(cap_resp->logical_id);
1666 		u32 phys_id = le32_to_cpu(cap_resp->phys_id);
1667 		u32 number = le32_to_cpu(cap_resp->number);
1668 		u16 cap = le16_to_cpu(cap_resp->cap);
1669 
1670 		switch (cap) {
1671 		case ICE_AQC_CAPS_VALID_FUNCTIONS:
1672 			caps->valid_functions = number;
1673 			ice_debug(hw, ICE_DBG_INIT,
1674 				  "%s: valid_functions (bitmap) = %d\n", prefix,
1675 				  caps->valid_functions);
1676 
1677 			/* store func count for resource management purposes */
1678 			if (dev_p)
1679 				dev_p->num_funcs = hweight32(number);
1680 			break;
1681 		case ICE_AQC_CAPS_SRIOV:
1682 			caps->sr_iov_1_1 = (number == 1);
1683 			ice_debug(hw, ICE_DBG_INIT,
1684 				  "%s: sr_iov_1_1 = %d\n", prefix,
1685 				  caps->sr_iov_1_1);
1686 			break;
1687 		case ICE_AQC_CAPS_VF:
1688 			if (dev_p) {
1689 				dev_p->num_vfs_exposed = number;
1690 				ice_debug(hw, ICE_DBG_INIT,
1691 					  "%s: num_vfs_exposed = %d\n", prefix,
1692 					  dev_p->num_vfs_exposed);
1693 			} else if (func_p) {
1694 				func_p->num_allocd_vfs = number;
1695 				func_p->vf_base_id = logical_id;
1696 				ice_debug(hw, ICE_DBG_INIT,
1697 					  "%s: num_allocd_vfs = %d\n", prefix,
1698 					  func_p->num_allocd_vfs);
1699 				ice_debug(hw, ICE_DBG_INIT,
1700 					  "%s: vf_base_id = %d\n", prefix,
1701 					  func_p->vf_base_id);
1702 			}
1703 			break;
1704 		case ICE_AQC_CAPS_VSI:
1705 			if (dev_p) {
1706 				dev_p->num_vsi_allocd_to_host = number;
1707 				ice_debug(hw, ICE_DBG_INIT,
1708 					  "%s: num_vsi_allocd_to_host = %d\n",
1709 					  prefix,
1710 					  dev_p->num_vsi_allocd_to_host);
1711 			} else if (func_p) {
1712 				func_p->guar_num_vsi =
1713 					ice_get_num_per_func(hw, ICE_MAX_VSI);
1714 				ice_debug(hw, ICE_DBG_INIT,
1715 					  "%s: guar_num_vsi (fw) = %d\n",
1716 					  prefix, number);
1717 				ice_debug(hw, ICE_DBG_INIT,
1718 					  "%s: guar_num_vsi = %d\n",
1719 					  prefix, func_p->guar_num_vsi);
1720 			}
1721 			break;
1722 		case ICE_AQC_CAPS_DCB:
1723 			caps->dcb = (number == 1);
1724 			caps->active_tc_bitmap = logical_id;
1725 			caps->maxtc = phys_id;
1726 			ice_debug(hw, ICE_DBG_INIT,
1727 				  "%s: dcb = %d\n", prefix, caps->dcb);
1728 			ice_debug(hw, ICE_DBG_INIT,
1729 				  "%s: active_tc_bitmap = %d\n", prefix,
1730 				  caps->active_tc_bitmap);
1731 			ice_debug(hw, ICE_DBG_INIT,
1732 				  "%s: maxtc = %d\n", prefix, caps->maxtc);
1733 			break;
1734 		case ICE_AQC_CAPS_RSS:
1735 			caps->rss_table_size = number;
1736 			caps->rss_table_entry_width = logical_id;
1737 			ice_debug(hw, ICE_DBG_INIT,
1738 				  "%s: rss_table_size = %d\n", prefix,
1739 				  caps->rss_table_size);
1740 			ice_debug(hw, ICE_DBG_INIT,
1741 				  "%s: rss_table_entry_width = %d\n", prefix,
1742 				  caps->rss_table_entry_width);
1743 			break;
1744 		case ICE_AQC_CAPS_RXQS:
1745 			caps->num_rxq = number;
1746 			caps->rxq_first_id = phys_id;
1747 			ice_debug(hw, ICE_DBG_INIT,
1748 				  "%s: num_rxq = %d\n", prefix,
1749 				  caps->num_rxq);
1750 			ice_debug(hw, ICE_DBG_INIT,
1751 				  "%s: rxq_first_id = %d\n", prefix,
1752 				  caps->rxq_first_id);
1753 			break;
1754 		case ICE_AQC_CAPS_TXQS:
1755 			caps->num_txq = number;
1756 			caps->txq_first_id = phys_id;
1757 			ice_debug(hw, ICE_DBG_INIT,
1758 				  "%s: num_txq = %d\n", prefix,
1759 				  caps->num_txq);
1760 			ice_debug(hw, ICE_DBG_INIT,
1761 				  "%s: txq_first_id = %d\n", prefix,
1762 				  caps->txq_first_id);
1763 			break;
1764 		case ICE_AQC_CAPS_MSIX:
1765 			caps->num_msix_vectors = number;
1766 			caps->msix_vector_first_id = phys_id;
1767 			ice_debug(hw, ICE_DBG_INIT,
1768 				  "%s: num_msix_vectors = %d\n", prefix,
1769 				  caps->num_msix_vectors);
1770 			ice_debug(hw, ICE_DBG_INIT,
1771 				  "%s: msix_vector_first_id = %d\n", prefix,
1772 				  caps->msix_vector_first_id);
1773 			break;
1774 		case ICE_AQC_CAPS_MAX_MTU:
1775 			caps->max_mtu = number;
1776 			ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
1777 				  prefix, caps->max_mtu);
1778 			break;
1779 		default:
1780 			ice_debug(hw, ICE_DBG_INIT,
1781 				  "%s: unknown capability[%d]: 0x%x\n", prefix,
1782 				  i, cap);
1783 			break;
1784 		}
1785 	}
1786 
1787 	/* Re-calculate capabilities that are dependent on the number of
1788 	 * physical ports; i.e. some features are not supported or function
1789 	 * differently on devices with more than 4 ports.
1790 	 */
1791 	if (hw->dev_caps.num_funcs > 4) {
1792 		/* Max 4 TCs per port */
1793 		caps->maxtc = 4;
1794 		ice_debug(hw, ICE_DBG_INIT,
1795 			  "%s: maxtc = %d (based on #ports)\n", prefix,
1796 			  caps->maxtc);
1797 	}
1798 }
1799 
1800 /**
1801  * ice_aq_discover_caps - query function/device capabilities
1802  * @hw: pointer to the HW struct
1803  * @buf: a virtual buffer to hold the capabilities
1804  * @buf_size: Size of the virtual buffer
1805  * @cap_count: updated with the required capability count if the AQ returns ENOMEM
1806  * @opc: capabilities type to discover - pass in the command opcode
1807  * @cd: pointer to command details structure or NULL
1808  *
1809  * Get the function(0x000a)/device(0x000b) capabilities description from
1810  * the firmware.
1811  */
1812 static enum ice_status
1813 ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
1814 		     enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1815 {
1816 	struct ice_aqc_list_caps *cmd;
1817 	struct ice_aq_desc desc;
1818 	enum ice_status status;
1819 
1820 	cmd = &desc.params.get_cap;
1821 
1822 	if (opc != ice_aqc_opc_list_func_caps &&
1823 	    opc != ice_aqc_opc_list_dev_caps)
1824 		return ICE_ERR_PARAM;
1825 
1826 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
1827 
1828 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1829 	if (!status)
1830 		ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc);
1831 	else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM)
1832 		*cap_count = le32_to_cpu(cmd->count);
1833 	return status;
1834 }
1835 
1836 /**
1837  * ice_discover_caps - get info about the HW
1838  * @hw: pointer to the hardware structure
1839  * @opc: capabilities type to discover - pass in the command opcode
1840  */
1841 static enum ice_status
1842 ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc)
1843 {
1844 	enum ice_status status;
1845 	u32 cap_count;
1846 	u16 cbuf_len;
1847 	u8 retries;
1848 
1849 	/* The driver doesn't know how many capabilities the device will return
1850 	 * so the buffer size required isn't known ahead of time. The driver
1851 	 * starts with cbuf_len and if this turns out to be insufficient, the
1852 	 * device returns ICE_AQ_RC_ENOMEM and also the cap_count it needs.
1853 	 * The driver then allocates the buffer based on the count and retries
1854 	 * the operation. So it follows that the retry count is 2.
1855 	 */
1856 #define ICE_GET_CAP_BUF_COUNT	40
1857 #define ICE_GET_CAP_RETRY_COUNT	2
1858 
1859 	cap_count = ICE_GET_CAP_BUF_COUNT;
1860 	retries = ICE_GET_CAP_RETRY_COUNT;
1861 
1862 	do {
1863 		void *cbuf;
1864 
1865 		cbuf_len = (u16)(cap_count *
1866 				 sizeof(struct ice_aqc_list_caps_elem));
1867 		cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL);
1868 		if (!cbuf)
1869 			return ICE_ERR_NO_MEMORY;
1870 
1871 		status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &cap_count,
1872 					      opc, NULL);
1873 		devm_kfree(ice_hw_to_dev(hw), cbuf);
1874 
1875 		if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
1876 			break;
1877 
1878 		/* If ENOMEM is returned, try again with a bigger buffer */
1879 	} while (--retries);
1880 
1881 	return status;
1882 }
1883 
1884 /**
1885  * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
1886  * @hw: pointer to the hardware structure
1887  */
1888 void ice_set_safe_mode_caps(struct ice_hw *hw)
1889 {
1890 	struct ice_hw_func_caps *func_caps = &hw->func_caps;
1891 	struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
1892 	u32 valid_func, rxq_first_id, txq_first_id;
1893 	u32 msix_vector_first_id, max_mtu;
1894 	u32 num_funcs;
1895 
1896 	/* cache some func_caps values that should be restored after memset */
1897 	valid_func = func_caps->common_cap.valid_functions;
1898 	txq_first_id = func_caps->common_cap.txq_first_id;
1899 	rxq_first_id = func_caps->common_cap.rxq_first_id;
1900 	msix_vector_first_id = func_caps->common_cap.msix_vector_first_id;
1901 	max_mtu = func_caps->common_cap.max_mtu;
1902 
1903 	/* unset func capabilities */
1904 	memset(func_caps, 0, sizeof(*func_caps));
1905 
1906 	/* restore cached values */
1907 	func_caps->common_cap.valid_functions = valid_func;
1908 	func_caps->common_cap.txq_first_id = txq_first_id;
1909 	func_caps->common_cap.rxq_first_id = rxq_first_id;
1910 	func_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
1911 	func_caps->common_cap.max_mtu = max_mtu;
1912 
1913 	/* one Tx and one Rx queue in safe mode */
1914 	func_caps->common_cap.num_rxq = 1;
1915 	func_caps->common_cap.num_txq = 1;
1916 
1917 	/* two MSIX vectors, one for traffic and one for misc causes */
1918 	func_caps->common_cap.num_msix_vectors = 2;
1919 	func_caps->guar_num_vsi = 1;
1920 
1921 	/* cache some dev_caps values that should be restored after memset */
1922 	valid_func = dev_caps->common_cap.valid_functions;
1923 	txq_first_id = dev_caps->common_cap.txq_first_id;
1924 	rxq_first_id = dev_caps->common_cap.rxq_first_id;
1925 	msix_vector_first_id = dev_caps->common_cap.msix_vector_first_id;
1926 	max_mtu = dev_caps->common_cap.max_mtu;
1927 	num_funcs = dev_caps->num_funcs;
1928 
1929 	/* unset dev capabilities */
1930 	memset(dev_caps, 0, sizeof(*dev_caps));
1931 
1932 	/* restore cached values */
1933 	dev_caps->common_cap.valid_functions = valid_func;
1934 	dev_caps->common_cap.txq_first_id = txq_first_id;
1935 	dev_caps->common_cap.rxq_first_id = rxq_first_id;
1936 	dev_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
1937 	dev_caps->common_cap.max_mtu = max_mtu;
1938 	dev_caps->num_funcs = num_funcs;
1939 
1940 	/* one Tx and one Rx queue per function in safe mode */
1941 	dev_caps->common_cap.num_rxq = num_funcs;
1942 	dev_caps->common_cap.num_txq = num_funcs;
1943 
1944 	/* two MSIX vectors per function */
1945 	dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
1946 }
1947 
1948 /**
1949  * ice_get_caps - get info about the HW
1950  * @hw: pointer to the hardware structure
1951  */
1952 enum ice_status ice_get_caps(struct ice_hw *hw)
1953 {
1954 	enum ice_status status;
1955 
1956 	status = ice_discover_caps(hw, ice_aqc_opc_list_dev_caps);
1957 	if (!status)
1958 		status = ice_discover_caps(hw, ice_aqc_opc_list_func_caps);
1959 
1960 	return status;
1961 }
1962 
1963 /**
1964  * ice_aq_manage_mac_write - manage MAC address write command
1965  * @hw: pointer to the HW struct
1966  * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
1967  * @flags: flags to control write behavior
1968  * @cd: pointer to command details structure or NULL
1969  *
1970  * This function is used to write MAC address to the NVM (0x0108).
1971  */
1972 enum ice_status
1973 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
1974 			struct ice_sq_cd *cd)
1975 {
1976 	struct ice_aqc_manage_mac_write *cmd;
1977 	struct ice_aq_desc desc;
1978 
1979 	cmd = &desc.params.mac_write;
1980 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
1981 
1982 	cmd->flags = flags;
1983 
1984 	/* Prep values for flags, sah, sal */
1985 	cmd->sah = htons(*((const u16 *)mac_addr));
1986 	cmd->sal = htonl(*((const u32 *)(mac_addr + 2)));
1987 
1988 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1989 }
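
/* Illustration of the sah/sal packing above (example only): for the MAC
 * address 00:11:22:33:44:55, sah ends up holding the first two bytes
 * (0x0011 in network order) and sal the remaining four (0x22334455 in
 * network order).
 */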
1990 
1991 /**
1992  * ice_aq_clear_pxe_mode
1993  * @hw: pointer to the HW struct
1994  *
1995  * Tell the firmware that the driver is taking over from PXE (0x0110).
1996  */
1997 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
1998 {
1999 	struct ice_aq_desc desc;
2000 
2001 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
2002 	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
2003 
2004 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2005 }
2006 
2007 /**
2008  * ice_clear_pxe_mode - clear PXE operations mode
2009  * @hw: pointer to the HW struct
2010  *
2011  * Make sure all PXE mode settings are cleared, including things
2012  * like descriptor fetch/write-back mode.
2013  */
2014 void ice_clear_pxe_mode(struct ice_hw *hw)
2015 {
2016 	if (ice_check_sq_alive(hw, &hw->adminq))
2017 		ice_aq_clear_pxe_mode(hw);
2018 }
2019 
2020 /**
2021  * ice_get_link_speed_based_on_phy_type - returns link speed
2022  * @phy_type_low: lower part of phy_type
2023  * @phy_type_high: higher part of phy_type
2024  *
2025  * This helper function will convert an entry in PHY type structure
2026  * [phy_type_low, phy_type_high] to its corresponding link speed.
2027  * Note: In the [phy_type_low, phy_type_high] structure, exactly one bit
2028  * should be set, as this function converts a single PHY type to its
2029  * speed.
2030  * If no bit is set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
2031  * If more than one bit is set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
2032  */
2033 static u16
2034 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
2035 {
2036 	u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2037 	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2038 
2039 	switch (phy_type_low) {
2040 	case ICE_PHY_TYPE_LOW_100BASE_TX:
2041 	case ICE_PHY_TYPE_LOW_100M_SGMII:
2042 		speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
2043 		break;
2044 	case ICE_PHY_TYPE_LOW_1000BASE_T:
2045 	case ICE_PHY_TYPE_LOW_1000BASE_SX:
2046 	case ICE_PHY_TYPE_LOW_1000BASE_LX:
2047 	case ICE_PHY_TYPE_LOW_1000BASE_KX:
2048 	case ICE_PHY_TYPE_LOW_1G_SGMII:
2049 		speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
2050 		break;
2051 	case ICE_PHY_TYPE_LOW_2500BASE_T:
2052 	case ICE_PHY_TYPE_LOW_2500BASE_X:
2053 	case ICE_PHY_TYPE_LOW_2500BASE_KX:
2054 		speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
2055 		break;
2056 	case ICE_PHY_TYPE_LOW_5GBASE_T:
2057 	case ICE_PHY_TYPE_LOW_5GBASE_KR:
2058 		speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
2059 		break;
2060 	case ICE_PHY_TYPE_LOW_10GBASE_T:
2061 	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
2062 	case ICE_PHY_TYPE_LOW_10GBASE_SR:
2063 	case ICE_PHY_TYPE_LOW_10GBASE_LR:
2064 	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
2065 	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
2066 	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
2067 		speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
2068 		break;
2069 	case ICE_PHY_TYPE_LOW_25GBASE_T:
2070 	case ICE_PHY_TYPE_LOW_25GBASE_CR:
2071 	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
2072 	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
2073 	case ICE_PHY_TYPE_LOW_25GBASE_SR:
2074 	case ICE_PHY_TYPE_LOW_25GBASE_LR:
2075 	case ICE_PHY_TYPE_LOW_25GBASE_KR:
2076 	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
2077 	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
2078 	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
2079 	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
2080 		speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
2081 		break;
2082 	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
2083 	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
2084 	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
2085 	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
2086 	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
2087 	case ICE_PHY_TYPE_LOW_40G_XLAUI:
2088 		speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
2089 		break;
2090 	case ICE_PHY_TYPE_LOW_50GBASE_CR2:
2091 	case ICE_PHY_TYPE_LOW_50GBASE_SR2:
2092 	case ICE_PHY_TYPE_LOW_50GBASE_LR2:
2093 	case ICE_PHY_TYPE_LOW_50GBASE_KR2:
2094 	case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
2095 	case ICE_PHY_TYPE_LOW_50G_LAUI2:
2096 	case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
2097 	case ICE_PHY_TYPE_LOW_50G_AUI2:
2098 	case ICE_PHY_TYPE_LOW_50GBASE_CP:
2099 	case ICE_PHY_TYPE_LOW_50GBASE_SR:
2100 	case ICE_PHY_TYPE_LOW_50GBASE_FR:
2101 	case ICE_PHY_TYPE_LOW_50GBASE_LR:
2102 	case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
2103 	case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
2104 	case ICE_PHY_TYPE_LOW_50G_AUI1:
2105 		speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
2106 		break;
2107 	case ICE_PHY_TYPE_LOW_100GBASE_CR4:
2108 	case ICE_PHY_TYPE_LOW_100GBASE_SR4:
2109 	case ICE_PHY_TYPE_LOW_100GBASE_LR4:
2110 	case ICE_PHY_TYPE_LOW_100GBASE_KR4:
2111 	case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
2112 	case ICE_PHY_TYPE_LOW_100G_CAUI4:
2113 	case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
2114 	case ICE_PHY_TYPE_LOW_100G_AUI4:
2115 	case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
2116 	case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
2117 	case ICE_PHY_TYPE_LOW_100GBASE_CP2:
2118 	case ICE_PHY_TYPE_LOW_100GBASE_SR2:
2119 	case ICE_PHY_TYPE_LOW_100GBASE_DR:
2120 		speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
2121 		break;
2122 	default:
2123 		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2124 		break;
2125 	}
2126 
2127 	switch (phy_type_high) {
2128 	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
2129 	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
2130 	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
2131 	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
2132 	case ICE_PHY_TYPE_HIGH_100G_AUI2:
2133 		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
2134 		break;
2135 	default:
2136 		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2137 		break;
2138 	}
2139 
2140 	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
2141 	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2142 		return ICE_AQ_LINK_SPEED_UNKNOWN;
2143 	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2144 		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
2145 		return ICE_AQ_LINK_SPEED_UNKNOWN;
2146 	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2147 		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2148 		return speed_phy_type_low;
2149 	else
2150 		return speed_phy_type_high;
2151 }
2152 
2153 /**
2154  * ice_update_phy_type
2155  * @phy_type_low: pointer to the lower part of phy_type
2156  * @phy_type_high: pointer to the higher part of phy_type
2157  * @link_speeds_bitmap: targeted link speeds bitmap
2158  *
2159  * Note: For the layout of link_speeds_bitmap, see
2160  * [ice_aqc_get_link_status->link_speed]. The caller may pass in a
2161  * link_speeds_bitmap that includes multiple speeds.
2162  *
2163  * Each bit in the [phy_type_low, phy_type_high] structure represents a
2164  * certain link speed. This helper function turns on the bits in
2165  * [phy_type_low, phy_type_high] that correspond to the speeds set in the
2166  * link_speeds_bitmap input parameter.
2167  */
2168 void
2169 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
2170 		    u16 link_speeds_bitmap)
2171 {
2172 	u64 pt_high;
2173 	u64 pt_low;
2174 	int index;
2175 	u16 speed;
2176 
2177 	/* We first check with low part of phy_type */
2178 	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
2179 		pt_low = BIT_ULL(index);
2180 		speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
2181 
2182 		if (link_speeds_bitmap & speed)
2183 			*phy_type_low |= BIT_ULL(index);
2184 	}
2185 
2186 	/* We then check with high part of phy_type */
2187 	for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
2188 		pt_high = BIT_ULL(index);
2189 		speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
2190 
2191 		if (link_speeds_bitmap & speed)
2192 			*phy_type_high |= BIT_ULL(index);
2193 	}
2194 }
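
/* Minimal usage sketch for ice_update_phy_type() (illustrative; the caller
 * is assumed to copy the result into a set PHY config request afterwards):
 *
 *	u64 phy_low = 0, phy_high = 0;
 *
 *	ice_update_phy_type(&phy_low, &phy_high,
 *			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);
 */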
2195 
2196 /**
2197  * ice_aq_set_phy_cfg
2198  * @hw: pointer to the HW struct
2199  * @lport: logical port number
2200  * @cfg: structure with PHY configuration data to be set
2201  * @cd: pointer to command details structure or NULL
2202  *
2203  * Set the various PHY configuration parameters supported on the Port.
2204  * One or more of the Set PHY config parameters may be ignored in an MFP
2205  * mode as the PF may not have the privilege to set some of the PHY Config
2206  * parameters. This status will be indicated by the command response (0x0601).
2207  */
2208 enum ice_status
2209 ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
2210 		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
2211 {
2212 	struct ice_aq_desc desc;
2213 
2214 	if (!cfg)
2215 		return ICE_ERR_PARAM;
2216 
2217 	/* Ensure that only valid bits of cfg->caps can be turned on. */
2218 	if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
2219 		ice_debug(hw, ICE_DBG_PHY,
2220 			  "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
2221 			  cfg->caps);
2222 
2223 		cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
2224 	}
2225 
2226 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
2227 	desc.params.set_phy.lport_num = lport;
2228 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2229 
2230 	ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
2231 		  (unsigned long long)le64_to_cpu(cfg->phy_type_low));
2232 	ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
2233 		  (unsigned long long)le64_to_cpu(cfg->phy_type_high));
2234 	ice_debug(hw, ICE_DBG_LINK, "caps = 0x%x\n", cfg->caps);
2235 	ice_debug(hw, ICE_DBG_LINK, "low_power_ctrl = 0x%x\n",
2236 		  cfg->low_power_ctrl);
2237 	ice_debug(hw, ICE_DBG_LINK, "eee_cap = 0x%x\n", cfg->eee_cap);
2238 	ice_debug(hw, ICE_DBG_LINK, "eeer_value = 0x%x\n", cfg->eeer_value);
2239 	ice_debug(hw, ICE_DBG_LINK, "link_fec_opt = 0x%x\n", cfg->link_fec_opt);
2240 
2241 	return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
2242 }
2243 
2244 /**
2245  * ice_update_link_info - update status of the HW network link
2246  * @pi: port info structure of the logical port of interest
2247  */
2248 enum ice_status ice_update_link_info(struct ice_port_info *pi)
2249 {
2250 	struct ice_link_status *li;
2251 	enum ice_status status;
2252 
2253 	if (!pi)
2254 		return ICE_ERR_PARAM;
2255 
2256 	li = &pi->phy.link_info;
2257 
2258 	status = ice_aq_get_link_info(pi, true, NULL, NULL);
2259 	if (status)
2260 		return status;
2261 
2262 	if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
2263 		struct ice_aqc_get_phy_caps_data *pcaps;
2264 		struct ice_hw *hw;
2265 
2266 		hw = pi->hw;
2267 		pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
2268 				     GFP_KERNEL);
2269 		if (!pcaps)
2270 			return ICE_ERR_NO_MEMORY;
2271 
2272 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
2273 					     pcaps, NULL);
2274 		if (!status)
2275 			memcpy(li->module_type, &pcaps->module_type,
2276 			       sizeof(li->module_type));
2277 
2278 		devm_kfree(ice_hw_to_dev(hw), pcaps);
2279 	}
2280 
2281 	return status;
2282 }
2283 
2284 /**
2285  * ice_set_fc
2286  * @pi: port information structure
2287  * @aq_failures: pointer to status code, specific to ice_set_fc routine
2288  * @ena_auto_link_update: enable automatic link update
2289  *
2290  * Set the requested flow control mode.
2291  */
2292 enum ice_status
2293 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
2294 {
2295 	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
2296 	struct ice_aqc_get_phy_caps_data *pcaps;
2297 	enum ice_status status;
2298 	u8 pause_mask = 0x0;
2299 	struct ice_hw *hw;
2300 
2301 	if (!pi)
2302 		return ICE_ERR_PARAM;
2303 	hw = pi->hw;
2304 	*aq_failures = ICE_SET_FC_AQ_FAIL_NONE;
2305 
2306 	switch (pi->fc.req_mode) {
2307 	case ICE_FC_FULL:
2308 		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2309 		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2310 		break;
2311 	case ICE_FC_RX_PAUSE:
2312 		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2313 		break;
2314 	case ICE_FC_TX_PAUSE:
2315 		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2316 		break;
2317 	default:
2318 		break;
2319 	}
2320 
2321 	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
2322 	if (!pcaps)
2323 		return ICE_ERR_NO_MEMORY;
2324 
2325 	/* Get the current PHY config */
2326 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
2327 				     NULL);
2328 	if (status) {
2329 		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
2330 		goto out;
2331 	}
2332 
2333 	/* clear the old pause settings */
2334 	cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
2335 				   ICE_AQC_PHY_EN_RX_LINK_PAUSE);
2336 
2337 	/* set the new capabilities */
2338 	cfg.caps |= pause_mask;
2339 
2340 	/* If the capabilities have changed, then set the new config */
2341 	if (cfg.caps != pcaps->caps) {
2342 		int retry_count, retry_max = 10;
2343 
2344 		/* Auto restart link so settings take effect */
2345 		if (ena_auto_link_update)
2346 			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2347 		/* Copy over all the old settings */
2348 		cfg.phy_type_high = pcaps->phy_type_high;
2349 		cfg.phy_type_low = pcaps->phy_type_low;
2350 		cfg.low_power_ctrl = pcaps->low_power_ctrl;
2351 		cfg.eee_cap = pcaps->eee_cap;
2352 		cfg.eeer_value = pcaps->eeer_value;
2353 		cfg.link_fec_opt = pcaps->link_fec_options;
2354 
2355 		status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL);
2356 		if (status) {
2357 			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
2358 			goto out;
2359 		}
2360 
2361 		/* Update the link info.
2362 		 * It sometimes takes a really long time for the link to
2363 		 * come back from the atomic reset, so wait a little bit
2364 		 * between retries.
2365 		 */
2366 		for (retry_count = 0; retry_count < retry_max; retry_count++) {
2367 			status = ice_update_link_info(pi);
2368 
2369 			if (!status)
2370 				break;
2371 
2372 			mdelay(100);
2373 		}
2374 
2375 		if (status)
2376 			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
2377 	}
2378 
2379 out:
2380 	devm_kfree(ice_hw_to_dev(hw), pcaps);
2381 	return status;
2382 }
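
/* Usage sketch for ice_set_fc() (illustrative; error handling omitted and
 * 'pi' is assumed to be an initialized port_info):
 *
 *	u8 aq_fail;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	status = ice_set_fc(pi, &aq_fail, true);
 */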
2383 
2384 /**
2385  * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
2386  * @caps: PHY ability structure to copy data from
2387  * @cfg: PHY configuration structure to copy data to
2388  *
2389  * Helper function to copy AQC PHY get ability data to PHY set configuration
2390  * data structure
2391  */
2392 void
2393 ice_copy_phy_caps_to_cfg(struct ice_aqc_get_phy_caps_data *caps,
2394 			 struct ice_aqc_set_phy_cfg_data *cfg)
2395 {
2396 	if (!caps || !cfg)
2397 		return;
2398 
2399 	cfg->phy_type_low = caps->phy_type_low;
2400 	cfg->phy_type_high = caps->phy_type_high;
2401 	cfg->caps = caps->caps;
2402 	cfg->low_power_ctrl = caps->low_power_ctrl;
2403 	cfg->eee_cap = caps->eee_cap;
2404 	cfg->eeer_value = caps->eeer_value;
2405 	cfg->link_fec_opt = caps->link_fec_options;
2406 }
2407 
2408 /**
2409  * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
2410  * @cfg: PHY configuration data to set FEC mode
2411  * @fec: FEC mode to configure
2412  *
2413  * Before calling, the caller should copy ICE_AQC_PHY_EN_AUTO_FEC (bit 7)
2414  * of ice_aqc_get_phy_caps_data.caps into cfg.caps (ICE_AQ_PHY_ENA_AUTO_FEC)
2415  * and ice_aqc_get_phy_caps_data.link_fec_options into cfg.link_fec_opt.
2416  */
2417 void
2418 ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec)
2419 {
2420 	switch (fec) {
2421 	case ICE_FEC_BASER:
2422 		/* Clear the RS bits: AND with the BASE-R ability
2423 		 * bits and OR in the request bits.
2424 		 */
2425 		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
2426 				     ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
2427 		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
2428 				     ICE_AQC_PHY_FEC_25G_KR_REQ;
2429 		break;
2430 	case ICE_FEC_RS:
2431 		/* Clear the BASE-R bits: AND with the RS ability
2432 		 * bits and OR in the request bits.
2433 		 */
2434 		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
2435 		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
2436 				     ICE_AQC_PHY_FEC_25G_RS_544_REQ;
2437 		break;
2438 	case ICE_FEC_NONE:
2439 		/* Clear all FEC option bits. */
2440 		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
2441 		break;
2442 	case ICE_FEC_AUTO:
2443 		/* AND the auto FEC bit and all caps bits. */
2444 		cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
2445 		break;
2446 	}
2447 }
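
/* Typical call pattern (sketch, per the note above): get the PHY abilities,
 * copy them into a set-config structure, apply the FEC request, then send
 * the configuration:
 *
 *	ice_copy_phy_caps_to_cfg(pcaps, &cfg);
 *	ice_cfg_phy_fec(&cfg, ICE_FEC_RS);
 *	status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL);
 */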
2448 
2449 /**
2450  * ice_get_link_status - get status of the HW network link
2451  * @pi: port information structure
2452  * @link_up: pointer to bool (true/false = linkup/linkdown)
2453  *
2454  * Variable link_up is true if the link is up, false if it is down.
2455  * The variable link_up is invalid if status is non-zero. As a
2456  * result of this call, link status reporting becomes enabled.
2457  */
2458 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
2459 {
2460 	struct ice_phy_info *phy_info;
2461 	enum ice_status status = 0;
2462 
2463 	if (!pi || !link_up)
2464 		return ICE_ERR_PARAM;
2465 
2466 	phy_info = &pi->phy;
2467 
2468 	if (phy_info->get_link_info) {
2469 		status = ice_update_link_info(pi);
2470 
2471 		if (status)
2472 			ice_debug(pi->hw, ICE_DBG_LINK,
2473 				  "get link status error, status = %d\n",
2474 				  status);
2475 	}
2476 
2477 	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
2478 
2479 	return status;
2480 }
2481 
2482 /**
2483  * ice_aq_set_link_restart_an
2484  * @pi: pointer to the port information structure
2485  * @ena_link: if true: enable link, if false: disable link
2486  * @cd: pointer to command details structure or NULL
2487  *
2488  * Sets up the link and restarts the Auto-Negotiation over the link.
2489  */
2490 enum ice_status
2491 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
2492 			   struct ice_sq_cd *cd)
2493 {
2494 	struct ice_aqc_restart_an *cmd;
2495 	struct ice_aq_desc desc;
2496 
2497 	cmd = &desc.params.restart_an;
2498 
2499 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
2500 
2501 	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
2502 	cmd->lport_num = pi->lport;
2503 	if (ena_link)
2504 		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
2505 	else
2506 		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
2507 
2508 	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
2509 }
2510 
2511 /**
2512  * ice_aq_set_event_mask
2513  * @hw: pointer to the HW struct
2514  * @port_num: port number of the physical function
2515  * @mask: event mask to be set
2516  * @cd: pointer to command details structure or NULL
2517  *
2518  * Set event mask (0x0613)
2519  */
2520 enum ice_status
2521 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
2522 		      struct ice_sq_cd *cd)
2523 {
2524 	struct ice_aqc_set_event_mask *cmd;
2525 	struct ice_aq_desc desc;
2526 
2527 	cmd = &desc.params.set_event_mask;
2528 
2529 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
2530 
2531 	cmd->lport_num = port_num;
2532 
2533 	cmd->event_mask = cpu_to_le16(mask);
2534 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2535 }
2536 
2537 /**
2538  * ice_aq_set_mac_loopback
2539  * @hw: pointer to the HW struct
2540  * @ena_lpbk: Enable or Disable loopback
2541  * @cd: pointer to command details structure or NULL
2542  *
2543  * Enable/disable loopback on a given port
2544  */
2545 enum ice_status
2546 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
2547 {
2548 	struct ice_aqc_set_mac_lb *cmd;
2549 	struct ice_aq_desc desc;
2550 
2551 	cmd = &desc.params.set_mac_lb;
2552 
2553 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
2554 	if (ena_lpbk)
2555 		cmd->lb_mode = ICE_AQ_MAC_LB_EN;
2556 
2557 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2558 }
2559 
2560 /**
2561  * ice_aq_set_port_id_led
2562  * @pi: pointer to the port information
2563  * @is_orig_mode: is this LED set to original mode (by the net-list)
2564  * @cd: pointer to command details structure or NULL
2565  *
2566  * Set LED value for the given port (0x06e9)
2567  */
2568 enum ice_status
2569 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
2570 		       struct ice_sq_cd *cd)
2571 {
2572 	struct ice_aqc_set_port_id_led *cmd;
2573 	struct ice_hw *hw = pi->hw;
2574 	struct ice_aq_desc desc;
2575 
2576 	cmd = &desc.params.set_port_id_led;
2577 
2578 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
2579 
2580 	if (is_orig_mode)
2581 		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
2582 	else
2583 		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
2584 
2585 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2586 }
2587 
2588 /**
2589  * ice_aq_sff_eeprom
2590  * @hw: pointer to the HW struct
2591  * @lport: bits [7:0] = logical port, bit [8] = logical port valid
2592  * @bus_addr: I2C bus address of the EEPROM (typically 0xA0, 0 = topo default)
2593  * @mem_addr: I2C offset. Lower 8 bits hold the address; upper 8 bits must be zero.
2594  * @page: QSFP page
2595  * @set_page: set or ignore the page
2596  * @data: pointer to data buffer to be read/written to the I2C device.
2597  * @length: 1-16 for read, 1 for write.
2598  * @write: false for read, true for write.
2599  * @cd: pointer to command details structure or NULL
2600  *
2601  * Read/Write SFF EEPROM (0x06EE)
2602  */
2603 enum ice_status
2604 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
2605 		  u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
2606 		  bool write, struct ice_sq_cd *cd)
2607 {
2608 	struct ice_aqc_sff_eeprom *cmd;
2609 	struct ice_aq_desc desc;
2610 	enum ice_status status;
2611 
2612 	if (!data || (mem_addr & 0xff00))
2613 		return ICE_ERR_PARAM;
2614 
2615 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
2616 	cmd = &desc.params.read_write_sff_param;
2617 	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF);
2618 	cmd->lport_num = (u8)(lport & 0xff);
2619 	cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
2620 	cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) &
2621 					 ICE_AQC_SFF_I2CBUS_7BIT_M) |
2622 					((set_page <<
2623 					  ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
2624 					 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
2625 	cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
2626 	cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
2627 	if (write)
2628 		cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE);
2629 
2630 	status = ice_aq_send_cmd(hw, &desc, data, length, cd);
2631 	return status;
2632 }
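
/* Example call (illustrative sketch): read the first 8 bytes of the module
 * EEPROM at the default bus address 0xA0, page 0, on logical port 0:
 *
 *	u8 buf[8];
 *
 *	status = ice_aq_sff_eeprom(hw, 0, 0xA0, 0x000, 0, 0, buf, sizeof(buf),
 *				   false, NULL);
 */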
2633 
2634 /**
2635  * __ice_aq_get_set_rss_lut
2636  * @hw: pointer to the hardware structure
2637  * @vsi_id: VSI FW index
2638  * @lut_type: LUT table type
2639  * @lut: pointer to the LUT buffer provided by the caller
2640  * @lut_size: size of the LUT buffer
2641  * @glob_lut_idx: global LUT index
2642  * @set: set true to set the table, false to get the table
2643  *
2644  * Internal function to get (0x0B05) or set (0x0B03) the RSS lookup table
2645  */
2646 static enum ice_status
2647 __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
2648 			 u16 lut_size, u8 glob_lut_idx, bool set)
2649 {
2650 	struct ice_aqc_get_set_rss_lut *cmd_resp;
2651 	struct ice_aq_desc desc;
2652 	enum ice_status status;
2653 	u16 flags = 0;
2654 
2655 	cmd_resp = &desc.params.get_set_rss_lut;
2656 
2657 	if (set) {
2658 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
2659 		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2660 	} else {
2661 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
2662 	}
2663 
2664 	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
2665 					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
2666 					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
2667 				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);
2668 
2669 	switch (lut_type) {
2670 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
2671 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
2672 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
2673 		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
2674 			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
2675 		break;
2676 	default:
2677 		status = ICE_ERR_PARAM;
2678 		goto ice_aq_get_set_rss_lut_exit;
2679 	}
2680 
2681 	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
2682 		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
2683 			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
2684 
2685 		if (!set)
2686 			goto ice_aq_get_set_rss_lut_send;
2687 	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
2688 		if (!set)
2689 			goto ice_aq_get_set_rss_lut_send;
2690 	} else {
2691 		goto ice_aq_get_set_rss_lut_send;
2692 	}
2693 
2694 	/* LUT size is only valid for Global and PF table types */
2695 	switch (lut_size) {
2696 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
2697 		break;
2698 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
2699 		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
2700 			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
2701 			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
2702 		break;
2703 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
2704 		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
2705 			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
2706 				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
2707 				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
2708 			break;
2709 		}
2710 		/* fall-through */
2711 	default:
2712 		status = ICE_ERR_PARAM;
2713 		goto ice_aq_get_set_rss_lut_exit;
2714 	}
2715 
2716 ice_aq_get_set_rss_lut_send:
2717 	cmd_resp->flags = cpu_to_le16(flags);
2718 	status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
2719 
2720 ice_aq_get_set_rss_lut_exit:
2721 	return status;
2722 }
2723 
2724 /**
2725  * ice_aq_get_rss_lut
2726  * @hw: pointer to the hardware structure
2727  * @vsi_handle: software VSI handle
2728  * @lut_type: LUT table type
2729  * @lut: pointer to the LUT buffer provided by the caller
2730  * @lut_size: size of the LUT buffer
2731  *
2732  * get the RSS lookup table, PF or VSI type
2733  */
2734 enum ice_status
2735 ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
2736 		   u8 *lut, u16 lut_size)
2737 {
2738 	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
2739 		return ICE_ERR_PARAM;
2740 
2741 	return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2742 					lut_type, lut, lut_size, 0, false);
2743 }
2744 
2745 /**
2746  * ice_aq_set_rss_lut
2747  * @hw: pointer to the hardware structure
2748  * @vsi_handle: software VSI handle
2749  * @lut_type: LUT table type
2750  * @lut: pointer to the LUT buffer provided by the caller
2751  * @lut_size: size of the LUT buffer
2752  *
2753  * set the RSS lookup table, PF or VSI type
2754  */
2755 enum ice_status
2756 ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
2757 		   u8 *lut, u16 lut_size)
2758 {
2759 	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
2760 		return ICE_ERR_PARAM;
2761 
2762 	return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2763 					lut_type, lut, lut_size, 0, true);
2764 }
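
/* Example (illustrative sketch): program a VSI-type LUT for a VSI handle
 * with a caller-provided table; table size checks for PF/global LUTs are
 * handled by the internal helper above:
 *
 *	status = ice_aq_set_rss_lut(hw, vsi_handle,
 *				    ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI,
 *				    lut, lut_size);
 */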
2765 
2766 /**
2767  * __ice_aq_get_set_rss_key
2768  * @hw: pointer to the HW struct
2769  * @vsi_id: VSI FW index
2770  * @key: pointer to key info struct
2771  * @set: set true to set the key, false to get the key
2772  *
2773  * get (0x0B04) or set (0x0B02) the RSS key per VSI
2774  */
2775 static enum ice_status
2776 __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
2777 			 struct ice_aqc_get_set_rss_keys *key,
2778 			 bool set)
2779 {
2780 	struct ice_aqc_get_set_rss_key *cmd_resp;
2781 	u16 key_size = sizeof(*key);
2782 	struct ice_aq_desc desc;
2783 
2784 	cmd_resp = &desc.params.get_set_rss_key;
2785 
2786 	if (set) {
2787 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
2788 		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2789 	} else {
2790 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
2791 	}
2792 
2793 	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
2794 					 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
2795 					ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
2796 				       ICE_AQC_GSET_RSS_KEY_VSI_VALID);
2797 
2798 	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
2799 }
2800 
2801 /**
2802  * ice_aq_get_rss_key
2803  * @hw: pointer to the HW struct
2804  * @vsi_handle: software VSI handle
2805  * @key: pointer to key info struct
2806  *
2807  * get the RSS key per VSI
2808  */
2809 enum ice_status
2810 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
2811 		   struct ice_aqc_get_set_rss_keys *key)
2812 {
2813 	if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
2814 		return ICE_ERR_PARAM;
2815 
2816 	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2817 					key, false);
2818 }
2819 
2820 /**
2821  * ice_aq_set_rss_key
2822  * @hw: pointer to the HW struct
2823  * @vsi_handle: software VSI handle
2824  * @keys: pointer to key info struct
2825  *
2826  * set the RSS key per VSI
2827  */
2828 enum ice_status
2829 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
2830 		   struct ice_aqc_get_set_rss_keys *keys)
2831 {
2832 	if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
2833 		return ICE_ERR_PARAM;
2834 
2835 	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2836 					keys, true);
2837 }
2838 
2839 /**
2840  * ice_aq_add_lan_txq
2841  * @hw: pointer to the hardware structure
2842  * @num_qgrps: Number of added queue groups
2843  * @qg_list: list of queue groups to be added
2844  * @buf_size: size of buffer for indirect command
2845  * @cd: pointer to command details structure or NULL
2846  *
2847  * Add Tx LAN queue (0x0C30)
2848  *
2849  * NOTE:
2850  * Prior to calling add Tx LAN queue:
2851  * Initialize the following as part of the Tx queue context:
2852  * Completion queue ID if the queue uses Completion queue, Quanta profile,
2853  * Completion queue ID if the queue uses a completion queue, Quanta profile,
2854  *
2855  * After add Tx LAN queue AQ command is completed:
2856  * Interrupts should be associated with specific queues,
2857  * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
2858  * flow.
2859  */
2860 static enum ice_status
2861 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
2862 		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
2863 		   struct ice_sq_cd *cd)
2864 {
2865 	u16 i, sum_header_size, sum_q_size = 0;
2866 	struct ice_aqc_add_tx_qgrp *list;
2867 	struct ice_aqc_add_txqs *cmd;
2868 	struct ice_aq_desc desc;
2869 
2870 	cmd = &desc.params.add_txqs;
2871 
2872 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
2873 
2874 	if (!qg_list)
2875 		return ICE_ERR_PARAM;
2876 
2877 	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
2878 		return ICE_ERR_PARAM;
2879 
2880 	sum_header_size = num_qgrps *
2881 		(sizeof(*qg_list) - sizeof(*qg_list->txqs));
2882 
2883 	list = qg_list;
2884 	for (i = 0; i < num_qgrps; i++) {
2885 		struct ice_aqc_add_txqs_perq *q = list->txqs;
2886 
2887 		sum_q_size += list->num_txqs * sizeof(*q);
2888 		list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);
2889 	}
2890 
2891 	if (buf_size != (sum_header_size + sum_q_size))
2892 		return ICE_ERR_PARAM;
2893 
2894 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2895 
2896 	cmd->num_qgrps = num_qgrps;
2897 
2898 	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
2899 }
2900 
2901 /**
2902  * ice_aq_dis_lan_txq
2903  * @hw: pointer to the hardware structure
2904  * @num_qgrps: number of groups in the list
2905  * @qg_list: the list of groups to disable
2906  * @buf_size: the total size of the qg_list buffer in bytes
2907  * @rst_src: if called due to reset, specifies the reset source
2908  * @vmvf_num: the relative VM or VF number that is undergoing the reset
2909  * @cd: pointer to command details structure or NULL
2910  *
2911  * Disable LAN Tx queue (0x0C31)
2912  */
2913 static enum ice_status
2914 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
2915 		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
2916 		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
2917 		   struct ice_sq_cd *cd)
2918 {
2919 	struct ice_aqc_dis_txqs *cmd;
2920 	struct ice_aq_desc desc;
2921 	enum ice_status status;
2922 	u16 i, sz = 0;
2923 
2924 	cmd = &desc.params.dis_txqs;
2925 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
2926 
2927 	/* qg_list can be NULL only in VM/VF reset flow */
2928 	if (!qg_list && !rst_src)
2929 		return ICE_ERR_PARAM;
2930 
2931 	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
2932 		return ICE_ERR_PARAM;
2933 
2934 	cmd->num_entries = num_qgrps;
2935 
2936 	cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
2937 					    ICE_AQC_Q_DIS_TIMEOUT_M);
2938 
2939 	switch (rst_src) {
2940 	case ICE_VM_RESET:
2941 		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
2942 		cmd->vmvf_and_timeout |=
2943 			cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
2944 		break;
2945 	case ICE_VF_RESET:
2946 		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
2947 		/* In this case, FW expects vmvf_num to be absolute VF ID */
2948 		cmd->vmvf_and_timeout |=
2949 			cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
2950 				    ICE_AQC_Q_DIS_VMVF_NUM_M);
2951 		break;
2952 	case ICE_NO_RESET:
2953 	default:
2954 		break;
2955 	}
2956 
2957 	/* flush pipe on time out */
2958 	/* flush the pipe on timeout */
2959 	/* If no queue group info, we are in a reset flow. Issue the AQ */
2960 	if (!qg_list)
2961 		goto do_aq;
2962 
2963 	/* set RD bit to indicate that command buffer is provided by the driver
2964 	 * and it needs to be read by the firmware
2965 	 */
2966 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2967 
2968 	for (i = 0; i < num_qgrps; ++i) {
2969 		/* Calculate the size taken up by the queue IDs in this group */
2970 		sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);
2971 
2972 		/* Add the size of the group header */
2973 		sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);
2974 
2975 		/* If the num of queues is even, add 2 bytes of padding */
2976 		if ((qg_list[i].num_qs % 2) == 0)
2977 			sz += 2;
2978 	}
2979 
2980 	if (buf_size != sz)
2981 		return ICE_ERR_PARAM;
2982 
2983 do_aq:
2984 	status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
2985 	if (status) {
2986 		if (!qg_list)
2987 			ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
2988 				  vmvf_num, hw->adminq.sq_last_status);
2989 		else
2990 			ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
2991 				  le16_to_cpu(qg_list[0].q_id[0]),
2992 				  hw->adminq.sq_last_status);
2993 	}
2994 	return status;
2995 }
2996 
2997 /* End of FW Admin Queue command wrappers */
2998 
2999 /**
3000  * ice_write_byte - write a byte to a packed context structure
3001  * @src_ctx:  the context structure to read from
3002  * @dest_ctx: the context to be written to
3003  * @ce_info:  a description of the struct to be filled
3004  */
3005 static void
3006 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3007 {
3008 	u8 src_byte, dest_byte, mask;
3009 	u8 *from, *dest;
3010 	u16 shift_width;
3011 
3012 	/* copy from the next struct field */
3013 	from = src_ctx + ce_info->offset;
3014 
3015 	/* prepare the bits and mask */
3016 	shift_width = ce_info->lsb % 8;
3017 	mask = (u8)(BIT(ce_info->width) - 1);
3018 
3019 	src_byte = *from;
3020 	src_byte &= mask;
3021 
3022 	/* shift to correct alignment */
3023 	mask <<= shift_width;
3024 	src_byte <<= shift_width;
3025 
3026 	/* get the current bits from the target bit string */
3027 	dest = dest_ctx + (ce_info->lsb / 8);
3028 
3029 	memcpy(&dest_byte, dest, sizeof(dest_byte));
3030 
3031 	dest_byte &= ~mask;	/* get the bits not changing */
3032 	dest_byte |= src_byte;	/* add in the new bits */
3033 
3034 	/* put it all back */
3035 	memcpy(dest, &dest_byte, sizeof(dest_byte));
3036 }
3037 
3038 /**
3039  * ice_write_word - write a word to a packed context structure
3040  * @src_ctx:  the context structure to read from
3041  * @dest_ctx: the context to be written to
3042  * @ce_info:  a description of the struct to be filled
3043  */
3044 static void
3045 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3046 {
3047 	u16 src_word, mask;
3048 	__le16 dest_word;
3049 	u8 *from, *dest;
3050 	u16 shift_width;
3051 
3052 	/* copy from the next struct field */
3053 	from = src_ctx + ce_info->offset;
3054 
3055 	/* prepare the bits and mask */
3056 	shift_width = ce_info->lsb % 8;
3057 	mask = BIT(ce_info->width) - 1;
3058 
3059 	/* don't swizzle the bits until after the mask because the mask bits
3060 	 * will be in a different bit position on big endian machines
3061 	 */
3062 	src_word = *(u16 *)from;
3063 	src_word &= mask;
3064 
3065 	/* shift to correct alignment */
3066 	mask <<= shift_width;
3067 	src_word <<= shift_width;
3068 
3069 	/* get the current bits from the target bit string */
3070 	dest = dest_ctx + (ce_info->lsb / 8);
3071 
3072 	memcpy(&dest_word, dest, sizeof(dest_word));
3073 
3074 	dest_word &= ~(cpu_to_le16(mask));	/* get the bits not changing */
3075 	dest_word |= cpu_to_le16(src_word);	/* add in the new bits */
3076 
3077 	/* put it all back */
3078 	memcpy(dest, &dest_word, sizeof(dest_word));
3079 }
3080 
3081 /**
3082  * ice_write_dword - write a dword to a packed context structure
3083  * @src_ctx:  the context structure to read from
3084  * @dest_ctx: the context to be written to
3085  * @ce_info:  a description of the struct to be filled
3086  */
3087 static void
3088 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3089 {
3090 	u32 src_dword, mask;
3091 	__le32 dest_dword;
3092 	u8 *from, *dest;
3093 	u16 shift_width;
3094 
3095 	/* copy from the next struct field */
3096 	from = src_ctx + ce_info->offset;
3097 
3098 	/* prepare the bits and mask */
3099 	shift_width = ce_info->lsb % 8;
3100 
3101 	/* if the field width is exactly 32 on an x86 machine, then the shift
3102 	 * operation will not work because the SHL instruction's count is masked
3103 	 * to 5 bits, so the shift will do nothing
3104 	 */
3105 	if (ce_info->width < 32)
3106 		mask = BIT(ce_info->width) - 1;
3107 	else
3108 		mask = (u32)~0;
3109 
3110 	/* don't swizzle the bits until after the mask because the mask bits
3111 	 * will be in a different bit position on big endian machines
3112 	 */
3113 	src_dword = *(u32 *)from;
3114 	src_dword &= mask;
3115 
3116 	/* shift to correct alignment */
3117 	mask <<= shift_width;
3118 	src_dword <<= shift_width;
3119 
3120 	/* get the current bits from the target bit string */
3121 	dest = dest_ctx + (ce_info->lsb / 8);
3122 
3123 	memcpy(&dest_dword, dest, sizeof(dest_dword));
3124 
3125 	dest_dword &= ~(cpu_to_le32(mask));	/* get the bits not changing */
3126 	dest_dword |= cpu_to_le32(src_dword);	/* add in the new bits */
3127 
3128 	/* put it all back */
3129 	memcpy(dest, &dest_dword, sizeof(dest_dword));
3130 }
3131 
3132 /**
3133  * ice_write_qword - write a qword to a packed context structure
3134  * @src_ctx:  the context structure to read from
3135  * @dest_ctx: the context to be written to
3136  * @ce_info:  a description of the struct to be filled
3137  */
3138 static void
3139 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3140 {
3141 	u64 src_qword, mask;
3142 	__le64 dest_qword;
3143 	u8 *from, *dest;
3144 	u16 shift_width;
3145 
3146 	/* copy from the next struct field */
3147 	from = src_ctx + ce_info->offset;
3148 
3149 	/* prepare the bits and mask */
3150 	shift_width = ce_info->lsb % 8;
3151 
3152 	/* if the field width is exactly 64 on an x86 machine, then the shift
3153 	 * operation will not work because the SHL instruction's count is masked
3154 	 * to 6 bits, so the shift will do nothing
3155 	 */
3156 	if (ce_info->width < 64)
3157 		mask = BIT_ULL(ce_info->width) - 1;
3158 	else
3159 		mask = (u64)~0;
3160 
3161 	/* don't swizzle the bits until after the mask because the mask bits
3162 	 * will be in a different bit position on big endian machines
3163 	 */
3164 	src_qword = *(u64 *)from;
3165 	src_qword &= mask;
3166 
3167 	/* shift to correct alignment */
3168 	mask <<= shift_width;
3169 	src_qword <<= shift_width;
3170 
3171 	/* get the current bits from the target bit string */
3172 	dest = dest_ctx + (ce_info->lsb / 8);
3173 
3174 	memcpy(&dest_qword, dest, sizeof(dest_qword));
3175 
3176 	dest_qword &= ~(cpu_to_le64(mask));	/* get the bits not changing */
3177 	dest_qword |= cpu_to_le64(src_qword);	/* add in the new bits */
3178 
3179 	/* put it all back */
3180 	memcpy(dest, &dest_qword, sizeof(dest_qword));
3181 }
3182 
3183 /**
3184  * ice_set_ctx - set context bits in packed structure
3185  * @src_ctx:  pointer to a generic non-packed context structure
3186  * @dest_ctx: pointer to memory for the packed structure
3187  * @ce_info:  a description of the structure to be transformed
3188  */
3189 enum ice_status
3190 ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3191 {
3192 	int f;
3193 
3194 	for (f = 0; ce_info[f].width; f++) {
3195 		/* We have to deal with each element of the FW response
3196 		 * using the correct size so that we are correct regardless
3197 		 * of the endianness of the machine.
3198 		 */
3199 		switch (ce_info[f].size_of) {
3200 		case sizeof(u8):
3201 			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
3202 			break;
3203 		case sizeof(u16):
3204 			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
3205 			break;
3206 		case sizeof(u32):
3207 			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
3208 			break;
3209 		case sizeof(u64):
3210 			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
3211 			break;
3212 		default:
3213 			return ICE_ERR_INVAL_SIZE;
3214 		}
3215 	}
3216 
3217 	return 0;
3218 }
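
/* A ce_info table, as consumed by ice_set_ctx() above, is an array of
 * ice_ctx_ele entries terminated by a zero-width entry. Each entry gives
 * the source field's offset and size within the unpacked structure plus
 * the destination field's width and LSB position within the packed HW
 * layout; ice_set_ctx() dispatches on size_of to one of the byte/word/
 * dword/qword writers above.
 */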
3219 
3220 /**
3221  * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
3222  * @hw: pointer to the HW struct
3223  * @vsi_handle: software VSI handle
3224  * @tc: TC number
3225  * @q_handle: software queue handle
3226  */
3227 struct ice_q_ctx *
3228 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
3229 {
3230 	struct ice_vsi_ctx *vsi;
3231 	struct ice_q_ctx *q_ctx;
3232 
3233 	vsi = ice_get_vsi_ctx(hw, vsi_handle);
3234 	if (!vsi)
3235 		return NULL;
3236 	if (q_handle >= vsi->num_lan_q_entries[tc])
3237 		return NULL;
3238 	if (!vsi->lan_q_ctx[tc])
3239 		return NULL;
3240 	q_ctx = vsi->lan_q_ctx[tc];
3241 	return &q_ctx[q_handle];
3242 }
3243 
3244 /**
3245  * ice_ena_vsi_txq
3246  * @pi: port information structure
3247  * @vsi_handle: software VSI handle
3248  * @tc: TC number
3249  * @q_handle: software queue handle
3250  * @num_qgrps: Number of added queue groups
3251  * @buf: list of queue groups to be added
3252  * @buf_size: size of buffer for indirect command
3253  * @cd: pointer to command details structure or NULL
3254  *
3255  * This function adds one LAN queue
3256  */
3257 enum ice_status
3258 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
3259 		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
3260 		struct ice_sq_cd *cd)
3261 {
3262 	struct ice_aqc_txsched_elem_data node = { 0 };
3263 	struct ice_sched_node *parent;
3264 	struct ice_q_ctx *q_ctx;
3265 	enum ice_status status;
3266 	struct ice_hw *hw;
3267 
3268 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
3269 		return ICE_ERR_CFG;
3270 
3271 	if (num_qgrps > 1 || buf->num_txqs > 1)
3272 		return ICE_ERR_MAX_LIMIT;
3273 
3274 	hw = pi->hw;
3275 
3276 	if (!ice_is_vsi_valid(hw, vsi_handle))
3277 		return ICE_ERR_PARAM;
3278 
3279 	mutex_lock(&pi->sched_lock);
3280 
3281 	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
3282 	if (!q_ctx) {
3283 		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
3284 			  q_handle);
3285 		status = ICE_ERR_PARAM;
3286 		goto ena_txq_exit;
3287 	}
3288 
3289 	/* find a parent node */
3290 	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
3291 					    ICE_SCHED_NODE_OWNER_LAN);
3292 	if (!parent) {
3293 		status = ICE_ERR_PARAM;
3294 		goto ena_txq_exit;
3295 	}
3296 
3297 	buf->parent_teid = parent->info.node_teid;
3298 	node.parent_teid = parent->info.node_teid;
3299 	/* Mark the values in the "generic" section as valid. The default
3300 	 * value in the "generic" section is zero. This means that:
3301 	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
3302 	 * - 0 priority among siblings, indicated by Bits 1-3.
3303 	 * - WFQ, indicated by Bit 4.
3304 	 * - 0 Adjustment value is used in PSM credit update flow, indicated by
3305 	 *   Bits 5-6.
3306 	 * - Bit 7 is reserved.
3307 	 * Without setting the generic section as valid in valid_sections, the
3308 	 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
3309 	 */
3310 	buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;
3311 
3312 	/* add the LAN queue */
3313 	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
3314 	if (status) {
3315 		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
3316 			  le16_to_cpu(buf->txqs[0].txq_id),
3317 			  hw->adminq.sq_last_status);
3318 		goto ena_txq_exit;
3319 	}
3320 
3321 	node.node_teid = buf->txqs[0].q_teid;
3322 	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
3323 	q_ctx->q_handle = q_handle;
3324 	q_ctx->q_teid = le32_to_cpu(node.node_teid);
3325 
3326 	/* add a leaf node into scheduler tree queue layer */
3327 	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
3328 	if (!status)
3329 		status = ice_sched_replay_q_bw(pi, q_ctx);
3330 
3331 ena_txq_exit:
3332 	mutex_unlock(&pi->sched_lock);
3333 	return status;
3334 }
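
/* Illustrative sketch (editorial addition, not part of the original driver):
 * how a caller might enable a single LAN Tx queue. ice_ena_vsi_txq() only
 * accepts one queue group with one queue, and it fills parent_teid and
 * valid_sections itself. The helper name and the hw_txq_id parameter are
 * hypothetical; a real caller would also have written the Tx queue context
 * portion of the buffer, which is handled outside this file.
 */
static enum ice_status __maybe_unused
ice_ena_single_txq_sketch(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
			  u16 q_handle, u16 hw_txq_id)
{
	struct ice_aqc_add_tx_qgrp qg_buf = { 0 };

	qg_buf.num_txqs = 1;
	qg_buf.txqs[0].txq_id = cpu_to_le16(hw_txq_id);

	return ice_ena_vsi_txq(pi, vsi_handle, tc, q_handle, 1, &qg_buf,
			       sizeof(qg_buf), NULL);
}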
3335 
3336 /**
3337  * ice_dis_vsi_txq
3338  * @pi: port information structure
3339  * @vsi_handle: software VSI handle
3340  * @tc: TC number
3341  * @num_queues: number of queues
3342  * @q_handles: pointer to software queue handle array
3343  * @q_ids: pointer to the q_id array
3344  * @q_teids: pointer to queue node teids
3345  * @rst_src: if called due to reset, specifies the reset source
3346  * @vmvf_num: the relative VM or VF number that is undergoing the reset
3347  * @cd: pointer to command details structure or NULL
3348  *
3349  * This function removes queues and their corresponding nodes in SW DB
3350  */
3351 enum ice_status
3352 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
3353 		u16 *q_handles, u16 *q_ids, u32 *q_teids,
3354 		enum ice_disq_rst_src rst_src, u16 vmvf_num,
3355 		struct ice_sq_cd *cd)
3356 {
3357 	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
3358 	struct ice_aqc_dis_txq_item qg_list;
3359 	struct ice_q_ctx *q_ctx;
3360 	u16 i;
3361 
3362 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
3363 		return ICE_ERR_CFG;
3364 
3365 	if (!num_queues) {
3366 		/* if the queue is already disabled but the disable queue
3367 		 * command still has to be sent to complete the VF reset,
3368 		 * call ice_aq_dis_lan_txq without any queue information
3369 		 */
3370 		if (rst_src)
3371 			return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src,
3372 						  vmvf_num, NULL);
3373 		return ICE_ERR_CFG;
3374 	}
3375 
3376 	mutex_lock(&pi->sched_lock);
3377 
3378 	for (i = 0; i < num_queues; i++) {
3379 		struct ice_sched_node *node;
3380 
3381 		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
3382 		if (!node)
3383 			continue;
3384 		q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handles[i]);
3385 		if (!q_ctx) {
3386 			ice_debug(pi->hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
3387 				  q_handles[i]);
3388 			continue;
3389 		}
3390 		if (q_ctx->q_handle != q_handles[i]) {
3391 			ice_debug(pi->hw, ICE_DBG_SCHED, "Err: handles %d %d\n",
3392 				  q_ctx->q_handle, q_handles[i]);
3393 			continue;
3394 		}
3395 		qg_list.parent_teid = node->info.parent_teid;
3396 		qg_list.num_qs = 1;
3397 		qg_list.q_id[0] = cpu_to_le16(q_ids[i]);
3398 		status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
3399 					    sizeof(qg_list), rst_src, vmvf_num,
3400 					    cd);
3401 
3402 		if (status)
3403 			break;
3404 		ice_free_sched_node(pi, node);
3405 		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
3406 	}
3407 	mutex_unlock(&pi->sched_lock);
3408 	return status;
3409 }
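
/* Illustrative sketch (editorial addition, not part of the original driver):
 * disabling the single queue enabled above. The TEID is the one that
 * ice_ena_vsi_txq() stored in the queue context, and ICE_NO_RESET is assumed
 * to be the "no reset in progress" value of enum ice_disq_rst_src. The helper
 * name and parameters are hypothetical.
 */
static enum ice_status __maybe_unused
ice_dis_single_txq_sketch(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
			  u16 q_handle, u16 hw_txq_id, u32 q_teid)
{
	u16 q_handles[1] = { q_handle };
	u16 q_ids[1] = { hw_txq_id };
	u32 q_teids[1] = { q_teid };

	/* vmvf_num is only meaningful when rst_src indicates a VM/VF reset */
	return ice_dis_vsi_txq(pi, vsi_handle, tc, 1, q_handles, q_ids,
			       q_teids, ICE_NO_RESET, 0, NULL);
}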
3410 
3411 /**
3412  * ice_cfg_vsi_qs - configure the new/existing VSI queues
3413  * @pi: port information structure
3414  * @vsi_handle: software VSI handle
3415  * @tc_bitmap: TC bitmap
3416  * @maxqs: max queues array per TC
3417  * @owner: LAN or RDMA
3418  *
3419  * This function adds/updates the VSI queues per TC.
3420  */
3421 static enum ice_status
3422 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
3423 	       u16 *maxqs, u8 owner)
3424 {
3425 	enum ice_status status = 0;
3426 	u8 i;
3427 
3428 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
3429 		return ICE_ERR_CFG;
3430 
3431 	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
3432 		return ICE_ERR_PARAM;
3433 
3434 	mutex_lock(&pi->sched_lock);
3435 
3436 	ice_for_each_traffic_class(i) {
3437 		/* configuration is possible only if TC node is present */
3438 		if (!ice_sched_get_tc_node(pi, i))
3439 			continue;
3440 
3441 		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
3442 					   ice_is_tc_ena(tc_bitmap, i));
3443 		if (status)
3444 			break;
3445 	}
3446 
3447 	mutex_unlock(&pi->sched_lock);
3448 	return status;
3449 }
3450 
3451 /**
3452  * ice_cfg_vsi_lan - configure VSI LAN queues
3453  * @pi: port information structure
3454  * @vsi_handle: software VSI handle
3455  * @tc_bitmap: TC bitmap
3456  * @max_lanqs: max LAN queues array per TC
3457  *
3458  * This function adds/updates the VSI LAN queues per TC.
3459  */
3460 enum ice_status
3461 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
3462 		u16 *max_lanqs)
3463 {
3464 	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
3465 			      ICE_SCHED_NODE_OWNER_LAN);
3466 }
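
/* Illustrative sketch (editorial addition, not part of the original driver):
 * building the per-TC maximum-queue array for ice_cfg_vsi_lan(). Only TC 0
 * is enabled here. ICE_MAX_TRAFFIC_CLASS is assumed to be the per-port TC
 * count from the driver headers; the helper name and num_txq parameter are
 * hypothetical.
 */
static enum ice_status __maybe_unused
ice_cfg_vsi_lan_tc0_sketch(struct ice_port_info *pi, u16 vsi_handle,
			   u16 num_txq)
{
	u16 max_lanqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	u8 tc_bitmap = BIT(0);	/* traffic class 0 only */

	max_lanqs[0] = num_txq;

	return ice_cfg_vsi_lan(pi, vsi_handle, tc_bitmap, max_lanqs);
}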
3467 
3468 /**
3469  * ice_replay_pre_init - replay pre-initialization
3470  * @hw: pointer to the HW struct
3471  *
3472  * Initializes required config data for VSI, FD, ACL, and RSS before replay.
3473  */
3474 static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
3475 {
3476 	struct ice_switch_info *sw = hw->switch_info;
3477 	u8 i;
3478 
3479 	/* Delete old entries from the replay filter list head, if there are any */
3480 	ice_rm_all_sw_replay_rule_info(hw);
3481 	/* At the start of replay, move entries into the replay_rules list;
3482 	 * this allows rule entries to be added back to the filt_rules list,
3483 	 * which is the operational list.
3484 	 */
3485 	for (i = 0; i < ICE_SW_LKUP_LAST; i++)
3486 		list_replace_init(&sw->recp_list[i].filt_rules,
3487 				  &sw->recp_list[i].filt_replay_rules);
3488 
3489 	return 0;
3490 }
3491 
3492 /**
3493  * ice_replay_vsi - replay VSI configuration
3494  * @hw: pointer to the HW struct
3495  * @vsi_handle: driver VSI handle
3496  *
3497  * Restore all VSI configuration after reset. This function must be called
3498  * with the main VSI first.
3499  */
3500 enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
3501 {
3502 	enum ice_status status;
3503 
3504 	if (!ice_is_vsi_valid(hw, vsi_handle))
3505 		return ICE_ERR_PARAM;
3506 
3507 	/* Replay pre-initialization if there is any */
3508 	if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
3509 		status = ice_replay_pre_init(hw);
3510 		if (status)
3511 			return status;
3512 	}
3513 
3514 	/* Replay per VSI all filters */
3515 	status = ice_replay_vsi_all_fltr(hw, vsi_handle);
3516 	return status;
3517 }
3518 
3519 /**
3520  * ice_replay_post - post replay configuration cleanup
3521  * @hw: pointer to the HW struct
3522  *
3523  * Post replay cleanup.
3524  */
3525 void ice_replay_post(struct ice_hw *hw)
3526 {
3527 	/* Delete old entries from replay filter list head */
3528 	ice_rm_all_sw_replay_rule_info(hw);
3529 }
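
/* Illustrative sketch (editorial addition, not part of the original driver):
 * the replay ordering implied by the kernel-doc above. The main VSI must be
 * replayed first (it also runs the pre-init step), the remaining VSIs follow,
 * and ice_replay_post() drops the saved replay rules. The helper name and the
 * assumption that VSI handles are small indices below num_vsi_handles are
 * hypothetical.
 */
static void __maybe_unused
ice_replay_all_vsi_sketch(struct ice_hw *hw, u16 num_vsi_handles)
{
	u16 i;

	if (ice_replay_vsi(hw, ICE_MAIN_VSI_HANDLE))
		return;

	for (i = 0; i < num_vsi_handles; i++) {
		if (i == ICE_MAIN_VSI_HANDLE || !ice_is_vsi_valid(hw, i))
			continue;
		if (ice_replay_vsi(hw, i))
			break;
	}

	/* Cleanup: remove the stale entries kept for replay */
	ice_replay_post(hw);
}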
3530 
3531 /**
3532  * ice_stat_update40 - read 40 bit stat from the chip and update stat values
3533  * @hw: ptr to the hardware info
3534  * @reg: offset of 64 bit HW register to read from
3535  * @prev_stat_loaded: bool to specify if previous stats are loaded
3536  * @prev_stat: ptr to previous loaded stat value
3537  * @cur_stat: ptr to current stat value
3538  */
3539 void
3540 ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
3541 		  u64 *prev_stat, u64 *cur_stat)
3542 {
3543 	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
3544 
3545 	/* Device stats are not reset at PFR; they likely will not be zeroed
3546 	 * when the driver starts. Thus, save the value from the first read
3547 	 * without adding it to the statistic value so that we report stats
3548 	 * which count up from zero.
3549 	 */
3550 	if (!prev_stat_loaded) {
3551 		*prev_stat = new_data;
3552 		return;
3553 	}
3554 
3555 	/* Calculate the difference between the new and old values, and then
3556 	 * add it to the software stat value.
3557 	 */
3558 	if (new_data >= *prev_stat)
3559 		*cur_stat += new_data - *prev_stat;
3560 	else
3561 		/* to manage the potential roll-over */
3562 		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
3563 
3564 	/* Update the previously stored value to prepare for next read */
3565 	*prev_stat = new_data;
3566 }
3567 
3568 /**
3569  * ice_stat_update32 - read 32 bit stat from the chip and update stat values
3570  * @hw: ptr to the hardware info
3571  * @reg: offset of HW register to read from
3572  * @prev_stat_loaded: bool to specify if previous stats are loaded
3573  * @prev_stat: ptr to previous loaded stat value
3574  * @cur_stat: ptr to current stat value
3575  */
3576 void
3577 ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
3578 		  u64 *prev_stat, u64 *cur_stat)
3579 {
3580 	u32 new_data;
3581 
3582 	new_data = rd32(hw, reg);
3583 
3584 	/* Device stats are not reset at PFR; they likely will not be zeroed
3585 	 * when the driver starts. Thus, save the value from the first read
3586 	 * without adding it to the statistic value so that we report stats
3587 	 * which count up from zero.
3588 	 */
3589 	if (!prev_stat_loaded) {
3590 		*prev_stat = new_data;
3591 		return;
3592 	}
3593 
3594 	/* Calculate the difference between the new and old values, and then
3595 	 * add it to the software stat value.
3596 	 */
3597 	if (new_data >= *prev_stat)
3598 		*cur_stat += new_data - *prev_stat;
3599 	else
3600 		/* to manage the potential roll-over */
3601 		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
3602 
3603 	/* Update the previously stored value to prepare for next read */
3604 	*prev_stat = new_data;
3605 }
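
/* Illustrative sketch (editorial addition, not part of the original driver):
 * a worked example of the 32-bit roll-over handling above. The register
 * offset is treated as an opaque statistics register; the helper name is
 * hypothetical.
 */
static void __maybe_unused
ice_stat_update32_rollover_sketch(struct ice_hw *hw, u32 reg)
{
	u64 prev_stat = 0, cur_stat = 0;

	/* First read only latches the baseline; cur_stat remains 0 */
	ice_stat_update32(hw, reg, false, &prev_stat, &cur_stat);

	/* Subsequent reads accumulate the delta. If the counter wrapped,
	 * e.g. prev_stat = 0xFFFFFFF0 and the new value is 0x10, the
	 * roll-over branch adds (0x10 + 2^32) - 0xFFFFFFF0 = 0x20 rather
	 * than a huge bogus delta.
	 */
	ice_stat_update32(hw, reg, true, &prev_stat, &cur_stat);
}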
3606 
3607 /**
3608  * ice_sched_query_elem - query element information from HW
3609  * @hw: pointer to the HW struct
3610  * @node_teid: node TEID to be queried
3611  * @buf: buffer to hold element information
3612  *
3613  * This function queries HW element information
3614  */
3615 enum ice_status
3616 ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
3617 		     struct ice_aqc_get_elem *buf)
3618 {
3619 	u16 buf_size, num_elem_ret = 0;
3620 	enum ice_status status;
3621 
3622 	buf_size = sizeof(*buf);
3623 	memset(buf, 0, buf_size);
3624 	buf->generic[0].node_teid = cpu_to_le32(node_teid);
3625 	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
3626 					  NULL);
3627 	if (status || num_elem_ret != 1)
3628 		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
3629 	return status;
3630 }
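
/* Illustrative sketch (editorial addition, not part of the original driver):
 * querying the scheduler leaf node of a queue by the TEID saved in its
 * software queue context when the queue was enabled. The helper name is
 * hypothetical; struct ice_aqc_get_elem and its generic[0] layout come from
 * ice_sched_query_elem() above.
 */
static enum ice_status __maybe_unused
ice_query_q_node_sketch(struct ice_hw *hw, struct ice_q_ctx *q_ctx)
{
	struct ice_aqc_get_elem elem_buf;
	enum ice_status status;

	/* q_ctx->q_teid is kept in CPU order; ice_sched_query_elem() does the
	 * little-endian conversion before sending the admin queue command.
	 */
	status = ice_sched_query_elem(hw, q_ctx->q_teid, &elem_buf);
	if (status)
		ice_debug(hw, ICE_DBG_SCHED, "query of teid %u failed\n",
			  q_ctx->q_teid);

	return status;
}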
3631