1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 #include "ice_common.h"
5 #include "ice_sched.h"
6 #include "ice_adminq_cmd.h"
7 
8 #define ICE_PF_RESET_WAIT_COUNT	200
9 
10 #define ICE_PROG_FLEX_ENTRY(hw, rxdid, mdid, idx) \
11 	wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(rxdid), \
12 	     ((ICE_RX_OPC_MDID << \
13 	       GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_S) & \
14 	      GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_M) | \
15 	     (((mdid) << GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_S) & \
16 	      GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_M))
17 
18 #define ICE_PROG_FLG_ENTRY(hw, rxdid, flg_0, flg_1, flg_2, flg_3, idx) \
19 	wr32((hw), GLFLXP_RXDID_FLAGS(rxdid, idx), \
20 	     (((flg_0) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & \
21 	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) | \
22 	     (((flg_1) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) & \
23 	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M) | \
24 	     (((flg_2) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S) & \
25 	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M) | \
26 	     (((flg_3) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S) & \
27 	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M))
28 
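/* For illustration, a hand-expanded sketch of what ICE_PROG_FLEX_ENTRY()
 * produces for word index 0 (the ## token pasting substitutes the literal
 * index into the register and shift/mask names):
 *
 *	wr32(hw, GLFLXP_RXDID_FLX_WRD_0(rxdid),
 *	     ((ICE_RX_OPC_MDID << GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_S) &
 *	      GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_M) |
 *	     (((mdid) << GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S) &
 *	      GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_M));
 *
 * Each flex word thus carries an opcode plus a metadata ID (MDID) that
 * selects what the Rx descriptor builder places in that word.
 */
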
29 /**
30  * ice_set_mac_type - Sets MAC type
31  * @hw: pointer to the HW structure
32  *
33  * This function sets the MAC type of the adapter based on the
34  * vendor ID and device ID stored in the hw structure.
35  */
36 static enum ice_status ice_set_mac_type(struct ice_hw *hw)
37 {
38 	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
39 		return ICE_ERR_DEVICE_NOT_SUPPORTED;
40 
41 	hw->mac_type = ICE_MAC_GENERIC;
42 	return 0;
43 }
44 
45 /**
46  * ice_clear_pf_cfg - Clear PF configuration
47  * @hw: pointer to the hardware structure
48  *
49  * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
50  * configuration, flow director filters, etc.).
51  */
52 enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
53 {
54 	struct ice_aq_desc desc;
55 
56 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
57 
58 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
59 }
60 
61 /**
62  * ice_aq_manage_mac_read - manage MAC address read command
63  * @hw: pointer to the hw struct
64  * @buf: a virtual buffer to hold the manage MAC read response
65  * @buf_size: Size of the virtual buffer
66  * @cd: pointer to command details structure or NULL
67  *
68  * This function is used to return the per-PF station MAC address (0x0107).
69  * NOTE: Upon successful completion of this command, MAC address information
70  * is returned in the user-specified buffer, which should be interpreted as a
71  * "manage_mac_read" response. The returned LAN address is also stored in the
72  * HW struct (port_info->mac). ice_aq_discover_caps is expected to be called
73  * before this function.
74  */
75 static enum ice_status
76 ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
77 		       struct ice_sq_cd *cd)
78 {
79 	struct ice_aqc_manage_mac_read_resp *resp;
80 	struct ice_aqc_manage_mac_read *cmd;
81 	struct ice_aq_desc desc;
82 	enum ice_status status;
83 	u16 flags;
84 	u8 i;
85 
86 	cmd = &desc.params.mac_read;
87 
88 	if (buf_size < sizeof(*resp))
89 		return ICE_ERR_BUF_TOO_SHORT;
90 
91 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
92 
93 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
94 	if (status)
95 		return status;
96 
97 	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
98 	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
99 
100 	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
101 		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
102 		return ICE_ERR_CFG;
103 	}
104 
105 	/* A single port can report up to two (LAN and WoL) addresses */
106 	for (i = 0; i < cmd->num_addr; i++)
107 		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
108 			ether_addr_copy(hw->port_info->mac.lan_addr,
109 					resp[i].mac_addr);
110 			ether_addr_copy(hw->port_info->mac.perm_addr,
111 					resp[i].mac_addr);
112 			break;
113 		}
114 
115 	return 0;
116 }
117 
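/* A minimal caller sketch (hypothetical; see ice_init_hw() below for the
 * real call site). A port can report up to two addresses, so size the
 * buffer for two response entries:
 *
 *	struct ice_aqc_manage_mac_read_resp buf[2];
 *
 *	status = ice_aq_manage_mac_read(hw, buf, sizeof(buf), NULL);
 *
 * On success, the LAN address is also cached in hw->port_info->mac.lan_addr
 * and hw->port_info->mac.perm_addr.
 */
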
118 /**
119  * ice_aq_get_phy_caps - returns PHY capabilities
120  * @pi: port information structure
121  * @qual_mods: report qualified modules
122  * @report_mode: report mode capabilities
123  * @pcaps: structure for PHY capabilities to be filled
124  * @cd: pointer to command details structure or NULL
125  *
126  * Returns the various PHY capabilities supported on the Port (0x0600)
127  */
128 enum ice_status
129 ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
130 		    struct ice_aqc_get_phy_caps_data *pcaps,
131 		    struct ice_sq_cd *cd)
132 {
133 	struct ice_aqc_get_phy_caps *cmd;
134 	u16 pcaps_size = sizeof(*pcaps);
135 	struct ice_aq_desc desc;
136 	enum ice_status status;
137 
138 	cmd = &desc.params.get_phy;
139 
140 	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
141 		return ICE_ERR_PARAM;
142 
143 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
144 
145 	if (qual_mods)
146 		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);
147 
148 	cmd->param0 |= cpu_to_le16(report_mode);
149 	status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);
150 
151 	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP)
152 		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
153 
154 	return status;
155 }
156 
157 /**
158  * ice_get_media_type - Gets media type
159  * @pi: port information structure
160  */
161 static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
162 {
163 	struct ice_link_status *hw_link_info;
164 
165 	if (!pi)
166 		return ICE_MEDIA_UNKNOWN;
167 
168 	hw_link_info = &pi->phy.link_info;
169 
170 	if (hw_link_info->phy_type_low) {
171 		switch (hw_link_info->phy_type_low) {
172 		case ICE_PHY_TYPE_LOW_1000BASE_SX:
173 		case ICE_PHY_TYPE_LOW_1000BASE_LX:
174 		case ICE_PHY_TYPE_LOW_10GBASE_SR:
175 		case ICE_PHY_TYPE_LOW_10GBASE_LR:
176 		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
177 		case ICE_PHY_TYPE_LOW_25GBASE_SR:
178 		case ICE_PHY_TYPE_LOW_25GBASE_LR:
179 		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
180 		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
181 		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
182 			return ICE_MEDIA_FIBER;
183 		case ICE_PHY_TYPE_LOW_100BASE_TX:
184 		case ICE_PHY_TYPE_LOW_1000BASE_T:
185 		case ICE_PHY_TYPE_LOW_2500BASE_T:
186 		case ICE_PHY_TYPE_LOW_5GBASE_T:
187 		case ICE_PHY_TYPE_LOW_10GBASE_T:
188 		case ICE_PHY_TYPE_LOW_25GBASE_T:
189 			return ICE_MEDIA_BASET;
190 		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
191 		case ICE_PHY_TYPE_LOW_25GBASE_CR:
192 		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
193 		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
194 		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
195 			return ICE_MEDIA_DA;
196 		case ICE_PHY_TYPE_LOW_1000BASE_KX:
197 		case ICE_PHY_TYPE_LOW_2500BASE_KX:
198 		case ICE_PHY_TYPE_LOW_2500BASE_X:
199 		case ICE_PHY_TYPE_LOW_5GBASE_KR:
200 		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
201 		case ICE_PHY_TYPE_LOW_25GBASE_KR:
202 		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
203 		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
204 		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
205 			return ICE_MEDIA_BACKPLANE;
206 		}
207 	}
208 
209 	return ICE_MEDIA_UNKNOWN;
210 }
211 
212 /**
213  * ice_aq_get_link_info
214  * @pi: port information structure
215  * @ena_lse: enable/disable LinkStatusEvent reporting
216  * @link: pointer to link status structure - optional
217  * @cd: pointer to command details structure or NULL
218  *
219  * Get Link Status (0x0607). Returns the link status of the adapter.
220  */
221 enum ice_status
222 ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
223 		     struct ice_link_status *link, struct ice_sq_cd *cd)
224 {
225 	struct ice_link_status *hw_link_info_old, *hw_link_info;
226 	struct ice_aqc_get_link_status_data link_data = { 0 };
227 	struct ice_aqc_get_link_status *resp;
228 	enum ice_media_type *hw_media_type;
229 	struct ice_fc_info *hw_fc_info;
230 	bool tx_pause, rx_pause;
231 	struct ice_aq_desc desc;
232 	enum ice_status status;
233 	u16 cmd_flags;
234 
235 	if (!pi)
236 		return ICE_ERR_PARAM;
237 	hw_link_info_old = &pi->phy.link_info_old;
238 	hw_media_type = &pi->phy.media_type;
239 	hw_link_info = &pi->phy.link_info;
240 	hw_fc_info = &pi->fc;
241 
242 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
243 	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
244 	resp = &desc.params.get_link_status;
245 	resp->cmd_flags = cpu_to_le16(cmd_flags);
246 	resp->lport_num = pi->lport;
247 
248 	status = ice_aq_send_cmd(pi->hw, &desc, &link_data, sizeof(link_data),
249 				 cd);
250 
251 	if (status)
252 		return status;
253 
254 	/* save off old link status information */
255 	*hw_link_info_old = *hw_link_info;
256 
257 	/* update current link status information */
258 	hw_link_info->link_speed = le16_to_cpu(link_data.link_speed);
259 	hw_link_info->phy_type_low = le64_to_cpu(link_data.phy_type_low);
260 	*hw_media_type = ice_get_media_type(pi);
261 	hw_link_info->link_info = link_data.link_info;
262 	hw_link_info->an_info = link_data.an_info;
263 	hw_link_info->ext_info = link_data.ext_info;
264 	hw_link_info->max_frame_size = le16_to_cpu(link_data.max_frame_size);
265 	hw_link_info->pacing = link_data.cfg & ICE_AQ_CFG_PACING_M;
266 
267 	/* update fc info */
268 	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
269 	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
270 	if (tx_pause && rx_pause)
271 		hw_fc_info->current_mode = ICE_FC_FULL;
272 	else if (tx_pause)
273 		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
274 	else if (rx_pause)
275 		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
276 	else
277 		hw_fc_info->current_mode = ICE_FC_NONE;
278 
279 	hw_link_info->lse_ena =
280 		!!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));
281 
282 	/* save link status information */
283 	if (link)
284 		*link = *hw_link_info;
285 
286 	/* flag cleared so calling functions don't call AQ again */
287 	pi->phy.get_link_info = false;
288 
289 	return status;
290 }
291 
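/* For reference, the pause-bit decode in ice_aq_get_link_info() above:
 *
 *	tx_pause  rx_pause	current_mode
 *	   1	     1		ICE_FC_FULL
 *	   1	     0		ICE_FC_TX_PAUSE
 *	   0	     1		ICE_FC_RX_PAUSE
 *	   0	     0		ICE_FC_NONE
 */
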
292 /**
293  * ice_init_flex_flags
294  * @hw: pointer to the hardware structure
295  * @prof_id: Rx Descriptor Builder profile ID
296  *
297  * Function to initialize Rx flex flags
298  */
299 static void ice_init_flex_flags(struct ice_hw *hw, enum ice_rxdid prof_id)
300 {
301 	u8 idx = 0;
302 
303 	/* Flex-flag fields (0-2) are programmed with FLG64 bits with layout:
304 	 * flexiflags0[5:0] - TCP flags, is_packet_fragmented, is_packet_UDP_GRE
305 	 * flexiflags1[3:0] - Not used for flag programming
306 	 * flexiflags2[7:0] - Tunnel and VLAN types
307 	 * 2 invalid fields in last index
308 	 */
309 	switch (prof_id) {
310 	/* Rx flex flags are currently programmed for the NIC profiles only.
311 	 * Different flag bit programming configurations can be added per
312 	 * profile as needed.
313 	 */
314 	case ICE_RXDID_FLEX_NIC:
315 	case ICE_RXDID_FLEX_NIC_2:
316 		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_FRG,
317 				   ICE_RXFLG_UDP_GRE, ICE_RXFLG_PKT_DSI,
318 				   ICE_RXFLG_FIN, idx++);
319 		/* flex flag 1 is not used for flexi-flag programming, skipping
320 		 * these four FLG64 bits.
321 		 */
322 		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_SYN, ICE_RXFLG_RST,
323 				   ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++);
324 		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_DSI,
325 				   ICE_RXFLG_PKT_DSI, ICE_RXFLG_EVLAN_x8100,
326 				   ICE_RXFLG_EVLAN_x9100, idx++);
327 		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_VLAN_x8100,
328 				   ICE_RXFLG_TNL_VLAN, ICE_RXFLG_TNL_MAC,
329 				   ICE_RXFLG_TNL0, idx++);
330 		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2,
331 				   ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx);
332 		break;
333 
334 	default:
335 		ice_debug(hw, ICE_DBG_INIT,
336 			  "Flag programming for profile ID %d not supported\n",
337 			  prof_id);
338 	}
339 }
340 
341 /**
342  * ice_init_flex_flds
343  * @hw: pointer to the hardware structure
344  * @prof_id: Rx Descriptor Builder profile ID
345  *
346  * Function to initialize flex descriptors
347  */
348 static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id)
349 {
350 	enum ice_flex_rx_mdid mdid;
351 
352 	switch (prof_id) {
353 	case ICE_RXDID_FLEX_NIC:
354 	case ICE_RXDID_FLEX_NIC_2:
355 		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_LOW, 0);
356 		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_HIGH, 1);
357 		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_FLOW_ID_LOWER, 2);
358 
359 		mdid = (prof_id == ICE_RXDID_FLEX_NIC_2) ?
360 			ICE_RX_MDID_SRC_VSI : ICE_RX_MDID_FLOW_ID_HIGH;
361 
362 		ICE_PROG_FLEX_ENTRY(hw, prof_id, mdid, 3);
363 
364 		ice_init_flex_flags(hw, prof_id);
365 		break;
366 
367 	default:
368 		ice_debug(hw, ICE_DBG_INIT,
369 			  "Field init for profile ID %d not supported\n",
370 			  prof_id);
371 	}
372 }
373 
374 /**
375  * ice_init_fltr_mgmt_struct - initializes filter management list and locks
376  * @hw: pointer to the hw struct
377  */
378 static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
379 {
380 	struct ice_switch_info *sw;
381 
382 	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
383 				       sizeof(*hw->switch_info), GFP_KERNEL);
384 	sw = hw->switch_info;
385 
386 	if (!sw)
387 		return ICE_ERR_NO_MEMORY;
388 
389 	INIT_LIST_HEAD(&sw->vsi_list_map_head);
390 
391 	ice_init_def_sw_recp(hw);
392 
393 	return 0;
394 }
395 
396 /**
397  * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
398  * @hw: pointer to the hw struct
399  */
400 static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
401 {
402 	struct ice_switch_info *sw = hw->switch_info;
403 	struct ice_vsi_list_map_info *v_pos_map;
404 	struct ice_vsi_list_map_info *v_tmp_map;
405 	struct ice_sw_recipe *recps;
406 	u8 i;
407 
408 	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
409 				 list_entry) {
410 		list_del(&v_pos_map->list_entry);
411 		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
412 	}
413 	recps = hw->switch_info->recp_list;
414 	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
415 		struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
416 
417 		recps[i].root_rid = i;
418 		mutex_destroy(&recps[i].filt_rule_lock);
419 		list_for_each_entry_safe(lst_itr, tmp_entry,
420 					 &recps[i].filt_rules, list_entry) {
421 			list_del(&lst_itr->list_entry);
422 			devm_kfree(ice_hw_to_dev(hw), lst_itr);
423 		}
424 	}
425 
426 	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
427 	devm_kfree(ice_hw_to_dev(hw), sw);
428 }
429 
430 #define ICE_FW_LOG_DESC_SIZE(n)	(sizeof(struct ice_aqc_fw_logging_data) + \
431 	(((n) - 1) * sizeof(((struct ice_aqc_fw_logging_data *)0)->entry)))
432 #define ICE_FW_LOG_DESC_SIZE_MAX	\
433 	ICE_FW_LOG_DESC_SIZE(ICE_AQC_FW_LOG_ID_MAX)
434 
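/* A worked example of the size arithmetic above: the "entry" member is
 * already part of struct ice_aqc_fw_logging_data, so only (n - 1) extra
 * entries are appended. Assuming a 2-byte entry, ICE_FW_LOG_DESC_SIZE(3)
 * is sizeof(struct ice_aqc_fw_logging_data) + 4.
 */
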
435 /**
436  * ice_cfg_fw_log - configure FW logging
437  * @hw: pointer to the hw struct
438  * @enable: enable certain FW logging events if true, disable all if false
439  *
440  * This function enables/disables the FW logging via Rx CQ events and a UART
441  * port based on predetermined configurations. FW logging via the Rx CQ can be
442  * enabled/disabled for individual PF's. However, FW logging via the UART can
443  * enabled/disabled for individual PFs. However, FW logging via the UART can
444  *
445  * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
446  * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
447  * before initializing the device.
448  *
449  * When re/configuring FW logging, callers need to update the "cfg" elements of
450  * the hw->fw_log.evnts array with the desired logging event configurations for
451  * modules of interest. When disabling FW logging completely, the callers can
452  * just pass false in the "enable" parameter. On completion, the function will
453  * update the "cur" element of the hw->fw_log.evnts array with the resulting
454  * logging event configurations of the modules that are being re/configured. FW
455  * logging modules that are not part of a reconfiguration operation retain their
456  * previous states.
457  *
458  * Before resetting the device, it is recommended that the driver disable FW
459  * logging before shutting down the control queue. When disabling FW logging
460  * ("enable" = false), the latest configurations of FW logging events stored in
461  * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
462  * a device reset.
463  *
464  * When enabling FW logging to emit log messages via the Rx CQ during the
465  * device's initialization phase, a mechanism alternative to interrupt handlers
466  * needs to be used to extract FW log messages from the Rx CQ periodically and
467  * to prevent the Rx CQ from being full and stalling other types of control
468  * messages from FW to SW. Interrupts are typically disabled during the device's
469  * initialization phase.
470  */
471 static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
472 {
473 	struct ice_aqc_fw_logging_data *data = NULL;
474 	struct ice_aqc_fw_logging *cmd;
475 	enum ice_status status = 0;
476 	u16 i, chgs = 0, len = 0;
477 	struct ice_aq_desc desc;
478 	u8 actv_evnts = 0;
479 	void *buf = NULL;
480 
481 	if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
482 		return 0;
483 
484 	/* Disable FW logging only when the control queue is still responsive */
485 	if (!enable &&
486 	    (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
487 		return 0;
488 
489 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
490 	cmd = &desc.params.fw_logging;
491 
492 	/* Indicate which controls are valid */
493 	if (hw->fw_log.cq_en)
494 		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;
495 
496 	if (hw->fw_log.uart_en)
497 		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;
498 
499 	if (enable) {
500 		/* Fill in an array of entries with FW logging modules and
501 		 * logging events being reconfigured.
502 		 */
503 		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
504 			u16 val;
505 
506 			/* Keep track of enabled event types */
507 			actv_evnts |= hw->fw_log.evnts[i].cfg;
508 
509 			if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
510 				continue;
511 
512 			if (!data) {
513 				data = devm_kzalloc(ice_hw_to_dev(hw),
514 						    ICE_FW_LOG_DESC_SIZE_MAX,
515 						    GFP_KERNEL);
516 				if (!data)
517 					return ICE_ERR_NO_MEMORY;
518 			}
519 
520 			val = i << ICE_AQC_FW_LOG_ID_S;
521 			val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
522 			data->entry[chgs++] = cpu_to_le16(val);
523 		}
524 
525 		/* Only enable FW logging if at least one module is specified.
526 		 * If FW logging is currently enabled but no module is enabled
527 		 * to emit log messages, disable FW logging altogether.
528 		 */
529 		if (actv_evnts) {
530 			/* Leave if there is effectively no change */
531 			if (!chgs)
532 				goto out;
533 
534 			if (hw->fw_log.cq_en)
535 				cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;
536 
537 			if (hw->fw_log.uart_en)
538 				cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;
539 
540 			buf = data;
541 			len = ICE_FW_LOG_DESC_SIZE(chgs);
542 			desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
543 		}
544 	}
545 
546 	status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
547 	if (!status) {
548 		/* Update the current configuration to reflect events enabled.
549 		 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
550 		 * logging mode is enabled for the device. They do not reflect
551 		 * actual modules being enabled to emit log messages. So, their
552 		 * values remain unchanged even when all modules are disabled.
553 		 */
554 		u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;
555 
556 		hw->fw_log.actv_evnts = actv_evnts;
557 		for (i = 0; i < cnt; i++) {
558 			u16 v, m;
559 
560 			if (!enable) {
561 				/* When disabling all FW logging events as part
562 				 * of the device's de-initialization, the original
563 				 * configurations are retained, and can be used
564 				 * to reconfigure FW logging later if the device
565 				 * is re-initialized.
566 				 */
567 				hw->fw_log.evnts[i].cur = 0;
568 				continue;
569 			}
570 
571 			v = le16_to_cpu(data->entry[i]);
572 			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
573 			hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
574 		}
575 	}
576 
577 out:
578 	if (data)
579 		devm_kfree(ice_hw_to_dev(hw), data);
580 
581 	return status;
582 }
583 
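/* A minimal configuration sketch following the notes above; the module ID
 * and event-mask names below are illustrative assumptions, not a recipe:
 *
 *	hw->fw_log.cq_en = true;
 *	hw->fw_log.evnts[ICE_AQC_FW_LOG_ID_GENERAL].cfg =
 *			ICE_AQC_FW_LOG_INFO_EN | ICE_AQC_FW_LOG_ERR_EN;
 *	status = ice_cfg_fw_log(hw, true);
 *
 * and, before a reset, call ice_cfg_fw_log(hw, false) to disable logging
 * while the control queue is still alive.
 */
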
584 /**
585  * ice_output_fw_log
586  * @hw: pointer to the hw struct
587  * @desc: pointer to the AQ message descriptor
588  * @buf: pointer to the buffer accompanying the AQ message
589  *
590  * Formats a FW Log message and outputs it via the standard driver logs.
591  */
592 void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
593 {
594 	ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg Start ]\n");
595 	ice_debug_array(hw, ICE_DBG_AQ_MSG, 16, 1, (u8 *)buf,
596 			le16_to_cpu(desc->datalen));
597 	ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg End ]\n");
598 }
599 
600 /**
601  * ice_init_hw - main hardware initialization routine
602  * @hw: pointer to the hardware structure
603  */
604 enum ice_status ice_init_hw(struct ice_hw *hw)
605 {
606 	struct ice_aqc_get_phy_caps_data *pcaps;
607 	enum ice_status status;
608 	u16 mac_buf_len;
609 	void *mac_buf;
610 
611 	/* Set MAC type based on DeviceID */
612 	status = ice_set_mac_type(hw);
613 	if (status)
614 		return status;
615 
616 	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
617 			 PF_FUNC_RID_FUNC_NUM_M) >>
618 		PF_FUNC_RID_FUNC_NUM_S;
619 
620 	status = ice_reset(hw, ICE_RESET_PFR);
621 	if (status)
622 		return status;
623 
624 	/* set these values to minimum allowed */
625 	hw->itr_gran_200 = ICE_ITR_GRAN_MIN_200;
626 	hw->itr_gran_100 = ICE_ITR_GRAN_MIN_100;
627 	hw->itr_gran_50 = ICE_ITR_GRAN_MIN_50;
628 	hw->itr_gran_25 = ICE_ITR_GRAN_MIN_25;
629 
630 	status = ice_init_all_ctrlq(hw);
631 	if (status)
632 		goto err_unroll_cqinit;
633 
634 	/* Enable FW logging. Not fatal if this fails. */
635 	status = ice_cfg_fw_log(hw, true);
636 	if (status)
637 		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");
638 
639 	status = ice_clear_pf_cfg(hw);
640 	if (status)
641 		goto err_unroll_cqinit;
642 
643 	ice_clear_pxe_mode(hw);
644 
645 	status = ice_init_nvm(hw);
646 	if (status)
647 		goto err_unroll_cqinit;
648 
649 	status = ice_get_caps(hw);
650 	if (status)
651 		goto err_unroll_cqinit;
652 
653 	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
654 				     sizeof(*hw->port_info), GFP_KERNEL);
655 	if (!hw->port_info) {
656 		status = ICE_ERR_NO_MEMORY;
657 		goto err_unroll_cqinit;
658 	}
659 
660 	/* set the back pointer to hw */
661 	hw->port_info->hw = hw;
662 
663 	/* Initialize port_info struct with switch configuration data */
664 	status = ice_get_initial_sw_cfg(hw);
665 	if (status)
666 		goto err_unroll_alloc;
667 
668 	hw->evb_veb = true;
669 
670 	/* Query the allocated resources for tx scheduler */
671 	status = ice_sched_query_res_alloc(hw);
672 	if (status) {
673 		ice_debug(hw, ICE_DBG_SCHED,
674 			  "Failed to get scheduler allocated resources\n");
675 		goto err_unroll_alloc;
676 	}
677 
678 	/* Initialize port_info struct with scheduler data */
679 	status = ice_sched_init_port(hw->port_info);
680 	if (status)
681 		goto err_unroll_sched;
682 
683 	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
684 	if (!pcaps) {
685 		status = ICE_ERR_NO_MEMORY;
686 		goto err_unroll_sched;
687 	}
688 
689 	/* Initialize port_info struct with PHY capabilities */
690 	status = ice_aq_get_phy_caps(hw->port_info, false,
691 				     ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
692 	devm_kfree(ice_hw_to_dev(hw), pcaps);
693 	if (status)
694 		goto err_unroll_sched;
695 
696 	/* Initialize port_info struct with link information */
697 	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
698 	if (status)
699 		goto err_unroll_sched;
700 
701 	/* need a valid SW entry point to build a Tx tree */
702 	if (!hw->sw_entry_point_layer) {
703 		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
704 		status = ICE_ERR_CFG;
705 		goto err_unroll_sched;
706 	}
707 
708 	status = ice_init_fltr_mgmt_struct(hw);
709 	if (status)
710 		goto err_unroll_sched;
711 
712 	/* Get MAC information */
713 	/* A single port can report up to two (LAN and WoL) addresses */
714 	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
715 			       sizeof(struct ice_aqc_manage_mac_read_resp),
716 			       GFP_KERNEL);
717 	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
718 
719 	if (!mac_buf) {
720 		status = ICE_ERR_NO_MEMORY;
721 		goto err_unroll_fltr_mgmt_struct;
722 	}
723 
724 	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
725 	devm_kfree(ice_hw_to_dev(hw), mac_buf);
726 
727 	if (status)
728 		goto err_unroll_fltr_mgmt_struct;
729 
730 	ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC);
731 	ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC_2);
732 
733 	return 0;
734 
735 err_unroll_fltr_mgmt_struct:
736 	ice_cleanup_fltr_mgmt_struct(hw);
737 err_unroll_sched:
738 	ice_sched_cleanup_all(hw);
739 err_unroll_alloc:
740 	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
741 err_unroll_cqinit:
742 	ice_shutdown_all_ctrlq(hw);
743 	return status;
744 }
745 
746 /**
747  * ice_deinit_hw - unroll initialization operations done by ice_init_hw
748  * @hw: pointer to the hardware structure
749  */
750 void ice_deinit_hw(struct ice_hw *hw)
751 {
752 	ice_cleanup_fltr_mgmt_struct(hw);
753 
754 	ice_sched_cleanup_all(hw);
755 
756 	if (hw->port_info) {
757 		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
758 		hw->port_info = NULL;
759 	}
760 
761 	/* Attempt to disable FW logging before shutting down control queues */
762 	ice_cfg_fw_log(hw, false);
763 	ice_shutdown_all_ctrlq(hw);
764 }
765 
766 /**
767  * ice_check_reset - Check to see if a global reset is complete
768  * @hw: pointer to the hardware structure
769  */
770 enum ice_status ice_check_reset(struct ice_hw *hw)
771 {
772 	u32 cnt, reg = 0, grst_delay;
773 
774 	/* Poll for Device Active state in case a recent CORER, GLOBR,
775 	 * or EMPR has occurred. The grst delay value is in 100ms units.
776 	 * Add 1sec for outstanding AQ commands that can take a long time.
777 	 */
778 	grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
779 		      GLGEN_RSTCTL_GRSTDEL_S) + 10;
780 
781 	for (cnt = 0; cnt < grst_delay; cnt++) {
782 		mdelay(100);
783 		reg = rd32(hw, GLGEN_RSTAT);
784 		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
785 			break;
786 	}
787 
788 	if (cnt == grst_delay) {
789 		ice_debug(hw, ICE_DBG_INIT,
790 			  "Global reset polling failed to complete.\n");
791 		return ICE_ERR_RESET_FAILED;
792 	}
793 
794 #define ICE_RESET_DONE_MASK	(GLNVM_ULD_CORER_DONE_M | \
795 				 GLNVM_ULD_GLOBR_DONE_M)
796 
797 	/* Device is Active; check Global Reset processes are done */
798 	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
799 		reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK;
800 		if (reg == ICE_RESET_DONE_MASK) {
801 			ice_debug(hw, ICE_DBG_INIT,
802 				  "Global reset processes done. %d\n", cnt);
803 			break;
804 		}
805 		mdelay(10);
806 	}
807 
808 	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
809 		ice_debug(hw, ICE_DBG_INIT,
810 			  "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
811 			  reg);
812 		return ICE_ERR_RESET_FAILED;
813 	}
814 
815 	return 0;
816 }
817 
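/* A worked example of the delay arithmetic in ice_check_reset(): a
 * GRSTDEL field value of 35 means 3.5 s (the field is in 100 ms units);
 * adding 10 for outstanding AQ commands gives grst_delay = 45, i.e. up to
 * 45 iterations of the 100 ms poll loop (4.5 s) before the reset is
 * declared failed.
 */
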
818 /**
819  * ice_pf_reset - Reset the PF
820  * @hw: pointer to the hardware structure
821  *
822  * If a global reset has been triggered, this function checks
823  * for its completion and then issues the PF reset
824  */
825 static enum ice_status ice_pf_reset(struct ice_hw *hw)
826 {
827 	u32 cnt, reg;
828 
829 	/* If at function entry a global reset was already in progress, i.e.
830 	 * state is not 'device active' or any of the reset done bits are not
831 	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
832 	 * global reset is done.
833 	 */
834 	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
835 	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
836 		/* poll on global reset currently in progress until done */
837 		if (ice_check_reset(hw))
838 			return ICE_ERR_RESET_FAILED;
839 
840 		return 0;
841 	}
842 
843 	/* Reset the PF */
844 	reg = rd32(hw, PFGEN_CTRL);
845 
846 	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
847 
848 	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
849 		reg = rd32(hw, PFGEN_CTRL);
850 		if (!(reg & PFGEN_CTRL_PFSWR_M))
851 			break;
852 
853 		mdelay(1);
854 	}
855 
856 	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
857 		ice_debug(hw, ICE_DBG_INIT,
858 			  "PF reset polling failed to complete.\n");
859 		return ICE_ERR_RESET_FAILED;
860 	}
861 
862 	return 0;
863 }
864 
865 /**
866  * ice_reset - Perform different types of reset
867  * @hw: pointer to the hardware structure
868  * @req: reset request
869  *
870  * This function triggers a reset as specified by the req parameter.
871  *
872  * Note:
873  * If anything other than a PF reset is triggered, PXE mode is restored.
874  * This has to be cleared using ice_clear_pxe_mode again, once the AQ
875  * interface has been restored in the rebuild flow.
876  */
877 enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
878 {
879 	u32 val = 0;
880 
881 	switch (req) {
882 	case ICE_RESET_PFR:
883 		return ice_pf_reset(hw);
884 	case ICE_RESET_CORER:
885 		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
886 		val = GLGEN_RTRIG_CORER_M;
887 		break;
888 	case ICE_RESET_GLOBR:
889 		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
890 		val = GLGEN_RTRIG_GLOBR_M;
891 		break;
892 	default:
893 		return ICE_ERR_PARAM;
894 	}
895 
896 	val |= rd32(hw, GLGEN_RTRIG);
897 	wr32(hw, GLGEN_RTRIG, val);
898 	ice_flush(hw);
899 
900 	/* wait for the FW to be ready */
901 	return ice_check_reset(hw);
902 }
903 
904 /**
905  * ice_copy_rxq_ctx_to_hw
906  * @hw: pointer to the hardware structure
907  * @ice_rxq_ctx: pointer to the rxq context
908  * @rxq_index: the index of the rx queue
909  *
910  * Copies rxq context from dense structure to hw register space
911  */
912 static enum ice_status
913 ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
914 {
915 	u8 i;
916 
917 	if (!ice_rxq_ctx)
918 		return ICE_ERR_BAD_PTR;
919 
920 	if (rxq_index > QRX_CTRL_MAX_INDEX)
921 		return ICE_ERR_PARAM;
922 
923 	/* Copy each dword separately to hw */
924 	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
925 		wr32(hw, QRX_CONTEXT(i, rxq_index),
926 		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
927 
928 		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
929 			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
930 	}
931 
932 	return 0;
933 }
934 
935 /* LAN Rx Queue Context */
936 static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
937 	/* Field		Width	LSB */
938 	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
939 	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
940 	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
941 	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
942 	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
943 	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
944 	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
945 	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
946 	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
947 	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
948 	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
949 	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
950 	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
951 	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
952 	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
953 	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
954 	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
955 	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
956 	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
957 	{ 0 }
958 };
959 
960 /**
961  * ice_write_rxq_ctx
962  * @hw: pointer to the hardware structure
963  * @rlan_ctx: pointer to the rxq context
964  * @rxq_index: the index of the rx queue
965  *
966  * Converts rxq context from sparse to dense structure and then writes
967  * it to hw register space
968  */
969 enum ice_status
970 ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
971 		  u32 rxq_index)
972 {
973 	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
974 
975 	ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
976 	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
977 }
978 
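/* A minimal usage sketch (hypothetical values): ice_set_ctx() packs each
 * field at the (width, LSB) position given in ice_rlan_ctx_info[], and
 * ice_copy_rxq_ctx_to_hw() then writes the dense image dword by dword:
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = ring_dma >> 7;	(128-byte-aligned base, assumed)
 *	rlan_ctx.qlen = 512;		(ring length; a 13-bit field)
 *	status = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 */
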
979 /* LAN Tx Queue Context */
980 const struct ice_ctx_ele ice_tlan_ctx_info[] = {
981 				    /* Field			Width	LSB */
982 	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
983 	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
984 	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
985 	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
986 	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
987 	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
988 	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
989 	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
990 	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
991 	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
992 	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
993 	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
994 	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
995 	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
996 	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
997 	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
998 	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
999 	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
1000 	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
1001 	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
1002 	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
1003 	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
1004 	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
1005 	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
1006 	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
1007 	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
1008 	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		110,	171),
1009 	{ 0 }
1010 };
1011 
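/* Note on the packing tables: a field may cross byte and dword boundaries
 * in the dense image (e.g. "base" above occupies bits 0..56), and
 * ice_set_ctx() is expected to split such fields across the affected
 * bytes when it serializes the sparse context structure.
 */
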
1012 /**
1013  * ice_debug_cq
1014  * @hw: pointer to the hardware structure
1015  * @mask: debug mask
1016  * @desc: pointer to control queue descriptor
1017  * @buf: pointer to command buffer
1018  * @buf_len: max length of buf
1019  *
1020  * Dumps debug log about control command with descriptor contents.
1021  */
1022 void ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc,
1023 		  void *buf, u16 buf_len)
1024 {
1025 	struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
1026 	u16 len;
1027 
1028 #ifndef CONFIG_DYNAMIC_DEBUG
1029 	if (!(mask & hw->debug_mask))
1030 		return;
1031 #endif
1032 
1033 	if (!desc)
1034 		return;
1035 
1036 	len = le16_to_cpu(cq_desc->datalen);
1037 
1038 	ice_debug(hw, mask,
1039 		  "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
1040 		  le16_to_cpu(cq_desc->opcode),
1041 		  le16_to_cpu(cq_desc->flags),
1042 		  le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
1043 	ice_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
1044 		  le32_to_cpu(cq_desc->cookie_high),
1045 		  le32_to_cpu(cq_desc->cookie_low));
1046 	ice_debug(hw, mask, "\tparam (0,1)  0x%08X 0x%08X\n",
1047 		  le32_to_cpu(cq_desc->params.generic.param0),
1048 		  le32_to_cpu(cq_desc->params.generic.param1));
1049 	ice_debug(hw, mask, "\taddr (h,l)   0x%08X 0x%08X\n",
1050 		  le32_to_cpu(cq_desc->params.generic.addr_high),
1051 		  le32_to_cpu(cq_desc->params.generic.addr_low));
1052 	if (buf && cq_desc->datalen != 0) {
1053 		ice_debug(hw, mask, "Buffer:\n");
1054 		if (buf_len < len)
1055 			len = buf_len;
1056 
1057 		ice_debug_array(hw, mask, 16, 1, (u8 *)buf, len);
1058 	}
1059 }
1060 
1061 /* FW Admin Queue command wrappers */
1062 
1063 /**
1064  * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
1065  * @hw: pointer to the hw struct
1066  * @desc: descriptor describing the command
1067  * @buf: buffer to use for indirect commands (NULL for direct commands)
1068  * @buf_size: size of buffer for indirect commands (0 for direct commands)
1069  * @cd: pointer to command details structure
1070  *
1071  * Helper function to send FW Admin Queue commands to the FW Admin Queue.
1072  */
1073 enum ice_status
1074 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1075 		u16 buf_size, struct ice_sq_cd *cd)
1076 {
1077 	return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
1078 }
1079 
1080 /**
1081  * ice_aq_get_fw_ver
1082  * @hw: pointer to the hw struct
1083  * @cd: pointer to command details structure or NULL
1084  *
1085  * Get the firmware version (0x0001) from the admin queue commands
1086  */
1087 enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1088 {
1089 	struct ice_aqc_get_ver *resp;
1090 	struct ice_aq_desc desc;
1091 	enum ice_status status;
1092 
1093 	resp = &desc.params.get_ver;
1094 
1095 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1096 
1097 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1098 
1099 	if (!status) {
1100 		hw->fw_branch = resp->fw_branch;
1101 		hw->fw_maj_ver = resp->fw_major;
1102 		hw->fw_min_ver = resp->fw_minor;
1103 		hw->fw_patch = resp->fw_patch;
1104 		hw->fw_build = le32_to_cpu(resp->fw_build);
1105 		hw->api_branch = resp->api_branch;
1106 		hw->api_maj_ver = resp->api_major;
1107 		hw->api_min_ver = resp->api_minor;
1108 		hw->api_patch = resp->api_patch;
1109 	}
1110 
1111 	return status;
1112 }
1113 
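/* A minimal usage sketch: on success, the version fields are cached in
 * the hw struct, e.g.
 *
 *	if (!ice_aq_get_fw_ver(hw, NULL))
 *		dev_info(ice_hw_to_dev(hw), "fw %u.%u.%u api %u.%u\n",
 *			 hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch,
 *			 hw->api_maj_ver, hw->api_min_ver);
 */
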
1114 /**
1115  * ice_aq_q_shutdown
1116  * @hw: pointer to the hw struct
1117  * @unloading: is the driver unloading itself
1118  *
1119  * Tell the Firmware that we're shutting down the AdminQ and whether
1120  * or not the driver is unloading as well (0x0003).
1121  */
1122 enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
1123 {
1124 	struct ice_aqc_q_shutdown *cmd;
1125 	struct ice_aq_desc desc;
1126 
1127 	cmd = &desc.params.q_shutdown;
1128 
1129 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1130 
1131 	if (unloading)
1132 		cmd->driver_unloading = cpu_to_le32(ICE_AQC_DRIVER_UNLOADING);
1133 
1134 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1135 }
1136 
1137 /**
1138  * ice_aq_req_res
1139  * @hw: pointer to the hw struct
1140  * @res: resource id
1141  * @access: access type
1142  * @sdp_number: resource number
1143  * @timeout: the maximum time in ms that the driver may hold the resource
1144  * @cd: pointer to command details structure or NULL
1145  *
1146  * Requests common resource using the admin queue commands (0x0008).
1147  * When attempting to acquire the Global Config Lock, the driver can
1148  * learn of three states:
1149  *  1) ICE_SUCCESS -        acquired lock, and can perform download package
1150  *  2) ICE_ERR_AQ_ERROR -   did not get lock, driver should fail to load
1151  *  3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
1152  *                          successfully downloaded the package; the driver does
1153  *                          not have to download the package and can continue
1154  *                          loading
1155  *
1156  * Note that if the caller is in an acquire-lock/perform-action/release-lock
1157  * sequence, it is possible that the FW may detect a timeout and issue
1158  * a CORER. In this case, the driver will receive a CORER interrupt and will
1159  * have to determine its cause. The calling thread that is handling this flow
1160  * will likely get an error propagated back to it indicating the Download
1161  * Package, Update Package or the Release Resource AQ commands timed out.
1162  */
1163 static enum ice_status
1164 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1165 	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
1166 	       struct ice_sq_cd *cd)
1167 {
1168 	struct ice_aqc_req_res *cmd_resp;
1169 	struct ice_aq_desc desc;
1170 	enum ice_status status;
1171 
1172 	cmd_resp = &desc.params.res_owner;
1173 
1174 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
1175 
1176 	cmd_resp->res_id = cpu_to_le16(res);
1177 	cmd_resp->access_type = cpu_to_le16(access);
1178 	cmd_resp->res_number = cpu_to_le32(sdp_number);
1179 	cmd_resp->timeout = cpu_to_le32(*timeout);
1180 	*timeout = 0;
1181 
1182 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1183 
1184 	/* The completion's Timeout field specifies the maximum time in ms
1185 	 * that the driver may hold the resource.
1186 	 */
1187 
1188 	/* Global config lock response utilizes an additional status field.
1189 	 *
1190 	 * If the Global config lock resource is held by some other driver, the
1191 	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
1192 	 * and the timeout field indicates the maximum time the current owner
1193 	 * of the resource has to free it.
1194 	 */
1195 	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1196 		if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1197 			*timeout = le32_to_cpu(cmd_resp->timeout);
1198 			return 0;
1199 		} else if (le16_to_cpu(cmd_resp->status) ==
1200 			   ICE_AQ_RES_GLBL_IN_PROG) {
1201 			*timeout = le32_to_cpu(cmd_resp->timeout);
1202 			return ICE_ERR_AQ_ERROR;
1203 		} else if (le16_to_cpu(cmd_resp->status) ==
1204 			   ICE_AQ_RES_GLBL_DONE) {
1205 			return ICE_ERR_AQ_NO_WORK;
1206 		}
1207 
1208 		/* invalid FW response, force a timeout immediately */
1209 		*timeout = 0;
1210 		return ICE_ERR_AQ_ERROR;
1211 	}
1212 
1213 	/* If the resource is held by some other driver, the command completes
1214 	 * with a busy return value and the timeout field indicates the maximum
1215 	 * time the current owner of the resource has to free it.
1216 	 */
1217 	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1218 		*timeout = le32_to_cpu(cmd_resp->timeout);
1219 
1220 	return status;
1221 }
1222 
1223 /**
1224  * ice_aq_release_res
1225  * @hw: pointer to the hw struct
1226  * @res: resource id
1227  * @sdp_number: resource number
1228  * @cd: pointer to command details structure or NULL
1229  *
1230  * release common resource using the admin queue commands (0x0009)
1231  */
1232 static enum ice_status
1233 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1234 		   struct ice_sq_cd *cd)
1235 {
1236 	struct ice_aqc_req_res *cmd;
1237 	struct ice_aq_desc desc;
1238 
1239 	cmd = &desc.params.res_owner;
1240 
1241 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1242 
1243 	cmd->res_id = cpu_to_le16(res);
1244 	cmd->res_number = cpu_to_le32(sdp_number);
1245 
1246 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1247 }
1248 
1249 /**
1250  * ice_acquire_res
1251  * @hw: pointer to the HW structure
1252  * @res: resource id
1253  * @access: access type (read or write)
1254  * @timeout: timeout in milliseconds
1255  *
1256  * This function will attempt to acquire the ownership of a resource.
1257  */
1258 enum ice_status
1259 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1260 		enum ice_aq_res_access_type access, u32 timeout)
1261 {
1262 #define ICE_RES_POLLING_DELAY_MS	10
1263 	u32 delay = ICE_RES_POLLING_DELAY_MS;
1264 	u32 time_left = timeout;
1265 	enum ice_status status;
1266 
1267 	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1268 
1269 	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
1270 	 * previously acquired the resource and performed any necessary updates;
1271 	 * in this case the caller does not obtain the resource and has no
1272 	 * further work to do.
1273 	 */
1274 	if (status == ICE_ERR_AQ_NO_WORK)
1275 		goto ice_acquire_res_exit;
1276 
1277 	if (status)
1278 		ice_debug(hw, ICE_DBG_RES,
1279 			  "resource %d acquire type %d failed.\n", res, access);
1280 
1281 	/* If necessary, poll until the current lock owner times out */
1282 	timeout = time_left;
1283 	while (status && timeout && time_left) {
1284 		mdelay(delay);
1285 		timeout = (timeout > delay) ? timeout - delay : 0;
1286 		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1287 
1288 		if (status == ICE_ERR_AQ_NO_WORK)
1289 			/* lock free, but no work to do */
1290 			break;
1291 
1292 		if (!status)
1293 			/* lock acquired */
1294 			break;
1295 	}
1296 	if (status && status != ICE_ERR_AQ_NO_WORK)
1297 		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
1298 
1299 ice_acquire_res_exit:
1300 	if (status == ICE_ERR_AQ_NO_WORK) {
1301 		if (access == ICE_RES_WRITE)
1302 			ice_debug(hw, ICE_DBG_RES,
1303 				  "resource indicates no work to do.\n");
1304 		else
1305 			ice_debug(hw, ICE_DBG_RES,
1306 				  "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
1307 	}
1308 	return status;
1309 }
1310 
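/* A caller sketch for the three Global Config Lock outcomes described at
 * ice_aq_req_res(); the 5000 ms timeout is an illustrative assumption:
 *
 *	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID,
 *				 ICE_RES_WRITE, 5000);
 *	if (!status)
 *		lock acquired: download the package, then release the lock
 *	else if (status == ICE_ERR_AQ_NO_WORK)
 *		another driver already downloaded the package; continue
 *	else
 *		failed to acquire the lock; fail the driver load
 */
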
1311 /**
1312  * ice_release_res
1313  * @hw: pointer to the HW structure
1314  * @res: resource id
1315  *
1316  * This function will release a resource using the proper Admin Command.
1317  */
1318 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
1319 {
1320 	enum ice_status status;
1321 	u32 total_delay = 0;
1322 
1323 	status = ice_aq_release_res(hw, res, 0, NULL);
1324 
1325 	/* In some rare cases, trying to release the resource results in an
1326 	 * admin queue timeout; retry until the AQ command timeout elapses
1327 	 */
1328 	while ((status == ICE_ERR_AQ_TIMEOUT) &&
1329 	       (total_delay < hw->adminq.sq_cmd_timeout)) {
1330 		mdelay(1);
1331 		status = ice_aq_release_res(hw, res, 0, NULL);
1332 		total_delay++;
1333 	}
1334 }
1335 
1336 /**
1337  * ice_parse_caps - parse function/device capabilities
1338  * @hw: pointer to the hw struct
1339  * @buf: pointer to a buffer containing function/device capability records
1340  * @cap_count: number of capability records in the list
1341  * @opc: type of capabilities list to parse
1342  *
1343  * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
1344  */
1345 static void
1346 ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
1347 	       enum ice_adminq_opc opc)
1348 {
1349 	struct ice_aqc_list_caps_elem *cap_resp;
1350 	struct ice_hw_func_caps *func_p = NULL;
1351 	struct ice_hw_dev_caps *dev_p = NULL;
1352 	struct ice_hw_common_caps *caps;
1353 	u32 i;
1354 
1355 	if (!buf)
1356 		return;
1357 
1358 	cap_resp = (struct ice_aqc_list_caps_elem *)buf;
1359 
1360 	if (opc == ice_aqc_opc_list_dev_caps) {
1361 		dev_p = &hw->dev_caps;
1362 		caps = &dev_p->common_cap;
1363 	} else if (opc == ice_aqc_opc_list_func_caps) {
1364 		func_p = &hw->func_caps;
1365 		caps = &func_p->common_cap;
1366 	} else {
1367 		ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
1368 		return;
1369 	}
1370 
1371 	for (i = 0; caps && i < cap_count; i++, cap_resp++) {
1372 		u32 logical_id = le32_to_cpu(cap_resp->logical_id);
1373 		u32 phys_id = le32_to_cpu(cap_resp->phys_id);
1374 		u32 number = le32_to_cpu(cap_resp->number);
1375 		u16 cap = le16_to_cpu(cap_resp->cap);
1376 
1377 		switch (cap) {
1378 		case ICE_AQC_CAPS_VSI:
1379 			if (dev_p) {
1380 				dev_p->num_vsi_allocd_to_host = number;
1381 				ice_debug(hw, ICE_DBG_INIT,
1382 					  "HW caps: Dev.VSI cnt = %d\n",
1383 					  dev_p->num_vsi_allocd_to_host);
1384 			} else if (func_p) {
1385 				func_p->guaranteed_num_vsi = number;
1386 				ice_debug(hw, ICE_DBG_INIT,
1387 					  "HW caps: Func.VSI cnt = %d\n",
1388 					  func_p->guaranteed_num_vsi);
1389 			}
1390 			break;
1391 		case ICE_AQC_CAPS_RSS:
1392 			caps->rss_table_size = number;
1393 			caps->rss_table_entry_width = logical_id;
1394 			ice_debug(hw, ICE_DBG_INIT,
1395 				  "HW caps: RSS table size = %d\n",
1396 				  caps->rss_table_size);
1397 			ice_debug(hw, ICE_DBG_INIT,
1398 				  "HW caps: RSS table width = %d\n",
1399 				  caps->rss_table_entry_width);
1400 			break;
1401 		case ICE_AQC_CAPS_RXQS:
1402 			caps->num_rxq = number;
1403 			caps->rxq_first_id = phys_id;
1404 			ice_debug(hw, ICE_DBG_INIT,
1405 				  "HW caps: Num Rx Qs = %d\n", caps->num_rxq);
1406 			ice_debug(hw, ICE_DBG_INIT,
1407 				  "HW caps: Rx first queue ID = %d\n",
1408 				  caps->rxq_first_id);
1409 			break;
1410 		case ICE_AQC_CAPS_TXQS:
1411 			caps->num_txq = number;
1412 			caps->txq_first_id = phys_id;
1413 			ice_debug(hw, ICE_DBG_INIT,
1414 				  "HW caps: Num Tx Qs = %d\n", caps->num_txq);
1415 			ice_debug(hw, ICE_DBG_INIT,
1416 				  "HW caps: Tx first queue ID = %d\n",
1417 				  caps->txq_first_id);
1418 			break;
1419 		case ICE_AQC_CAPS_MSIX:
1420 			caps->num_msix_vectors = number;
1421 			caps->msix_vector_first_id = phys_id;
1422 			ice_debug(hw, ICE_DBG_INIT,
1423 				  "HW caps: MSIX vector count = %d\n",
1424 				  caps->num_msix_vectors);
1425 			ice_debug(hw, ICE_DBG_INIT,
1426 				  "HW caps: MSIX first vector index = %d\n",
1427 				  caps->msix_vector_first_id);
1428 			break;
1429 		case ICE_AQC_CAPS_MAX_MTU:
1430 			caps->max_mtu = number;
1431 			if (dev_p)
1432 				ice_debug(hw, ICE_DBG_INIT,
1433 					  "HW caps: Dev.MaxMTU = %d\n",
1434 					  caps->max_mtu);
1435 			else if (func_p)
1436 				ice_debug(hw, ICE_DBG_INIT,
1437 					  "HW caps: func.MaxMTU = %d\n",
1438 					  caps->max_mtu);
1439 			break;
1440 		default:
1441 			ice_debug(hw, ICE_DBG_INIT,
1442 				  "HW caps: Unknown capability[%d]: 0x%x\n", i,
1443 				  cap);
1444 			break;
1445 		}
1446 	}
1447 }
1448 
1449 /**
1450  * ice_aq_discover_caps - query function/device capabilities
1451  * @hw: pointer to the hw struct
1452  * @buf: a virtual buffer to hold the capabilities
1453  * @buf_size: Size of the virtual buffer
1454  * @data_size: Size of the returned data, or buf size needed if AQ err==ENOMEM
1455  * @opc: capabilities type to discover - pass in the command opcode
1456  * @cd: pointer to command details structure or NULL
1457  *
1458  * Get the function(0x000a)/device(0x000b) capabilities description from
1459  * the firmware.
1460  */
1461 static enum ice_status
1462 ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u16 *data_size,
1463 		     enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1464 {
1465 	struct ice_aqc_list_caps *cmd;
1466 	struct ice_aq_desc desc;
1467 	enum ice_status status;
1468 
1469 	cmd = &desc.params.get_cap;
1470 
1471 	if (opc != ice_aqc_opc_list_func_caps &&
1472 	    opc != ice_aqc_opc_list_dev_caps)
1473 		return ICE_ERR_PARAM;
1474 
1475 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
1476 
1477 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1478 	if (!status)
1479 		ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc);
1480 	*data_size = le16_to_cpu(desc.datalen);
1481 
1482 	return status;
1483 }
1484 
1485 /**
1486  * ice_get_caps - get info about the HW
1487  * @hw: pointer to the hardware structure
1488  */
1489 enum ice_status ice_get_caps(struct ice_hw *hw)
1490 {
1491 	enum ice_status status;
1492 	u16 data_size = 0;
1493 	u16 cbuf_len;
1494 	u8 retries;
1495 
1496 	/* The driver doesn't know how many capabilities the device will return
1497 	 * so the buffer size required isn't known ahead of time. The driver
1498 	 * starts with cbuf_len and if this turns out to be insufficient, the
1499 	 * device returns ICE_AQ_RC_ENOMEM and also the buffer size it needs.
1500 	 * The driver then allocates the buffer of this size and retries the
1501 	 * operation. So it follows that the retry count is 2.
1502 	 */
1503 #define ICE_GET_CAP_BUF_COUNT	40
1504 #define ICE_GET_CAP_RETRY_COUNT	2
1505 
1506 	cbuf_len = ICE_GET_CAP_BUF_COUNT *
1507 		sizeof(struct ice_aqc_list_caps_elem);
1508 
1509 	retries = ICE_GET_CAP_RETRY_COUNT;
1510 
1511 	do {
1512 		void *cbuf;
1513 
1514 		cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL);
1515 		if (!cbuf)
1516 			return ICE_ERR_NO_MEMORY;
1517 
1518 		status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &data_size,
1519 					      ice_aqc_opc_list_func_caps, NULL);
1520 		devm_kfree(ice_hw_to_dev(hw), cbuf);
1521 
1522 		if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
1523 			break;
1524 
1525 		/* If ENOMEM is returned, try again with bigger buffer */
1526 		cbuf_len = data_size;
1527 	} while (--retries);
1528 
1529 	return status;
1530 }
1531 
1532 /**
1533  * ice_aq_manage_mac_write - manage MAC address write command
1534  * @hw: pointer to the hw struct
1535  * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
1536  * @flags: flags to control write behavior
1537  * @cd: pointer to command details structure or NULL
1538  *
1539  * This function is used to write MAC address to the NVM (0x0108).
1540  */
1541 enum ice_status
1542 ice_aq_manage_mac_write(struct ice_hw *hw, u8 *mac_addr, u8 flags,
1543 			struct ice_sq_cd *cd)
1544 {
1545 	struct ice_aqc_manage_mac_write *cmd;
1546 	struct ice_aq_desc desc;
1547 
1548 	cmd = &desc.params.mac_write;
1549 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
1550 
1551 	cmd->flags = flags;
1552 
1553 	/* Prep values for flags, sah, sal */
1554 	cmd->sah = htons(*((u16 *)mac_addr));
1555 	cmd->sal = htonl(*((u32 *)(mac_addr + 2)));
1556 
1557 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1558 }
1559 
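/* A worked example of the sah/sal packing above: for the (hypothetical)
 * MAC address 00:1b:21:aa:bb:cc, sah ends up holding 0x001b (the first
 * two bytes in network order) and sal holds 0x21aabbcc (the remaining
 * four bytes in network order).
 */
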
1560 /**
1561  * ice_aq_clear_pxe_mode
1562  * @hw: pointer to the hw struct
1563  *
1564  * Tell the firmware that the driver is taking over from PXE (0x0110).
1565  */
1566 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
1567 {
1568 	struct ice_aq_desc desc;
1569 
1570 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
1571 	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
1572 
1573 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1574 }
1575 
1576 /**
1577  * ice_clear_pxe_mode - clear pxe operations mode
1578  * @hw: pointer to the hw struct
1579  *
1580  * Make sure all PXE mode settings are cleared, including things
1581  * like descriptor fetch/write-back mode.
1582  */
1583 void ice_clear_pxe_mode(struct ice_hw *hw)
1584 {
1585 	if (ice_check_sq_alive(hw, &hw->adminq))
1586 		ice_aq_clear_pxe_mode(hw);
1587 }
1588 
1589 /**
1590  * ice_get_link_speed_based_on_phy_type - returns link speed
1591  * @phy_type_low: lower part of phy_type
1592  *
1593  * This helper function will convert a phy_type_low to its corresponding link
1594  * speed.
1595  * Note: In the structure of phy_type_low, there should be exactly one bit
1596  * set, as this function converts one PHY type to its speed.
1597  * If no bit is set, ICE_LINK_SPEED_UNKNOWN will be returned.
1598  * If more than one bit is set, ICE_LINK_SPEED_UNKNOWN will be returned.
1599  */
1600 static u16
1601 ice_get_link_speed_based_on_phy_type(u64 phy_type_low)
1602 {
1603 	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
1604 
1605 	switch (phy_type_low) {
1606 	case ICE_PHY_TYPE_LOW_100BASE_TX:
1607 	case ICE_PHY_TYPE_LOW_100M_SGMII:
1608 		speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
1609 		break;
1610 	case ICE_PHY_TYPE_LOW_1000BASE_T:
1611 	case ICE_PHY_TYPE_LOW_1000BASE_SX:
1612 	case ICE_PHY_TYPE_LOW_1000BASE_LX:
1613 	case ICE_PHY_TYPE_LOW_1000BASE_KX:
1614 	case ICE_PHY_TYPE_LOW_1G_SGMII:
1615 		speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
1616 		break;
1617 	case ICE_PHY_TYPE_LOW_2500BASE_T:
1618 	case ICE_PHY_TYPE_LOW_2500BASE_X:
1619 	case ICE_PHY_TYPE_LOW_2500BASE_KX:
1620 		speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
1621 		break;
1622 	case ICE_PHY_TYPE_LOW_5GBASE_T:
1623 	case ICE_PHY_TYPE_LOW_5GBASE_KR:
1624 		speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
1625 		break;
1626 	case ICE_PHY_TYPE_LOW_10GBASE_T:
1627 	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
1628 	case ICE_PHY_TYPE_LOW_10GBASE_SR:
1629 	case ICE_PHY_TYPE_LOW_10GBASE_LR:
1630 	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
1631 	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
1632 	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
1633 		speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
1634 		break;
1635 	case ICE_PHY_TYPE_LOW_25GBASE_T:
1636 	case ICE_PHY_TYPE_LOW_25GBASE_CR:
1637 	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
1638 	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
1639 	case ICE_PHY_TYPE_LOW_25GBASE_SR:
1640 	case ICE_PHY_TYPE_LOW_25GBASE_LR:
1641 	case ICE_PHY_TYPE_LOW_25GBASE_KR:
1642 	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
1643 	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
1644 	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
1645 	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
1646 		speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
1647 		break;
1648 	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
1649 	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
1650 	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
1651 	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
1652 	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
1653 	case ICE_PHY_TYPE_LOW_40G_XLAUI:
1654 		speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
1655 		break;
1656 	default:
1657 		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
1658 		break;
1659 	}
1660 
1661 	return speed_phy_type_low;
1662 }
1663 
1664 /**
1665  * ice_update_phy_type
1666  * @phy_type_low: pointer to the lower part of phy_type
1667  * @link_speeds_bitmap: targeted link speeds bitmap
1668  *
1669  * Note: For the format of link_speeds_bitmap, see
1670  * [ice_aqc_get_link_status->link_speed]. The caller can pass in a
1671  * link_speeds_bitmap that includes multiple speeds.
1672  *
1673  * Each bit of phy_type_low represents a certain link speed. This helper
1674  * function turns on the bits in phy_type_low that correspond to the
1675  * speeds requested in the link_speeds_bitmap input parameter.
1676  */
1677 void ice_update_phy_type(u64 *phy_type_low, u16 link_speeds_bitmap)
1678 {
1679 	u16 speed = ICE_AQ_LINK_SPEED_UNKNOWN;
1680 	u64 pt_low;
1681 	int index;
1682 
1683 	/* We first check with low part of phy_type */
1684 	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
1685 		pt_low = BIT_ULL(index);
1686 		speed = ice_get_link_speed_based_on_phy_type(pt_low);
1687 
1688 		if (link_speeds_bitmap & speed)
1689 			*phy_type_low |= BIT_ULL(index);
1690 	}
1691 }
1692 
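/* Illustrative sketch (not from this file): translating a requested set of
 * speeds into a PHY type mask before building a Set PHY config command:
 *
 *	u64 phy_type_low = 0;
 *
 *	ice_update_phy_type(&phy_type_low,
 *			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);
 *	(phy_type_low now has every 10 GB and 25 GB ICE_PHY_TYPE_LOW_* bit set)
 */
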
1693 /**
1694  * ice_aq_set_phy_cfg
1695  * @hw: pointer to the hw struct
1696  * @lport: logical port number
1697  * @cfg: structure with PHY configuration data to be set
1698  * @cd: pointer to command details structure or NULL
1699  *
1700  * Set the various PHY configuration parameters supported on the port.
1701  * One or more of the Set PHY config parameters may be ignored in MFP
1702  * mode, as the PF may not have the privilege to set some of the PHY config
1703  * parameters. Such a status is indicated by the command response (0x0601).
1704  */
1705 enum ice_status
1706 ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
1707 		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
1708 {
1709 	struct ice_aq_desc desc;
1710 
1711 	if (!cfg)
1712 		return ICE_ERR_PARAM;
1713 
1714 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
1715 	desc.params.set_phy.lport_num = lport;
1716 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1717 
1718 	return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
1719 }
1720 
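/* Usage sketch (an assumption of this comment, not taken from the file):
 * callers normally read the current capabilities first and modify only the
 * fields they care about, as ice_set_fc() below does:
 *
 *	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
 *
 *	cfg.phy_type_low = pcaps->phy_type_low;
 *	cfg.caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
 *	status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL);
 */
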
1721 /**
1722  * ice_update_link_info - update status of the HW network link
1723  * @pi: port info structure of the interested logical port
1724  */
1725 static enum ice_status
1726 ice_update_link_info(struct ice_port_info *pi)
1727 {
1728 	struct ice_aqc_get_phy_caps_data *pcaps;
1729 	struct ice_phy_info *phy_info;
1730 	enum ice_status status;
1731 	struct ice_hw *hw;
1732 
1733 	if (!pi)
1734 		return ICE_ERR_PARAM;
1735 
1736 	hw = pi->hw;
1737 
1738 	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
1739 	if (!pcaps)
1740 		return ICE_ERR_NO_MEMORY;
1741 
1742 	phy_info = &pi->phy;
1743 	status = ice_aq_get_link_info(pi, true, NULL, NULL);
1744 	if (status)
1745 		goto out;
1746 
1747 	if (phy_info->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
1748 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
1749 					     pcaps, NULL);
1750 		if (status)
1751 			goto out;
1752 
1753 		memcpy(phy_info->link_info.module_type, &pcaps->module_type,
1754 		       sizeof(phy_info->link_info.module_type));
1755 	}
1756 out:
1757 	devm_kfree(ice_hw_to_dev(hw), pcaps);
1758 	return status;
1759 }
1760 
1761 /**
1762  * ice_set_fc
1763  * @pi: port information structure
1764  * @aq_failures: pointer to status code, specific to ice_set_fc routine
1765  * @ena_auto_link_update: enable automatic link update
1766  *
1767  * Set the requested flow control mode.
1768  */
1769 enum ice_status
1770 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
1771 {
1772 	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
1773 	struct ice_aqc_get_phy_caps_data *pcaps;
1774 	enum ice_status status;
1775 	u8 pause_mask = 0x0;
1776 	struct ice_hw *hw;
1777 
1778 	if (!pi)
1779 		return ICE_ERR_PARAM;
1780 	hw = pi->hw;
1781 	*aq_failures = ICE_SET_FC_AQ_FAIL_NONE;
1782 
1783 	switch (pi->fc.req_mode) {
1784 	case ICE_FC_FULL:
1785 		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
1786 		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
1787 		break;
1788 	case ICE_FC_RX_PAUSE:
1789 		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
1790 		break;
1791 	case ICE_FC_TX_PAUSE:
1792 		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
1793 		break;
1794 	default:
1795 		break;
1796 	}
1797 
1798 	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
1799 	if (!pcaps)
1800 		return ICE_ERR_NO_MEMORY;
1801 
1802 	/* Get the current phy config */
1803 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
1804 				     NULL);
1805 	if (status) {
1806 		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
1807 		goto out;
1808 	}
1809 
1810 	/* clear the old pause settings */
1811 	cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
1812 				   ICE_AQC_PHY_EN_RX_LINK_PAUSE);
1813 	/* set the new capabilities */
1814 	cfg.caps |= pause_mask;
1815 	/* If the capabilities have changed, then set the new config */
1816 	if (cfg.caps != pcaps->caps) {
1817 		int retry_count, retry_max = 10;
1818 
1819 		/* Auto restart link so settings take effect */
1820 		if (ena_auto_link_update)
1821 			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
1822 		/* Copy over all the old settings */
1823 		cfg.phy_type_low = pcaps->phy_type_low;
1824 		cfg.low_power_ctrl = pcaps->low_power_ctrl;
1825 		cfg.eee_cap = pcaps->eee_cap;
1826 		cfg.eeer_value = pcaps->eeer_value;
1827 		cfg.link_fec_opt = pcaps->link_fec_options;
1828 
1829 		status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL);
1830 		if (status) {
1831 			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
1832 			goto out;
1833 		}
1834 
1835 		/* Update the link info
1836 		 * It sometimes takes a really long time for link to
1837 		 * come back from the atomic reset. Thus, we wait a
1838 		 * little bit.
1839 		 */
1840 		for (retry_count = 0; retry_count < retry_max; retry_count++) {
1841 			status = ice_update_link_info(pi);
1842 
1843 			if (!status)
1844 				break;
1845 
1846 			mdelay(100);
1847 		}
1848 
1849 		if (status)
1850 			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
1851 	}
1852 
1853 out:
1854 	devm_kfree(ice_hw_to_dev(hw), pcaps);
1855 	return status;
1856 }
1857 
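/* Usage sketch (hypothetical, error handling abbreviated): request full
 * pause and let the firmware restart the link so the change takes effect:
 *
 *	u8 aq_fail;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	status = ice_set_fc(pi, &aq_fail, true);
 *	(on failure, aq_fail reports which AQ step failed:
 *	 ICE_SET_FC_AQ_FAIL_GET, _SET or _UPDATE)
 */
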
1858 /**
1859  * ice_get_link_status - get status of the HW network link
1860  * @pi: port information structure
1861  * @link_up: pointer to bool (true/false = linkup/linkdown)
1862  *
1863  * Variable link_up is true if the link is up, false if it is down.
1864  * The variable link_up is invalid if the returned status is non-zero. As a
1865  * result of this call, link status reporting becomes enabled.
1866  */
1867 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
1868 {
1869 	struct ice_phy_info *phy_info;
1870 	enum ice_status status = 0;
1871 
1872 	if (!pi || !link_up)
1873 		return ICE_ERR_PARAM;
1874 
1875 	phy_info = &pi->phy;
1876 
1877 	if (phy_info->get_link_info) {
1878 		status = ice_update_link_info(pi);
1879 
1880 		if (status)
1881 			ice_debug(pi->hw, ICE_DBG_LINK,
1882 				  "get link status error, status = %d\n",
1883 				  status);
1884 	}
1885 
1886 	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
1887 
1888 	return status;
1889 }
1890 
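/* Usage sketch (illustrative): since link_up is only valid when the return
 * status is zero, a caller checks both:
 *
 *	bool link_up;
 *
 *	if (!ice_get_link_status(pi, &link_up) && link_up)
 *		(the link is up and the cached link info is fresh)
 */
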
1891 /**
1892  * ice_aq_set_link_restart_an
1893  * @pi: pointer to the port information structure
1894  * @ena_link: if true: enable link, if false: disable link
1895  * @cd: pointer to command details structure or NULL
1896  *
1897  * Sets up the link and restarts the Auto-Negotiation over the link.
1898  */
1899 enum ice_status
1900 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
1901 			   struct ice_sq_cd *cd)
1902 {
1903 	struct ice_aqc_restart_an *cmd;
1904 	struct ice_aq_desc desc;
1905 
1906 	cmd = &desc.params.restart_an;
1907 
1908 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
1909 
1910 	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
1911 	cmd->lport_num = pi->lport;
1912 	if (ena_link)
1913 		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
1914 	else
1915 		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
1916 
1917 	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
1918 }
1919 
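/* Usage sketch (illustrative): administratively take the link down without
 * touching the rest of the PHY configuration:
 *
 *	status = ice_aq_set_link_restart_an(pi, false, NULL);
 */
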
1920 /**
1921  * ice_aq_set_event_mask
1922  * @hw: pointer to the hw struct
1923  * @port_num: port number of the physical function
1924  * @mask: event mask to be set
1925  * @cd: pointer to command details structure or NULL
1926  *
1927  * Set event mask (0x0613)
1928  */
1929 enum ice_status
1930 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
1931 		      struct ice_sq_cd *cd)
1932 {
1933 	struct ice_aqc_set_event_mask *cmd;
1934 	struct ice_aq_desc desc;
1935 
1936 	cmd = &desc.params.set_event_mask;
1937 
1938 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
1939 
1940 	cmd->lport_num = port_num;
1941 
1942 	cmd->event_mask = cpu_to_le16(mask);
1943 
1944 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1945 }
1946 
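/* Usage sketch (assumes the ICE_AQ_LINK_EVENT_UPDOWN flag from
 * ice_adminq_cmd.h; other ICE_AQ_LINK_EVENT_* bits combine the same way):
 *
 *	status = ice_aq_set_event_mask(hw, pi->lport,
 *				       ICE_AQ_LINK_EVENT_UPDOWN, NULL);
 */
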
1947 /**
1948  * __ice_aq_get_set_rss_lut
1949  * @hw: pointer to the hardware structure
1950  * @vsi_id: VSI FW index
1951  * @lut_type: LUT table type
1952  * @lut: pointer to the LUT buffer provided by the caller
1953  * @lut_size: size of the LUT buffer
1954  * @glob_lut_idx: global LUT index
1955  * @set: set true to set the table, false to get the table
1956  *
1957  * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
1958  */
1959 static enum ice_status
1960 __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
1961 			 u16 lut_size, u8 glob_lut_idx, bool set)
1962 {
1963 	struct ice_aqc_get_set_rss_lut *cmd_resp;
1964 	struct ice_aq_desc desc;
1965 	enum ice_status status;
1966 	u16 flags = 0;
1967 
1968 	cmd_resp = &desc.params.get_set_rss_lut;
1969 
1970 	if (set) {
1971 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
1972 		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1973 	} else {
1974 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
1975 	}
1976 
1977 	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
1978 					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
1979 					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
1980 				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);
1981 
1982 	switch (lut_type) {
1983 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
1984 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
1985 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
1986 		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
1987 			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
1988 		break;
1989 	default:
1990 		status = ICE_ERR_PARAM;
1991 		goto ice_aq_get_set_rss_lut_exit;
1992 	}
1993 
1994 	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
1995 		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
1996 			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
1997 
1998 		if (!set)
1999 			goto ice_aq_get_set_rss_lut_send;
2000 	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
2001 		if (!set)
2002 			goto ice_aq_get_set_rss_lut_send;
2003 	} else {
2004 		goto ice_aq_get_set_rss_lut_send;
2005 	}
2006 
2007 	/* LUT size is only valid for Global and PF table types */
2008 	switch (lut_size) {
2009 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
2010 		break;
2011 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
2012 		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
2013 			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
2014 			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
2015 		break;
2016 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
2017 		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
2018 			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
2019 				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
2020 				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
2021 			break;
2022 		}
2023 		/* fall-through */
2024 	default:
2025 		status = ICE_ERR_PARAM;
2026 		goto ice_aq_get_set_rss_lut_exit;
2027 	}
2028 
2029 ice_aq_get_set_rss_lut_send:
2030 	cmd_resp->flags = cpu_to_le16(flags);
2031 	status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
2032 
2033 ice_aq_get_set_rss_lut_exit:
2034 	return status;
2035 }
2036 
2037 /**
2038  * ice_aq_get_rss_lut
2039  * @hw: pointer to the hardware structure
2040  * @vsi_id: VSI FW index
2041  * @lut_type: LUT table type
2042  * @lut: pointer to the LUT buffer provided by the caller
2043  * @lut_size: size of the LUT buffer
2044  *
2045  * get the RSS lookup table, PF or VSI type
2046  */
2047 enum ice_status
2048 ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
2049 		   u16 lut_size)
2050 {
2051 	return __ice_aq_get_set_rss_lut(hw, vsi_id, lut_type, lut, lut_size, 0,
2052 					false);
2053 }
2054 
2055 /**
2056  * ice_aq_set_rss_lut
2057  * @hw: pointer to the hardware structure
2058  * @vsi_id: VSI FW index
2059  * @lut_type: LUT table type
2060  * @lut: pointer to the LUT buffer provided by the caller
2061  * @lut_size: size of the LUT buffer
2062  *
2063  * set the RSS lookup table, PF or VSI type
2064  */
2065 enum ice_status
2066 ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
2067 		   u16 lut_size)
2068 {
2069 	return __ice_aq_get_set_rss_lut(hw, vsi_id, lut_type, lut, lut_size, 0,
2070 					true);
2071 }
2072 
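/* Usage sketch ("vsi_id" and "num_rxq" are hypothetical): spread a
 * 128-entry PF LUT round-robin across the Rx queues:
 *
 *	u8 lut[ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128];
 *	u16 i;
 *
 *	for (i = 0; i < sizeof(lut); i++)
 *		lut[i] = i % num_rxq;
 *	status = ice_aq_set_rss_lut(hw, vsi_id,
 *				    ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
 *				    lut, sizeof(lut));
 */
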
2073 /**
2074  * __ice_aq_get_set_rss_key
2075  * @hw: pointer to the hw struct
2076  * @vsi_id: VSI FW index
2077  * @key: pointer to key info struct
2078  * @set: set true to set the key, false to get the key
2079  *
2080  * get (0x0B04) or set (0x0B02) the RSS key per VSI
2081  */
2082 static enum ice_status
2083 __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
2084 			 struct ice_aqc_get_set_rss_keys *key,
2085 			 bool set)
2086 {
2087 	struct ice_aqc_get_set_rss_key *cmd_resp;
2088 	u16 key_size = sizeof(*key);
2089 	struct ice_aq_desc desc;
2090 
2091 	cmd_resp = &desc.params.get_set_rss_key;
2092 
2093 	if (set) {
2094 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
2095 		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2096 	} else {
2097 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
2098 	}
2099 
2100 	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
2101 					 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
2102 					ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
2103 				       ICE_AQC_GSET_RSS_KEY_VSI_VALID);
2104 
2105 	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
2106 }
2107 
2108 /**
2109  * ice_aq_get_rss_key
2110  * @hw: pointer to the hw struct
2111  * @vsi_id: VSI FW index
2112  * @key: pointer to key info struct
2113  *
2114  * get the RSS key per VSI
2115  */
2116 enum ice_status
2117 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_id,
2118 		   struct ice_aqc_get_set_rss_keys *key)
2119 {
2120 	return __ice_aq_get_set_rss_key(hw, vsi_id, key, false);
2121 }
2122 
2123 /**
2124  * ice_aq_set_rss_key
2125  * @hw: pointer to the hw struct
2126  * @vsi_id: VSI FW index
2127  * @keys: pointer to key info struct
2128  *
2129  * set the RSS key per VSI
2130  */
2131 enum ice_status
2132 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_id,
2133 		   struct ice_aqc_get_set_rss_keys *keys)
2134 {
2135 	return __ice_aq_get_set_rss_key(hw, vsi_id, keys, true);
2136 }
2137 
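/* Usage sketch (assumed, not from this file): fill the key structure from
 * the kernel's global RSS key and program it for a VSI ("vsi_id" is
 * hypothetical):
 *
 *	struct ice_aqc_get_set_rss_keys keys;
 *
 *	netdev_rss_key_fill(&keys, sizeof(keys));
 *	status = ice_aq_set_rss_key(hw, vsi_id, &keys);
 */
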
2138 /**
2139  * ice_aq_add_lan_txq
2140  * @hw: pointer to the hardware structure
2141  * @num_qgrps: Number of added queue groups
2142  * @qg_list: list of queue groups to be added
2143  * @buf_size: size of buffer for indirect command
2144  * @cd: pointer to command details structure or NULL
2145  *
2146  * Add Tx LAN queue (0x0C30)
2147  *
2148  * NOTE:
2149  * Prior to calling add Tx LAN queue, initialize the following as part
2150  * of the Tx queue context: the Completion queue ID (if the queue uses a
2151  * completion queue), the Quanta profile, the Cache profile, and the
2152  * Packet shaper profile.
2153  *
2154  * After the add Tx LAN queue AQ command is completed:
2155  * interrupts should be associated with specific queues.
2156  * Association of a Tx queue to a Doorbell queue is not part of the Add
2157  * LAN Tx queue flow.
2158  */
2159 static enum ice_status
2160 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
2161 		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
2162 		   struct ice_sq_cd *cd)
2163 {
2164 	u16 i, sum_header_size, sum_q_size = 0;
2165 	struct ice_aqc_add_tx_qgrp *list;
2166 	struct ice_aqc_add_txqs *cmd;
2167 	struct ice_aq_desc desc;
2168 
2169 	cmd = &desc.params.add_txqs;
2170 
2171 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
2172 
2173 	if (!qg_list)
2174 		return ICE_ERR_PARAM;
2175 
2176 	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
2177 		return ICE_ERR_PARAM;
2178 
2179 	sum_header_size = num_qgrps *
2180 		(sizeof(*qg_list) - sizeof(*qg_list->txqs));
2181 
2182 	list = qg_list;
2183 	for (i = 0; i < num_qgrps; i++) {
2184 		struct ice_aqc_add_txqs_perq *q = list->txqs;
2185 
2186 		sum_q_size += list->num_txqs * sizeof(*q);
2187 		list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);
2188 	}
2189 
2190 	if (buf_size != (sum_header_size + sum_q_size))
2191 		return ICE_ERR_PARAM;
2192 
2193 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2194 
2195 	cmd->num_qgrps = num_qgrps;
2196 
2197 	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
2198 }
2199 
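/* Worked size check (illustrative): for one group carrying one queue, the
 * expected buf_size reduces to sizeof(struct ice_aqc_add_tx_qgrp), since
 * that structure already embeds one struct ice_aqc_add_txqs_perq:
 *
 *	sum_header_size = 1 * (sizeof(*qg_list) - sizeof(*qg_list->txqs));
 *	sum_q_size	= 1 * sizeof(struct ice_aqc_add_txqs_perq);
 *	buf_size	= sum_header_size + sum_q_size
 *			= sizeof(struct ice_aqc_add_tx_qgrp)
 */
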
2200 /**
2201  * ice_aq_dis_lan_txq
2202  * @hw: pointer to the hardware structure
2203  * @num_qgrps: number of groups in the list
2204  * @qg_list: the list of groups to disable
2205  * @buf_size: the total size of the qg_list buffer in bytes
2206  * @cd: pointer to command details structure or NULL
2207  *
2208  * Disable LAN Tx queue (0x0C31)
2209  */
2210 static enum ice_status
2211 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
2212 		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
2213 		   struct ice_sq_cd *cd)
2214 {
2215 	struct ice_aqc_dis_txqs *cmd;
2216 	struct ice_aq_desc desc;
2217 	u16 i, sz = 0;
2218 
2219 	cmd = &desc.params.dis_txqs;
2220 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
2221 
2222 	if (!qg_list)
2223 		return ICE_ERR_PARAM;
2224 
2225 	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
2226 		return ICE_ERR_PARAM;
2227 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2228 	cmd->num_entries = num_qgrps;
2229 
2230 	for (i = 0; i < num_qgrps; ++i) {
2231 		/* Calculate the size taken up by the queue IDs in this group */
2232 		sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);
2233 
2234 		/* Add the size of the group header */
2235 		sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);
2236 
2237 		/* If the number of queues is even, add 2 bytes of padding */
2238 		if ((qg_list[i].num_qs % 2) == 0)
2239 			sz += 2;
2240 	}
2241 
2242 	if (buf_size != sz)
2243 		return ICE_ERR_PARAM;
2244 
2245 	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
2246 }
2247 
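/* Worked size check (illustrative; the 4-byte alignment rationale is an
 * assumption of this note): with 16-bit queue IDs, an even num_qs needs
 * 2 bytes of padding so each group ends on a 4-byte boundary:
 *
 *	header = sizeof(struct ice_aqc_dis_txq_item) - sizeof(__le16)
 *	num_qs == 1: sz = header + 1 * sizeof(__le16)		(no pad)
 *	num_qs == 2: sz = header + 2 * sizeof(__le16) + 2	(padded)
 */
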
2248 /* End of FW Admin Queue command wrappers */
2249 
2250 /**
2251  * ice_write_byte - write a byte to a packed context structure
2252  * @src_ctx:  the context structure to read from
2253  * @dest_ctx: the context to be written to
2254  * @ce_info:  a description of the struct to be filled
2255  */
2256 static void ice_write_byte(u8 *src_ctx, u8 *dest_ctx,
2257 			   const struct ice_ctx_ele *ce_info)
2258 {
2259 	u8 src_byte, dest_byte, mask;
2260 	u8 *from, *dest;
2261 	u16 shift_width;
2262 
2263 	/* copy from the next struct field */
2264 	from = src_ctx + ce_info->offset;
2265 
2266 	/* prepare the bits and mask */
2267 	shift_width = ce_info->lsb % 8;
2268 	mask = (u8)(BIT(ce_info->width) - 1);
2269 
2270 	src_byte = *from;
2271 	src_byte &= mask;
2272 
2273 	/* shift to correct alignment */
2274 	mask <<= shift_width;
2275 	src_byte <<= shift_width;
2276 
2277 	/* get the current bits from the target bit string */
2278 	dest = dest_ctx + (ce_info->lsb / 8);
2279 
2280 	memcpy(&dest_byte, dest, sizeof(dest_byte));
2281 
2282 	dest_byte &= ~mask;	/* get the bits not changing */
2283 	dest_byte |= src_byte;	/* add in the new bits */
2284 
2285 	/* put it all back */
2286 	memcpy(dest, &dest_byte, sizeof(dest_byte));
2287 }
2288 
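/* Worked example (illustrative): for a field with width = 3 and lsb = 5,
 * shift_width = 5 and mask = 0x07; after shifting, mask = 0xE0, so only
 * bits 7:5 of the destination byte are replaced:
 *
 *	src_byte = *from & 0x07;			(keep 3 source bits)
 *	src_byte <<= 5;					(align to bit 5)
 *	dest_byte = (dest_byte & ~0xE0) | src_byte;
 */
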
2289 /**
2290  * ice_write_word - write a word to a packed context structure
2291  * @src_ctx:  the context structure to read from
2292  * @dest_ctx: the context to be written to
2293  * @ce_info:  a description of the struct to be filled
2294  */
2295 static void ice_write_word(u8 *src_ctx, u8 *dest_ctx,
2296 			   const struct ice_ctx_ele *ce_info)
2297 {
2298 	u16 src_word, mask;
2299 	__le16 dest_word;
2300 	u8 *from, *dest;
2301 	u16 shift_width;
2302 
2303 	/* copy from the next struct field */
2304 	from = src_ctx + ce_info->offset;
2305 
2306 	/* prepare the bits and mask */
2307 	shift_width = ce_info->lsb % 8;
2308 	mask = BIT(ce_info->width) - 1;
2309 
2310 	/* don't swizzle the bits until after the mask because the mask bits
2311 	 * will be in a different bit position on big endian machines
2312 	 */
2313 	src_word = *(u16 *)from;
2314 	src_word &= mask;
2315 
2316 	/* shift to correct alignment */
2317 	mask <<= shift_width;
2318 	src_word <<= shift_width;
2319 
2320 	/* get the current bits from the target bit string */
2321 	dest = dest_ctx + (ce_info->lsb / 8);
2322 
2323 	memcpy(&dest_word, dest, sizeof(dest_word));
2324 
2325 	dest_word &= ~(cpu_to_le16(mask));	/* get the bits not changing */
2326 	dest_word |= cpu_to_le16(src_word);	/* add in the new bits */
2327 
2328 	/* put it all back */
2329 	memcpy(dest, &dest_word, sizeof(dest_word));
2330 }
2331 
2332 /**
2333  * ice_write_dword - write a dword to a packed context structure
2334  * @src_ctx:  the context structure to read from
2335  * @dest_ctx: the context to be written to
2336  * @ce_info:  a description of the struct to be filled
2337  */
2338 static void ice_write_dword(u8 *src_ctx, u8 *dest_ctx,
2339 			    const struct ice_ctx_ele *ce_info)
2340 {
2341 	u32 src_dword, mask;
2342 	__le32 dest_dword;
2343 	u8 *from, *dest;
2344 	u16 shift_width;
2345 
2346 	/* copy from the next struct field */
2347 	from = src_ctx + ce_info->offset;
2348 
2349 	/* prepare the bits and mask */
2350 	shift_width = ce_info->lsb % 8;
2351 
2352 	/* if the field width is exactly 32 on an x86 machine, then the shift
2353 	 * operation will not work because the SHL instruction's count is masked
2354 	 * to 5 bits so the shift will do nothing
2355 	 */
2356 	if (ce_info->width < 32)
2357 		mask = BIT(ce_info->width) - 1;
2358 	else
2359 		mask = (u32)~0;
2360 
2361 	/* don't swizzle the bits until after the mask because the mask bits
2362 	 * will be in a different bit position on big endian machines
2363 	 */
2364 	src_dword = *(u32 *)from;
2365 	src_dword &= mask;
2366 
2367 	/* shift to correct alignment */
2368 	mask <<= shift_width;
2369 	src_dword <<= shift_width;
2370 
2371 	/* get the current bits from the target bit string */
2372 	dest = dest_ctx + (ce_info->lsb / 8);
2373 
2374 	memcpy(&dest_dword, dest, sizeof(dest_dword));
2375 
2376 	dest_dword &= ~(cpu_to_le32(mask));	/* get the bits not changing */
2377 	dest_dword |= cpu_to_le32(src_dword);	/* add in the new bits */
2378 
2379 	/* put it all back */
2380 	memcpy(dest, &dest_dword, sizeof(dest_dword));
2381 }
2382 
2383 /**
2384  * ice_write_qword - write a qword to a packed context structure
2385  * @src_ctx:  the context structure to read from
2386  * @dest_ctx: the context to be written to
2387  * @ce_info:  a description of the struct to be filled
2388  */
2389 static void ice_write_qword(u8 *src_ctx, u8 *dest_ctx,
2390 			    const struct ice_ctx_ele *ce_info)
2391 {
2392 	u64 src_qword, mask;
2393 	__le64 dest_qword;
2394 	u8 *from, *dest;
2395 	u16 shift_width;
2396 
2397 	/* copy from the next struct field */
2398 	from = src_ctx + ce_info->offset;
2399 
2400 	/* prepare the bits and mask */
2401 	shift_width = ce_info->lsb % 8;
2402 
2403 	/* if the field width is exactly 64 on an x86 machine, then the shift
2404 	 * operation will not work because the SHL instruction's count is masked
2405 	 * to 6 bits so the shift will do nothing
2406 	 */
2407 	if (ce_info->width < 64)
2408 		mask = BIT_ULL(ce_info->width) - 1;
2409 	else
2410 		mask = (u64)~0;
2411 
2412 	/* don't swizzle the bits until after the mask because the mask bits
2413 	 * will be in a different bit position on big endian machines
2414 	 */
2415 	src_qword = *(u64 *)from;
2416 	src_qword &= mask;
2417 
2418 	/* shift to correct alignment */
2419 	mask <<= shift_width;
2420 	src_qword <<= shift_width;
2421 
2422 	/* get the current bits from the target bit string */
2423 	dest = dest_ctx + (ce_info->lsb / 8);
2424 
2425 	memcpy(&dest_qword, dest, sizeof(dest_qword));
2426 
2427 	dest_qword &= ~(cpu_to_le64(mask));	/* get the bits not changing */
2428 	dest_qword |= cpu_to_le64(src_qword);	/* add in the new bits */
2429 
2430 	/* put it all back */
2431 	memcpy(dest, &dest_qword, sizeof(dest_qword));
2432 }
2433 
2434 /**
2435  * ice_set_ctx - set context bits in packed structure
2436  * @src_ctx:  pointer to a generic non-packed context structure
2437  * @dest_ctx: pointer to memory for the packed structure
2438  * @ce_info:  a description of the structure to be transformed
2439  */
2440 enum ice_status
2441 ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
2442 {
2443 	int f;
2444 
2445 	for (f = 0; ce_info[f].width; f++) {
2446 		/* We have to deal with each element of the FW response
2447 		 * using the correct size so that we are correct regardless
2448 		 * of the endianness of the machine.
2449 		 */
2450 		switch (ce_info[f].size_of) {
2451 		case sizeof(u8):
2452 			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
2453 			break;
2454 		case sizeof(u16):
2455 			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
2456 			break;
2457 		case sizeof(u32):
2458 			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
2459 			break;
2460 		case sizeof(u64):
2461 			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
2462 			break;
2463 		default:
2464 			return ICE_ERR_INVAL_SIZE;
2465 		}
2466 	}
2467 
2468 	return 0;
2469 }
2470 
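/* Illustrative sketch (hypothetical structures, not from this driver): a
 * ce_info table describes each field of the unpacked structure, and
 * ice_set_ctx() walks it until it reaches an entry whose width is zero:
 *
 *	struct my_ctx {
 *		u16 base;
 *		u8 ena;
 *	};
 *
 *	static const struct ice_ctx_ele my_ctx_info[] = {
 *		{ .offset = offsetof(struct my_ctx, base),
 *		  .size_of = sizeof(u16), .width = 13, .lsb = 0 },
 *		{ .offset = offsetof(struct my_ctx, ena),
 *		  .size_of = sizeof(u8), .width = 1, .lsb = 13 },
 *		{ 0 },
 *	};
 *
 *	ice_set_ctx((u8 *)&my_ctx, packed_buf, my_ctx_info);
 */
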
2471 /**
2472  * ice_ena_vsi_txq
2473  * @pi: port information structure
2474  * @vsi_id: VSI id
2475  * @tc: tc number
2476  * @num_qgrps: Number of added queue groups
2477  * @buf: list of queue groups to be added
2478  * @buf_size: size of buffer for indirect command
2479  * @cd: pointer to command details structure or NULL
2480  *
2481  * This function adds one LAN Tx queue.
2482  */
2483 enum ice_status
2484 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_id, u8 tc, u8 num_qgrps,
2485 		struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
2486 		struct ice_sq_cd *cd)
2487 {
2488 	struct ice_aqc_txsched_elem_data node = { 0 };
2489 	struct ice_sched_node *parent;
2490 	enum ice_status status;
2491 	struct ice_hw *hw;
2492 
2493 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
2494 		return ICE_ERR_CFG;
2495 
2496 	if (num_qgrps > 1 || buf->num_txqs > 1)
2497 		return ICE_ERR_MAX_LIMIT;
2498 
2499 	hw = pi->hw;
2500 
2501 	mutex_lock(&pi->sched_lock);
2502 
2503 	/* find a parent node */
2504 	parent = ice_sched_get_free_qparent(pi, vsi_id, tc,
2505 					    ICE_SCHED_NODE_OWNER_LAN);
2506 	if (!parent) {
2507 		status = ICE_ERR_PARAM;
2508 		goto ena_txq_exit;
2509 	}
2510 	buf->parent_teid = parent->info.node_teid;
2511 	node.parent_teid = parent->info.node_teid;
2512 	/* Mark the values in the "generic" section as valid. The default
2513 	 * value in the "generic" section is zero. This means that:
2514 	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
2515 	 * - 0 priority among siblings, indicated by Bits 1-3.
2516 	 * - WFQ, indicated by Bit 4.
2517 	 * - 0 Adjustment value is used in PSM credit update flow, indicated by
2518 	 * Bits 5-6.
2519 	 * - Bit 7 is reserved.
2520 	 * Without setting the generic section as valid in valid_sections, the
2521 	 * Admin Q command will fail with error code ICE_AQ_RC_EINVAL.
2522 	 */
2523 	buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;
2524 
2525 	/* add the LAN Tx queue */
2526 	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
2527 	if (status)
2528 		goto ena_txq_exit;
2529 
2530 	node.node_teid = buf->txqs[0].q_teid;
2531 	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
2532 
2533 	/* add a leaf node into the scheduler tree queue layer */
2534 	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
2535 
2536 ena_txq_exit:
2537 	mutex_unlock(&pi->sched_lock);
2538 	return status;
2539 }
2540 
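/* Usage sketch (hypothetical identifiers, error handling omitted): enable
 * one Tx queue on TC 0. Since struct ice_aqc_add_tx_qgrp embeds one queue
 * entry, sizeof(*qg_buf) is the correct buf_size for a single queue:
 *
 *	struct ice_aqc_add_tx_qgrp *qg_buf;
 *
 *	qg_buf = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*qg_buf), GFP_KERNEL);
 *	qg_buf->num_txqs = 1;
 *	(fill qg_buf->txqs[0], including the queue context, before the call)
 *	status = ice_ena_vsi_txq(pi, vsi_id, 0, 1, qg_buf,
 *				 sizeof(*qg_buf), NULL);
 */
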
2541 /**
2542  * ice_dis_vsi_txq
2543  * @pi: port information structure
2544  * @num_queues: number of queues
2545  * @q_ids: pointer to the q_id array
2546  * @q_teids: pointer to queue node teids
2547  * @cd: pointer to command details structure or NULL
2548  *
2549  * This function removes queues and their corresponding nodes in SW DB
2550  */
2551 enum ice_status
2552 ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
2553 		u32 *q_teids, struct ice_sq_cd *cd)
2554 {
2555 	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
2556 	struct ice_aqc_dis_txq_item qg_list;
2557 	u16 i;
2558 
2559 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
2560 		return ICE_ERR_CFG;
2561 
2562 	mutex_lock(&pi->sched_lock);
2563 
2564 	for (i = 0; i < num_queues; i++) {
2565 		struct ice_sched_node *node;
2566 
2567 		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
2568 		if (!node)
2569 			continue;
2570 		qg_list.parent_teid = node->info.parent_teid;
2571 		qg_list.num_qs = 1;
2572 		qg_list.q_id[0] = cpu_to_le16(q_ids[i]);
2573 		status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
2574 					    sizeof(qg_list), cd);
2575 
2576 		if (status)
2577 			break;
2578 		ice_free_sched_node(pi, node);
2579 	}
2580 	mutex_unlock(&pi->sched_lock);
2581 	return status;
2582 }
2583 
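/* Usage sketch (hypothetical "txq_id"/"q_teid" saved from the enable flow):
 * disable a queue that was enabled via ice_ena_vsi_txq():
 *
 *	u16 q_ids[1] = { txq_id };
 *	u32 q_teids[1] = { q_teid };
 *
 *	status = ice_dis_vsi_txq(pi, 1, q_ids, q_teids, NULL);
 */
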
2584 /**
2585  * ice_cfg_vsi_qs - configure the new/existing VSI queues
2586  * @pi: port information structure
2587  * @vsi_id: VSI Id
2588  * @tc_bitmap: TC bitmap
2589  * @maxqs: max queues array per TC
2590  * @owner: lan or rdma
2591  *
2592  * This function adds/updates the VSI queues per TC.
2593  */
2594 static enum ice_status
2595 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
2596 	       u16 *maxqs, u8 owner)
2597 {
2598 	enum ice_status status = 0;
2599 	u8 i;
2600 
2601 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
2602 		return ICE_ERR_CFG;
2603 
2604 	mutex_lock(&pi->sched_lock);
2605 
2606 	for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
2607 		/* configuration is possible only if TC node is present */
2608 		if (!ice_sched_get_tc_node(pi, i))
2609 			continue;
2610 
2611 		status = ice_sched_cfg_vsi(pi, vsi_id, i, maxqs[i], owner,
2612 					   ice_is_tc_ena(tc_bitmap, i));
2613 		if (status)
2614 			break;
2615 	}
2616 
2617 	mutex_unlock(&pi->sched_lock);
2618 	return status;
2619 }
2620 
2621 /**
2622  * ice_cfg_vsi_lan - configure VSI lan queues
2623  * @pi: port information structure
2624  * @vsi_id: VSI Id
2625  * @tc_bitmap: TC bitmap
2626  * @max_lanqs: max lan queues array per TC
2627  *
2628  * This function adds/updates the VSI lan queues per TC.
2629  */
2630 enum ice_status
2631 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
2632 		u16 *max_lanqs)
2633 {
2634 	return ice_cfg_vsi_qs(pi, vsi_id, tc_bitmap, max_lanqs,
2635 			      ICE_SCHED_NODE_OWNER_LAN);
2636 }
2637
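/* Usage sketch (hypothetical values): give TC 0 up to 16 LAN queues and
 * leave the other TCs unconfigured:
 *
 *	u16 max_lanqs[ICE_MAX_TRAFFIC_CLASS] = { 16 };
 *	u8 tc_bitmap = 0x1;	(bit 0 enables TC 0)
 *
 *	status = ice_cfg_vsi_lan(pi, vsi_id, tc_bitmap, max_lanqs);
 */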