1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 #include "ice_common.h"
5 #include "ice_sched.h"
6 #include "ice_adminq_cmd.h"
7 #include "ice_flow.h"
8 
9 #define ICE_PF_RESET_WAIT_COUNT	300
10 
11 /**
12  * ice_set_mac_type - Sets MAC type
13  * @hw: pointer to the HW structure
14  *
15  * This function sets the MAC type of the adapter based on the
16  * vendor ID and device ID stored in the HW structure.
17  */
18 static enum ice_status ice_set_mac_type(struct ice_hw *hw)
19 {
20 	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
21 		return ICE_ERR_DEVICE_NOT_SUPPORTED;
22 
23 	hw->mac_type = ICE_MAC_GENERIC;
24 	return 0;
25 }
26 
27 /**
28  * ice_clear_pf_cfg - Clear PF configuration
29  * @hw: pointer to the hardware structure
30  *
31  * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
32  * configuration, flow director filters, etc.).
33  */
34 enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
35 {
36 	struct ice_aq_desc desc;
37 
38 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
39 
40 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
41 }
42 
43 /**
44  * ice_aq_manage_mac_read - manage MAC address read command
45  * @hw: pointer to the HW struct
46  * @buf: a virtual buffer to hold the manage MAC read response
47  * @buf_size: Size of the virtual buffer
48  * @cd: pointer to command details structure or NULL
49  *
50  * This function is used to return per PF station MAC address (0x0107).
51  * NOTE: Upon successful completion of this command, MAC address information
52  * is returned in user specified buffer. Please interpret user specified
53  * buffer as "manage_mac_read" response.
54  * Response such as various MAC addresses are stored in HW struct (port.mac)
55  * ice_aq_discover_caps is expected to be called before this function is called.
56  */
57 static enum ice_status
58 ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
59 		       struct ice_sq_cd *cd)
60 {
61 	struct ice_aqc_manage_mac_read_resp *resp;
62 	struct ice_aqc_manage_mac_read *cmd;
63 	struct ice_aq_desc desc;
64 	enum ice_status status;
65 	u16 flags;
66 	u8 i;
67 
68 	cmd = &desc.params.mac_read;
69 
70 	if (buf_size < sizeof(*resp))
71 		return ICE_ERR_BUF_TOO_SHORT;
72 
73 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
74 
75 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
76 	if (status)
77 		return status;
78 
79 	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
80 	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
81 
82 	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
83 		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
84 		return ICE_ERR_CFG;
85 	}
86 
87 	/* A single port can report up to two (LAN and WoL) addresses */
88 	for (i = 0; i < cmd->num_addr; i++)
89 		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
90 			ether_addr_copy(hw->port_info->mac.lan_addr,
91 					resp[i].mac_addr);
92 			ether_addr_copy(hw->port_info->mac.perm_addr,
93 					resp[i].mac_addr);
94 			break;
95 		}
96 
97 	return 0;
98 }
99 
100 /**
101  * ice_aq_get_phy_caps - returns PHY capabilities
102  * @pi: port information structure
103  * @qual_mods: report qualified modules
104  * @report_mode: report mode capabilities
105  * @pcaps: structure for PHY capabilities to be filled
106  * @cd: pointer to command details structure or NULL
107  *
108  * Returns the various PHY capabilities supported on the Port (0x0600)
109  */
110 enum ice_status
111 ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
112 		    struct ice_aqc_get_phy_caps_data *pcaps,
113 		    struct ice_sq_cd *cd)
114 {
115 	struct ice_aqc_get_phy_caps *cmd;
116 	u16 pcaps_size = sizeof(*pcaps);
117 	struct ice_aq_desc desc;
118 	enum ice_status status;
119 
120 	cmd = &desc.params.get_phy;
121 
122 	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
123 		return ICE_ERR_PARAM;
124 
125 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
126 
127 	if (qual_mods)
128 		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);
129 
130 	cmd->param0 |= cpu_to_le16(report_mode);
131 	status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);
132 
133 	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
134 		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
135 		pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
136 	}
137 
138 	return status;
139 }
140 
141 /**
142  * ice_get_media_type - Gets media type
143  * @pi: port information structure
144  */
145 static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
146 {
147 	struct ice_link_status *hw_link_info;
148 
149 	if (!pi)
150 		return ICE_MEDIA_UNKNOWN;
151 
152 	hw_link_info = &pi->phy.link_info;
153 	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
154 		/* If more than one media type is selected, report unknown */
155 		return ICE_MEDIA_UNKNOWN;
156 
157 	if (hw_link_info->phy_type_low) {
158 		switch (hw_link_info->phy_type_low) {
159 		case ICE_PHY_TYPE_LOW_1000BASE_SX:
160 		case ICE_PHY_TYPE_LOW_1000BASE_LX:
161 		case ICE_PHY_TYPE_LOW_10GBASE_SR:
162 		case ICE_PHY_TYPE_LOW_10GBASE_LR:
163 		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
164 		case ICE_PHY_TYPE_LOW_25GBASE_SR:
165 		case ICE_PHY_TYPE_LOW_25GBASE_LR:
166 		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
167 		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
168 		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
169 		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
170 		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
171 		case ICE_PHY_TYPE_LOW_50GBASE_SR:
172 		case ICE_PHY_TYPE_LOW_50GBASE_FR:
173 		case ICE_PHY_TYPE_LOW_50GBASE_LR:
174 		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
175 		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
176 		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
177 		case ICE_PHY_TYPE_LOW_100GBASE_DR:
178 			return ICE_MEDIA_FIBER;
179 		case ICE_PHY_TYPE_LOW_100BASE_TX:
180 		case ICE_PHY_TYPE_LOW_1000BASE_T:
181 		case ICE_PHY_TYPE_LOW_2500BASE_T:
182 		case ICE_PHY_TYPE_LOW_5GBASE_T:
183 		case ICE_PHY_TYPE_LOW_10GBASE_T:
184 		case ICE_PHY_TYPE_LOW_25GBASE_T:
185 			return ICE_MEDIA_BASET;
186 		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
187 		case ICE_PHY_TYPE_LOW_25GBASE_CR:
188 		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
189 		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
190 		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
191 		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
192 		case ICE_PHY_TYPE_LOW_50GBASE_CP:
193 		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
194 		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
195 		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
196 			return ICE_MEDIA_DA;
197 		case ICE_PHY_TYPE_LOW_1000BASE_KX:
198 		case ICE_PHY_TYPE_LOW_2500BASE_KX:
199 		case ICE_PHY_TYPE_LOW_2500BASE_X:
200 		case ICE_PHY_TYPE_LOW_5GBASE_KR:
201 		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
202 		case ICE_PHY_TYPE_LOW_25GBASE_KR:
203 		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
204 		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
205 		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
206 		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
207 		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
208 		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
209 		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
210 			return ICE_MEDIA_BACKPLANE;
211 		}
212 	} else {
213 		switch (hw_link_info->phy_type_high) {
214 		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
215 			return ICE_MEDIA_BACKPLANE;
216 		}
217 	}
218 	return ICE_MEDIA_UNKNOWN;
219 }
220 
221 /**
222  * ice_aq_get_link_info
223  * @pi: port information structure
224  * @ena_lse: enable/disable LinkStatusEvent reporting
225  * @link: pointer to link status structure - optional
226  * @cd: pointer to command details structure or NULL
227  *
228  * Get Link Status (0x607). Returns the link status of the adapter.
229  */
230 enum ice_status
231 ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
232 		     struct ice_link_status *link, struct ice_sq_cd *cd)
233 {
234 	struct ice_aqc_get_link_status_data link_data = { 0 };
235 	struct ice_aqc_get_link_status *resp;
236 	struct ice_link_status *li_old, *li;
237 	enum ice_media_type *hw_media_type;
238 	struct ice_fc_info *hw_fc_info;
239 	bool tx_pause, rx_pause;
240 	struct ice_aq_desc desc;
241 	enum ice_status status;
242 	struct ice_hw *hw;
243 	u16 cmd_flags;
244 
245 	if (!pi)
246 		return ICE_ERR_PARAM;
247 	hw = pi->hw;
248 	li_old = &pi->phy.link_info_old;
249 	hw_media_type = &pi->phy.media_type;
250 	li = &pi->phy.link_info;
251 	hw_fc_info = &pi->fc;
252 
253 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
254 	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
255 	resp = &desc.params.get_link_status;
256 	resp->cmd_flags = cpu_to_le16(cmd_flags);
257 	resp->lport_num = pi->lport;
258 
259 	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
260 
261 	if (status)
262 		return status;
263 
264 	/* save off old link status information */
265 	*li_old = *li;
266 
267 	/* update current link status information */
268 	li->link_speed = le16_to_cpu(link_data.link_speed);
269 	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
270 	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
271 	*hw_media_type = ice_get_media_type(pi);
272 	li->link_info = link_data.link_info;
273 	li->an_info = link_data.an_info;
274 	li->ext_info = link_data.ext_info;
275 	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
276 	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
277 	li->topo_media_conflict = link_data.topo_media_conflict;
278 	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
279 				      ICE_AQ_CFG_PACING_TYPE_M);
280 
281 	/* update fc info */
282 	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
283 	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
284 	if (tx_pause && rx_pause)
285 		hw_fc_info->current_mode = ICE_FC_FULL;
286 	else if (tx_pause)
287 		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
288 	else if (rx_pause)
289 		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
290 	else
291 		hw_fc_info->current_mode = ICE_FC_NONE;
292 
293 	li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));
294 
295 	ice_debug(hw, ICE_DBG_LINK, "link_speed = 0x%x\n", li->link_speed);
296 	ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
297 		  (unsigned long long)li->phy_type_low);
298 	ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
299 		  (unsigned long long)li->phy_type_high);
300 	ice_debug(hw, ICE_DBG_LINK, "media_type = 0x%x\n", *hw_media_type);
301 	ice_debug(hw, ICE_DBG_LINK, "link_info = 0x%x\n", li->link_info);
302 	ice_debug(hw, ICE_DBG_LINK, "an_info = 0x%x\n", li->an_info);
303 	ice_debug(hw, ICE_DBG_LINK, "ext_info = 0x%x\n", li->ext_info);
304 	ice_debug(hw, ICE_DBG_LINK, "lse_ena = 0x%x\n", li->lse_ena);
305 	ice_debug(hw, ICE_DBG_LINK, "max_frame = 0x%x\n", li->max_frame_size);
306 	ice_debug(hw, ICE_DBG_LINK, "pacing = 0x%x\n", li->pacing);
307 
308 	/* save link status information */
309 	if (link)
310 		*link = *li;
311 
312 	/* flag cleared so calling functions don't call AQ again */
313 	pi->phy.get_link_info = false;
314 
315 	return 0;
316 }
317 
318 /**
319  * ice_init_fltr_mgmt_struct - initializes filter management list and locks
320  * @hw: pointer to the HW struct
321  */
322 static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
323 {
324 	struct ice_switch_info *sw;
325 
326 	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
327 				       sizeof(*hw->switch_info), GFP_KERNEL);
328 	sw = hw->switch_info;
329 
330 	if (!sw)
331 		return ICE_ERR_NO_MEMORY;
332 
333 	INIT_LIST_HEAD(&sw->vsi_list_map_head);
334 
335 	return ice_init_def_sw_recp(hw);
336 }
337 
338 /**
339  * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
340  * @hw: pointer to the HW struct
341  */
342 static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
343 {
344 	struct ice_switch_info *sw = hw->switch_info;
345 	struct ice_vsi_list_map_info *v_pos_map;
346 	struct ice_vsi_list_map_info *v_tmp_map;
347 	struct ice_sw_recipe *recps;
348 	u8 i;
349 
350 	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
351 				 list_entry) {
352 		list_del(&v_pos_map->list_entry);
353 		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
354 	}
355 	recps = hw->switch_info->recp_list;
356 	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
357 		struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
358 
359 		recps[i].root_rid = i;
360 		mutex_destroy(&recps[i].filt_rule_lock);
361 		list_for_each_entry_safe(lst_itr, tmp_entry,
362 					 &recps[i].filt_rules, list_entry) {
363 			list_del(&lst_itr->list_entry);
364 			devm_kfree(ice_hw_to_dev(hw), lst_itr);
365 		}
366 	}
367 	ice_rm_all_sw_replay_rule_info(hw);
368 	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
369 	devm_kfree(ice_hw_to_dev(hw), sw);
370 }
371 
372 #define ICE_FW_LOG_DESC_SIZE(n)	(sizeof(struct ice_aqc_fw_logging_data) + \
373 	(((n) - 1) * sizeof(((struct ice_aqc_fw_logging_data *)0)->entry)))
374 #define ICE_FW_LOG_DESC_SIZE_MAX	\
375 	ICE_FW_LOG_DESC_SIZE(ICE_AQC_FW_LOG_ID_MAX)
376 
377 /**
378  * ice_get_fw_log_cfg - get FW logging configuration
379  * @hw: pointer to the HW struct
380  */
381 static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
382 {
383 	struct ice_aqc_fw_logging_data *config;
384 	struct ice_aq_desc desc;
385 	enum ice_status status;
386 	u16 size;
387 
388 	size = ICE_FW_LOG_DESC_SIZE_MAX;
389 	config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
390 	if (!config)
391 		return ICE_ERR_NO_MEMORY;
392 
393 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);
394 
395 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
396 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
397 
398 	status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
399 	if (!status) {
400 		u16 i;
401 
402 		/* Save FW logging information into the HW structure */
403 		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
404 			u16 v, m, flgs;
405 
406 			v = le16_to_cpu(config->entry[i]);
407 			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
408 			flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;
409 
410 			if (m < ICE_AQC_FW_LOG_ID_MAX)
411 				hw->fw_log.evnts[m].cur = flgs;
412 		}
413 	}
414 
415 	devm_kfree(ice_hw_to_dev(hw), config);
416 
417 	return status;
418 }
419 
420 /**
421  * ice_cfg_fw_log - configure FW logging
422  * @hw: pointer to the HW struct
423  * @enable: enable certain FW logging events if true, disable all if false
424  *
425  * This function enables/disables the FW logging via Rx CQ events and a UART
426  * port based on predetermined configurations. FW logging via the Rx CQ can be
427  * enabled/disabled for individual PF's. However, FW logging via the UART can
428  * only be enabled/disabled for all PFs on the same device.
429  *
430  * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
431  * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
432  * before initializing the device.
433  *
434  * When re/configuring FW logging, callers need to update the "cfg" elements of
435  * the hw->fw_log.evnts array with the desired logging event configurations for
436  * modules of interest. When disabling FW logging completely, the callers can
437  * just pass false in the "enable" parameter. On completion, the function will
438  * update the "cur" element of the hw->fw_log.evnts array with the resulting
439  * logging event configurations of the modules that are being re/configured. FW
440  * logging modules that are not part of a reconfiguration operation retain their
441  * previous states.
442  *
443  * Before resetting the device, it is recommended that the driver disables FW
444  * logging before shutting down the control queue. When disabling FW logging
445  * ("enable" = false), the latest configurations of FW logging events stored in
446  * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
447  * a device reset.
448  *
449  * When enabling FW logging to emit log messages via the Rx CQ during the
450  * device's initialization phase, a mechanism alternative to interrupt handlers
451  * needs to be used to extract FW log messages from the Rx CQ periodically and
452  * to prevent the Rx CQ from being full and stalling other types of control
453  * messages from FW to SW. Interrupts are typically disabled during the device's
454  * initialization phase.
455  */
456 static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
457 {
458 	struct ice_aqc_fw_logging_data *data = NULL;
459 	struct ice_aqc_fw_logging *cmd;
460 	enum ice_status status = 0;
461 	u16 i, chgs = 0, len = 0;
462 	struct ice_aq_desc desc;
463 	u8 actv_evnts = 0;
464 	void *buf = NULL;
465 
466 	if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
467 		return 0;
468 
469 	/* Disable FW logging only when the control queue is still responsive */
470 	if (!enable &&
471 	    (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
472 		return 0;
473 
474 	/* Get current FW log settings */
475 	status = ice_get_fw_log_cfg(hw);
476 	if (status)
477 		return status;
478 
479 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
480 	cmd = &desc.params.fw_logging;
481 
482 	/* Indicate which controls are valid */
483 	if (hw->fw_log.cq_en)
484 		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;
485 
486 	if (hw->fw_log.uart_en)
487 		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;
488 
489 	if (enable) {
490 		/* Fill in an array of entries with FW logging modules and
491 		 * logging events being reconfigured.
492 		 */
493 		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
494 			u16 val;
495 
496 			/* Keep track of enabled event types */
497 			actv_evnts |= hw->fw_log.evnts[i].cfg;
498 
499 			if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
500 				continue;
501 
502 			if (!data) {
503 				data = devm_kzalloc(ice_hw_to_dev(hw),
504 						    ICE_FW_LOG_DESC_SIZE_MAX,
505 						    GFP_KERNEL);
506 				if (!data)
507 					return ICE_ERR_NO_MEMORY;
508 			}
509 
510 			val = i << ICE_AQC_FW_LOG_ID_S;
511 			val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
512 			data->entry[chgs++] = cpu_to_le16(val);
513 		}
514 
515 		/* Only enable FW logging if at least one module is specified.
516 		 * If FW logging is currently enabled but all modules are not
517 		 * enabled to emit log messages, disable FW logging altogether.
518 		 */
519 		if (actv_evnts) {
520 			/* Leave if there is effectively no change */
521 			if (!chgs)
522 				goto out;
523 
524 			if (hw->fw_log.cq_en)
525 				cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;
526 
527 			if (hw->fw_log.uart_en)
528 				cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;
529 
530 			buf = data;
531 			len = ICE_FW_LOG_DESC_SIZE(chgs);
532 			desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
533 		}
534 	}
535 
536 	status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
537 	if (!status) {
538 		/* Update the current configuration to reflect events enabled.
539 		 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
540 		 * logging mode is enabled for the device. They do not reflect
541 		 * actual modules being enabled to emit log messages. So, their
542 		 * values remain unchanged even when all modules are disabled.
543 		 */
544 		u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;
545 
546 		hw->fw_log.actv_evnts = actv_evnts;
547 		for (i = 0; i < cnt; i++) {
548 			u16 v, m;
549 
550 			if (!enable) {
551 				/* When disabling all FW logging events as part
552 				 * of device's de-initialization, the original
553 				 * configurations are retained, and can be used
554 				 * to reconfigure FW logging later if the device
555 				 * is re-initialized.
556 				 */
557 				hw->fw_log.evnts[i].cur = 0;
558 				continue;
559 			}
560 
561 			v = le16_to_cpu(data->entry[i]);
562 			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
563 			hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
564 		}
565 	}
566 
567 out:
568 	if (data)
569 		devm_kfree(ice_hw_to_dev(hw), data);
570 
571 	return status;
572 }
573 
574 /**
575  * ice_output_fw_log
576  * @hw: pointer to the HW struct
577  * @desc: pointer to the AQ message descriptor
578  * @buf: pointer to the buffer accompanying the AQ message
579  *
580  * Formats a FW Log message and outputs it via the standard driver logs.
581  */
582 void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
583 {
584 	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
585 	ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
586 			le16_to_cpu(desc->datalen));
587 	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
588 }
589 
590 /**
591  * ice_get_itr_intrl_gran
592  * @hw: pointer to the HW struct
593  *
594  * Determines the ITR/INTRL granularities based on the maximum aggregate
595  * bandwidth according to the device's configuration during power-on.
596  */
597 static void ice_get_itr_intrl_gran(struct ice_hw *hw)
598 {
599 	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
600 			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
601 			GL_PWR_MODE_CTL_CAR_MAX_BW_S;
602 
603 	switch (max_agg_bw) {
604 	case ICE_MAX_AGG_BW_200G:
605 	case ICE_MAX_AGG_BW_100G:
606 	case ICE_MAX_AGG_BW_50G:
607 		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
608 		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
609 		break;
610 	case ICE_MAX_AGG_BW_25G:
611 		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
612 		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
613 		break;
614 	}
615 }
616 
617 /**
618  * ice_init_hw - main hardware initialization routine
619  * @hw: pointer to the hardware structure
620  */
621 enum ice_status ice_init_hw(struct ice_hw *hw)
622 {
623 	struct ice_aqc_get_phy_caps_data *pcaps;
624 	enum ice_status status;
625 	u16 mac_buf_len;
626 	void *mac_buf;
627 
628 	/* Set MAC type based on DeviceID */
629 	status = ice_set_mac_type(hw);
630 	if (status)
631 		return status;
632 
633 	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
634 			 PF_FUNC_RID_FUNC_NUM_M) >>
635 		PF_FUNC_RID_FUNC_NUM_S;
636 
637 	status = ice_reset(hw, ICE_RESET_PFR);
638 	if (status)
639 		return status;
640 
641 	ice_get_itr_intrl_gran(hw);
642 
643 	status = ice_create_all_ctrlq(hw);
644 	if (status)
645 		goto err_unroll_cqinit;
646 
647 	/* Enable FW logging. Not fatal if this fails. */
648 	status = ice_cfg_fw_log(hw, true);
649 	if (status)
650 		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");
651 
652 	status = ice_clear_pf_cfg(hw);
653 	if (status)
654 		goto err_unroll_cqinit;
655 
656 	ice_clear_pxe_mode(hw);
657 
658 	status = ice_init_nvm(hw);
659 	if (status)
660 		goto err_unroll_cqinit;
661 
662 	status = ice_get_caps(hw);
663 	if (status)
664 		goto err_unroll_cqinit;
665 
666 	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
667 				     sizeof(*hw->port_info), GFP_KERNEL);
668 	if (!hw->port_info) {
669 		status = ICE_ERR_NO_MEMORY;
670 		goto err_unroll_cqinit;
671 	}
672 
673 	/* set the back pointer to HW */
674 	hw->port_info->hw = hw;
675 
676 	/* Initialize port_info struct with switch configuration data */
677 	status = ice_get_initial_sw_cfg(hw);
678 	if (status)
679 		goto err_unroll_alloc;
680 
681 	hw->evb_veb = true;
682 
683 	/* Query the allocated resources for Tx scheduler */
684 	status = ice_sched_query_res_alloc(hw);
685 	if (status) {
686 		ice_debug(hw, ICE_DBG_SCHED,
687 			  "Failed to get scheduler allocated resources\n");
688 		goto err_unroll_alloc;
689 	}
690 
691 	/* Initialize port_info struct with scheduler data */
692 	status = ice_sched_init_port(hw->port_info);
693 	if (status)
694 		goto err_unroll_sched;
695 
696 	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
697 	if (!pcaps) {
698 		status = ICE_ERR_NO_MEMORY;
699 		goto err_unroll_sched;
700 	}
701 
702 	/* Initialize port_info struct with PHY capabilities */
703 	status = ice_aq_get_phy_caps(hw->port_info, false,
704 				     ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
705 	devm_kfree(ice_hw_to_dev(hw), pcaps);
706 	if (status)
707 		goto err_unroll_sched;
708 
709 	/* Initialize port_info struct with link information */
710 	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
711 	if (status)
712 		goto err_unroll_sched;
713 
714 	/* need a valid SW entry point to build a Tx tree */
715 	if (!hw->sw_entry_point_layer) {
716 		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
717 		status = ICE_ERR_CFG;
718 		goto err_unroll_sched;
719 	}
720 	INIT_LIST_HEAD(&hw->agg_list);
721 	/* Initialize max burst size */
722 	if (!hw->max_burst_size)
723 		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
724 
725 	status = ice_init_fltr_mgmt_struct(hw);
726 	if (status)
727 		goto err_unroll_sched;
728 
729 	/* Get MAC information */
730 	/* A single port can report up to two (LAN and WoL) addresses */
731 	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
732 			       sizeof(struct ice_aqc_manage_mac_read_resp),
733 			       GFP_KERNEL);
734 	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
735 
736 	if (!mac_buf) {
737 		status = ICE_ERR_NO_MEMORY;
738 		goto err_unroll_fltr_mgmt_struct;
739 	}
740 
741 	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
742 	devm_kfree(ice_hw_to_dev(hw), mac_buf);
743 
744 	if (status)
745 		goto err_unroll_fltr_mgmt_struct;
746 	status = ice_init_hw_tbls(hw);
747 	if (status)
748 		goto err_unroll_fltr_mgmt_struct;
749 	return 0;
750 
751 err_unroll_fltr_mgmt_struct:
752 	ice_cleanup_fltr_mgmt_struct(hw);
753 err_unroll_sched:
754 	ice_sched_cleanup_all(hw);
755 err_unroll_alloc:
756 	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
757 err_unroll_cqinit:
758 	ice_destroy_all_ctrlq(hw);
759 	return status;
760 }
761 
762 /**
763  * ice_deinit_hw - unroll initialization operations done by ice_init_hw
764  * @hw: pointer to the hardware structure
765  *
766  * This should be called only during nominal operation, not as a result of
767  * ice_init_hw() failing since ice_init_hw() will take care of unrolling
768  * applicable initializations if it fails for any reason.
769  */
770 void ice_deinit_hw(struct ice_hw *hw)
771 {
772 	ice_cleanup_fltr_mgmt_struct(hw);
773 
774 	ice_sched_cleanup_all(hw);
775 	ice_sched_clear_agg(hw);
776 	ice_free_seg(hw);
777 	ice_free_hw_tbls(hw);
778 
779 	if (hw->port_info) {
780 		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
781 		hw->port_info = NULL;
782 	}
783 
784 	/* Attempt to disable FW logging before shutting down control queues */
785 	ice_cfg_fw_log(hw, false);
786 	ice_destroy_all_ctrlq(hw);
787 
788 	/* Clear VSI contexts if not already cleared */
789 	ice_clear_all_vsi_ctx(hw);
790 }
791 
792 /**
793  * ice_check_reset - Check to see if a global reset is complete
794  * @hw: pointer to the hardware structure
795  */
796 enum ice_status ice_check_reset(struct ice_hw *hw)
797 {
798 	u32 cnt, reg = 0, grst_delay, uld_mask;
799 
800 	/* Poll for Device Active state in case a recent CORER, GLOBR,
801 	 * or EMPR has occurred. The grst delay value is in 100ms units.
802 	 * Add 1sec for outstanding AQ commands that can take a long time.
803 	 */
804 	grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
805 		      GLGEN_RSTCTL_GRSTDEL_S) + 10;
806 
807 	for (cnt = 0; cnt < grst_delay; cnt++) {
808 		mdelay(100);
809 		reg = rd32(hw, GLGEN_RSTAT);
810 		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
811 			break;
812 	}
813 
814 	if (cnt == grst_delay) {
815 		ice_debug(hw, ICE_DBG_INIT,
816 			  "Global reset polling failed to complete.\n");
817 		return ICE_ERR_RESET_FAILED;
818 	}
819 
820 #define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
821 				 GLNVM_ULD_PCIER_DONE_1_M |\
822 				 GLNVM_ULD_CORER_DONE_M |\
823 				 GLNVM_ULD_GLOBR_DONE_M |\
824 				 GLNVM_ULD_POR_DONE_M |\
825 				 GLNVM_ULD_POR_DONE_1_M |\
826 				 GLNVM_ULD_PCIER_DONE_2_M)
827 
828 	uld_mask = ICE_RESET_DONE_MASK;
829 
830 	/* Device is Active; check Global Reset processes are done */
831 	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
832 		reg = rd32(hw, GLNVM_ULD) & uld_mask;
833 		if (reg == uld_mask) {
834 			ice_debug(hw, ICE_DBG_INIT,
835 				  "Global reset processes done. %d\n", cnt);
836 			break;
837 		}
838 		mdelay(10);
839 	}
840 
841 	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
842 		ice_debug(hw, ICE_DBG_INIT,
843 			  "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
844 			  reg);
845 		return ICE_ERR_RESET_FAILED;
846 	}
847 
848 	return 0;
849 }
850 
851 /**
852  * ice_pf_reset - Reset the PF
853  * @hw: pointer to the hardware structure
854  *
855  * If a global reset has been triggered, this function checks
856  * for its completion and then issues the PF reset
857  */
858 static enum ice_status ice_pf_reset(struct ice_hw *hw)
859 {
860 	u32 cnt, reg;
861 
862 	/* If at function entry a global reset was already in progress, i.e.
863 	 * state is not 'device active' or any of the reset done bits are not
864 	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
865 	 * global reset is done.
866 	 */
867 	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
868 	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
869 		/* poll on global reset currently in progress until done */
870 		if (ice_check_reset(hw))
871 			return ICE_ERR_RESET_FAILED;
872 
873 		return 0;
874 	}
875 
876 	/* Reset the PF */
877 	reg = rd32(hw, PFGEN_CTRL);
878 
879 	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
880 
881 	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
882 		reg = rd32(hw, PFGEN_CTRL);
883 		if (!(reg & PFGEN_CTRL_PFSWR_M))
884 			break;
885 
886 		mdelay(1);
887 	}
888 
889 	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
890 		ice_debug(hw, ICE_DBG_INIT,
891 			  "PF reset polling failed to complete.\n");
892 		return ICE_ERR_RESET_FAILED;
893 	}
894 
895 	return 0;
896 }
897 
898 /**
899  * ice_reset - Perform different types of reset
900  * @hw: pointer to the hardware structure
901  * @req: reset request
902  *
903  * This function triggers a reset as specified by the req parameter.
904  *
905  * Note:
906  * If anything other than a PF reset is triggered, PXE mode is restored.
907  * This has to be cleared using ice_clear_pxe_mode again, once the AQ
908  * interface has been restored in the rebuild flow.
909  */
910 enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
911 {
912 	u32 val = 0;
913 
914 	switch (req) {
915 	case ICE_RESET_PFR:
916 		return ice_pf_reset(hw);
917 	case ICE_RESET_CORER:
918 		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
919 		val = GLGEN_RTRIG_CORER_M;
920 		break;
921 	case ICE_RESET_GLOBR:
922 		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
923 		val = GLGEN_RTRIG_GLOBR_M;
924 		break;
925 	default:
926 		return ICE_ERR_PARAM;
927 	}
928 
929 	val |= rd32(hw, GLGEN_RTRIG);
930 	wr32(hw, GLGEN_RTRIG, val);
931 	ice_flush(hw);
932 
933 	/* wait for the FW to be ready */
934 	return ice_check_reset(hw);
935 }
936 
937 /**
938  * ice_copy_rxq_ctx_to_hw
939  * @hw: pointer to the hardware structure
940  * @ice_rxq_ctx: pointer to the rxq context
941  * @rxq_index: the index of the Rx queue
942  *
943  * Copies rxq context from dense structure to HW register space
944  */
945 static enum ice_status
946 ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
947 {
948 	u8 i;
949 
950 	if (!ice_rxq_ctx)
951 		return ICE_ERR_BAD_PTR;
952 
953 	if (rxq_index > QRX_CTRL_MAX_INDEX)
954 		return ICE_ERR_PARAM;
955 
956 	/* Copy each dword separately to HW */
957 	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
958 		wr32(hw, QRX_CONTEXT(i, rxq_index),
959 		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
960 
961 		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
962 			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
963 	}
964 
965 	return 0;
966 }
967 
968 /* LAN Rx Queue Context */
969 static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
970 	/* Field		Width	LSB */
971 	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
972 	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
973 	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
974 	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
975 	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
976 	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
977 	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
978 	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
979 	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
980 	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
981 	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
982 	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
983 	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
984 	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
985 	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
986 	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
987 	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
988 	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
989 	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
990 	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
991 	{ 0 }
992 };
993 
994 /**
995  * ice_write_rxq_ctx
996  * @hw: pointer to the hardware structure
997  * @rlan_ctx: pointer to the rxq context
998  * @rxq_index: the index of the Rx queue
999  *
1000  * Converts rxq context from sparse to dense structure and then writes
1001  * it to HW register space and enables the hardware to prefetch descriptors
1002  * instead of only fetching them on demand
1003  */
1004 enum ice_status
1005 ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
1006 		  u32 rxq_index)
1007 {
1008 	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
1009 
1010 	if (!rlan_ctx)
1011 		return ICE_ERR_BAD_PTR;
1012 
1013 	rlan_ctx->prefena = 1;
1014 
1015 	ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
1016 	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
1017 }
1018 
1019 /* LAN Tx Queue Context */
1020 const struct ice_ctx_ele ice_tlan_ctx_info[] = {
1021 				    /* Field			Width	LSB */
1022 	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
1023 	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
1024 	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
1025 	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
1026 	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
1027 	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
1028 	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
1029 	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
1030 	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
1031 	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
1032 	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
1033 	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
1034 	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
1035 	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
1036 	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
1037 	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
1038 	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
1039 	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
1040 	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
1041 	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
1042 	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
1043 	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
1044 	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
1045 	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
1046 	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
1047 	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
1048 	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
1049 	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
1050 	{ 0 }
1051 };
1052 
1053 /* FW Admin Queue command wrappers */
1054 
1055 /* Software lock/mutex that is meant to be held while the Global Config Lock
1056  * in firmware is acquired by the software to prevent most (but not all) types
1057  * of AQ commands from being sent to FW
1058  */
1059 DEFINE_MUTEX(ice_global_cfg_lock_sw);
1060 
1061 /**
1062  * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
1063  * @hw: pointer to the HW struct
1064  * @desc: descriptor describing the command
1065  * @buf: buffer to use for indirect commands (NULL for direct commands)
1066  * @buf_size: size of buffer for indirect commands (0 for direct commands)
1067  * @cd: pointer to command details structure
1068  *
1069  * Helper function to send FW Admin Queue commands to the FW Admin Queue.
1070  */
1071 enum ice_status
1072 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1073 		u16 buf_size, struct ice_sq_cd *cd)
1074 {
1075 	struct ice_aqc_req_res *cmd = &desc->params.res_owner;
1076 	bool lock_acquired = false;
1077 	enum ice_status status;
1078 
1079 	/* When a package download is in process (i.e. when the firmware's
1080 	 * Global Configuration Lock resource is held), only the Download
1081 	 * Package, Get Version, Get Package Info List and Release Resource
1082 	 * (with resource ID set to Global Config Lock) AdminQ commands are
1083 	 * allowed; all others must block until the package download completes
1084 	 * and the Global Config Lock is released.  See also
1085 	 * ice_acquire_global_cfg_lock().
1086 	 */
1087 	switch (le16_to_cpu(desc->opcode)) {
1088 	case ice_aqc_opc_download_pkg:
1089 	case ice_aqc_opc_get_pkg_info_list:
1090 	case ice_aqc_opc_get_ver:
1091 		break;
1092 	case ice_aqc_opc_release_res:
1093 		if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
1094 			break;
1095 		fallthrough;
1096 	default:
1097 		mutex_lock(&ice_global_cfg_lock_sw);
1098 		lock_acquired = true;
1099 		break;
1100 	}
1101 
1102 	status = ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
1103 	if (lock_acquired)
1104 		mutex_unlock(&ice_global_cfg_lock_sw);
1105 
1106 	return status;
1107 }
1108 
1109 /**
1110  * ice_aq_get_fw_ver
1111  * @hw: pointer to the HW struct
1112  * @cd: pointer to command details structure or NULL
1113  *
1114  * Get the firmware version (0x0001) from the admin queue commands
1115  */
1116 enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1117 {
1118 	struct ice_aqc_get_ver *resp;
1119 	struct ice_aq_desc desc;
1120 	enum ice_status status;
1121 
1122 	resp = &desc.params.get_ver;
1123 
1124 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1125 
1126 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1127 
1128 	if (!status) {
1129 		hw->fw_branch = resp->fw_branch;
1130 		hw->fw_maj_ver = resp->fw_major;
1131 		hw->fw_min_ver = resp->fw_minor;
1132 		hw->fw_patch = resp->fw_patch;
1133 		hw->fw_build = le32_to_cpu(resp->fw_build);
1134 		hw->api_branch = resp->api_branch;
1135 		hw->api_maj_ver = resp->api_major;
1136 		hw->api_min_ver = resp->api_minor;
1137 		hw->api_patch = resp->api_patch;
1138 	}
1139 
1140 	return status;
1141 }
1142 
1143 /**
1144  * ice_aq_send_driver_ver
1145  * @hw: pointer to the HW struct
1146  * @dv: driver's major, minor version
1147  * @cd: pointer to command details structure or NULL
1148  *
1149  * Send the driver version (0x0002) to the firmware
1150  */
1151 enum ice_status
1152 ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
1153 		       struct ice_sq_cd *cd)
1154 {
1155 	struct ice_aqc_driver_ver *cmd;
1156 	struct ice_aq_desc desc;
1157 	u16 len;
1158 
1159 	cmd = &desc.params.driver_ver;
1160 
1161 	if (!dv)
1162 		return ICE_ERR_PARAM;
1163 
1164 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
1165 
1166 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1167 	cmd->major_ver = dv->major_ver;
1168 	cmd->minor_ver = dv->minor_ver;
1169 	cmd->build_ver = dv->build_ver;
1170 	cmd->subbuild_ver = dv->subbuild_ver;
1171 
1172 	len = 0;
1173 	while (len < sizeof(dv->driver_string) &&
1174 	       isascii(dv->driver_string[len]) && dv->driver_string[len])
1175 		len++;
1176 
1177 	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
1178 }
1179 
1180 /**
1181  * ice_aq_q_shutdown
1182  * @hw: pointer to the HW struct
1183  * @unloading: is the driver unloading itself
1184  *
1185  * Tell the Firmware that we're shutting down the AdminQ and whether
1186  * or not the driver is unloading as well (0x0003).
1187  */
1188 enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
1189 {
1190 	struct ice_aqc_q_shutdown *cmd;
1191 	struct ice_aq_desc desc;
1192 
1193 	cmd = &desc.params.q_shutdown;
1194 
1195 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1196 
1197 	if (unloading)
1198 		cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
1199 
1200 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1201 }
1202 
1203 /**
1204  * ice_aq_req_res
1205  * @hw: pointer to the HW struct
1206  * @res: resource ID
1207  * @access: access type
1208  * @sdp_number: resource number
1209  * @timeout: the maximum time in ms that the driver may hold the resource
1210  * @cd: pointer to command details structure or NULL
1211  *
1212  * Requests common resource using the admin queue commands (0x0008).
1213  * When attempting to acquire the Global Config Lock, the driver can
1214  * learn of three states:
1215  *  1) ICE_SUCCESS -        acquired lock, and can perform download package
1216  *  2) ICE_ERR_AQ_ERROR -   did not get lock, driver should fail to load
1217  *  3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
1218  *                          successfully downloaded the package; the driver does
1219  *                          not have to download the package and can continue
1220  *                          loading
1221  *
1222  * Note that if the caller is in an acquire lock, perform action, release lock
1223  * phase of operation, it is possible that the FW may detect a timeout and issue
1224  * a CORER. In this case, the driver will receive a CORER interrupt and will
1225  * have to determine its cause. The calling thread that is handling this flow
1226  * will likely get an error propagated back to it indicating the Download
1227  * Package, Update Package or the Release Resource AQ commands timed out.
1228  */
1229 static enum ice_status
1230 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1231 	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
1232 	       struct ice_sq_cd *cd)
1233 {
1234 	struct ice_aqc_req_res *cmd_resp;
1235 	struct ice_aq_desc desc;
1236 	enum ice_status status;
1237 
1238 	cmd_resp = &desc.params.res_owner;
1239 
1240 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
1241 
1242 	cmd_resp->res_id = cpu_to_le16(res);
1243 	cmd_resp->access_type = cpu_to_le16(access);
1244 	cmd_resp->res_number = cpu_to_le32(sdp_number);
1245 	cmd_resp->timeout = cpu_to_le32(*timeout);
1246 	*timeout = 0;
1247 
1248 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1249 
1250 	/* The completion specifies the maximum time in ms that the driver
1251 	 * may hold the resource in the Timeout field.
1252 	 */
1253 
1254 	/* Global config lock response utilizes an additional status field.
1255 	 *
1256 	 * If the Global config lock resource is held by some other driver, the
1257 	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
1258 	 * and the timeout field indicates the maximum time the current owner
1259 	 * of the resource has to free it.
1260 	 */
1261 	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1262 		if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1263 			*timeout = le32_to_cpu(cmd_resp->timeout);
1264 			return 0;
1265 		} else if (le16_to_cpu(cmd_resp->status) ==
1266 			   ICE_AQ_RES_GLBL_IN_PROG) {
1267 			*timeout = le32_to_cpu(cmd_resp->timeout);
1268 			return ICE_ERR_AQ_ERROR;
1269 		} else if (le16_to_cpu(cmd_resp->status) ==
1270 			   ICE_AQ_RES_GLBL_DONE) {
1271 			return ICE_ERR_AQ_NO_WORK;
1272 		}
1273 
1274 		/* invalid FW response, force a timeout immediately */
1275 		*timeout = 0;
1276 		return ICE_ERR_AQ_ERROR;
1277 	}
1278 
1279 	/* If the resource is held by some other driver, the command completes
1280 	 * with a busy return value and the timeout field indicates the maximum
1281 	 * time the current owner of the resource has to free it.
1282 	 */
1283 	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1284 		*timeout = le32_to_cpu(cmd_resp->timeout);
1285 
1286 	return status;
1287 }
1288 
1289 /**
1290  * ice_aq_release_res
1291  * @hw: pointer to the HW struct
1292  * @res: resource ID
1293  * @sdp_number: resource number
1294  * @cd: pointer to command details structure or NULL
1295  *
1296  * release common resource using the admin queue commands (0x0009)
1297  */
1298 static enum ice_status
1299 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1300 		   struct ice_sq_cd *cd)
1301 {
1302 	struct ice_aqc_req_res *cmd;
1303 	struct ice_aq_desc desc;
1304 
1305 	cmd = &desc.params.res_owner;
1306 
1307 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1308 
1309 	cmd->res_id = cpu_to_le16(res);
1310 	cmd->res_number = cpu_to_le32(sdp_number);
1311 
1312 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1313 }
1314 
1315 /**
1316  * ice_acquire_res
1317  * @hw: pointer to the HW structure
1318  * @res: resource ID
1319  * @access: access type (read or write)
1320  * @timeout: timeout in milliseconds
1321  *
1322  * This function will attempt to acquire the ownership of a resource.
1323  */
1324 enum ice_status
1325 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1326 		enum ice_aq_res_access_type access, u32 timeout)
1327 {
1328 #define ICE_RES_POLLING_DELAY_MS	10
1329 	u32 delay = ICE_RES_POLLING_DELAY_MS;
1330 	u32 time_left = timeout;
1331 	enum ice_status status;
1332 
1333 	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1334 
1335 	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
1336 	 * previously acquired the resource and performed any necessary updates;
1337 	 * in this case the caller does not obtain the resource and has no
1338 	 * further work to do.
1339 	 */
1340 	if (status == ICE_ERR_AQ_NO_WORK)
1341 		goto ice_acquire_res_exit;
1342 
1343 	if (status)
1344 		ice_debug(hw, ICE_DBG_RES,
1345 			  "resource %d acquire type %d failed.\n", res, access);
1346 
1347 	/* If necessary, poll until the current lock owner timeouts */
1348 	timeout = time_left;
1349 	while (status && timeout && time_left) {
1350 		mdelay(delay);
1351 		timeout = (timeout > delay) ? timeout - delay : 0;
1352 		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1353 
1354 		if (status == ICE_ERR_AQ_NO_WORK)
1355 			/* lock free, but no work to do */
1356 			break;
1357 
1358 		if (!status)
1359 			/* lock acquired */
1360 			break;
1361 	}
1362 	if (status && status != ICE_ERR_AQ_NO_WORK)
1363 		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
1364 
1365 ice_acquire_res_exit:
1366 	if (status == ICE_ERR_AQ_NO_WORK) {
1367 		if (access == ICE_RES_WRITE)
1368 			ice_debug(hw, ICE_DBG_RES,
1369 				  "resource indicates no work to do.\n");
1370 		else
1371 			ice_debug(hw, ICE_DBG_RES,
1372 				  "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
1373 	}
1374 	return status;
1375 }
1376 
1377 /**
1378  * ice_release_res
1379  * @hw: pointer to the HW structure
1380  * @res: resource ID
1381  *
1382  * This function will release a resource using the proper Admin Command.
1383  */
1384 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
1385 {
1386 	enum ice_status status;
1387 	u32 total_delay = 0;
1388 
1389 	status = ice_aq_release_res(hw, res, 0, NULL);
1390 
1391 	/* there are some rare cases when trying to release the resource
1392 	 * results in an admin queue timeout, so handle them correctly
1393 	 */
1394 	while ((status == ICE_ERR_AQ_TIMEOUT) &&
1395 	       (total_delay < hw->adminq.sq_cmd_timeout)) {
1396 		mdelay(1);
1397 		status = ice_aq_release_res(hw, res, 0, NULL);
1398 		total_delay++;
1399 	}
1400 }
1401 
1402 /**
1403  * ice_aq_alloc_free_res - command to allocate/free resources
1404  * @hw: pointer to the HW struct
1405  * @num_entries: number of resource entries in buffer
1406  * @buf: Indirect buffer to hold data parameters and response
1407  * @buf_size: size of buffer for indirect commands
1408  * @opc: pass in the command opcode
1409  * @cd: pointer to command details structure or NULL
1410  *
1411  * Helper function to allocate/free resources using the admin queue commands
1412  */
1413 enum ice_status
1414 ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
1415 		      struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
1416 		      enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1417 {
1418 	struct ice_aqc_alloc_free_res_cmd *cmd;
1419 	struct ice_aq_desc desc;
1420 
1421 	cmd = &desc.params.sw_res_ctrl;
1422 
1423 	if (!buf)
1424 		return ICE_ERR_PARAM;
1425 
1426 	if (buf_size < (num_entries * sizeof(buf->elem[0])))
1427 		return ICE_ERR_PARAM;
1428 
1429 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
1430 
1431 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1432 
1433 	cmd->num_entries = cpu_to_le16(num_entries);
1434 
1435 	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1436 }
1437 
1438 /**
1439  * ice_alloc_hw_res - allocate resource
1440  * @hw: pointer to the HW struct
1441  * @type: type of resource
1442  * @num: number of resources to allocate
1443  * @btm: allocate from bottom
1444  * @res: pointer to array that will receive the resources
1445  */
1446 enum ice_status
1447 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
1448 {
1449 	struct ice_aqc_alloc_free_res_elem *buf;
1450 	enum ice_status status;
1451 	u16 buf_len;
1452 
1453 	buf_len = struct_size(buf, elem, num - 1);
1454 	buf = kzalloc(buf_len, GFP_KERNEL);
1455 	if (!buf)
1456 		return ICE_ERR_NO_MEMORY;
1457 
1458 	/* Prepare buffer to allocate resource. */
1459 	buf->num_elems = cpu_to_le16(num);
1460 	buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
1461 				    ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
1462 	if (btm)
1463 		buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
1464 
1465 	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
1466 				       ice_aqc_opc_alloc_res, NULL);
1467 	if (status)
1468 		goto ice_alloc_res_exit;
1469 
1470 	memcpy(res, buf->elem, sizeof(buf->elem) * num);
1471 
1472 ice_alloc_res_exit:
1473 	kfree(buf);
1474 	return status;
1475 }
1476 
1477 /**
1478  * ice_free_hw_res - free allocated HW resource
1479  * @hw: pointer to the HW struct
1480  * @type: type of resource to free
1481  * @num: number of resources
1482  * @res: pointer to array that contains the resources to free
1483  */
1484 enum ice_status
1485 ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
1486 {
1487 	struct ice_aqc_alloc_free_res_elem *buf;
1488 	enum ice_status status;
1489 	u16 buf_len;
1490 
1491 	buf_len = struct_size(buf, elem, num - 1);
1492 	buf = kzalloc(buf_len, GFP_KERNEL);
1493 	if (!buf)
1494 		return ICE_ERR_NO_MEMORY;
1495 
1496 	/* Prepare buffer to free resource. */
1497 	buf->num_elems = cpu_to_le16(num);
1498 	buf->res_type = cpu_to_le16(type);
1499 	memcpy(buf->elem, res, sizeof(buf->elem) * num);
1500 
1501 	status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
1502 				       ice_aqc_opc_free_res, NULL);
1503 	if (status)
1504 		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1505 
1506 	kfree(buf);
1507 	return status;
1508 }
1509 
1510 /**
1511  * ice_get_num_per_func - determine number of resources per PF
1512  * @hw: pointer to the HW structure
1513  * @max: value to be evenly split between each PF
1514  *
1515  * Determine the number of valid functions by going through the bitmap returned
1516  * from parsing capabilities and use this to calculate the number of resources
1517  * per PF based on the max value passed in.
1518  */
1519 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
1520 {
1521 	u8 funcs;
1522 
1523 #define ICE_CAPS_VALID_FUNCS_M	0xFF
1524 	funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
1525 			 ICE_CAPS_VALID_FUNCS_M);
1526 
1527 	if (!funcs)
1528 		return 0;
1529 
1530 	return max / funcs;
1531 }
1532 
1533 /**
1534  * ice_parse_caps - parse function/device capabilities
1535  * @hw: pointer to the HW struct
1536  * @buf: pointer to a buffer containing function/device capability records
1537  * @cap_count: number of capability records in the list
1538  * @opc: type of capabilities list to parse
1539  *
1540  * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
1541  */
1542 static void
1543 ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
1544 	       enum ice_adminq_opc opc)
1545 {
1546 	struct ice_aqc_list_caps_elem *cap_resp;
1547 	struct ice_hw_func_caps *func_p = NULL;
1548 	struct ice_hw_dev_caps *dev_p = NULL;
1549 	struct ice_hw_common_caps *caps;
1550 	char const *prefix;
1551 	u32 i;
1552 
1553 	if (!buf)
1554 		return;
1555 
1556 	cap_resp = (struct ice_aqc_list_caps_elem *)buf;
1557 
1558 	if (opc == ice_aqc_opc_list_dev_caps) {
1559 		dev_p = &hw->dev_caps;
1560 		caps = &dev_p->common_cap;
1561 		prefix = "dev cap";
1562 	} else if (opc == ice_aqc_opc_list_func_caps) {
1563 		func_p = &hw->func_caps;
1564 		caps = &func_p->common_cap;
1565 		prefix = "func cap";
1566 	} else {
1567 		ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
1568 		return;
1569 	}
1570 
1571 	for (i = 0; caps && i < cap_count; i++, cap_resp++) {
1572 		u32 logical_id = le32_to_cpu(cap_resp->logical_id);
1573 		u32 phys_id = le32_to_cpu(cap_resp->phys_id);
1574 		u32 number = le32_to_cpu(cap_resp->number);
1575 		u16 cap = le16_to_cpu(cap_resp->cap);
1576 
1577 		switch (cap) {
1578 		case ICE_AQC_CAPS_VALID_FUNCTIONS:
1579 			caps->valid_functions = number;
1580 			ice_debug(hw, ICE_DBG_INIT,
1581 				  "%s: valid_functions (bitmap) = %d\n", prefix,
1582 				  caps->valid_functions);
1583 
1584 			/* store func count for resource management purposes */
1585 			if (dev_p)
1586 				dev_p->num_funcs = hweight32(number);
1587 			break;
1588 		case ICE_AQC_CAPS_SRIOV:
1589 			caps->sr_iov_1_1 = (number == 1);
1590 			ice_debug(hw, ICE_DBG_INIT,
1591 				  "%s: sr_iov_1_1 = %d\n", prefix,
1592 				  caps->sr_iov_1_1);
1593 			break;
1594 		case ICE_AQC_CAPS_VF:
1595 			if (dev_p) {
1596 				dev_p->num_vfs_exposed = number;
1597 				ice_debug(hw, ICE_DBG_INIT,
1598 					  "%s: num_vfs_exposed = %d\n", prefix,
1599 					  dev_p->num_vfs_exposed);
1600 			} else if (func_p) {
1601 				func_p->num_allocd_vfs = number;
1602 				func_p->vf_base_id = logical_id;
1603 				ice_debug(hw, ICE_DBG_INIT,
1604 					  "%s: num_allocd_vfs = %d\n", prefix,
1605 					  func_p->num_allocd_vfs);
1606 				ice_debug(hw, ICE_DBG_INIT,
1607 					  "%s: vf_base_id = %d\n", prefix,
1608 					  func_p->vf_base_id);
1609 			}
1610 			break;
1611 		case ICE_AQC_CAPS_VSI:
1612 			if (dev_p) {
1613 				dev_p->num_vsi_allocd_to_host = number;
1614 				ice_debug(hw, ICE_DBG_INIT,
1615 					  "%s: num_vsi_allocd_to_host = %d\n",
1616 					  prefix,
1617 					  dev_p->num_vsi_allocd_to_host);
1618 			} else if (func_p) {
1619 				func_p->guar_num_vsi =
1620 					ice_get_num_per_func(hw, ICE_MAX_VSI);
1621 				ice_debug(hw, ICE_DBG_INIT,
1622 					  "%s: guar_num_vsi (fw) = %d\n",
1623 					  prefix, number);
1624 				ice_debug(hw, ICE_DBG_INIT,
1625 					  "%s: guar_num_vsi = %d\n",
1626 					  prefix, func_p->guar_num_vsi);
1627 			}
1628 			break;
1629 		case ICE_AQC_CAPS_DCB:
1630 			caps->dcb = (number == 1);
1631 			caps->active_tc_bitmap = logical_id;
1632 			caps->maxtc = phys_id;
1633 			ice_debug(hw, ICE_DBG_INIT,
1634 				  "%s: dcb = %d\n", prefix, caps->dcb);
1635 			ice_debug(hw, ICE_DBG_INIT,
1636 				  "%s: active_tc_bitmap = %d\n", prefix,
1637 				  caps->active_tc_bitmap);
1638 			ice_debug(hw, ICE_DBG_INIT,
1639 				  "%s: maxtc = %d\n", prefix, caps->maxtc);
1640 			break;
1641 		case ICE_AQC_CAPS_RSS:
1642 			caps->rss_table_size = number;
1643 			caps->rss_table_entry_width = logical_id;
1644 			ice_debug(hw, ICE_DBG_INIT,
1645 				  "%s: rss_table_size = %d\n", prefix,
1646 				  caps->rss_table_size);
1647 			ice_debug(hw, ICE_DBG_INIT,
1648 				  "%s: rss_table_entry_width = %d\n", prefix,
1649 				  caps->rss_table_entry_width);
1650 			break;
1651 		case ICE_AQC_CAPS_RXQS:
1652 			caps->num_rxq = number;
1653 			caps->rxq_first_id = phys_id;
1654 			ice_debug(hw, ICE_DBG_INIT,
1655 				  "%s: num_rxq = %d\n", prefix,
1656 				  caps->num_rxq);
1657 			ice_debug(hw, ICE_DBG_INIT,
1658 				  "%s: rxq_first_id = %d\n", prefix,
1659 				  caps->rxq_first_id);
1660 			break;
1661 		case ICE_AQC_CAPS_TXQS:
1662 			caps->num_txq = number;
1663 			caps->txq_first_id = phys_id;
1664 			ice_debug(hw, ICE_DBG_INIT,
1665 				  "%s: num_txq = %d\n", prefix,
1666 				  caps->num_txq);
1667 			ice_debug(hw, ICE_DBG_INIT,
1668 				  "%s: txq_first_id = %d\n", prefix,
1669 				  caps->txq_first_id);
1670 			break;
1671 		case ICE_AQC_CAPS_MSIX:
1672 			caps->num_msix_vectors = number;
1673 			caps->msix_vector_first_id = phys_id;
1674 			ice_debug(hw, ICE_DBG_INIT,
1675 				  "%s: num_msix_vectors = %d\n", prefix,
1676 				  caps->num_msix_vectors);
1677 			ice_debug(hw, ICE_DBG_INIT,
1678 				  "%s: msix_vector_first_id = %d\n", prefix,
1679 				  caps->msix_vector_first_id);
1680 			break;
1681 		case ICE_AQC_CAPS_MAX_MTU:
1682 			caps->max_mtu = number;
1683 			ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
1684 				  prefix, caps->max_mtu);
1685 			break;
1686 		default:
1687 			ice_debug(hw, ICE_DBG_INIT,
1688 				  "%s: unknown capability[%d]: 0x%x\n", prefix,
1689 				  i, cap);
1690 			break;
1691 		}
1692 	}
1693 
1694 	/* Re-calculate capabilities that are dependent on the number of
1695 	 * physical ports; i.e. some features are not supported or function
1696 	 * differently on devices with more than 4 ports.
1697 	 */
1698 	if (hw->dev_caps.num_funcs > 4) {
1699 		/* Max 4 TCs per port */
1700 		caps->maxtc = 4;
1701 		ice_debug(hw, ICE_DBG_INIT,
1702 			  "%s: maxtc = %d (based on #ports)\n", prefix,
1703 			  caps->maxtc);
1704 	}
1705 }
1706 
1707 /**
1708  * ice_aq_discover_caps - query function/device capabilities
1709  * @hw: pointer to the HW struct
1710  * @buf: a virtual buffer to hold the capabilities
1711  * @buf_size: Size of the virtual buffer
1712  * @cap_count: cap count needed if AQ err==ENOMEM
1713  * @opc: capabilities type to discover - pass in the command opcode
1714  * @cd: pointer to command details structure or NULL
1715  *
1716  * Get the function(0x000a)/device(0x000b) capabilities description from
1717  * the firmware.
1718  */
1719 static enum ice_status
1720 ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
1721 		     enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1722 {
1723 	struct ice_aqc_list_caps *cmd;
1724 	struct ice_aq_desc desc;
1725 	enum ice_status status;
1726 
1727 	cmd = &desc.params.get_cap;
1728 
1729 	if (opc != ice_aqc_opc_list_func_caps &&
1730 	    opc != ice_aqc_opc_list_dev_caps)
1731 		return ICE_ERR_PARAM;
1732 
1733 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
1734 
1735 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1736 	if (!status)
1737 		ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc);
1738 	else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM)
1739 		*cap_count = le32_to_cpu(cmd->count);
1740 	return status;
1741 }
1742 
1743 /**
1744  * ice_discover_caps - get info about the HW
1745  * @hw: pointer to the hardware structure
1746  * @opc: capabilities type to discover - pass in the command opcode
1747  */
1748 static enum ice_status
1749 ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc)
1750 {
1751 	enum ice_status status;
1752 	u32 cap_count;
1753 	u16 cbuf_len;
1754 	u8 retries;
1755 
1756 	/* The driver doesn't know how many capabilities the device will return
1757 	 * so the buffer size required isn't known ahead of time. The driver
1758 	 * starts with cbuf_len and if this turns out to be insufficient, the
1759 	 * device returns ICE_AQ_RC_ENOMEM and also the cap_count it needs.
1760 	 * The driver then allocates the buffer based on the count and retries
1761 	 * the operation. So it follows that the retry count is 2.
1762 	 */
1763 #define ICE_GET_CAP_BUF_COUNT	40
1764 #define ICE_GET_CAP_RETRY_COUNT	2
1765 
1766 	cap_count = ICE_GET_CAP_BUF_COUNT;
1767 	retries = ICE_GET_CAP_RETRY_COUNT;
1768 
1769 	do {
1770 		void *cbuf;
1771 
1772 		cbuf_len = (u16)(cap_count *
1773 				 sizeof(struct ice_aqc_list_caps_elem));
1774 		cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL);
1775 		if (!cbuf)
1776 			return ICE_ERR_NO_MEMORY;
1777 
1778 		status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &cap_count,
1779 					      opc, NULL);
1780 		devm_kfree(ice_hw_to_dev(hw), cbuf);
1781 
1782 		if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
1783 			break;
1784 
1785 		/* If ENOMEM is returned, try again with bigger buffer */
1786 	} while (--retries);
1787 
1788 	return status;
1789 }
1790 
1791 /**
1792  * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
1793  * @hw: pointer to the hardware structure
1794  */
1795 void ice_set_safe_mode_caps(struct ice_hw *hw)
1796 {
1797 	struct ice_hw_func_caps *func_caps = &hw->func_caps;
1798 	struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
1799 	u32 valid_func, rxq_first_id, txq_first_id;
1800 	u32 msix_vector_first_id, max_mtu;
1801 	u32 num_funcs;
1802 
1803 	/* cache some func_caps values that should be restored after memset */
1804 	valid_func = func_caps->common_cap.valid_functions;
1805 	txq_first_id = func_caps->common_cap.txq_first_id;
1806 	rxq_first_id = func_caps->common_cap.rxq_first_id;
1807 	msix_vector_first_id = func_caps->common_cap.msix_vector_first_id;
1808 	max_mtu = func_caps->common_cap.max_mtu;
1809 
1810 	/* unset func capabilities */
1811 	memset(func_caps, 0, sizeof(*func_caps));
1812 
1813 	/* restore cached values */
1814 	func_caps->common_cap.valid_functions = valid_func;
1815 	func_caps->common_cap.txq_first_id = txq_first_id;
1816 	func_caps->common_cap.rxq_first_id = rxq_first_id;
1817 	func_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
1818 	func_caps->common_cap.max_mtu = max_mtu;
1819 
1820 	/* one Tx and one Rx queue in safe mode */
1821 	func_caps->common_cap.num_rxq = 1;
1822 	func_caps->common_cap.num_txq = 1;
1823 
1824 	/* two MSIX vectors, one for traffic and one for misc causes */
1825 	func_caps->common_cap.num_msix_vectors = 2;
1826 	func_caps->guar_num_vsi = 1;
1827 
1828 	/* cache some dev_caps values that should be restored after memset */
1829 	valid_func = dev_caps->common_cap.valid_functions;
1830 	txq_first_id = dev_caps->common_cap.txq_first_id;
1831 	rxq_first_id = dev_caps->common_cap.rxq_first_id;
1832 	msix_vector_first_id = dev_caps->common_cap.msix_vector_first_id;
1833 	max_mtu = dev_caps->common_cap.max_mtu;
1834 	num_funcs = dev_caps->num_funcs;
1835 
1836 	/* unset dev capabilities */
1837 	memset(dev_caps, 0, sizeof(*dev_caps));
1838 
1839 	/* restore cached values */
1840 	dev_caps->common_cap.valid_functions = valid_func;
1841 	dev_caps->common_cap.txq_first_id = txq_first_id;
1842 	dev_caps->common_cap.rxq_first_id = rxq_first_id;
1843 	dev_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
1844 	dev_caps->common_cap.max_mtu = max_mtu;
1845 	dev_caps->num_funcs = num_funcs;
1846 
1847 	/* one Tx and one Rx queue per function in safe mode */
1848 	dev_caps->common_cap.num_rxq = num_funcs;
1849 	dev_caps->common_cap.num_txq = num_funcs;
1850 
1851 	/* two MSIX vectors per function */
1852 	dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
1853 }
1854 
1855 /**
1856  * ice_get_caps - get info about the HW
1857  * @hw: pointer to the hardware structure
1858  */
1859 enum ice_status ice_get_caps(struct ice_hw *hw)
1860 {
1861 	enum ice_status status;
1862 
1863 	status = ice_discover_caps(hw, ice_aqc_opc_list_dev_caps);
1864 	if (!status)
1865 		status = ice_discover_caps(hw, ice_aqc_opc_list_func_caps);
1866 
1867 	return status;
1868 }
1869 
1870 /**
1871  * ice_aq_manage_mac_write - manage MAC address write command
1872  * @hw: pointer to the HW struct
1873  * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
1874  * @flags: flags to control write behavior
1875  * @cd: pointer to command details structure or NULL
1876  *
1877  * This function is used to write MAC address to the NVM (0x0108).
1878  */
1879 enum ice_status
1880 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
1881 			struct ice_sq_cd *cd)
1882 {
1883 	struct ice_aqc_manage_mac_write *cmd;
1884 	struct ice_aq_desc desc;
1885 
1886 	cmd = &desc.params.mac_write;
1887 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
1888 
1889 	cmd->flags = flags;
1890 
1891 	/* Prep values for flags, sah, sal */
1892 	cmd->sah = htons(*((const u16 *)mac_addr));
1893 	cmd->sal = htonl(*((const u32 *)(mac_addr + 2)));
1894 
1895 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1896 }
1897 
1898 /**
1899  * ice_aq_clear_pxe_mode
1900  * @hw: pointer to the HW struct
1901  *
1902  * Tell the firmware that the driver is taking over from PXE (0x0110).
1903  */
1904 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
1905 {
1906 	struct ice_aq_desc desc;
1907 
1908 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
1909 	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
1910 
1911 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1912 }
1913 
1914 /**
1915  * ice_clear_pxe_mode - clear pxe operations mode
1916  * @hw: pointer to the HW struct
1917  *
1918  * Make sure all PXE mode settings are cleared, including things
1919  * like descriptor fetch/write-back mode.
1920  */
1921 void ice_clear_pxe_mode(struct ice_hw *hw)
1922 {
1923 	if (ice_check_sq_alive(hw, &hw->adminq))
1924 		ice_aq_clear_pxe_mode(hw);
1925 }
1926 
1927 /**
1928  * ice_get_link_speed_based_on_phy_type - returns link speed
1929  * @phy_type_low: lower part of phy_type
1930  * @phy_type_high: higher part of phy_type
1931  *
1932  * This helper function will convert an entry in PHY type structure
1933  * [phy_type_low, phy_type_high] to its corresponding link speed.
1934  * Note: In the structure of [phy_type_low, phy_type_high], there should
1935  * be one bit set, as this function will convert one PHY type to its
1936  * speed.
1937  * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
1938  * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
1939  */
1940 static u16
1941 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
1942 {
1943 	u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
1944 	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
1945 
1946 	switch (phy_type_low) {
1947 	case ICE_PHY_TYPE_LOW_100BASE_TX:
1948 	case ICE_PHY_TYPE_LOW_100M_SGMII:
1949 		speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
1950 		break;
1951 	case ICE_PHY_TYPE_LOW_1000BASE_T:
1952 	case ICE_PHY_TYPE_LOW_1000BASE_SX:
1953 	case ICE_PHY_TYPE_LOW_1000BASE_LX:
1954 	case ICE_PHY_TYPE_LOW_1000BASE_KX:
1955 	case ICE_PHY_TYPE_LOW_1G_SGMII:
1956 		speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
1957 		break;
1958 	case ICE_PHY_TYPE_LOW_2500BASE_T:
1959 	case ICE_PHY_TYPE_LOW_2500BASE_X:
1960 	case ICE_PHY_TYPE_LOW_2500BASE_KX:
1961 		speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
1962 		break;
1963 	case ICE_PHY_TYPE_LOW_5GBASE_T:
1964 	case ICE_PHY_TYPE_LOW_5GBASE_KR:
1965 		speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
1966 		break;
1967 	case ICE_PHY_TYPE_LOW_10GBASE_T:
1968 	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
1969 	case ICE_PHY_TYPE_LOW_10GBASE_SR:
1970 	case ICE_PHY_TYPE_LOW_10GBASE_LR:
1971 	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
1972 	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
1973 	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
1974 		speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
1975 		break;
1976 	case ICE_PHY_TYPE_LOW_25GBASE_T:
1977 	case ICE_PHY_TYPE_LOW_25GBASE_CR:
1978 	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
1979 	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
1980 	case ICE_PHY_TYPE_LOW_25GBASE_SR:
1981 	case ICE_PHY_TYPE_LOW_25GBASE_LR:
1982 	case ICE_PHY_TYPE_LOW_25GBASE_KR:
1983 	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
1984 	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
1985 	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
1986 	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
1987 		speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
1988 		break;
1989 	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
1990 	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
1991 	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
1992 	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
1993 	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
1994 	case ICE_PHY_TYPE_LOW_40G_XLAUI:
1995 		speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
1996 		break;
1997 	case ICE_PHY_TYPE_LOW_50GBASE_CR2:
1998 	case ICE_PHY_TYPE_LOW_50GBASE_SR2:
1999 	case ICE_PHY_TYPE_LOW_50GBASE_LR2:
2000 	case ICE_PHY_TYPE_LOW_50GBASE_KR2:
2001 	case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
2002 	case ICE_PHY_TYPE_LOW_50G_LAUI2:
2003 	case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
2004 	case ICE_PHY_TYPE_LOW_50G_AUI2:
2005 	case ICE_PHY_TYPE_LOW_50GBASE_CP:
2006 	case ICE_PHY_TYPE_LOW_50GBASE_SR:
2007 	case ICE_PHY_TYPE_LOW_50GBASE_FR:
2008 	case ICE_PHY_TYPE_LOW_50GBASE_LR:
2009 	case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
2010 	case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
2011 	case ICE_PHY_TYPE_LOW_50G_AUI1:
2012 		speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
2013 		break;
2014 	case ICE_PHY_TYPE_LOW_100GBASE_CR4:
2015 	case ICE_PHY_TYPE_LOW_100GBASE_SR4:
2016 	case ICE_PHY_TYPE_LOW_100GBASE_LR4:
2017 	case ICE_PHY_TYPE_LOW_100GBASE_KR4:
2018 	case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
2019 	case ICE_PHY_TYPE_LOW_100G_CAUI4:
2020 	case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
2021 	case ICE_PHY_TYPE_LOW_100G_AUI4:
2022 	case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
2023 	case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
2024 	case ICE_PHY_TYPE_LOW_100GBASE_CP2:
2025 	case ICE_PHY_TYPE_LOW_100GBASE_SR2:
2026 	case ICE_PHY_TYPE_LOW_100GBASE_DR:
2027 		speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
2028 		break;
2029 	default:
2030 		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2031 		break;
2032 	}
2033 
2034 	switch (phy_type_high) {
2035 	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
2036 	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
2037 	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
2038 	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
2039 	case ICE_PHY_TYPE_HIGH_100G_AUI2:
2040 		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
2041 		break;
2042 	default:
2043 		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2044 		break;
2045 	}
2046 
2047 	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
2048 	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2049 		return ICE_AQ_LINK_SPEED_UNKNOWN;
2050 	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2051 		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
2052 		return ICE_AQ_LINK_SPEED_UNKNOWN;
2053 	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2054 		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2055 		return speed_phy_type_low;
2056 	else
2057 		return speed_phy_type_high;
2058 }
2059 
2060 /**
2061  * ice_update_phy_type
2062  * @phy_type_low: pointer to the lower part of phy_type
2063  * @phy_type_high: pointer to the higher part of phy_type
2064  * @link_speeds_bitmap: targeted link speeds bitmap
2065  *
2066  * Note: For the link_speeds_bitmap structure, you can check it at
2067  * [ice_aqc_get_link_status->link_speed]. Caller can pass in
2068  * link_speeds_bitmap include multiple speeds.
2069  *
2070  * Each entry in this [phy_type_low, phy_type_high] structure will
2071  * present a certain link speed. This helper function will turn on bits
2072  * in [phy_type_low, phy_type_high] structure based on the value of
2073  * link_speeds_bitmap input parameter.
2074  */
2075 void
2076 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
2077 		    u16 link_speeds_bitmap)
2078 {
2079 	u64 pt_high;
2080 	u64 pt_low;
2081 	int index;
2082 	u16 speed;
2083 
2084 	/* We first check with low part of phy_type */
2085 	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
2086 		pt_low = BIT_ULL(index);
2087 		speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
2088 
2089 		if (link_speeds_bitmap & speed)
2090 			*phy_type_low |= BIT_ULL(index);
2091 	}
2092 
2093 	/* We then check with high part of phy_type */
2094 	for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
2095 		pt_high = BIT_ULL(index);
2096 		speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
2097 
2098 		if (link_speeds_bitmap & speed)
2099 			*phy_type_high |= BIT_ULL(index);
2100 	}
2101 }
2102 
2103 /**
2104  * ice_aq_set_phy_cfg
2105  * @hw: pointer to the HW struct
2106  * @lport: logical port number
2107  * @cfg: structure with PHY configuration data to be set
2108  * @cd: pointer to command details structure or NULL
2109  *
2110  * Set the various PHY configuration parameters supported on the Port.
2111  * One or more of the Set PHY config parameters may be ignored in an MFP
2112  * mode as the PF may not have the privilege to set some of the PHY Config
2113  * parameters. This status will be indicated by the command response (0x0601).
2114  */
2115 enum ice_status
2116 ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
2117 		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
2118 {
2119 	struct ice_aq_desc desc;
2120 
2121 	if (!cfg)
2122 		return ICE_ERR_PARAM;
2123 
2124 	/* Ensure that only valid bits of cfg->caps can be turned on. */
2125 	if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
2126 		ice_debug(hw, ICE_DBG_PHY,
2127 			  "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
2128 			  cfg->caps);
2129 
2130 		cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
2131 	}
2132 
2133 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
2134 	desc.params.set_phy.lport_num = lport;
2135 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2136 
2137 	ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
2138 		  (unsigned long long)le64_to_cpu(cfg->phy_type_low));
2139 	ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
2140 		  (unsigned long long)le64_to_cpu(cfg->phy_type_high));
2141 	ice_debug(hw, ICE_DBG_LINK, "caps = 0x%x\n", cfg->caps);
2142 	ice_debug(hw, ICE_DBG_LINK, "low_power_ctrl = 0x%x\n",
2143 		  cfg->low_power_ctrl);
2144 	ice_debug(hw, ICE_DBG_LINK, "eee_cap = 0x%x\n", cfg->eee_cap);
2145 	ice_debug(hw, ICE_DBG_LINK, "eeer_value = 0x%x\n", cfg->eeer_value);
2146 	ice_debug(hw, ICE_DBG_LINK, "link_fec_opt = 0x%x\n", cfg->link_fec_opt);
2147 
2148 	return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
2149 }
2150 
2151 /**
2152  * ice_update_link_info - update status of the HW network link
2153  * @pi: port info structure of the interested logical port
2154  */
2155 enum ice_status ice_update_link_info(struct ice_port_info *pi)
2156 {
2157 	struct ice_link_status *li;
2158 	enum ice_status status;
2159 
2160 	if (!pi)
2161 		return ICE_ERR_PARAM;
2162 
2163 	li = &pi->phy.link_info;
2164 
2165 	status = ice_aq_get_link_info(pi, true, NULL, NULL);
2166 	if (status)
2167 		return status;
2168 
2169 	if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
2170 		struct ice_aqc_get_phy_caps_data *pcaps;
2171 		struct ice_hw *hw;
2172 
2173 		hw = pi->hw;
2174 		pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
2175 				     GFP_KERNEL);
2176 		if (!pcaps)
2177 			return ICE_ERR_NO_MEMORY;
2178 
2179 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
2180 					     pcaps, NULL);
2181 		if (!status)
2182 			memcpy(li->module_type, &pcaps->module_type,
2183 			       sizeof(li->module_type));
2184 
2185 		devm_kfree(ice_hw_to_dev(hw), pcaps);
2186 	}
2187 
2188 	return status;
2189 }
2190 
2191 /**
2192  * ice_set_fc
2193  * @pi: port information structure
2194  * @aq_failures: pointer to status code, specific to ice_set_fc routine
2195  * @ena_auto_link_update: enable automatic link update
2196  *
2197  * Set the requested flow control mode.
2198  */
2199 enum ice_status
2200 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
2201 {
2202 	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
2203 	struct ice_aqc_get_phy_caps_data *pcaps;
2204 	enum ice_status status;
2205 	u8 pause_mask = 0x0;
2206 	struct ice_hw *hw;
2207 
2208 	if (!pi)
2209 		return ICE_ERR_PARAM;
2210 	hw = pi->hw;
2211 	*aq_failures = ICE_SET_FC_AQ_FAIL_NONE;
2212 
2213 	switch (pi->fc.req_mode) {
2214 	case ICE_FC_FULL:
2215 		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2216 		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2217 		break;
2218 	case ICE_FC_RX_PAUSE:
2219 		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2220 		break;
2221 	case ICE_FC_TX_PAUSE:
2222 		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2223 		break;
2224 	default:
2225 		break;
2226 	}
2227 
2228 	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
2229 	if (!pcaps)
2230 		return ICE_ERR_NO_MEMORY;
2231 
2232 	/* Get the current PHY config */
2233 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
2234 				     NULL);
2235 	if (status) {
2236 		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
2237 		goto out;
2238 	}
2239 
2240 	/* clear the old pause settings */
2241 	cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
2242 				   ICE_AQC_PHY_EN_RX_LINK_PAUSE);
2243 
2244 	/* set the new capabilities */
2245 	cfg.caps |= pause_mask;
2246 
2247 	/* If the capabilities have changed, then set the new config */
2248 	if (cfg.caps != pcaps->caps) {
2249 		int retry_count, retry_max = 10;
2250 
2251 		/* Auto restart link so settings take effect */
2252 		if (ena_auto_link_update)
2253 			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2254 		/* Copy over all the old settings */
2255 		cfg.phy_type_high = pcaps->phy_type_high;
2256 		cfg.phy_type_low = pcaps->phy_type_low;
2257 		cfg.low_power_ctrl = pcaps->low_power_ctrl;
2258 		cfg.eee_cap = pcaps->eee_cap;
2259 		cfg.eeer_value = pcaps->eeer_value;
2260 		cfg.link_fec_opt = pcaps->link_fec_options;
2261 
2262 		status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL);
2263 		if (status) {
2264 			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
2265 			goto out;
2266 		}
2267 
2268 		/* Update the link info
2269 		 * It sometimes takes a really long time for link to
2270 		 * come back from the atomic reset. Thus, we wait a
2271 		 * little bit.
2272 		 */
2273 		for (retry_count = 0; retry_count < retry_max; retry_count++) {
2274 			status = ice_update_link_info(pi);
2275 
2276 			if (!status)
2277 				break;
2278 
2279 			mdelay(100);
2280 		}
2281 
2282 		if (status)
2283 			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
2284 	}
2285 
2286 out:
2287 	devm_kfree(ice_hw_to_dev(hw), pcaps);
2288 	return status;
2289 }
2290 
2291 /**
2292  * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
2293  * @caps: PHY ability structure to copy date from
2294  * @cfg: PHY configuration structure to copy data to
2295  *
2296  * Helper function to copy AQC PHY get ability data to PHY set configuration
2297  * data structure
2298  */
2299 void
2300 ice_copy_phy_caps_to_cfg(struct ice_aqc_get_phy_caps_data *caps,
2301 			 struct ice_aqc_set_phy_cfg_data *cfg)
2302 {
2303 	if (!caps || !cfg)
2304 		return;
2305 
2306 	cfg->phy_type_low = caps->phy_type_low;
2307 	cfg->phy_type_high = caps->phy_type_high;
2308 	cfg->caps = caps->caps;
2309 	cfg->low_power_ctrl = caps->low_power_ctrl;
2310 	cfg->eee_cap = caps->eee_cap;
2311 	cfg->eeer_value = caps->eeer_value;
2312 	cfg->link_fec_opt = caps->link_fec_options;
2313 }
2314 
2315 /**
2316  * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
2317  * @cfg: PHY configuration data to set FEC mode
2318  * @fec: FEC mode to configure
2319  *
2320  * Caller should copy ice_aqc_get_phy_caps_data.caps ICE_AQC_PHY_EN_AUTO_FEC
2321  * (bit 7) and ice_aqc_get_phy_caps_data.link_fec_options to cfg.caps
2322  * ICE_AQ_PHY_ENA_AUTO_FEC (bit 7) and cfg.link_fec_options before calling.
2323  */
2324 void
2325 ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec)
2326 {
2327 	switch (fec) {
2328 	case ICE_FEC_BASER:
2329 		/* Clear RS bits, and AND BASE-R ability
2330 		 * bits and OR request bits.
2331 		 */
2332 		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
2333 				     ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
2334 		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
2335 				     ICE_AQC_PHY_FEC_25G_KR_REQ;
2336 		break;
2337 	case ICE_FEC_RS:
2338 		/* Clear BASE-R bits, and AND RS ability
2339 		 * bits and OR request bits.
2340 		 */
2341 		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
2342 		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
2343 				     ICE_AQC_PHY_FEC_25G_RS_544_REQ;
2344 		break;
2345 	case ICE_FEC_NONE:
2346 		/* Clear all FEC option bits. */
2347 		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
2348 		break;
2349 	case ICE_FEC_AUTO:
2350 		/* AND auto FEC bit, and all caps bits. */
2351 		cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
2352 		break;
2353 	}
2354 }
2355 
2356 /**
2357  * ice_get_link_status - get status of the HW network link
2358  * @pi: port information structure
2359  * @link_up: pointer to bool (true/false = linkup/linkdown)
2360  *
2361  * Variable link_up is true if link is up, false if link is down.
2362  * The variable link_up is invalid if status is non zero. As a
2363  * result of this call, link status reporting becomes enabled
2364  */
2365 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
2366 {
2367 	struct ice_phy_info *phy_info;
2368 	enum ice_status status = 0;
2369 
2370 	if (!pi || !link_up)
2371 		return ICE_ERR_PARAM;
2372 
2373 	phy_info = &pi->phy;
2374 
2375 	if (phy_info->get_link_info) {
2376 		status = ice_update_link_info(pi);
2377 
2378 		if (status)
2379 			ice_debug(pi->hw, ICE_DBG_LINK,
2380 				  "get link status error, status = %d\n",
2381 				  status);
2382 	}
2383 
2384 	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
2385 
2386 	return status;
2387 }
2388 
2389 /**
2390  * ice_aq_set_link_restart_an
2391  * @pi: pointer to the port information structure
2392  * @ena_link: if true: enable link, if false: disable link
2393  * @cd: pointer to command details structure or NULL
2394  *
2395  * Sets up the link and restarts the Auto-Negotiation over the link.
2396  */
2397 enum ice_status
2398 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
2399 			   struct ice_sq_cd *cd)
2400 {
2401 	struct ice_aqc_restart_an *cmd;
2402 	struct ice_aq_desc desc;
2403 
2404 	cmd = &desc.params.restart_an;
2405 
2406 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
2407 
2408 	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
2409 	cmd->lport_num = pi->lport;
2410 	if (ena_link)
2411 		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
2412 	else
2413 		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
2414 
2415 	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
2416 }
2417 
2418 /**
2419  * ice_aq_set_event_mask
2420  * @hw: pointer to the HW struct
2421  * @port_num: port number of the physical function
2422  * @mask: event mask to be set
2423  * @cd: pointer to command details structure or NULL
2424  *
2425  * Set event mask (0x0613)
2426  */
2427 enum ice_status
2428 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
2429 		      struct ice_sq_cd *cd)
2430 {
2431 	struct ice_aqc_set_event_mask *cmd;
2432 	struct ice_aq_desc desc;
2433 
2434 	cmd = &desc.params.set_event_mask;
2435 
2436 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
2437 
2438 	cmd->lport_num = port_num;
2439 
2440 	cmd->event_mask = cpu_to_le16(mask);
2441 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2442 }
2443 
2444 /**
2445  * ice_aq_set_mac_loopback
2446  * @hw: pointer to the HW struct
2447  * @ena_lpbk: Enable or Disable loopback
2448  * @cd: pointer to command details structure or NULL
2449  *
2450  * Enable/disable loopback on a given port
2451  */
2452 enum ice_status
2453 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
2454 {
2455 	struct ice_aqc_set_mac_lb *cmd;
2456 	struct ice_aq_desc desc;
2457 
2458 	cmd = &desc.params.set_mac_lb;
2459 
2460 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
2461 	if (ena_lpbk)
2462 		cmd->lb_mode = ICE_AQ_MAC_LB_EN;
2463 
2464 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2465 }
2466 
2467 /**
2468  * ice_aq_set_port_id_led
2469  * @pi: pointer to the port information
2470  * @is_orig_mode: is this LED set to original mode (by the net-list)
2471  * @cd: pointer to command details structure or NULL
2472  *
2473  * Set LED value for the given port (0x06e9)
2474  */
2475 enum ice_status
2476 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
2477 		       struct ice_sq_cd *cd)
2478 {
2479 	struct ice_aqc_set_port_id_led *cmd;
2480 	struct ice_hw *hw = pi->hw;
2481 	struct ice_aq_desc desc;
2482 
2483 	cmd = &desc.params.set_port_id_led;
2484 
2485 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
2486 
2487 	if (is_orig_mode)
2488 		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
2489 	else
2490 		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
2491 
2492 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2493 }
2494 
2495 /**
2496  * ice_aq_sff_eeprom
2497  * @hw: pointer to the HW struct
2498  * @lport: bits [7:0] = logical port, bit [8] = logical port valid
2499  * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
2500  * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding.
2501  * @page: QSFP page
2502  * @set_page: set or ignore the page
2503  * @data: pointer to data buffer to be read/written to the I2C device.
2504  * @length: 1-16 for read, 1 for write.
2505  * @write: 0 read, 1 for write.
2506  * @cd: pointer to command details structure or NULL
2507  *
2508  * Read/Write SFF EEPROM (0x06EE)
2509  */
2510 enum ice_status
2511 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
2512 		  u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
2513 		  bool write, struct ice_sq_cd *cd)
2514 {
2515 	struct ice_aqc_sff_eeprom *cmd;
2516 	struct ice_aq_desc desc;
2517 	enum ice_status status;
2518 
2519 	if (!data || (mem_addr & 0xff00))
2520 		return ICE_ERR_PARAM;
2521 
2522 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
2523 	cmd = &desc.params.read_write_sff_param;
2524 	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF);
2525 	cmd->lport_num = (u8)(lport & 0xff);
2526 	cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
2527 	cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) &
2528 					 ICE_AQC_SFF_I2CBUS_7BIT_M) |
2529 					((set_page <<
2530 					  ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
2531 					 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
2532 	cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
2533 	cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
2534 	if (write)
2535 		cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE);
2536 
2537 	status = ice_aq_send_cmd(hw, &desc, data, length, cd);
2538 	return status;
2539 }
2540 
2541 /**
2542  * __ice_aq_get_set_rss_lut
2543  * @hw: pointer to the hardware structure
2544  * @vsi_id: VSI FW index
2545  * @lut_type: LUT table type
2546  * @lut: pointer to the LUT buffer provided by the caller
2547  * @lut_size: size of the LUT buffer
2548  * @glob_lut_idx: global LUT index
2549  * @set: set true to set the table, false to get the table
2550  *
2551  * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
2552  */
2553 static enum ice_status
2554 __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
2555 			 u16 lut_size, u8 glob_lut_idx, bool set)
2556 {
2557 	struct ice_aqc_get_set_rss_lut *cmd_resp;
2558 	struct ice_aq_desc desc;
2559 	enum ice_status status;
2560 	u16 flags = 0;
2561 
2562 	cmd_resp = &desc.params.get_set_rss_lut;
2563 
2564 	if (set) {
2565 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
2566 		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2567 	} else {
2568 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
2569 	}
2570 
2571 	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
2572 					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
2573 					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
2574 				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);
2575 
2576 	switch (lut_type) {
2577 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
2578 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
2579 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
2580 		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
2581 			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
2582 		break;
2583 	default:
2584 		status = ICE_ERR_PARAM;
2585 		goto ice_aq_get_set_rss_lut_exit;
2586 	}
2587 
2588 	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
2589 		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
2590 			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
2591 
2592 		if (!set)
2593 			goto ice_aq_get_set_rss_lut_send;
2594 	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
2595 		if (!set)
2596 			goto ice_aq_get_set_rss_lut_send;
2597 	} else {
2598 		goto ice_aq_get_set_rss_lut_send;
2599 	}
2600 
2601 	/* LUT size is only valid for Global and PF table types */
2602 	switch (lut_size) {
2603 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
2604 		break;
2605 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
2606 		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
2607 			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
2608 			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
2609 		break;
2610 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
2611 		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
2612 			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
2613 				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
2614 				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
2615 			break;
2616 		}
2617 		fallthrough;
2618 	default:
2619 		status = ICE_ERR_PARAM;
2620 		goto ice_aq_get_set_rss_lut_exit;
2621 	}
2622 
2623 ice_aq_get_set_rss_lut_send:
2624 	cmd_resp->flags = cpu_to_le16(flags);
2625 	status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
2626 
2627 ice_aq_get_set_rss_lut_exit:
2628 	return status;
2629 }
2630 
2631 /**
2632  * ice_aq_get_rss_lut
2633  * @hw: pointer to the hardware structure
2634  * @vsi_handle: software VSI handle
2635  * @lut_type: LUT table type
2636  * @lut: pointer to the LUT buffer provided by the caller
2637  * @lut_size: size of the LUT buffer
2638  *
2639  * get the RSS lookup table, PF or VSI type
2640  */
2641 enum ice_status
2642 ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
2643 		   u8 *lut, u16 lut_size)
2644 {
2645 	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
2646 		return ICE_ERR_PARAM;
2647 
2648 	return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2649 					lut_type, lut, lut_size, 0, false);
2650 }
2651 
2652 /**
2653  * ice_aq_set_rss_lut
2654  * @hw: pointer to the hardware structure
2655  * @vsi_handle: software VSI handle
2656  * @lut_type: LUT table type
2657  * @lut: pointer to the LUT buffer provided by the caller
2658  * @lut_size: size of the LUT buffer
2659  *
2660  * set the RSS lookup table, PF or VSI type
2661  */
2662 enum ice_status
2663 ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
2664 		   u8 *lut, u16 lut_size)
2665 {
2666 	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
2667 		return ICE_ERR_PARAM;
2668 
2669 	return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2670 					lut_type, lut, lut_size, 0, true);
2671 }
2672 
2673 /**
2674  * __ice_aq_get_set_rss_key
2675  * @hw: pointer to the HW struct
2676  * @vsi_id: VSI FW index
2677  * @key: pointer to key info struct
2678  * @set: set true to set the key, false to get the key
2679  *
2680  * get (0x0B04) or set (0x0B02) the RSS key per VSI
2681  */
2682 static enum
2683 ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
2684 				    struct ice_aqc_get_set_rss_keys *key,
2685 				    bool set)
2686 {
2687 	struct ice_aqc_get_set_rss_key *cmd_resp;
2688 	u16 key_size = sizeof(*key);
2689 	struct ice_aq_desc desc;
2690 
2691 	cmd_resp = &desc.params.get_set_rss_key;
2692 
2693 	if (set) {
2694 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
2695 		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2696 	} else {
2697 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
2698 	}
2699 
2700 	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
2701 					 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
2702 					ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
2703 				       ICE_AQC_GSET_RSS_KEY_VSI_VALID);
2704 
2705 	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
2706 }
2707 
2708 /**
2709  * ice_aq_get_rss_key
2710  * @hw: pointer to the HW struct
2711  * @vsi_handle: software VSI handle
2712  * @key: pointer to key info struct
2713  *
2714  * get the RSS key per VSI
2715  */
2716 enum ice_status
2717 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
2718 		   struct ice_aqc_get_set_rss_keys *key)
2719 {
2720 	if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
2721 		return ICE_ERR_PARAM;
2722 
2723 	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2724 					key, false);
2725 }
2726 
2727 /**
2728  * ice_aq_set_rss_key
2729  * @hw: pointer to the HW struct
2730  * @vsi_handle: software VSI handle
2731  * @keys: pointer to key info struct
2732  *
2733  * set the RSS key per VSI
2734  */
2735 enum ice_status
2736 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
2737 		   struct ice_aqc_get_set_rss_keys *keys)
2738 {
2739 	if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
2740 		return ICE_ERR_PARAM;
2741 
2742 	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2743 					keys, true);
2744 }
2745 
2746 /**
2747  * ice_aq_add_lan_txq
2748  * @hw: pointer to the hardware structure
2749  * @num_qgrps: Number of added queue groups
2750  * @qg_list: list of queue groups to be added
2751  * @buf_size: size of buffer for indirect command
2752  * @cd: pointer to command details structure or NULL
2753  *
2754  * Add Tx LAN queue (0x0C30)
2755  *
2756  * NOTE:
2757  * Prior to calling add Tx LAN queue:
2758  * Initialize the following as part of the Tx queue context:
2759  * Completion queue ID if the queue uses Completion queue, Quanta profile,
2760  * Cache profile and Packet shaper profile.
2761  *
2762  * After add Tx LAN queue AQ command is completed:
2763  * Interrupts should be associated with specific queues,
2764  * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
2765  * flow.
2766  */
2767 static enum ice_status
2768 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
2769 		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
2770 		   struct ice_sq_cd *cd)
2771 {
2772 	u16 i, sum_header_size, sum_q_size = 0;
2773 	struct ice_aqc_add_tx_qgrp *list;
2774 	struct ice_aqc_add_txqs *cmd;
2775 	struct ice_aq_desc desc;
2776 
2777 	cmd = &desc.params.add_txqs;
2778 
2779 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
2780 
2781 	if (!qg_list)
2782 		return ICE_ERR_PARAM;
2783 
2784 	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
2785 		return ICE_ERR_PARAM;
2786 
2787 	sum_header_size = num_qgrps *
2788 		(sizeof(*qg_list) - sizeof(*qg_list->txqs));
2789 
2790 	list = qg_list;
2791 	for (i = 0; i < num_qgrps; i++) {
2792 		struct ice_aqc_add_txqs_perq *q = list->txqs;
2793 
2794 		sum_q_size += list->num_txqs * sizeof(*q);
2795 		list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);
2796 	}
2797 
2798 	if (buf_size != (sum_header_size + sum_q_size))
2799 		return ICE_ERR_PARAM;
2800 
2801 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2802 
2803 	cmd->num_qgrps = num_qgrps;
2804 
2805 	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
2806 }
2807 
2808 /**
2809  * ice_aq_dis_lan_txq
2810  * @hw: pointer to the hardware structure
2811  * @num_qgrps: number of groups in the list
2812  * @qg_list: the list of groups to disable
2813  * @buf_size: the total size of the qg_list buffer in bytes
2814  * @rst_src: if called due to reset, specifies the reset source
2815  * @vmvf_num: the relative VM or VF number that is undergoing the reset
2816  * @cd: pointer to command details structure or NULL
2817  *
2818  * Disable LAN Tx queue (0x0C31)
2819  */
2820 static enum ice_status
2821 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
2822 		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
2823 		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
2824 		   struct ice_sq_cd *cd)
2825 {
2826 	struct ice_aqc_dis_txqs *cmd;
2827 	struct ice_aq_desc desc;
2828 	enum ice_status status;
2829 	u16 i, sz = 0;
2830 
2831 	cmd = &desc.params.dis_txqs;
2832 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
2833 
2834 	/* qg_list can be NULL only in VM/VF reset flow */
2835 	if (!qg_list && !rst_src)
2836 		return ICE_ERR_PARAM;
2837 
2838 	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
2839 		return ICE_ERR_PARAM;
2840 
2841 	cmd->num_entries = num_qgrps;
2842 
2843 	cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
2844 					    ICE_AQC_Q_DIS_TIMEOUT_M);
2845 
2846 	switch (rst_src) {
2847 	case ICE_VM_RESET:
2848 		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
2849 		cmd->vmvf_and_timeout |=
2850 			cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
2851 		break;
2852 	case ICE_VF_RESET:
2853 		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
2854 		/* In this case, FW expects vmvf_num to be absolute VF ID */
2855 		cmd->vmvf_and_timeout |=
2856 			cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
2857 				    ICE_AQC_Q_DIS_VMVF_NUM_M);
2858 		break;
2859 	case ICE_NO_RESET:
2860 	default:
2861 		break;
2862 	}
2863 
2864 	/* flush pipe on time out */
2865 	cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
2866 	/* If no queue group info, we are in a reset flow. Issue the AQ */
2867 	if (!qg_list)
2868 		goto do_aq;
2869 
2870 	/* set RD bit to indicate that command buffer is provided by the driver
2871 	 * and it needs to be read by the firmware
2872 	 */
2873 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2874 
2875 	for (i = 0; i < num_qgrps; ++i) {
2876 		/* Calculate the size taken up by the queue IDs in this group */
2877 		sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);
2878 
2879 		/* Add the size of the group header */
2880 		sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);
2881 
2882 		/* If the num of queues is even, add 2 bytes of padding */
2883 		if ((qg_list[i].num_qs % 2) == 0)
2884 			sz += 2;
2885 	}
2886 
2887 	if (buf_size != sz)
2888 		return ICE_ERR_PARAM;
2889 
2890 do_aq:
2891 	status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
2892 	if (status) {
2893 		if (!qg_list)
2894 			ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
2895 				  vmvf_num, hw->adminq.sq_last_status);
2896 		else
2897 			ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
2898 				  le16_to_cpu(qg_list[0].q_id[0]),
2899 				  hw->adminq.sq_last_status);
2900 	}
2901 	return status;
2902 }
2903 
2904 /* End of FW Admin Queue command wrappers */
2905 
2906 /**
2907  * ice_write_byte - write a byte to a packed context structure
2908  * @src_ctx:  the context structure to read from
2909  * @dest_ctx: the context to be written to
2910  * @ce_info:  a description of the struct to be filled
2911  */
2912 static void
2913 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
2914 {
2915 	u8 src_byte, dest_byte, mask;
2916 	u8 *from, *dest;
2917 	u16 shift_width;
2918 
2919 	/* copy from the next struct field */
2920 	from = src_ctx + ce_info->offset;
2921 
2922 	/* prepare the bits and mask */
2923 	shift_width = ce_info->lsb % 8;
2924 	mask = (u8)(BIT(ce_info->width) - 1);
2925 
2926 	src_byte = *from;
2927 	src_byte &= mask;
2928 
2929 	/* shift to correct alignment */
2930 	mask <<= shift_width;
2931 	src_byte <<= shift_width;
2932 
2933 	/* get the current bits from the target bit string */
2934 	dest = dest_ctx + (ce_info->lsb / 8);
2935 
2936 	memcpy(&dest_byte, dest, sizeof(dest_byte));
2937 
2938 	dest_byte &= ~mask;	/* get the bits not changing */
2939 	dest_byte |= src_byte;	/* add in the new bits */
2940 
2941 	/* put it all back */
2942 	memcpy(dest, &dest_byte, sizeof(dest_byte));
2943 }
2944 
2945 /**
2946  * ice_write_word - write a word to a packed context structure
2947  * @src_ctx:  the context structure to read from
2948  * @dest_ctx: the context to be written to
2949  * @ce_info:  a description of the struct to be filled
2950  */
2951 static void
2952 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
2953 {
2954 	u16 src_word, mask;
2955 	__le16 dest_word;
2956 	u8 *from, *dest;
2957 	u16 shift_width;
2958 
2959 	/* copy from the next struct field */
2960 	from = src_ctx + ce_info->offset;
2961 
2962 	/* prepare the bits and mask */
2963 	shift_width = ce_info->lsb % 8;
2964 	mask = BIT(ce_info->width) - 1;
2965 
2966 	/* don't swizzle the bits until after the mask because the mask bits
2967 	 * will be in a different bit position on big endian machines
2968 	 */
2969 	src_word = *(u16 *)from;
2970 	src_word &= mask;
2971 
2972 	/* shift to correct alignment */
2973 	mask <<= shift_width;
2974 	src_word <<= shift_width;
2975 
2976 	/* get the current bits from the target bit string */
2977 	dest = dest_ctx + (ce_info->lsb / 8);
2978 
2979 	memcpy(&dest_word, dest, sizeof(dest_word));
2980 
2981 	dest_word &= ~(cpu_to_le16(mask));	/* get the bits not changing */
2982 	dest_word |= cpu_to_le16(src_word);	/* add in the new bits */
2983 
2984 	/* put it all back */
2985 	memcpy(dest, &dest_word, sizeof(dest_word));
2986 }
2987 
2988 /**
2989  * ice_write_dword - write a dword to a packed context structure
2990  * @src_ctx:  the context structure to read from
2991  * @dest_ctx: the context to be written to
2992  * @ce_info:  a description of the struct to be filled
2993  */
2994 static void
2995 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
2996 {
2997 	u32 src_dword, mask;
2998 	__le32 dest_dword;
2999 	u8 *from, *dest;
3000 	u16 shift_width;
3001 
3002 	/* copy from the next struct field */
3003 	from = src_ctx + ce_info->offset;
3004 
3005 	/* prepare the bits and mask */
3006 	shift_width = ce_info->lsb % 8;
3007 
3008 	/* if the field width is exactly 32 on an x86 machine, then the shift
3009 	 * operation will not work because the SHL instructions count is masked
3010 	 * to 5 bits so the shift will do nothing
3011 	 */
3012 	if (ce_info->width < 32)
3013 		mask = BIT(ce_info->width) - 1;
3014 	else
3015 		mask = (u32)~0;
3016 
3017 	/* don't swizzle the bits until after the mask because the mask bits
3018 	 * will be in a different bit position on big endian machines
3019 	 */
3020 	src_dword = *(u32 *)from;
3021 	src_dword &= mask;
3022 
3023 	/* shift to correct alignment */
3024 	mask <<= shift_width;
3025 	src_dword <<= shift_width;
3026 
3027 	/* get the current bits from the target bit string */
3028 	dest = dest_ctx + (ce_info->lsb / 8);
3029 
3030 	memcpy(&dest_dword, dest, sizeof(dest_dword));
3031 
3032 	dest_dword &= ~(cpu_to_le32(mask));	/* get the bits not changing */
3033 	dest_dword |= cpu_to_le32(src_dword);	/* add in the new bits */
3034 
3035 	/* put it all back */
3036 	memcpy(dest, &dest_dword, sizeof(dest_dword));
3037 }
3038 
3039 /**
3040  * ice_write_qword - write a qword to a packed context structure
3041  * @src_ctx:  the context structure to read from
3042  * @dest_ctx: the context to be written to
3043  * @ce_info:  a description of the struct to be filled
3044  */
3045 static void
3046 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3047 {
3048 	u64 src_qword, mask;
3049 	__le64 dest_qword;
3050 	u8 *from, *dest;
3051 	u16 shift_width;
3052 
3053 	/* copy from the next struct field */
3054 	from = src_ctx + ce_info->offset;
3055 
3056 	/* prepare the bits and mask */
3057 	shift_width = ce_info->lsb % 8;
3058 
3059 	/* if the field width is exactly 64 on an x86 machine, then the shift
3060 	 * operation will not work because the SHL instructions count is masked
3061 	 * to 6 bits so the shift will do nothing
3062 	 */
3063 	if (ce_info->width < 64)
3064 		mask = BIT_ULL(ce_info->width) - 1;
3065 	else
3066 		mask = (u64)~0;
3067 
3068 	/* don't swizzle the bits until after the mask because the mask bits
3069 	 * will be in a different bit position on big endian machines
3070 	 */
3071 	src_qword = *(u64 *)from;
3072 	src_qword &= mask;
3073 
3074 	/* shift to correct alignment */
3075 	mask <<= shift_width;
3076 	src_qword <<= shift_width;
3077 
3078 	/* get the current bits from the target bit string */
3079 	dest = dest_ctx + (ce_info->lsb / 8);
3080 
3081 	memcpy(&dest_qword, dest, sizeof(dest_qword));
3082 
3083 	dest_qword &= ~(cpu_to_le64(mask));	/* get the bits not changing */
3084 	dest_qword |= cpu_to_le64(src_qword);	/* add in the new bits */
3085 
3086 	/* put it all back */
3087 	memcpy(dest, &dest_qword, sizeof(dest_qword));
3088 }
3089 
3090 /**
3091  * ice_set_ctx - set context bits in packed structure
3092  * @src_ctx:  pointer to a generic non-packed context structure
3093  * @dest_ctx: pointer to memory for the packed structure
3094  * @ce_info:  a description of the structure to be transformed
3095  */
3096 enum ice_status
3097 ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3098 {
3099 	int f;
3100 
3101 	for (f = 0; ce_info[f].width; f++) {
3102 		/* We have to deal with each element of the FW response
3103 		 * using the correct size so that we are correct regardless
3104 		 * of the endianness of the machine.
3105 		 */
3106 		switch (ce_info[f].size_of) {
3107 		case sizeof(u8):
3108 			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
3109 			break;
3110 		case sizeof(u16):
3111 			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
3112 			break;
3113 		case sizeof(u32):
3114 			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
3115 			break;
3116 		case sizeof(u64):
3117 			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
3118 			break;
3119 		default:
3120 			return ICE_ERR_INVAL_SIZE;
3121 		}
3122 	}
3123 
3124 	return 0;
3125 }
3126 
3127 /**
3128  * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
3129  * @hw: pointer to the HW struct
3130  * @vsi_handle: software VSI handle
3131  * @tc: TC number
3132  * @q_handle: software queue handle
3133  */
3134 struct ice_q_ctx *
3135 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
3136 {
3137 	struct ice_vsi_ctx *vsi;
3138 	struct ice_q_ctx *q_ctx;
3139 
3140 	vsi = ice_get_vsi_ctx(hw, vsi_handle);
3141 	if (!vsi)
3142 		return NULL;
3143 	if (q_handle >= vsi->num_lan_q_entries[tc])
3144 		return NULL;
3145 	if (!vsi->lan_q_ctx[tc])
3146 		return NULL;
3147 	q_ctx = vsi->lan_q_ctx[tc];
3148 	return &q_ctx[q_handle];
3149 }
3150 
3151 /**
3152  * ice_ena_vsi_txq
3153  * @pi: port information structure
3154  * @vsi_handle: software VSI handle
3155  * @tc: TC number
3156  * @q_handle: software queue handle
3157  * @num_qgrps: Number of added queue groups
3158  * @buf: list of queue groups to be added
3159  * @buf_size: size of buffer for indirect command
3160  * @cd: pointer to command details structure or NULL
3161  *
3162  * This function adds one LAN queue
3163  */
3164 enum ice_status
3165 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
3166 		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
3167 		struct ice_sq_cd *cd)
3168 {
3169 	struct ice_aqc_txsched_elem_data node = { 0 };
3170 	struct ice_sched_node *parent;
3171 	struct ice_q_ctx *q_ctx;
3172 	enum ice_status status;
3173 	struct ice_hw *hw;
3174 
3175 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
3176 		return ICE_ERR_CFG;
3177 
3178 	if (num_qgrps > 1 || buf->num_txqs > 1)
3179 		return ICE_ERR_MAX_LIMIT;
3180 
3181 	hw = pi->hw;
3182 
3183 	if (!ice_is_vsi_valid(hw, vsi_handle))
3184 		return ICE_ERR_PARAM;
3185 
3186 	mutex_lock(&pi->sched_lock);
3187 
3188 	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
3189 	if (!q_ctx) {
3190 		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
3191 			  q_handle);
3192 		status = ICE_ERR_PARAM;
3193 		goto ena_txq_exit;
3194 	}
3195 
3196 	/* find a parent node */
3197 	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
3198 					    ICE_SCHED_NODE_OWNER_LAN);
3199 	if (!parent) {
3200 		status = ICE_ERR_PARAM;
3201 		goto ena_txq_exit;
3202 	}
3203 
3204 	buf->parent_teid = parent->info.node_teid;
3205 	node.parent_teid = parent->info.node_teid;
3206 	/* Mark that the values in the "generic" section as valid. The default
3207 	 * value in the "generic" section is zero. This means that :
3208 	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
3209 	 * - 0 priority among siblings, indicated by Bit 1-3.
3210 	 * - WFQ, indicated by Bit 4.
3211 	 * - 0 Adjustment value is used in PSM credit update flow, indicated by
3212 	 * Bit 5-6.
3213 	 * - Bit 7 is reserved.
3214 	 * Without setting the generic section as valid in valid_sections, the
3215 	 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
3216 	 */
3217 	buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;
3218 
3219 	/* add the LAN queue */
3220 	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
3221 	if (status) {
3222 		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
3223 			  le16_to_cpu(buf->txqs[0].txq_id),
3224 			  hw->adminq.sq_last_status);
3225 		goto ena_txq_exit;
3226 	}
3227 
3228 	node.node_teid = buf->txqs[0].q_teid;
3229 	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
3230 	q_ctx->q_handle = q_handle;
3231 	q_ctx->q_teid = le32_to_cpu(node.node_teid);
3232 
3233 	/* add a leaf node into scheduler tree queue layer */
3234 	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
3235 	if (!status)
3236 		status = ice_sched_replay_q_bw(pi, q_ctx);
3237 
3238 ena_txq_exit:
3239 	mutex_unlock(&pi->sched_lock);
3240 	return status;
3241 }
3242 
3243 /**
3244  * ice_dis_vsi_txq
3245  * @pi: port information structure
3246  * @vsi_handle: software VSI handle
3247  * @tc: TC number
3248  * @num_queues: number of queues
3249  * @q_handles: pointer to software queue handle array
3250  * @q_ids: pointer to the q_id array
3251  * @q_teids: pointer to queue node teids
3252  * @rst_src: if called due to reset, specifies the reset source
3253  * @vmvf_num: the relative VM or VF number that is undergoing the reset
3254  * @cd: pointer to command details structure or NULL
3255  *
3256  * This function removes queues and their corresponding nodes in SW DB
3257  */
3258 enum ice_status
3259 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
3260 		u16 *q_handles, u16 *q_ids, u32 *q_teids,
3261 		enum ice_disq_rst_src rst_src, u16 vmvf_num,
3262 		struct ice_sq_cd *cd)
3263 {
3264 	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
3265 	struct ice_aqc_dis_txq_item qg_list;
3266 	struct ice_q_ctx *q_ctx;
3267 	u16 i;
3268 
3269 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
3270 		return ICE_ERR_CFG;
3271 
3272 	if (!num_queues) {
3273 		/* if queue is disabled already yet the disable queue command
3274 		 * has to be sent to complete the VF reset, then call
3275 		 * ice_aq_dis_lan_txq without any queue information
3276 		 */
3277 		if (rst_src)
3278 			return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src,
3279 						  vmvf_num, NULL);
3280 		return ICE_ERR_CFG;
3281 	}
3282 
3283 	mutex_lock(&pi->sched_lock);
3284 
3285 	for (i = 0; i < num_queues; i++) {
3286 		struct ice_sched_node *node;
3287 
3288 		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
3289 		if (!node)
3290 			continue;
3291 		q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handles[i]);
3292 		if (!q_ctx) {
3293 			ice_debug(pi->hw, ICE_DBG_SCHED, "invalid queue handle%d\n",
3294 				  q_handles[i]);
3295 			continue;
3296 		}
3297 		if (q_ctx->q_handle != q_handles[i]) {
3298 			ice_debug(pi->hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
3299 				  q_ctx->q_handle, q_handles[i]);
3300 			continue;
3301 		}
3302 		qg_list.parent_teid = node->info.parent_teid;
3303 		qg_list.num_qs = 1;
3304 		qg_list.q_id[0] = cpu_to_le16(q_ids[i]);
3305 		status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
3306 					    sizeof(qg_list), rst_src, vmvf_num,
3307 					    cd);
3308 
3309 		if (status)
3310 			break;
3311 		ice_free_sched_node(pi, node);
3312 		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
3313 	}
3314 	mutex_unlock(&pi->sched_lock);
3315 	return status;
3316 }
3317 
3318 /**
3319  * ice_cfg_vsi_qs - configure the new/existing VSI queues
3320  * @pi: port information structure
3321  * @vsi_handle: software VSI handle
3322  * @tc_bitmap: TC bitmap
3323  * @maxqs: max queues array per TC
3324  * @owner: LAN or RDMA
3325  *
3326  * This function adds/updates the VSI queues per TC.
3327  */
3328 static enum ice_status
3329 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
3330 	       u16 *maxqs, u8 owner)
3331 {
3332 	enum ice_status status = 0;
3333 	u8 i;
3334 
3335 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
3336 		return ICE_ERR_CFG;
3337 
3338 	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
3339 		return ICE_ERR_PARAM;
3340 
3341 	mutex_lock(&pi->sched_lock);
3342 
3343 	ice_for_each_traffic_class(i) {
3344 		/* configuration is possible only if TC node is present */
3345 		if (!ice_sched_get_tc_node(pi, i))
3346 			continue;
3347 
3348 		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
3349 					   ice_is_tc_ena(tc_bitmap, i));
3350 		if (status)
3351 			break;
3352 	}
3353 
3354 	mutex_unlock(&pi->sched_lock);
3355 	return status;
3356 }
3357 
3358 /**
3359  * ice_cfg_vsi_lan - configure VSI LAN queues
3360  * @pi: port information structure
3361  * @vsi_handle: software VSI handle
3362  * @tc_bitmap: TC bitmap
3363  * @max_lanqs: max LAN queues array per TC
3364  *
3365  * This function adds/updates the VSI LAN queues per TC.
3366  */
3367 enum ice_status
3368 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
3369 		u16 *max_lanqs)
3370 {
3371 	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
3372 			      ICE_SCHED_NODE_OWNER_LAN);
3373 }
3374 
3375 /**
3376  * ice_replay_pre_init - replay pre initialization
3377  * @hw: pointer to the HW struct
3378  *
3379  * Initializes required config data for VSI, FD, ACL, and RSS before replay.
3380  */
3381 static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
3382 {
3383 	struct ice_switch_info *sw = hw->switch_info;
3384 	u8 i;
3385 
3386 	/* Delete old entries from replay filter list head if there is any */
3387 	ice_rm_all_sw_replay_rule_info(hw);
3388 	/* In start of replay, move entries into replay_rules list, it
3389 	 * will allow adding rules entries back to filt_rules list,
3390 	 * which is operational list.
3391 	 */
3392 	for (i = 0; i < ICE_SW_LKUP_LAST; i++)
3393 		list_replace_init(&sw->recp_list[i].filt_rules,
3394 				  &sw->recp_list[i].filt_replay_rules);
3395 
3396 	return 0;
3397 }
3398 
3399 /**
3400  * ice_replay_vsi - replay VSI configuration
3401  * @hw: pointer to the HW struct
3402  * @vsi_handle: driver VSI handle
3403  *
3404  * Restore all VSI configuration after reset. It is required to call this
3405  * function with main VSI first.
3406  */
3407 enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
3408 {
3409 	enum ice_status status;
3410 
3411 	if (!ice_is_vsi_valid(hw, vsi_handle))
3412 		return ICE_ERR_PARAM;
3413 
3414 	/* Replay pre-initialization if there is any */
3415 	if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
3416 		status = ice_replay_pre_init(hw);
3417 		if (status)
3418 			return status;
3419 	}
3420 	/* Replay per VSI all RSS configurations */
3421 	status = ice_replay_rss_cfg(hw, vsi_handle);
3422 	if (status)
3423 		return status;
3424 	/* Replay per VSI all filters */
3425 	status = ice_replay_vsi_all_fltr(hw, vsi_handle);
3426 	return status;
3427 }
3428 
3429 /**
3430  * ice_replay_post - post replay configuration cleanup
3431  * @hw: pointer to the HW struct
3432  *
3433  * Post replay cleanup.
3434  */
3435 void ice_replay_post(struct ice_hw *hw)
3436 {
3437 	/* Delete old entries from replay filter list head */
3438 	ice_rm_all_sw_replay_rule_info(hw);
3439 }
3440 
3441 /**
3442  * ice_stat_update40 - read 40 bit stat from the chip and update stat values
3443  * @hw: ptr to the hardware info
3444  * @reg: offset of 64 bit HW register to read from
3445  * @prev_stat_loaded: bool to specify if previous stats are loaded
3446  * @prev_stat: ptr to previous loaded stat value
3447  * @cur_stat: ptr to current stat value
3448  */
3449 void
3450 ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
3451 		  u64 *prev_stat, u64 *cur_stat)
3452 {
3453 	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
3454 
3455 	/* device stats are not reset at PFR, they likely will not be zeroed
3456 	 * when the driver starts. Thus, save the value from the first read
3457 	 * without adding to the statistic value so that we report stats which
3458 	 * count up from zero.
3459 	 */
3460 	if (!prev_stat_loaded) {
3461 		*prev_stat = new_data;
3462 		return;
3463 	}
3464 
3465 	/* Calculate the difference between the new and old values, and then
3466 	 * add it to the software stat value.
3467 	 */
3468 	if (new_data >= *prev_stat)
3469 		*cur_stat += new_data - *prev_stat;
3470 	else
3471 		/* to manage the potential roll-over */
3472 		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
3473 
3474 	/* Update the previously stored value to prepare for next read */
3475 	*prev_stat = new_data;
3476 }
3477 
3478 /**
3479  * ice_stat_update32 - read 32 bit stat from the chip and update stat values
3480  * @hw: ptr to the hardware info
3481  * @reg: offset of HW register to read from
3482  * @prev_stat_loaded: bool to specify if previous stats are loaded
3483  * @prev_stat: ptr to previous loaded stat value
3484  * @cur_stat: ptr to current stat value
3485  */
3486 void
3487 ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
3488 		  u64 *prev_stat, u64 *cur_stat)
3489 {
3490 	u32 new_data;
3491 
3492 	new_data = rd32(hw, reg);
3493 
3494 	/* device stats are not reset at PFR, they likely will not be zeroed
3495 	 * when the driver starts. Thus, save the value from the first read
3496 	 * without adding to the statistic value so that we report stats which
3497 	 * count up from zero.
3498 	 */
3499 	if (!prev_stat_loaded) {
3500 		*prev_stat = new_data;
3501 		return;
3502 	}
3503 
3504 	/* Calculate the difference between the new and old values, and then
3505 	 * add it to the software stat value.
3506 	 */
3507 	if (new_data >= *prev_stat)
3508 		*cur_stat += new_data - *prev_stat;
3509 	else
3510 		/* to manage the potential roll-over */
3511 		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
3512 
3513 	/* Update the previously stored value to prepare for next read */
3514 	*prev_stat = new_data;
3515 }
3516 
3517 /**
3518  * ice_sched_query_elem - query element information from HW
3519  * @hw: pointer to the HW struct
3520  * @node_teid: node TEID to be queried
3521  * @buf: buffer to element information
3522  *
3523  * This function queries HW element information
3524  */
3525 enum ice_status
3526 ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
3527 		     struct ice_aqc_get_elem *buf)
3528 {
3529 	u16 buf_size, num_elem_ret = 0;
3530 	enum ice_status status;
3531 
3532 	buf_size = sizeof(*buf);
3533 	memset(buf, 0, buf_size);
3534 	buf->generic[0].node_teid = cpu_to_le32(node_teid);
3535 	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
3536 					  NULL);
3537 	if (status || num_elem_ret != 1)
3538 		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
3539 	return status;
3540 }
3541