1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 #include "ice_common.h"
5 #include "ice_sched.h"
6 #include "ice_adminq_cmd.h"
7 #include "ice_flow.h"
8 
9 #define ICE_PF_RESET_WAIT_COUNT	200
10 
11 /**
12  * ice_set_mac_type - Sets MAC type
13  * @hw: pointer to the HW structure
14  *
15  * This function sets the MAC type of the adapter based on the
16  * vendor ID and device ID stored in the HW structure.
17  */
18 static enum ice_status ice_set_mac_type(struct ice_hw *hw)
19 {
20 	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
21 		return ICE_ERR_DEVICE_NOT_SUPPORTED;
22 
23 	hw->mac_type = ICE_MAC_GENERIC;
24 	return 0;
25 }
26 
27 /**
28  * ice_clear_pf_cfg - Clear PF configuration
29  * @hw: pointer to the hardware structure
30  *
31  * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
32  * configuration, flow director filters, etc.).
33  */
34 enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
35 {
36 	struct ice_aq_desc desc;
37 
38 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
39 
40 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
41 }
42 
43 /**
44  * ice_aq_manage_mac_read - manage MAC address read command
45  * @hw: pointer to the HW struct
46  * @buf: a virtual buffer to hold the manage MAC read response
47  * @buf_size: Size of the virtual buffer
48  * @cd: pointer to command details structure or NULL
49  *
50  * This function is used to return per PF station MAC address (0x0107).
51  * NOTE: Upon successful completion of this command, MAC address information
52  * is returned in user specified buffer. Please interpret user specified
53  * buffer as "manage_mac_read" response.
54  * Responses such as the various MAC addresses are stored in the HW struct
55  * (port.mac). ice_aq_discover_caps is expected to be called before this function.
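 * Callers typically size @buf for two response entries, since a single port
 * can report both a LAN and a WoL address (see the call in ice_init_hw()).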
56  */
57 static enum ice_status
58 ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
59 		       struct ice_sq_cd *cd)
60 {
61 	struct ice_aqc_manage_mac_read_resp *resp;
62 	struct ice_aqc_manage_mac_read *cmd;
63 	struct ice_aq_desc desc;
64 	enum ice_status status;
65 	u16 flags;
66 	u8 i;
67 
68 	cmd = &desc.params.mac_read;
69 
70 	if (buf_size < sizeof(*resp))
71 		return ICE_ERR_BUF_TOO_SHORT;
72 
73 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
74 
75 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
76 	if (status)
77 		return status;
78 
79 	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
80 	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
81 
82 	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
83 		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
84 		return ICE_ERR_CFG;
85 	}
86 
87 	/* A single port can report up to two (LAN and WoL) addresses */
88 	for (i = 0; i < cmd->num_addr; i++)
89 		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
90 			ether_addr_copy(hw->port_info->mac.lan_addr,
91 					resp[i].mac_addr);
92 			ether_addr_copy(hw->port_info->mac.perm_addr,
93 					resp[i].mac_addr);
94 			break;
95 		}
96 
97 	return 0;
98 }
99 
100 /**
101  * ice_aq_get_phy_caps - returns PHY capabilities
102  * @pi: port information structure
103  * @qual_mods: report qualified modules
104  * @report_mode: report mode capabilities
105  * @pcaps: structure for PHY capabilities to be filled
106  * @cd: pointer to command details structure or NULL
107  *
108  * Returns the various PHY capabilities supported on the Port (0x0600)
109  */
110 enum ice_status
111 ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
112 		    struct ice_aqc_get_phy_caps_data *pcaps,
113 		    struct ice_sq_cd *cd)
114 {
115 	struct ice_aqc_get_phy_caps *cmd;
116 	u16 pcaps_size = sizeof(*pcaps);
117 	struct ice_aq_desc desc;
118 	enum ice_status status;
119 
120 	cmd = &desc.params.get_phy;
121 
122 	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
123 		return ICE_ERR_PARAM;
124 
125 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
126 
127 	if (qual_mods)
128 		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);
129 
130 	cmd->param0 |= cpu_to_le16(report_mode);
131 	status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);
132 
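	/* When the topology-based capabilities were requested, cache the
	 * supported PHY types on the port_info structure for later use.
	 */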
133 	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
134 		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
135 		pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
136 	}
137 
138 	return status;
139 }
140 
141 /**
142  * ice_get_media_type - Gets media type
143  * @pi: port information structure
144  */
145 static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
146 {
147 	struct ice_link_status *hw_link_info;
148 
149 	if (!pi)
150 		return ICE_MEDIA_UNKNOWN;
151 
152 	hw_link_info = &pi->phy.link_info;
153 	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
154 		/* If more than one media type is selected, report unknown */
155 		return ICE_MEDIA_UNKNOWN;
156 
157 	if (hw_link_info->phy_type_low) {
158 		switch (hw_link_info->phy_type_low) {
159 		case ICE_PHY_TYPE_LOW_1000BASE_SX:
160 		case ICE_PHY_TYPE_LOW_1000BASE_LX:
161 		case ICE_PHY_TYPE_LOW_10GBASE_SR:
162 		case ICE_PHY_TYPE_LOW_10GBASE_LR:
163 		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
164 		case ICE_PHY_TYPE_LOW_25GBASE_SR:
165 		case ICE_PHY_TYPE_LOW_25GBASE_LR:
166 		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
167 		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
168 		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
169 		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
170 		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
171 		case ICE_PHY_TYPE_LOW_50GBASE_SR:
172 		case ICE_PHY_TYPE_LOW_50GBASE_FR:
173 		case ICE_PHY_TYPE_LOW_50GBASE_LR:
174 		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
175 		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
176 		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
177 		case ICE_PHY_TYPE_LOW_100GBASE_DR:
178 			return ICE_MEDIA_FIBER;
179 		case ICE_PHY_TYPE_LOW_100BASE_TX:
180 		case ICE_PHY_TYPE_LOW_1000BASE_T:
181 		case ICE_PHY_TYPE_LOW_2500BASE_T:
182 		case ICE_PHY_TYPE_LOW_5GBASE_T:
183 		case ICE_PHY_TYPE_LOW_10GBASE_T:
184 		case ICE_PHY_TYPE_LOW_25GBASE_T:
185 			return ICE_MEDIA_BASET;
186 		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
187 		case ICE_PHY_TYPE_LOW_25GBASE_CR:
188 		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
189 		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
190 		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
191 		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
192 		case ICE_PHY_TYPE_LOW_50GBASE_CP:
193 		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
194 		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
195 		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
196 			return ICE_MEDIA_DA;
197 		case ICE_PHY_TYPE_LOW_1000BASE_KX:
198 		case ICE_PHY_TYPE_LOW_2500BASE_KX:
199 		case ICE_PHY_TYPE_LOW_2500BASE_X:
200 		case ICE_PHY_TYPE_LOW_5GBASE_KR:
201 		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
202 		case ICE_PHY_TYPE_LOW_25GBASE_KR:
203 		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
204 		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
205 		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
206 		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
207 		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
208 		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
209 		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
210 			return ICE_MEDIA_BACKPLANE;
211 		}
212 	} else {
213 		switch (hw_link_info->phy_type_high) {
214 		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
215 			return ICE_MEDIA_BACKPLANE;
216 		}
217 	}
218 	return ICE_MEDIA_UNKNOWN;
219 }
220 
221 /**
222  * ice_aq_get_link_info
223  * @pi: port information structure
224  * @ena_lse: enable/disable LinkStatusEvent reporting
225  * @link: pointer to link status structure - optional
226  * @cd: pointer to command details structure or NULL
227  *
228  * Get Link Status (0x0607). Returns the link status of the adapter.
229  */
230 enum ice_status
231 ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
232 		     struct ice_link_status *link, struct ice_sq_cd *cd)
233 {
234 	struct ice_aqc_get_link_status_data link_data = { 0 };
235 	struct ice_aqc_get_link_status *resp;
236 	struct ice_link_status *li_old, *li;
237 	enum ice_media_type *hw_media_type;
238 	struct ice_fc_info *hw_fc_info;
239 	bool tx_pause, rx_pause;
240 	struct ice_aq_desc desc;
241 	enum ice_status status;
242 	struct ice_hw *hw;
243 	u16 cmd_flags;
244 
245 	if (!pi)
246 		return ICE_ERR_PARAM;
247 	hw = pi->hw;
248 	li_old = &pi->phy.link_info_old;
249 	hw_media_type = &pi->phy.media_type;
250 	li = &pi->phy.link_info;
251 	hw_fc_info = &pi->fc;
252 
253 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
254 	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
255 	resp = &desc.params.get_link_status;
256 	resp->cmd_flags = cpu_to_le16(cmd_flags);
257 	resp->lport_num = pi->lport;
258 
259 	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
260 
261 	if (status)
262 		return status;
263 
264 	/* save off old link status information */
265 	*li_old = *li;
266 
267 	/* update current link status information */
268 	li->link_speed = le16_to_cpu(link_data.link_speed);
269 	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
270 	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
271 	*hw_media_type = ice_get_media_type(pi);
272 	li->link_info = link_data.link_info;
273 	li->an_info = link_data.an_info;
274 	li->ext_info = link_data.ext_info;
275 	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
276 	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
277 	li->topo_media_conflict = link_data.topo_media_conflict;
278 	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
279 				      ICE_AQ_CFG_PACING_TYPE_M);
280 
281 	/* update fc info */
282 	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
283 	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
284 	if (tx_pause && rx_pause)
285 		hw_fc_info->current_mode = ICE_FC_FULL;
286 	else if (tx_pause)
287 		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
288 	else if (rx_pause)
289 		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
290 	else
291 		hw_fc_info->current_mode = ICE_FC_NONE;
292 
293 	li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));
294 
295 	ice_debug(hw, ICE_DBG_LINK, "link_speed = 0x%x\n", li->link_speed);
296 	ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
297 		  (unsigned long long)li->phy_type_low);
298 	ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
299 		  (unsigned long long)li->phy_type_high);
300 	ice_debug(hw, ICE_DBG_LINK, "media_type = 0x%x\n", *hw_media_type);
301 	ice_debug(hw, ICE_DBG_LINK, "link_info = 0x%x\n", li->link_info);
302 	ice_debug(hw, ICE_DBG_LINK, "an_info = 0x%x\n", li->an_info);
303 	ice_debug(hw, ICE_DBG_LINK, "ext_info = 0x%x\n", li->ext_info);
304 	ice_debug(hw, ICE_DBG_LINK, "lse_ena = 0x%x\n", li->lse_ena);
305 	ice_debug(hw, ICE_DBG_LINK, "max_frame = 0x%x\n", li->max_frame_size);
306 	ice_debug(hw, ICE_DBG_LINK, "pacing = 0x%x\n", li->pacing);
307 
308 	/* save link status information */
309 	if (link)
310 		*link = *li;
311 
312 	/* flag cleared so calling functions don't call AQ again */
313 	pi->phy.get_link_info = false;
314 
315 	return 0;
316 }
317 
318 /**
319  * ice_init_fltr_mgmt_struct - initializes filter management list and locks
320  * @hw: pointer to the HW struct
321  */
322 static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
323 {
324 	struct ice_switch_info *sw;
325 
326 	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
327 				       sizeof(*hw->switch_info), GFP_KERNEL);
328 	sw = hw->switch_info;
329 
330 	if (!sw)
331 		return ICE_ERR_NO_MEMORY;
332 
333 	INIT_LIST_HEAD(&sw->vsi_list_map_head);
334 
335 	return ice_init_def_sw_recp(hw);
336 }
337 
338 /**
339  * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
340  * @hw: pointer to the HW struct
341  */
342 static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
343 {
344 	struct ice_switch_info *sw = hw->switch_info;
345 	struct ice_vsi_list_map_info *v_pos_map;
346 	struct ice_vsi_list_map_info *v_tmp_map;
347 	struct ice_sw_recipe *recps;
348 	u8 i;
349 
350 	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
351 				 list_entry) {
352 		list_del(&v_pos_map->list_entry);
353 		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
354 	}
355 	recps = hw->switch_info->recp_list;
356 	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
357 		struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
358 
359 		recps[i].root_rid = i;
360 		mutex_destroy(&recps[i].filt_rule_lock);
361 		list_for_each_entry_safe(lst_itr, tmp_entry,
362 					 &recps[i].filt_rules, list_entry) {
363 			list_del(&lst_itr->list_entry);
364 			devm_kfree(ice_hw_to_dev(hw), lst_itr);
365 		}
366 	}
367 	ice_rm_all_sw_replay_rule_info(hw);
368 	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
369 	devm_kfree(ice_hw_to_dev(hw), sw);
370 }
371 
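/* Size in bytes of a FW logging buffer holding (n) module entries; the base
 * structure already includes one entry, hence the (n - 1).
 */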
372 #define ICE_FW_LOG_DESC_SIZE(n)	(sizeof(struct ice_aqc_fw_logging_data) + \
373 	(((n) - 1) * sizeof(((struct ice_aqc_fw_logging_data *)0)->entry)))
374 #define ICE_FW_LOG_DESC_SIZE_MAX	\
375 	ICE_FW_LOG_DESC_SIZE(ICE_AQC_FW_LOG_ID_MAX)
376 
377 /**
378  * ice_get_fw_log_cfg - get FW logging configuration
379  * @hw: pointer to the HW struct
380  */
381 static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
382 {
383 	struct ice_aqc_fw_logging_data *config;
384 	struct ice_aq_desc desc;
385 	enum ice_status status;
386 	u16 size;
387 
388 	size = ICE_FW_LOG_DESC_SIZE_MAX;
389 	config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
390 	if (!config)
391 		return ICE_ERR_NO_MEMORY;
392 
393 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);
394 
395 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
396 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
397 
398 	status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
399 	if (!status) {
400 		u16 i;
401 
402 		/* Save FW logging information into the HW structure */
403 		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
404 			u16 v, m, flgs;
405 
406 			v = le16_to_cpu(config->entry[i]);
407 			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
408 			flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;
409 
410 			if (m < ICE_AQC_FW_LOG_ID_MAX)
411 				hw->fw_log.evnts[m].cur = flgs;
412 		}
413 	}
414 
415 	devm_kfree(ice_hw_to_dev(hw), config);
416 
417 	return status;
418 }
419 
420 /**
421  * ice_cfg_fw_log - configure FW logging
422  * @hw: pointer to the HW struct
423  * @enable: enable certain FW logging events if true, disable all if false
424  *
425  * This function enables/disables the FW logging via Rx CQ events and a UART
426  * port based on predetermined configurations. FW logging via the Rx CQ can be
427  * enabled/disabled for individual PFs. However, FW logging via the UART can
428  * only be enabled/disabled for all PFs on the same device.
429  *
430  * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
431  * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
432  * before initializing the device.
433  *
434  * When re/configuring FW logging, callers need to update the "cfg" elements of
435  * the hw->fw_log.evnts array with the desired logging event configurations for
436  * modules of interest. When disabling FW logging completely, the callers can
437  * just pass false in the "enable" parameter. On completion, the function will
438  * update the "cur" element of the hw->fw_log.evnts array with the resulting
439  * logging event configurations of the modules that are being re/configured. FW
440  * logging modules that are not part of a reconfiguration operation retain their
441  * previous states.
442  *
443  * When resetting the device, it is recommended that the driver disable FW
444  * logging before shutting down the control queue. When disabling FW logging
445  * ("enable" = false), the latest configurations of FW logging events stored in
446  * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
447  * a device reset.
448  *
449  * When enabling FW logging to emit log messages via the Rx CQ during the
450  * device's initialization phase, a mechanism alternative to interrupt handlers
451  * needs to be used to extract FW log messages from the Rx CQ periodically and
452  * to prevent the Rx CQ from being full and stalling other types of control
453  * messages from FW to SW. Interrupts are typically disabled during the device's
454  * initialization phase.
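 *
 * A minimal, hypothetical configuration done by a caller before ice_init_hw()
 * might look as follows (module_id and desired_event_bits are illustrative
 * placeholders, not definitions from this driver):
 *	hw->fw_log.cq_en = true;
 *	hw->fw_log.evnts[module_id].cfg = desired_event_bits;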
455  */
456 static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
457 {
458 	struct ice_aqc_fw_logging_data *data = NULL;
459 	struct ice_aqc_fw_logging *cmd;
460 	enum ice_status status = 0;
461 	u16 i, chgs = 0, len = 0;
462 	struct ice_aq_desc desc;
463 	u8 actv_evnts = 0;
464 	void *buf = NULL;
465 
466 	if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
467 		return 0;
468 
469 	/* Disable FW logging only when the control queue is still responsive */
470 	if (!enable &&
471 	    (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
472 		return 0;
473 
474 	/* Get current FW log settings */
475 	status = ice_get_fw_log_cfg(hw);
476 	if (status)
477 		return status;
478 
479 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
480 	cmd = &desc.params.fw_logging;
481 
482 	/* Indicate which controls are valid */
483 	if (hw->fw_log.cq_en)
484 		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;
485 
486 	if (hw->fw_log.uart_en)
487 		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;
488 
489 	if (enable) {
490 		/* Fill in an array of entries with FW logging modules and
491 		 * logging events being reconfigured.
492 		 */
493 		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
494 			u16 val;
495 
496 			/* Keep track of enabled event types */
497 			actv_evnts |= hw->fw_log.evnts[i].cfg;
498 
499 			if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
500 				continue;
501 
502 			if (!data) {
503 				data = devm_kzalloc(ice_hw_to_dev(hw),
504 						    ICE_FW_LOG_DESC_SIZE_MAX,
505 						    GFP_KERNEL);
506 				if (!data)
507 					return ICE_ERR_NO_MEMORY;
508 			}
509 
510 			val = i << ICE_AQC_FW_LOG_ID_S;
511 			val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
512 			data->entry[chgs++] = cpu_to_le16(val);
513 		}
514 
515 		/* Only enable FW logging if at least one module is specified.
516 		 * If FW logging is currently enabled but no modules are enabled
517 		 * to emit log messages, disable FW logging altogether.
518 		 */
519 		if (actv_evnts) {
520 			/* Leave if there is effectively no change */
521 			if (!chgs)
522 				goto out;
523 
524 			if (hw->fw_log.cq_en)
525 				cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;
526 
527 			if (hw->fw_log.uart_en)
528 				cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;
529 
530 			buf = data;
531 			len = ICE_FW_LOG_DESC_SIZE(chgs);
532 			desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
533 		}
534 	}
535 
536 	status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
537 	if (!status) {
538 		/* Update the current configuration to reflect events enabled.
539 		 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
540 		 * logging mode is enabled for the device. They do not reflect
541 		 * actual modules being enabled to emit log messages. So, their
542 		 * values remain unchanged even when all modules are disabled.
543 		 */
544 		u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;
545 
546 		hw->fw_log.actv_evnts = actv_evnts;
547 		for (i = 0; i < cnt; i++) {
548 			u16 v, m;
549 
550 			if (!enable) {
551 				/* When disabling all FW logging events as part
552 				 * of device's de-initialization, the original
553 				 * configurations are retained, and can be used
554 				 * to reconfigure FW logging later if the device
555 				 * is re-initialized.
556 				 */
557 				hw->fw_log.evnts[i].cur = 0;
558 				continue;
559 			}
560 
561 			v = le16_to_cpu(data->entry[i]);
562 			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
563 			hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
564 		}
565 	}
566 
567 out:
568 	if (data)
569 		devm_kfree(ice_hw_to_dev(hw), data);
570 
571 	return status;
572 }
573 
574 /**
575  * ice_output_fw_log
576  * @hw: pointer to the HW struct
577  * @desc: pointer to the AQ message descriptor
578  * @buf: pointer to the buffer accompanying the AQ message
579  *
580  * Formats a FW Log message and outputs it via the standard driver logs.
581  */
582 void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
583 {
584 	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
585 	ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
586 			le16_to_cpu(desc->datalen));
587 	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
588 }
589 
590 /**
591  * ice_get_itr_intrl_gran
592  * @hw: pointer to the HW struct
593  *
594  * Determines the ITR/INTRL granularities based on the maximum aggregate
595  * bandwidth according to the device's configuration during power-on.
596  */
597 static void ice_get_itr_intrl_gran(struct ice_hw *hw)
598 {
599 	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
600 			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
601 			GL_PWR_MODE_CTL_CAR_MAX_BW_S;
602 
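	/* Select the ITR/INTRL granularity pair based on whether the device is
	 * provisioned for more than 25G of aggregate bandwidth.
	 */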
603 	switch (max_agg_bw) {
604 	case ICE_MAX_AGG_BW_200G:
605 	case ICE_MAX_AGG_BW_100G:
606 	case ICE_MAX_AGG_BW_50G:
607 		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
608 		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
609 		break;
610 	case ICE_MAX_AGG_BW_25G:
611 		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
612 		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
613 		break;
614 	}
615 }
616 
617 /**
618  * ice_get_nvm_version - get cached NVM version data
619  * @hw: pointer to the hardware structure
620  * @oem_ver: 8 bit NVM version
621  * @oem_build: 16 bit NVM build number
622  * @oem_patch: 8 bit NVM patch number
623  * @ver_hi: high 16 bits of the NVM version
624  * @ver_lo: low 16 bits of the NVM version
625  */
626 void
627 ice_get_nvm_version(struct ice_hw *hw, u8 *oem_ver, u16 *oem_build,
628 		    u8 *oem_patch, u8 *ver_hi, u8 *ver_lo)
629 {
630 	struct ice_nvm_info *nvm = &hw->nvm;
631 
632 	*oem_ver = (u8)((nvm->oem_ver & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT);
633 	*oem_patch = (u8)(nvm->oem_ver & ICE_OEM_VER_PATCH_MASK);
634 	*oem_build = (u16)((nvm->oem_ver & ICE_OEM_VER_BUILD_MASK) >>
635 			   ICE_OEM_VER_BUILD_SHIFT);
636 	*ver_hi = (nvm->ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
637 	*ver_lo = (nvm->ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;
638 }
639 
640 /**
641  * ice_init_hw - main hardware initialization routine
642  * @hw: pointer to the hardware structure
643  */
644 enum ice_status ice_init_hw(struct ice_hw *hw)
645 {
646 	struct ice_aqc_get_phy_caps_data *pcaps;
647 	enum ice_status status;
648 	u16 mac_buf_len;
649 	void *mac_buf;
650 
651 	/* Set MAC type based on DeviceID */
652 	status = ice_set_mac_type(hw);
653 	if (status)
654 		return status;
655 
656 	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
657 			 PF_FUNC_RID_FUNC_NUM_M) >>
658 		PF_FUNC_RID_FUNC_NUM_S;
659 
660 	status = ice_reset(hw, ICE_RESET_PFR);
661 	if (status)
662 		return status;
663 
664 	ice_get_itr_intrl_gran(hw);
665 
666 	status = ice_create_all_ctrlq(hw);
667 	if (status)
668 		goto err_unroll_cqinit;
669 
670 	/* Enable FW logging. Not fatal if this fails. */
671 	status = ice_cfg_fw_log(hw, true);
672 	if (status)
673 		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");
674 
675 	status = ice_clear_pf_cfg(hw);
676 	if (status)
677 		goto err_unroll_cqinit;
678 
679 	ice_clear_pxe_mode(hw);
680 
681 	status = ice_init_nvm(hw);
682 	if (status)
683 		goto err_unroll_cqinit;
684 
685 	status = ice_get_caps(hw);
686 	if (status)
687 		goto err_unroll_cqinit;
688 
689 	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
690 				     sizeof(*hw->port_info), GFP_KERNEL);
691 	if (!hw->port_info) {
692 		status = ICE_ERR_NO_MEMORY;
693 		goto err_unroll_cqinit;
694 	}
695 
696 	/* set the back pointer to HW */
697 	hw->port_info->hw = hw;
698 
699 	/* Initialize port_info struct with switch configuration data */
700 	status = ice_get_initial_sw_cfg(hw);
701 	if (status)
702 		goto err_unroll_alloc;
703 
704 	hw->evb_veb = true;
705 
706 	/* Query the allocated resources for Tx scheduler */
707 	status = ice_sched_query_res_alloc(hw);
708 	if (status) {
709 		ice_debug(hw, ICE_DBG_SCHED,
710 			  "Failed to get scheduler allocated resources\n");
711 		goto err_unroll_alloc;
712 	}
713 
714 	/* Initialize port_info struct with scheduler data */
715 	status = ice_sched_init_port(hw->port_info);
716 	if (status)
717 		goto err_unroll_sched;
718 
719 	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
720 	if (!pcaps) {
721 		status = ICE_ERR_NO_MEMORY;
722 		goto err_unroll_sched;
723 	}
724 
725 	/* Initialize port_info struct with PHY capabilities */
726 	status = ice_aq_get_phy_caps(hw->port_info, false,
727 				     ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
728 	devm_kfree(ice_hw_to_dev(hw), pcaps);
729 	if (status)
730 		goto err_unroll_sched;
731 
732 	/* Initialize port_info struct with link information */
733 	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
734 	if (status)
735 		goto err_unroll_sched;
736 
737 	/* need a valid SW entry point to build a Tx tree */
738 	if (!hw->sw_entry_point_layer) {
739 		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
740 		status = ICE_ERR_CFG;
741 		goto err_unroll_sched;
742 	}
743 	INIT_LIST_HEAD(&hw->agg_list);
744 	/* Initialize max burst size */
745 	if (!hw->max_burst_size)
746 		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
747 
748 	status = ice_init_fltr_mgmt_struct(hw);
749 	if (status)
750 		goto err_unroll_sched;
751 
752 	/* Get MAC information */
753 	/* A single port can report up to two (LAN and WoL) addresses */
754 	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
755 			       sizeof(struct ice_aqc_manage_mac_read_resp),
756 			       GFP_KERNEL);
757 	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
758 
759 	if (!mac_buf) {
760 		status = ICE_ERR_NO_MEMORY;
761 		goto err_unroll_fltr_mgmt_struct;
762 	}
763 
764 	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
765 	devm_kfree(ice_hw_to_dev(hw), mac_buf);
766 
767 	if (status)
768 		goto err_unroll_fltr_mgmt_struct;
769 	status = ice_init_hw_tbls(hw);
770 	if (status)
771 		goto err_unroll_fltr_mgmt_struct;
772 	return 0;
773 
774 err_unroll_fltr_mgmt_struct:
775 	ice_cleanup_fltr_mgmt_struct(hw);
776 err_unroll_sched:
777 	ice_sched_cleanup_all(hw);
778 err_unroll_alloc:
779 	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
780 err_unroll_cqinit:
781 	ice_destroy_all_ctrlq(hw);
782 	return status;
783 }
784 
785 /**
786  * ice_deinit_hw - unroll initialization operations done by ice_init_hw
787  * @hw: pointer to the hardware structure
788  *
789  * This should be called only during nominal operation, not as a result of
790  * ice_init_hw() failing, since ice_init_hw() will take care of unrolling
791  * applicable initializations if it fails for any reason.
792  */
793 void ice_deinit_hw(struct ice_hw *hw)
794 {
795 	ice_cleanup_fltr_mgmt_struct(hw);
796 
797 	ice_sched_cleanup_all(hw);
798 	ice_sched_clear_agg(hw);
799 	ice_free_seg(hw);
800 	ice_free_hw_tbls(hw);
801 
802 	if (hw->port_info) {
803 		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
804 		hw->port_info = NULL;
805 	}
806 
807 	/* Attempt to disable FW logging before shutting down control queues */
808 	ice_cfg_fw_log(hw, false);
809 	ice_destroy_all_ctrlq(hw);
810 
811 	/* Clear VSI contexts if not already cleared */
812 	ice_clear_all_vsi_ctx(hw);
813 }
814 
815 /**
816  * ice_check_reset - Check to see if a global reset is complete
817  * @hw: pointer to the hardware structure
818  */
819 enum ice_status ice_check_reset(struct ice_hw *hw)
820 {
821 	u32 cnt, reg = 0, grst_delay, uld_mask;
822 
823 	/* Poll for Device Active state in case a recent CORER, GLOBR,
824 	 * or EMPR has occurred. The grst delay value is in 100ms units.
825 	 * Add 1sec for outstanding AQ commands that can take a long time.
826 	 */
827 	grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
828 		      GLGEN_RSTCTL_GRSTDEL_S) + 10;
829 
830 	for (cnt = 0; cnt < grst_delay; cnt++) {
831 		mdelay(100);
832 		reg = rd32(hw, GLGEN_RSTAT);
833 		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
834 			break;
835 	}
836 
837 	if (cnt == grst_delay) {
838 		ice_debug(hw, ICE_DBG_INIT,
839 			  "Global reset polling failed to complete.\n");
840 		return ICE_ERR_RESET_FAILED;
841 	}
842 
843 #define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
844 				 GLNVM_ULD_PCIER_DONE_1_M |\
845 				 GLNVM_ULD_CORER_DONE_M |\
846 				 GLNVM_ULD_GLOBR_DONE_M |\
847 				 GLNVM_ULD_POR_DONE_M |\
848 				 GLNVM_ULD_POR_DONE_1_M |\
849 				 GLNVM_ULD_PCIER_DONE_2_M)
850 
851 	uld_mask = ICE_RESET_DONE_MASK;
852 
853 	/* Device is Active; check Global Reset processes are done */
854 	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
855 		reg = rd32(hw, GLNVM_ULD) & uld_mask;
856 		if (reg == uld_mask) {
857 			ice_debug(hw, ICE_DBG_INIT,
858 				  "Global reset processes done. %d\n", cnt);
859 			break;
860 		}
861 		mdelay(10);
862 	}
863 
864 	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
865 		ice_debug(hw, ICE_DBG_INIT,
866 			  "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
867 			  reg);
868 		return ICE_ERR_RESET_FAILED;
869 	}
870 
871 	return 0;
872 }
873 
874 /**
875  * ice_pf_reset - Reset the PF
876  * @hw: pointer to the hardware structure
877  *
878  * If a global reset has been triggered, this function checks
879  * for its completion and then issues the PF reset
880  */
881 static enum ice_status ice_pf_reset(struct ice_hw *hw)
882 {
883 	u32 cnt, reg;
884 
885 	/* If at function entry a global reset was already in progress, i.e.
886 	 * state is not 'device active' or any of the reset done bits are not
887 	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
888 	 * global reset is done.
889 	 */
890 	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
891 	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
892 		/* poll on global reset currently in progress until done */
893 		if (ice_check_reset(hw))
894 			return ICE_ERR_RESET_FAILED;
895 
896 		return 0;
897 	}
898 
899 	/* Reset the PF */
900 	reg = rd32(hw, PFGEN_CTRL);
901 
902 	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
903 
904 	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
905 		reg = rd32(hw, PFGEN_CTRL);
906 		if (!(reg & PFGEN_CTRL_PFSWR_M))
907 			break;
908 
909 		mdelay(1);
910 	}
911 
912 	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
913 		ice_debug(hw, ICE_DBG_INIT,
914 			  "PF reset polling failed to complete.\n");
915 		return ICE_ERR_RESET_FAILED;
916 	}
917 
918 	return 0;
919 }
920 
921 /**
922  * ice_reset - Perform different types of reset
923  * @hw: pointer to the hardware structure
924  * @req: reset request
925  *
926  * This function triggers a reset as specified by the req parameter.
927  *
928  * Note:
929  * If anything other than a PF reset is triggered, PXE mode is restored.
930  * This has to be cleared using ice_clear_pxe_mode again, once the AQ
931  * interface has been restored in the rebuild flow.
932  */
933 enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
934 {
935 	u32 val = 0;
936 
937 	switch (req) {
938 	case ICE_RESET_PFR:
939 		return ice_pf_reset(hw);
940 	case ICE_RESET_CORER:
941 		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
942 		val = GLGEN_RTRIG_CORER_M;
943 		break;
944 	case ICE_RESET_GLOBR:
945 		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
946 		val = GLGEN_RTRIG_GLOBR_M;
947 		break;
948 	default:
949 		return ICE_ERR_PARAM;
950 	}
951 
952 	val |= rd32(hw, GLGEN_RTRIG);
953 	wr32(hw, GLGEN_RTRIG, val);
954 	ice_flush(hw);
955 
956 	/* wait for the FW to be ready */
957 	return ice_check_reset(hw);
958 }
959 
960 /**
961  * ice_get_pfa_module_tlv - Reads sub module TLV from NVM PFA
962  * @hw: pointer to hardware structure
963  * @module_tlv: pointer to module TLV to return
964  * @module_tlv_len: pointer to module TLV length to return
965  * @module_type: module type requested
966  *
967  * Finds the requested sub module TLV type from the Preserved Field
968  * Area (PFA) and returns the TLV pointer and length. The caller can
969  * use these to read the variable length TLV value.
970  */
971 enum ice_status
972 ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
973 		       u16 module_type)
974 {
975 	enum ice_status status;
976 	u16 pfa_len, pfa_ptr;
977 	u16 next_tlv;
978 
979 	status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
980 	if (status) {
981 		ice_debug(hw, ICE_DBG_INIT, "Failed to read Preserved Field Array pointer.\n");
982 		return status;
983 	}
984 	status = ice_read_sr_word(hw, pfa_ptr, &pfa_len);
985 	if (status) {
986 		ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n");
987 		return status;
988 	}
989 	/* Starting with first TLV after PFA length, iterate through the list
990 	 * of TLVs to find the requested one.
991 	 */
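	/* Each TLV is laid out in NVM words as: type (one word), length in
	 * words (one word), followed by the value (length words).
	 */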
992 	next_tlv = pfa_ptr + 1;
993 	while (next_tlv < pfa_ptr + pfa_len) {
994 		u16 tlv_sub_module_type;
995 		u16 tlv_len;
996 
997 		/* Read TLV type */
998 		status = ice_read_sr_word(hw, next_tlv, &tlv_sub_module_type);
999 		if (status) {
1000 			ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n");
1001 			break;
1002 		}
1003 		/* Read TLV length */
1004 		status = ice_read_sr_word(hw, next_tlv + 1, &tlv_len);
1005 		if (status) {
1006 			ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n");
1007 			break;
1008 		}
1009 		if (tlv_sub_module_type == module_type) {
1010 			if (tlv_len) {
1011 				*module_tlv = next_tlv;
1012 				*module_tlv_len = tlv_len;
1013 				return 0;
1014 			}
1015 			return ICE_ERR_INVAL_SIZE;
1016 		}
1017 		/* Check next TLV, i.e. current TLV pointer + length + 2 words
1018 		 * (for current TLV's type and length)
1019 		 */
1020 		next_tlv = next_tlv + tlv_len + 2;
1021 	}
1022 	/* Module does not exist */
1023 	return ICE_ERR_DOES_NOT_EXIST;
1024 }
1025 
1026 /**
1027  * ice_copy_rxq_ctx_to_hw
1028  * @hw: pointer to the hardware structure
1029  * @ice_rxq_ctx: pointer to the rxq context
1030  * @rxq_index: the index of the Rx queue
1031  *
1032  * Copies rxq context from dense structure to HW register space
1033  */
1034 static enum ice_status
1035 ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
1036 {
1037 	u8 i;
1038 
1039 	if (!ice_rxq_ctx)
1040 		return ICE_ERR_BAD_PTR;
1041 
1042 	if (rxq_index > QRX_CTRL_MAX_INDEX)
1043 		return ICE_ERR_PARAM;
1044 
1045 	/* Copy each dword separately to HW */
1046 	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
1047 		wr32(hw, QRX_CONTEXT(i, rxq_index),
1048 		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1049 
1050 		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
1051 			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1052 	}
1053 
1054 	return 0;
1055 }
1056 
1057 /* LAN Rx Queue Context */
1058 static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
1059 	/* Field		Width	LSB */
1060 	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
1061 	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
1062 	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
1063 	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
1064 	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
1065 	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
1066 	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
1067 	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
1068 	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
1069 	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
1070 	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
1071 	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
1072 	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
1073 	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
1074 	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
1075 	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
1076 	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
1077 	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
1078 	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
1079 	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
1080 	{ 0 }
1081 };
1082 
1083 /**
1084  * ice_write_rxq_ctx
1085  * @hw: pointer to the hardware structure
1086  * @rlan_ctx: pointer to the rxq context
1087  * @rxq_index: the index of the Rx queue
1088  *
1089  * Converts rxq context from sparse to dense structure and then writes
1090  * it to HW register space and enables the hardware to prefetch descriptors
1091  * instead of only fetching them on demand
1092  */
1093 enum ice_status
1094 ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
1095 		  u32 rxq_index)
1096 {
1097 	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
1098 
1099 	if (!rlan_ctx)
1100 		return ICE_ERR_BAD_PTR;
1101 
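	/* enable descriptor prefetching instead of fetching on demand only */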
1102 	rlan_ctx->prefena = 1;
1103 
1104 	ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
1105 	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
1106 }
1107 
1108 /* LAN Tx Queue Context */
1109 const struct ice_ctx_ele ice_tlan_ctx_info[] = {
1110 				    /* Field			Width	LSB */
1111 	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
1112 	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
1113 	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
1114 	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
1115 	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
1116 	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
1117 	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
1118 	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
1119 	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
1120 	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
1121 	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
1122 	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
1123 	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
1124 	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
1125 	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
1126 	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
1127 	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
1128 	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
1129 	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
1130 	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
1131 	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
1132 	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
1133 	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
1134 	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
1135 	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
1136 	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
1137 	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
1138 	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
1139 	{ 0 }
1140 };
1141 
1142 /* FW Admin Queue command wrappers */
1143 
1144 /* Software lock/mutex that is meant to be held while the Global Config Lock
1145  * in firmware is acquired by the software to prevent most (but not all) types
1146  * of AQ commands from being sent to FW
1147  */
1148 DEFINE_MUTEX(ice_global_cfg_lock_sw);
1149 
1150 /**
1151  * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
1152  * @hw: pointer to the HW struct
1153  * @desc: descriptor describing the command
1154  * @buf: buffer to use for indirect commands (NULL for direct commands)
1155  * @buf_size: size of buffer for indirect commands (0 for direct commands)
1156  * @cd: pointer to command details structure
1157  *
1158  * Helper function to send FW Admin Queue commands to the FW Admin Queue.
1159  */
1160 enum ice_status
1161 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1162 		u16 buf_size, struct ice_sq_cd *cd)
1163 {
1164 	struct ice_aqc_req_res *cmd = &desc->params.res_owner;
1165 	bool lock_acquired = false;
1166 	enum ice_status status;
1167 
1168 	/* When a package download is in process (i.e. when the firmware's
1169 	 * Global Configuration Lock resource is held), only the Download
1170 	 * Package, Get Version, Get Package Info List and Release Resource
1171 	 * (with resource ID set to Global Config Lock) AdminQ commands are
1172 	 * allowed; all others must block until the package download completes
1173 	 * and the Global Config Lock is released.  See also
1174 	 * ice_acquire_global_cfg_lock().
1175 	 */
1176 	switch (le16_to_cpu(desc->opcode)) {
1177 	case ice_aqc_opc_download_pkg:
1178 	case ice_aqc_opc_get_pkg_info_list:
1179 	case ice_aqc_opc_get_ver:
1180 		break;
1181 	case ice_aqc_opc_release_res:
1182 		if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
1183 			break;
1184 		/* fall-through */
1185 	default:
1186 		mutex_lock(&ice_global_cfg_lock_sw);
1187 		lock_acquired = true;
1188 		break;
1189 	}
1190 
1191 	status = ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
1192 	if (lock_acquired)
1193 		mutex_unlock(&ice_global_cfg_lock_sw);
1194 
1195 	return status;
1196 }
1197 
1198 /**
1199  * ice_aq_get_fw_ver
1200  * @hw: pointer to the HW struct
1201  * @cd: pointer to command details structure or NULL
1202  *
1203  * Get the firmware version (0x0001) from the admin queue commands
1204  */
1205 enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1206 {
1207 	struct ice_aqc_get_ver *resp;
1208 	struct ice_aq_desc desc;
1209 	enum ice_status status;
1210 
1211 	resp = &desc.params.get_ver;
1212 
1213 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1214 
1215 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1216 
1217 	if (!status) {
1218 		hw->fw_branch = resp->fw_branch;
1219 		hw->fw_maj_ver = resp->fw_major;
1220 		hw->fw_min_ver = resp->fw_minor;
1221 		hw->fw_patch = resp->fw_patch;
1222 		hw->fw_build = le32_to_cpu(resp->fw_build);
1223 		hw->api_branch = resp->api_branch;
1224 		hw->api_maj_ver = resp->api_major;
1225 		hw->api_min_ver = resp->api_minor;
1226 		hw->api_patch = resp->api_patch;
1227 	}
1228 
1229 	return status;
1230 }
1231 
1232 /**
1233  * ice_aq_send_driver_ver
1234  * @hw: pointer to the HW struct
1235  * @dv: driver's major, minor version
1236  * @cd: pointer to command details structure or NULL
1237  *
1238  * Send the driver version (0x0002) to the firmware
1239  */
1240 enum ice_status
1241 ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
1242 		       struct ice_sq_cd *cd)
1243 {
1244 	struct ice_aqc_driver_ver *cmd;
1245 	struct ice_aq_desc desc;
1246 	u16 len;
1247 
1248 	cmd = &desc.params.driver_ver;
1249 
1250 	if (!dv)
1251 		return ICE_ERR_PARAM;
1252 
1253 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
1254 
1255 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1256 	cmd->major_ver = dv->major_ver;
1257 	cmd->minor_ver = dv->minor_ver;
1258 	cmd->build_ver = dv->build_ver;
1259 	cmd->subbuild_ver = dv->subbuild_ver;
1260 
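	/* send only the ASCII portion of the driver string, up to (but not
	 * including) the first NUL or non-ASCII character
	 */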
1261 	len = 0;
1262 	while (len < sizeof(dv->driver_string) &&
1263 	       isascii(dv->driver_string[len]) && dv->driver_string[len])
1264 		len++;
1265 
1266 	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
1267 }
1268 
1269 /**
1270  * ice_aq_q_shutdown
1271  * @hw: pointer to the HW struct
1272  * @unloading: is the driver unloading itself
1273  *
1274  * Tell the Firmware that we're shutting down the AdminQ and whether
1275  * or not the driver is unloading as well (0x0003).
1276  */
1277 enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
1278 {
1279 	struct ice_aqc_q_shutdown *cmd;
1280 	struct ice_aq_desc desc;
1281 
1282 	cmd = &desc.params.q_shutdown;
1283 
1284 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1285 
1286 	if (unloading)
1287 		cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
1288 
1289 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1290 }
1291 
1292 /**
1293  * ice_aq_req_res
1294  * @hw: pointer to the HW struct
1295  * @res: resource ID
1296  * @access: access type
1297  * @sdp_number: resource number
1298  * @timeout: the maximum time in ms that the driver may hold the resource
1299  * @cd: pointer to command details structure or NULL
1300  *
1301  * Requests common resource using the admin queue commands (0x0008).
1302  * When attempting to acquire the Global Config Lock, the driver can
1303  * learn of three states:
1304  *  1) 0 -                  acquired lock, and can perform download package
1305  *  2) ICE_ERR_AQ_ERROR -   did not get lock, driver should fail to load
1306  *  3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
1307  *                          successfully downloaded the package; the driver does
1308  *                          not have to download the package and can continue
1309  *                          loading
1310  *
1311  * Note that if the caller is in an acquire-lock, perform-action, release-lock
1312  * phase of operation, it is possible that the FW may detect a timeout and issue
1313  * a CORER. In this case, the driver will receive a CORER interrupt and will
1314  * have to determine its cause. The calling thread that is handling this flow
1315  * will likely get an error propagated back to it indicating the Download
1316  * Package, Update Package or the Release Resource AQ commands timed out.
1317  */
1318 static enum ice_status
1319 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1320 	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
1321 	       struct ice_sq_cd *cd)
1322 {
1323 	struct ice_aqc_req_res *cmd_resp;
1324 	struct ice_aq_desc desc;
1325 	enum ice_status status;
1326 
1327 	cmd_resp = &desc.params.res_owner;
1328 
1329 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
1330 
1331 	cmd_resp->res_id = cpu_to_le16(res);
1332 	cmd_resp->access_type = cpu_to_le16(access);
1333 	cmd_resp->res_number = cpu_to_le32(sdp_number);
1334 	cmd_resp->timeout = cpu_to_le32(*timeout);
1335 	*timeout = 0;
1336 
1337 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1338 
1339 	/* The completion specifies the maximum time in ms that the driver
1340 	 * may hold the resource in the Timeout field.
1341 	 */
1342 
1343 	/* Global config lock response utilizes an additional status field.
1344 	 *
1345 	 * If the Global config lock resource is held by some other driver, the
1346 	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
1347 	 * and the timeout field indicates the maximum time the current owner
1348 	 * of the resource has to free it.
1349 	 */
1350 	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1351 		if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1352 			*timeout = le32_to_cpu(cmd_resp->timeout);
1353 			return 0;
1354 		} else if (le16_to_cpu(cmd_resp->status) ==
1355 			   ICE_AQ_RES_GLBL_IN_PROG) {
1356 			*timeout = le32_to_cpu(cmd_resp->timeout);
1357 			return ICE_ERR_AQ_ERROR;
1358 		} else if (le16_to_cpu(cmd_resp->status) ==
1359 			   ICE_AQ_RES_GLBL_DONE) {
1360 			return ICE_ERR_AQ_NO_WORK;
1361 		}
1362 
1363 		/* invalid FW response, force a timeout immediately */
1364 		*timeout = 0;
1365 		return ICE_ERR_AQ_ERROR;
1366 	}
1367 
1368 	/* If the resource is held by some other driver, the command completes
1369 	 * with a busy return value and the timeout field indicates the maximum
1370 	 * time the current owner of the resource has to free it.
1371 	 */
1372 	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1373 		*timeout = le32_to_cpu(cmd_resp->timeout);
1374 
1375 	return status;
1376 }
1377 
1378 /**
1379  * ice_aq_release_res
1380  * @hw: pointer to the HW struct
1381  * @res: resource ID
1382  * @sdp_number: resource number
1383  * @cd: pointer to command details structure or NULL
1384  *
1385  * release common resource using the admin queue commands (0x0009)
1386  */
1387 static enum ice_status
1388 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1389 		   struct ice_sq_cd *cd)
1390 {
1391 	struct ice_aqc_req_res *cmd;
1392 	struct ice_aq_desc desc;
1393 
1394 	cmd = &desc.params.res_owner;
1395 
1396 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1397 
1398 	cmd->res_id = cpu_to_le16(res);
1399 	cmd->res_number = cpu_to_le32(sdp_number);
1400 
1401 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1402 }
1403 
1404 /**
1405  * ice_acquire_res
1406  * @hw: pointer to the HW structure
1407  * @res: resource ID
1408  * @access: access type (read or write)
1409  * @timeout: timeout in milliseconds
1410  *
1411  * This function will attempt to acquire the ownership of a resource.
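 *
 * A hypothetical caller pairs this with ice_release_res(); for example, to
 * take the NVM resource for reading (timeout_ms is an illustrative value):
 *	if (!ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ, timeout_ms)) {
 *		... access the resource ...
 *		ice_release_res(hw, ICE_NVM_RES_ID);
 *	}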
1412  */
1413 enum ice_status
1414 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1415 		enum ice_aq_res_access_type access, u32 timeout)
1416 {
1417 #define ICE_RES_POLLING_DELAY_MS	10
1418 	u32 delay = ICE_RES_POLLING_DELAY_MS;
1419 	u32 time_left = timeout;
1420 	enum ice_status status;
1421 
1422 	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1423 
1424 	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
1425 	 * previously acquired the resource and performed any necessary updates;
1426 	 * in this case the caller does not obtain the resource and has no
1427 	 * further work to do.
1428 	 */
1429 	if (status == ICE_ERR_AQ_NO_WORK)
1430 		goto ice_acquire_res_exit;
1431 
1432 	if (status)
1433 		ice_debug(hw, ICE_DBG_RES,
1434 			  "resource %d acquire type %d failed.\n", res, access);
1435 
1436 	/* If necessary, poll until the current lock owner times out */
1437 	timeout = time_left;
1438 	while (status && timeout && time_left) {
1439 		mdelay(delay);
1440 		timeout = (timeout > delay) ? timeout - delay : 0;
1441 		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1442 
1443 		if (status == ICE_ERR_AQ_NO_WORK)
1444 			/* lock free, but no work to do */
1445 			break;
1446 
1447 		if (!status)
1448 			/* lock acquired */
1449 			break;
1450 	}
1451 	if (status && status != ICE_ERR_AQ_NO_WORK)
1452 		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
1453 
1454 ice_acquire_res_exit:
1455 	if (status == ICE_ERR_AQ_NO_WORK) {
1456 		if (access == ICE_RES_WRITE)
1457 			ice_debug(hw, ICE_DBG_RES,
1458 				  "resource indicates no work to do.\n");
1459 		else
1460 			ice_debug(hw, ICE_DBG_RES,
1461 				  "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
1462 	}
1463 	return status;
1464 }
1465 
1466 /**
1467  * ice_release_res
1468  * @hw: pointer to the HW structure
1469  * @res: resource ID
1470  *
1471  * This function will release a resource using the proper Admin Command.
1472  */
1473 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
1474 {
1475 	enum ice_status status;
1476 	u32 total_delay = 0;
1477 
1478 	status = ice_aq_release_res(hw, res, 0, NULL);
1479 
1480 	/* there are some rare cases where trying to release the resource
1481 	 * results in an admin queue timeout, so handle them correctly
1482 	 */
1483 	while ((status == ICE_ERR_AQ_TIMEOUT) &&
1484 	       (total_delay < hw->adminq.sq_cmd_timeout)) {
1485 		mdelay(1);
1486 		status = ice_aq_release_res(hw, res, 0, NULL);
1487 		total_delay++;
1488 	}
1489 }
1490 
1491 /**
1492  * ice_aq_alloc_free_res - command to allocate/free resources
1493  * @hw: pointer to the HW struct
1494  * @num_entries: number of resource entries in buffer
1495  * @buf: Indirect buffer to hold data parameters and response
1496  * @buf_size: size of buffer for indirect commands
1497  * @opc: pass in the command opcode
1498  * @cd: pointer to command details structure or NULL
1499  *
1500  * Helper function to allocate/free resources using the admin queue commands
1501  */
1502 enum ice_status
1503 ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
1504 		      struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
1505 		      enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1506 {
1507 	struct ice_aqc_alloc_free_res_cmd *cmd;
1508 	struct ice_aq_desc desc;
1509 
1510 	cmd = &desc.params.sw_res_ctrl;
1511 
1512 	if (!buf)
1513 		return ICE_ERR_PARAM;
1514 
1515 	if (buf_size < (num_entries * sizeof(buf->elem[0])))
1516 		return ICE_ERR_PARAM;
1517 
1518 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
1519 
1520 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1521 
1522 	cmd->num_entries = cpu_to_le16(num_entries);
1523 
1524 	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1525 }
1526 
1527 /**
1528  * ice_alloc_hw_res - allocate resource
1529  * @hw: pointer to the HW struct
1530  * @type: type of resource
1531  * @num: number of resources to allocate
1532  * @btm: allocate from bottom
1533  * @res: pointer to array that will receive the resources
1534  */
1535 enum ice_status
1536 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
1537 {
1538 	struct ice_aqc_alloc_free_res_elem *buf;
1539 	enum ice_status status;
1540 	u16 buf_len;
1541 
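	/* Size the command buffer for num elements; the element array declared
	 * in the buffer already accounts for one entry, hence the (num - 1).
	 */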
1542 	buf_len = struct_size(buf, elem, num - 1);
1543 	buf = kzalloc(buf_len, GFP_KERNEL);
1544 	if (!buf)
1545 		return ICE_ERR_NO_MEMORY;
1546 
1547 	/* Prepare buffer to allocate resource. */
1548 	buf->num_elems = cpu_to_le16(num);
1549 	buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
1550 				    ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
1551 	if (btm)
1552 		buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
1553 
1554 	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
1555 				       ice_aqc_opc_alloc_res, NULL);
1556 	if (status)
1557 		goto ice_alloc_res_exit;
1558 
1559 	memcpy(res, buf->elem, sizeof(buf->elem) * num);
1560 
1561 ice_alloc_res_exit:
1562 	kfree(buf);
1563 	return status;
1564 }
1565 
1566 /**
1567  * ice_free_hw_res - free allocated HW resource
1568  * @hw: pointer to the HW struct
1569  * @type: type of resource to free
1570  * @num: number of resources
1571  * @res: pointer to array that contains the resources to free
1572  */
1573 enum ice_status
1574 ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
1575 {
1576 	struct ice_aqc_alloc_free_res_elem *buf;
1577 	enum ice_status status;
1578 	u16 buf_len;
1579 
1580 	buf_len = struct_size(buf, elem, num - 1);
1581 	buf = kzalloc(buf_len, GFP_KERNEL);
1582 	if (!buf)
1583 		return ICE_ERR_NO_MEMORY;
1584 
1585 	/* Prepare buffer to free resource. */
1586 	buf->num_elems = cpu_to_le16(num);
1587 	buf->res_type = cpu_to_le16(type);
1588 	memcpy(buf->elem, res, sizeof(buf->elem) * num);
1589 
1590 	status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
1591 				       ice_aqc_opc_free_res, NULL);
1592 	if (status)
1593 		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1594 
1595 	kfree(buf);
1596 	return status;
1597 }
1598 
1599 /**
1600  * ice_get_num_per_func - determine number of resources per PF
1601  * @hw: pointer to the HW structure
1602  * @max: value to be evenly split between each PF
1603  *
1604  * Determine the number of valid functions by going through the bitmap returned
1605  * from parsing capabilities and use this to calculate the number of resources
1606  * per PF based on the max value passed in.
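 *
 * For example, with four PFs set in the valid-functions bitmap and max = 768,
 * each PF would be assigned 768 / 4 = 192 resources.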
1607  */
1608 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
1609 {
1610 	u8 funcs;
1611 
1612 #define ICE_CAPS_VALID_FUNCS_M	0xFF
1613 	funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
1614 			 ICE_CAPS_VALID_FUNCS_M);
1615 
1616 	if (!funcs)
1617 		return 0;
1618 
1619 	return max / funcs;
1620 }
1621 
1622 /**
1623  * ice_parse_caps - parse function/device capabilities
1624  * @hw: pointer to the HW struct
1625  * @buf: pointer to a buffer containing function/device capability records
1626  * @cap_count: number of capability records in the list
1627  * @opc: type of capabilities list to parse
1628  *
1629  * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
1630  */
1631 static void
1632 ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
1633 	       enum ice_adminq_opc opc)
1634 {
1635 	struct ice_aqc_list_caps_elem *cap_resp;
1636 	struct ice_hw_func_caps *func_p = NULL;
1637 	struct ice_hw_dev_caps *dev_p = NULL;
1638 	struct ice_hw_common_caps *caps;
1639 	char const *prefix;
1640 	u32 i;
1641 
1642 	if (!buf)
1643 		return;
1644 
1645 	cap_resp = (struct ice_aqc_list_caps_elem *)buf;
1646 
1647 	if (opc == ice_aqc_opc_list_dev_caps) {
1648 		dev_p = &hw->dev_caps;
1649 		caps = &dev_p->common_cap;
1650 		prefix = "dev cap";
1651 	} else if (opc == ice_aqc_opc_list_func_caps) {
1652 		func_p = &hw->func_caps;
1653 		caps = &func_p->common_cap;
1654 		prefix = "func cap";
1655 	} else {
1656 		ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
1657 		return;
1658 	}
1659 
1660 	for (i = 0; caps && i < cap_count; i++, cap_resp++) {
1661 		u32 logical_id = le32_to_cpu(cap_resp->logical_id);
1662 		u32 phys_id = le32_to_cpu(cap_resp->phys_id);
1663 		u32 number = le32_to_cpu(cap_resp->number);
1664 		u16 cap = le16_to_cpu(cap_resp->cap);
1665 
1666 		switch (cap) {
1667 		case ICE_AQC_CAPS_VALID_FUNCTIONS:
1668 			caps->valid_functions = number;
1669 			ice_debug(hw, ICE_DBG_INIT,
1670 				  "%s: valid_functions (bitmap) = %d\n", prefix,
1671 				  caps->valid_functions);
1672 
1673 			/* store func count for resource management purposes */
1674 			if (dev_p)
1675 				dev_p->num_funcs = hweight32(number);
1676 			break;
1677 		case ICE_AQC_CAPS_SRIOV:
1678 			caps->sr_iov_1_1 = (number == 1);
1679 			ice_debug(hw, ICE_DBG_INIT,
1680 				  "%s: sr_iov_1_1 = %d\n", prefix,
1681 				  caps->sr_iov_1_1);
1682 			break;
1683 		case ICE_AQC_CAPS_VF:
1684 			if (dev_p) {
1685 				dev_p->num_vfs_exposed = number;
1686 				ice_debug(hw, ICE_DBG_INIT,
1687 					  "%s: num_vfs_exposed = %d\n", prefix,
1688 					  dev_p->num_vfs_exposed);
1689 			} else if (func_p) {
1690 				func_p->num_allocd_vfs = number;
1691 				func_p->vf_base_id = logical_id;
1692 				ice_debug(hw, ICE_DBG_INIT,
1693 					  "%s: num_allocd_vfs = %d\n", prefix,
1694 					  func_p->num_allocd_vfs);
1695 				ice_debug(hw, ICE_DBG_INIT,
1696 					  "%s: vf_base_id = %d\n", prefix,
1697 					  func_p->vf_base_id);
1698 			}
1699 			break;
1700 		case ICE_AQC_CAPS_VSI:
1701 			if (dev_p) {
1702 				dev_p->num_vsi_allocd_to_host = number;
1703 				ice_debug(hw, ICE_DBG_INIT,
1704 					  "%s: num_vsi_allocd_to_host = %d\n",
1705 					  prefix,
1706 					  dev_p->num_vsi_allocd_to_host);
1707 			} else if (func_p) {
1708 				func_p->guar_num_vsi =
1709 					ice_get_num_per_func(hw, ICE_MAX_VSI);
1710 				ice_debug(hw, ICE_DBG_INIT,
1711 					  "%s: guar_num_vsi (fw) = %d\n",
1712 					  prefix, number);
1713 				ice_debug(hw, ICE_DBG_INIT,
1714 					  "%s: guar_num_vsi = %d\n",
1715 					  prefix, func_p->guar_num_vsi);
1716 			}
1717 			break;
1718 		case ICE_AQC_CAPS_DCB:
1719 			caps->dcb = (number == 1);
1720 			caps->active_tc_bitmap = logical_id;
1721 			caps->maxtc = phys_id;
1722 			ice_debug(hw, ICE_DBG_INIT,
1723 				  "%s: dcb = %d\n", prefix, caps->dcb);
1724 			ice_debug(hw, ICE_DBG_INIT,
1725 				  "%s: active_tc_bitmap = %d\n", prefix,
1726 				  caps->active_tc_bitmap);
1727 			ice_debug(hw, ICE_DBG_INIT,
1728 				  "%s: maxtc = %d\n", prefix, caps->maxtc);
1729 			break;
1730 		case ICE_AQC_CAPS_RSS:
1731 			caps->rss_table_size = number;
1732 			caps->rss_table_entry_width = logical_id;
1733 			ice_debug(hw, ICE_DBG_INIT,
1734 				  "%s: rss_table_size = %d\n", prefix,
1735 				  caps->rss_table_size);
1736 			ice_debug(hw, ICE_DBG_INIT,
1737 				  "%s: rss_table_entry_width = %d\n", prefix,
1738 				  caps->rss_table_entry_width);
1739 			break;
1740 		case ICE_AQC_CAPS_RXQS:
1741 			caps->num_rxq = number;
1742 			caps->rxq_first_id = phys_id;
1743 			ice_debug(hw, ICE_DBG_INIT,
1744 				  "%s: num_rxq = %d\n", prefix,
1745 				  caps->num_rxq);
1746 			ice_debug(hw, ICE_DBG_INIT,
1747 				  "%s: rxq_first_id = %d\n", prefix,
1748 				  caps->rxq_first_id);
1749 			break;
1750 		case ICE_AQC_CAPS_TXQS:
1751 			caps->num_txq = number;
1752 			caps->txq_first_id = phys_id;
1753 			ice_debug(hw, ICE_DBG_INIT,
1754 				  "%s: num_txq = %d\n", prefix,
1755 				  caps->num_txq);
1756 			ice_debug(hw, ICE_DBG_INIT,
1757 				  "%s: txq_first_id = %d\n", prefix,
1758 				  caps->txq_first_id);
1759 			break;
1760 		case ICE_AQC_CAPS_MSIX:
1761 			caps->num_msix_vectors = number;
1762 			caps->msix_vector_first_id = phys_id;
1763 			ice_debug(hw, ICE_DBG_INIT,
1764 				  "%s: num_msix_vectors = %d\n", prefix,
1765 				  caps->num_msix_vectors);
1766 			ice_debug(hw, ICE_DBG_INIT,
1767 				  "%s: msix_vector_first_id = %d\n", prefix,
1768 				  caps->msix_vector_first_id);
1769 			break;
1770 		case ICE_AQC_CAPS_MAX_MTU:
1771 			caps->max_mtu = number;
1772 			ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
1773 				  prefix, caps->max_mtu);
1774 			break;
1775 		default:
1776 			ice_debug(hw, ICE_DBG_INIT,
1777 				  "%s: unknown capability[%d]: 0x%x\n", prefix,
1778 				  i, cap);
1779 			break;
1780 		}
1781 	}
1782 
1783 	/* Re-calculate capabilities that are dependent on the number of
1784 	 * physical ports; i.e. some features are not supported or function
1785 	 * differently on devices with more than 4 ports.
1786 	 */
1787 	if (hw->dev_caps.num_funcs > 4) {
1788 		/* Max 4 TCs per port */
1789 		caps->maxtc = 4;
1790 		ice_debug(hw, ICE_DBG_INIT,
1791 			  "%s: maxtc = %d (based on #ports)\n", prefix,
1792 			  caps->maxtc);
1793 	}
1794 }
1795 
1796 /**
1797  * ice_aq_discover_caps - query function/device capabilities
1798  * @hw: pointer to the HW struct
1799  * @buf: a virtual buffer to hold the capabilities
1800  * @buf_size: Size of the virtual buffer
1801  * @cap_count: on ICE_AQ_RC_ENOMEM, updated with the capability count the FW needs
1802  * @opc: capabilities type to discover - pass in the command opcode
1803  * @cd: pointer to command details structure or NULL
1804  *
1805  * Get the function(0x000a)/device(0x000b) capabilities description from
1806  * the firmware.
1807  */
1808 static enum ice_status
1809 ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
1810 		     enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1811 {
1812 	struct ice_aqc_list_caps *cmd;
1813 	struct ice_aq_desc desc;
1814 	enum ice_status status;
1815 
1816 	cmd = &desc.params.get_cap;
1817 
1818 	if (opc != ice_aqc_opc_list_func_caps &&
1819 	    opc != ice_aqc_opc_list_dev_caps)
1820 		return ICE_ERR_PARAM;
1821 
1822 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
1823 
1824 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1825 	if (!status)
1826 		ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc);
1827 	else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM)
1828 		*cap_count = le32_to_cpu(cmd->count);
1829 	return status;
1830 }
1831 
1832 /**
1833  * ice_discover_caps - get info about the HW
1834  * @hw: pointer to the hardware structure
1835  * @opc: capabilities type to discover - pass in the command opcode
1836  */
1837 static enum ice_status
1838 ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc)
1839 {
1840 	enum ice_status status;
1841 	u32 cap_count;
1842 	u16 cbuf_len;
1843 	u8 retries;
1844 
1845 	/* The driver doesn't know how many capabilities the device will return
1846 	 * so the buffer size required isn't known ahead of time. The driver
1847 	 * starts with cbuf_len and if this turns out to be insufficient, the
1848 	 * device returns ICE_AQ_RC_ENOMEM and also the cap_count it needs.
1849 	 * The driver then allocates the buffer based on the count and retries
1850 	 * the operation. So it follows that the retry count is 2.
1851 	 */
1852 #define ICE_GET_CAP_BUF_COUNT	40
1853 #define ICE_GET_CAP_RETRY_COUNT	2
1854 
1855 	cap_count = ICE_GET_CAP_BUF_COUNT;
1856 	retries = ICE_GET_CAP_RETRY_COUNT;
1857 
1858 	do {
1859 		void *cbuf;
1860 
1861 		cbuf_len = (u16)(cap_count *
1862 				 sizeof(struct ice_aqc_list_caps_elem));
1863 		cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL);
1864 		if (!cbuf)
1865 			return ICE_ERR_NO_MEMORY;
1866 
1867 		status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &cap_count,
1868 					      opc, NULL);
1869 		devm_kfree(ice_hw_to_dev(hw), cbuf);
1870 
1871 		if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
1872 			break;
1873 
1874 		/* If ENOMEM is returned, try again with bigger buffer */
1875 	} while (--retries);
1876 
1877 	return status;
1878 }
1879 
1880 /**
1881  * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
1882  * @hw: pointer to the hardware structure
1883  */
1884 void ice_set_safe_mode_caps(struct ice_hw *hw)
1885 {
1886 	struct ice_hw_func_caps *func_caps = &hw->func_caps;
1887 	struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
1888 	u32 valid_func, rxq_first_id, txq_first_id;
1889 	u32 msix_vector_first_id, max_mtu;
1890 	u32 num_funcs;
1891 
1892 	/* cache some func_caps values that should be restored after memset */
1893 	valid_func = func_caps->common_cap.valid_functions;
1894 	txq_first_id = func_caps->common_cap.txq_first_id;
1895 	rxq_first_id = func_caps->common_cap.rxq_first_id;
1896 	msix_vector_first_id = func_caps->common_cap.msix_vector_first_id;
1897 	max_mtu = func_caps->common_cap.max_mtu;
1898 
1899 	/* unset func capabilities */
1900 	memset(func_caps, 0, sizeof(*func_caps));
1901 
1902 	/* restore cached values */
1903 	func_caps->common_cap.valid_functions = valid_func;
1904 	func_caps->common_cap.txq_first_id = txq_first_id;
1905 	func_caps->common_cap.rxq_first_id = rxq_first_id;
1906 	func_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
1907 	func_caps->common_cap.max_mtu = max_mtu;
1908 
1909 	/* one Tx and one Rx queue in safe mode */
1910 	func_caps->common_cap.num_rxq = 1;
1911 	func_caps->common_cap.num_txq = 1;
1912 
1913 	/* two MSIX vectors, one for traffic and one for misc causes */
1914 	func_caps->common_cap.num_msix_vectors = 2;
1915 	func_caps->guar_num_vsi = 1;
1916 
1917 	/* cache some dev_caps values that should be restored after memset */
1918 	valid_func = dev_caps->common_cap.valid_functions;
1919 	txq_first_id = dev_caps->common_cap.txq_first_id;
1920 	rxq_first_id = dev_caps->common_cap.rxq_first_id;
1921 	msix_vector_first_id = dev_caps->common_cap.msix_vector_first_id;
1922 	max_mtu = dev_caps->common_cap.max_mtu;
1923 	num_funcs = dev_caps->num_funcs;
1924 
1925 	/* unset dev capabilities */
1926 	memset(dev_caps, 0, sizeof(*dev_caps));
1927 
1928 	/* restore cached values */
1929 	dev_caps->common_cap.valid_functions = valid_func;
1930 	dev_caps->common_cap.txq_first_id = txq_first_id;
1931 	dev_caps->common_cap.rxq_first_id = rxq_first_id;
1932 	dev_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
1933 	dev_caps->common_cap.max_mtu = max_mtu;
1934 	dev_caps->num_funcs = num_funcs;
1935 
1936 	/* one Tx and one Rx queue per function in safe mode */
1937 	dev_caps->common_cap.num_rxq = num_funcs;
1938 	dev_caps->common_cap.num_txq = num_funcs;
1939 
1940 	/* two MSIX vectors per function */
1941 	dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
1942 }
1943 
1944 /**
1945  * ice_get_caps - get info about the HW
1946  * @hw: pointer to the hardware structure
1947  */
1948 enum ice_status ice_get_caps(struct ice_hw *hw)
1949 {
1950 	enum ice_status status;
1951 
1952 	status = ice_discover_caps(hw, ice_aqc_opc_list_dev_caps);
1953 	if (!status)
1954 		status = ice_discover_caps(hw, ice_aqc_opc_list_func_caps);
1955 
1956 	return status;
1957 }
1958 
1959 /**
1960  * ice_aq_manage_mac_write - manage MAC address write command
1961  * @hw: pointer to the HW struct
1962  * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
1963  * @flags: flags to control write behavior
1964  * @cd: pointer to command details structure or NULL
1965  *
1966  * This function is used to write MAC address to the NVM (0x0108).
1967  */
1968 enum ice_status
1969 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
1970 			struct ice_sq_cd *cd)
1971 {
1972 	struct ice_aqc_manage_mac_write *cmd;
1973 	struct ice_aq_desc desc;
1974 
1975 	cmd = &desc.params.mac_write;
1976 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
1977 
1978 	cmd->flags = flags;
1979 
1980 	/* Prep values for flags, sah, sal */
1981 	cmd->sah = htons(*((const u16 *)mac_addr));
1982 	cmd->sal = htonl(*((const u32 *)(mac_addr + 2)));
1983 
1984 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1985 }
1986 
1987 /**
1988  * ice_aq_clear_pxe_mode
1989  * @hw: pointer to the HW struct
1990  *
1991  * Tell the firmware that the driver is taking over from PXE (0x0110).
1992  */
1993 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
1994 {
1995 	struct ice_aq_desc desc;
1996 
1997 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
1998 	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
1999 
2000 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2001 }
2002 
2003 /**
2004  * ice_clear_pxe_mode - clear pxe operations mode
2005  * @hw: pointer to the HW struct
2006  *
2007  * Make sure all PXE mode settings are cleared, including things
2008  * like descriptor fetch/write-back mode.
2009  */
2010 void ice_clear_pxe_mode(struct ice_hw *hw)
2011 {
2012 	if (ice_check_sq_alive(hw, &hw->adminq))
2013 		ice_aq_clear_pxe_mode(hw);
2014 }
2015 
2016 /**
2017  * ice_get_link_speed_based_on_phy_type - returns link speed
2018  * @phy_type_low: lower part of phy_type
2019  * @phy_type_high: higher part of phy_type
2020  *
2021  * This helper function will convert an entry in PHY type structure
2022  * [phy_type_low, phy_type_high] to its corresponding link speed.
2023  * Note: In the structure of [phy_type_low, phy_type_high], there should
2024  * be one bit set, as this function will convert one PHY type to its
2025  * speed.
2026  * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
2027  * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
2028  */
2029 static u16
2030 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
2031 {
2032 	u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2033 	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2034 
2035 	switch (phy_type_low) {
2036 	case ICE_PHY_TYPE_LOW_100BASE_TX:
2037 	case ICE_PHY_TYPE_LOW_100M_SGMII:
2038 		speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
2039 		break;
2040 	case ICE_PHY_TYPE_LOW_1000BASE_T:
2041 	case ICE_PHY_TYPE_LOW_1000BASE_SX:
2042 	case ICE_PHY_TYPE_LOW_1000BASE_LX:
2043 	case ICE_PHY_TYPE_LOW_1000BASE_KX:
2044 	case ICE_PHY_TYPE_LOW_1G_SGMII:
2045 		speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
2046 		break;
2047 	case ICE_PHY_TYPE_LOW_2500BASE_T:
2048 	case ICE_PHY_TYPE_LOW_2500BASE_X:
2049 	case ICE_PHY_TYPE_LOW_2500BASE_KX:
2050 		speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
2051 		break;
2052 	case ICE_PHY_TYPE_LOW_5GBASE_T:
2053 	case ICE_PHY_TYPE_LOW_5GBASE_KR:
2054 		speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
2055 		break;
2056 	case ICE_PHY_TYPE_LOW_10GBASE_T:
2057 	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
2058 	case ICE_PHY_TYPE_LOW_10GBASE_SR:
2059 	case ICE_PHY_TYPE_LOW_10GBASE_LR:
2060 	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
2061 	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
2062 	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
2063 		speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
2064 		break;
2065 	case ICE_PHY_TYPE_LOW_25GBASE_T:
2066 	case ICE_PHY_TYPE_LOW_25GBASE_CR:
2067 	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
2068 	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
2069 	case ICE_PHY_TYPE_LOW_25GBASE_SR:
2070 	case ICE_PHY_TYPE_LOW_25GBASE_LR:
2071 	case ICE_PHY_TYPE_LOW_25GBASE_KR:
2072 	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
2073 	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
2074 	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
2075 	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
2076 		speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
2077 		break;
2078 	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
2079 	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
2080 	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
2081 	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
2082 	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
2083 	case ICE_PHY_TYPE_LOW_40G_XLAUI:
2084 		speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
2085 		break;
2086 	case ICE_PHY_TYPE_LOW_50GBASE_CR2:
2087 	case ICE_PHY_TYPE_LOW_50GBASE_SR2:
2088 	case ICE_PHY_TYPE_LOW_50GBASE_LR2:
2089 	case ICE_PHY_TYPE_LOW_50GBASE_KR2:
2090 	case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
2091 	case ICE_PHY_TYPE_LOW_50G_LAUI2:
2092 	case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
2093 	case ICE_PHY_TYPE_LOW_50G_AUI2:
2094 	case ICE_PHY_TYPE_LOW_50GBASE_CP:
2095 	case ICE_PHY_TYPE_LOW_50GBASE_SR:
2096 	case ICE_PHY_TYPE_LOW_50GBASE_FR:
2097 	case ICE_PHY_TYPE_LOW_50GBASE_LR:
2098 	case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
2099 	case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
2100 	case ICE_PHY_TYPE_LOW_50G_AUI1:
2101 		speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
2102 		break;
2103 	case ICE_PHY_TYPE_LOW_100GBASE_CR4:
2104 	case ICE_PHY_TYPE_LOW_100GBASE_SR4:
2105 	case ICE_PHY_TYPE_LOW_100GBASE_LR4:
2106 	case ICE_PHY_TYPE_LOW_100GBASE_KR4:
2107 	case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
2108 	case ICE_PHY_TYPE_LOW_100G_CAUI4:
2109 	case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
2110 	case ICE_PHY_TYPE_LOW_100G_AUI4:
2111 	case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
2112 	case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
2113 	case ICE_PHY_TYPE_LOW_100GBASE_CP2:
2114 	case ICE_PHY_TYPE_LOW_100GBASE_SR2:
2115 	case ICE_PHY_TYPE_LOW_100GBASE_DR:
2116 		speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
2117 		break;
2118 	default:
2119 		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2120 		break;
2121 	}
2122 
2123 	switch (phy_type_high) {
2124 	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
2125 	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
2126 	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
2127 	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
2128 	case ICE_PHY_TYPE_HIGH_100G_AUI2:
2129 		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
2130 		break;
2131 	default:
2132 		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2133 		break;
2134 	}
2135 
2136 	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
2137 	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2138 		return ICE_AQ_LINK_SPEED_UNKNOWN;
2139 	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2140 		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
2141 		return ICE_AQ_LINK_SPEED_UNKNOWN;
2142 	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2143 		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2144 		return speed_phy_type_low;
2145 	else
2146 		return speed_phy_type_high;
2147 }
2148 
2149 /**
2150  * ice_update_phy_type
2151  * @phy_type_low: pointer to the lower part of phy_type
2152  * @phy_type_high: pointer to the higher part of phy_type
2153  * @link_speeds_bitmap: targeted link speeds bitmap
2154  *
2155  * Note: For the link_speeds_bitmap structure, see
2156  * [ice_aqc_get_link_status->link_speed]. The caller can pass in a
2157  * link_speeds_bitmap that includes multiple speeds.
2158  *
2159  * Each entry in the [phy_type_low, phy_type_high] structure represents
2160  * a certain link speed. This helper function will turn on bits in the
2161  * [phy_type_low, phy_type_high] structure based on the value of the
2162  * link_speeds_bitmap input parameter.
2163  */
2164 void
2165 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
2166 		    u16 link_speeds_bitmap)
2167 {
2168 	u64 pt_high;
2169 	u64 pt_low;
2170 	int index;
2171 	u16 speed;
2172 
2173 	/* We first check with low part of phy_type */
2174 	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
2175 		pt_low = BIT_ULL(index);
2176 		speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
2177 
2178 		if (link_speeds_bitmap & speed)
2179 			*phy_type_low |= BIT_ULL(index);
2180 	}
2181 
2182 	/* We then check with high part of phy_type */
2183 	for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
2184 		pt_high = BIT_ULL(index);
2185 		speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
2186 
2187 		if (link_speeds_bitmap & speed)
2188 			*phy_type_high |= BIT_ULL(index);
2189 	}
2190 }
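
/* Usage sketch (illustration only, not a call made at this point in the
 * driver): a caller that wants to advertise only 10G and 25G could build the
 * PHY type masks like this, assuming the ICE_AQ_LINK_SPEED_* values are
 * single-bit flags that may be ORed together:
 *
 *	u64 phy_low = 0, phy_high = 0;
 *
 *	ice_update_phy_type(&phy_low, &phy_high,
 *			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);
 *	cfg.phy_type_low = cpu_to_le64(phy_low);
 *	cfg.phy_type_high = cpu_to_le64(phy_high);
 */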
2191 
2192 /**
2193  * ice_aq_set_phy_cfg
2194  * @hw: pointer to the HW struct
2195  * @lport: logical port number
2196  * @cfg: structure with PHY configuration data to be set
2197  * @cd: pointer to command details structure or NULL
2198  *
2199  * Set the various PHY configuration parameters supported on the Port.
2200  * One or more of the Set PHY config parameters may be ignored in an MFP
2201  * mode as the PF may not have the privilege to set some of the PHY Config
2202  * parameters. This status will be indicated by the command response (0x0601).
2203  */
2204 enum ice_status
2205 ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
2206 		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
2207 {
2208 	struct ice_aq_desc desc;
2209 
2210 	if (!cfg)
2211 		return ICE_ERR_PARAM;
2212 
2213 	/* Ensure that only valid bits of cfg->caps can be turned on. */
2214 	if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
2215 		ice_debug(hw, ICE_DBG_PHY,
2216 			  "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
2217 			  cfg->caps);
2218 
2219 		cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
2220 	}
2221 
2222 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
2223 	desc.params.set_phy.lport_num = lport;
2224 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2225 
2226 	ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
2227 		  (unsigned long long)le64_to_cpu(cfg->phy_type_low));
2228 	ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
2229 		  (unsigned long long)le64_to_cpu(cfg->phy_type_high));
2230 	ice_debug(hw, ICE_DBG_LINK, "caps = 0x%x\n", cfg->caps);
2231 	ice_debug(hw, ICE_DBG_LINK, "low_power_ctrl = 0x%x\n",
2232 		  cfg->low_power_ctrl);
2233 	ice_debug(hw, ICE_DBG_LINK, "eee_cap = 0x%x\n", cfg->eee_cap);
2234 	ice_debug(hw, ICE_DBG_LINK, "eeer_value = 0x%x\n", cfg->eeer_value);
2235 	ice_debug(hw, ICE_DBG_LINK, "link_fec_opt = 0x%x\n", cfg->link_fec_opt);
2236 
2237 	return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
2238 }
2239 
2240 /**
2241  * ice_update_link_info - update status of the HW network link
2242  * @pi: port info structure of the interested logical port
2243  */
2244 enum ice_status ice_update_link_info(struct ice_port_info *pi)
2245 {
2246 	struct ice_link_status *li;
2247 	enum ice_status status;
2248 
2249 	if (!pi)
2250 		return ICE_ERR_PARAM;
2251 
2252 	li = &pi->phy.link_info;
2253 
2254 	status = ice_aq_get_link_info(pi, true, NULL, NULL);
2255 	if (status)
2256 		return status;
2257 
2258 	if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
2259 		struct ice_aqc_get_phy_caps_data *pcaps;
2260 		struct ice_hw *hw;
2261 
2262 		hw = pi->hw;
2263 		pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
2264 				     GFP_KERNEL);
2265 		if (!pcaps)
2266 			return ICE_ERR_NO_MEMORY;
2267 
2268 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
2269 					     pcaps, NULL);
2270 		if (!status)
2271 			memcpy(li->module_type, &pcaps->module_type,
2272 			       sizeof(li->module_type));
2273 
2274 		devm_kfree(ice_hw_to_dev(hw), pcaps);
2275 	}
2276 
2277 	return status;
2278 }
2279 
2280 /**
2281  * ice_set_fc
2282  * @pi: port information structure
2283  * @aq_failures: pointer to status code, specific to ice_set_fc routine
2284  * @ena_auto_link_update: enable automatic link update
2285  *
2286  * Set the requested flow control mode.
2287  */
2288 enum ice_status
2289 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
2290 {
2291 	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
2292 	struct ice_aqc_get_phy_caps_data *pcaps;
2293 	enum ice_status status;
2294 	u8 pause_mask = 0x0;
2295 	struct ice_hw *hw;
2296 
2297 	if (!pi)
2298 		return ICE_ERR_PARAM;
2299 	hw = pi->hw;
2300 	*aq_failures = ICE_SET_FC_AQ_FAIL_NONE;
2301 
2302 	switch (pi->fc.req_mode) {
2303 	case ICE_FC_FULL:
2304 		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2305 		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2306 		break;
2307 	case ICE_FC_RX_PAUSE:
2308 		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2309 		break;
2310 	case ICE_FC_TX_PAUSE:
2311 		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2312 		break;
2313 	default:
2314 		break;
2315 	}
2316 
2317 	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
2318 	if (!pcaps)
2319 		return ICE_ERR_NO_MEMORY;
2320 
2321 	/* Get the current PHY config */
2322 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
2323 				     NULL);
2324 	if (status) {
2325 		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
2326 		goto out;
2327 	}
2328 
2329 	/* clear the old pause settings */
2330 	cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
2331 				   ICE_AQC_PHY_EN_RX_LINK_PAUSE);
2332 
2333 	/* set the new capabilities */
2334 	cfg.caps |= pause_mask;
2335 
2336 	/* If the capabilities have changed, then set the new config */
2337 	if (cfg.caps != pcaps->caps) {
2338 		int retry_count, retry_max = 10;
2339 
2340 		/* Auto restart link so settings take effect */
2341 		if (ena_auto_link_update)
2342 			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2343 		/* Copy over all the old settings */
2344 		cfg.phy_type_high = pcaps->phy_type_high;
2345 		cfg.phy_type_low = pcaps->phy_type_low;
2346 		cfg.low_power_ctrl = pcaps->low_power_ctrl;
2347 		cfg.eee_cap = pcaps->eee_cap;
2348 		cfg.eeer_value = pcaps->eeer_value;
2349 		cfg.link_fec_opt = pcaps->link_fec_options;
2350 
2351 		status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL);
2352 		if (status) {
2353 			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
2354 			goto out;
2355 		}
2356 
2357 		/* Update the link info
2358 		 * It sometimes takes a really long time for link to
2359 		 * come back from the atomic reset. Thus, we wait a
2360 		 * little bit.
2361 		 */
2362 		for (retry_count = 0; retry_count < retry_max; retry_count++) {
2363 			status = ice_update_link_info(pi);
2364 
2365 			if (!status)
2366 				break;
2367 
2368 			mdelay(100);
2369 		}
2370 
2371 		if (status)
2372 			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
2373 	}
2374 
2375 out:
2376 	devm_kfree(ice_hw_to_dev(hw), pcaps);
2377 	return status;
2378 }
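
/* Usage sketch (illustrative only): the caller is expected to select the
 * requested flow control mode on the port before invoking this routine,
 * for example:
 *
 *	u8 aq_failures;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	status = ice_set_fc(pi, &aq_failures, true);
 */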
2379 
2380 /**
2381  * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
2382  * @caps: PHY ability structure to copy data from
2383  * @cfg: PHY configuration structure to copy data to
2384  *
2385  * Helper function to copy AQC PHY get ability data to PHY set configuration
2386  * data structure
2387  */
2388 void
2389 ice_copy_phy_caps_to_cfg(struct ice_aqc_get_phy_caps_data *caps,
2390 			 struct ice_aqc_set_phy_cfg_data *cfg)
2391 {
2392 	if (!caps || !cfg)
2393 		return;
2394 
2395 	cfg->phy_type_low = caps->phy_type_low;
2396 	cfg->phy_type_high = caps->phy_type_high;
2397 	cfg->caps = caps->caps;
2398 	cfg->low_power_ctrl = caps->low_power_ctrl;
2399 	cfg->eee_cap = caps->eee_cap;
2400 	cfg->eeer_value = caps->eeer_value;
2401 	cfg->link_fec_opt = caps->link_fec_options;
2402 }
2403 
2404 /**
2405  * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
2406  * @cfg: PHY configuration data to set FEC mode
2407  * @fec: FEC mode to configure
2408  *
2409  * Caller should copy ice_aqc_get_phy_caps_data.caps ICE_AQC_PHY_EN_AUTO_FEC
2410  * (bit 7) and ice_aqc_get_phy_caps_data.link_fec_options to cfg.caps
2411  * ICE_AQ_PHY_ENA_AUTO_FEC (bit 7) and cfg.link_fec_options before calling.
2412  */
2413 void
2414 ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec)
2415 {
2416 	switch (fec) {
2417 	case ICE_FEC_BASER:
2418 		/* Clear RS bits by ANDing with the BASE-R ability
2419 		 * bits, then OR in the BASE-R request bits.
2420 		 */
2421 		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
2422 				     ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
2423 		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
2424 				     ICE_AQC_PHY_FEC_25G_KR_REQ;
2425 		break;
2426 	case ICE_FEC_RS:
2427 		/* Clear BASE-R bits by ANDing with the RS ability
2428 		 * bits, then OR in the RS request bits.
2429 		 */
2430 		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
2431 		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
2432 				     ICE_AQC_PHY_FEC_25G_RS_544_REQ;
2433 		break;
2434 	case ICE_FEC_NONE:
2435 		/* Clear all FEC option bits. */
2436 		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
2437 		break;
2438 	case ICE_FEC_AUTO:
2439 		/* AND auto FEC bit, and all caps bits. */
2440 		cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
2441 		break;
2442 	}
2443 }
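
/* Minimal sketch of the caller sequence described in the kernel-doc above
 * (error handling omitted, ICE_FEC_RS chosen only as an example): copy the
 * reported PHY abilities into the config, pick the FEC mode, then apply it.
 *
 *	ice_copy_phy_caps_to_cfg(pcaps, &cfg);
 *	ice_cfg_phy_fec(&cfg, ICE_FEC_RS);
 *	status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL);
 */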
2444 
2445 /**
2446  * ice_get_link_status - get status of the HW network link
2447  * @pi: port information structure
2448  * @link_up: pointer to bool (true/false = linkup/linkdown)
2449  *
2450  * Variable link_up is true if link is up, false if link is down.
2451  * The variable link_up is invalid if the return status is non-zero. As a
2452  * result of this call, link status reporting becomes enabled.
2453  */
2454 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
2455 {
2456 	struct ice_phy_info *phy_info;
2457 	enum ice_status status = 0;
2458 
2459 	if (!pi || !link_up)
2460 		return ICE_ERR_PARAM;
2461 
2462 	phy_info = &pi->phy;
2463 
2464 	if (phy_info->get_link_info) {
2465 		status = ice_update_link_info(pi);
2466 
2467 		if (status)
2468 			ice_debug(pi->hw, ICE_DBG_LINK,
2469 				  "get link status error, status = %d\n",
2470 				  status);
2471 	}
2472 
2473 	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
2474 
2475 	return status;
2476 }
2477 
2478 /**
2479  * ice_aq_set_link_restart_an
2480  * @pi: pointer to the port information structure
2481  * @ena_link: if true: enable link, if false: disable link
2482  * @cd: pointer to command details structure or NULL
2483  *
2484  * Sets up the link and restarts the Auto-Negotiation over the link.
2485  */
2486 enum ice_status
2487 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
2488 			   struct ice_sq_cd *cd)
2489 {
2490 	struct ice_aqc_restart_an *cmd;
2491 	struct ice_aq_desc desc;
2492 
2493 	cmd = &desc.params.restart_an;
2494 
2495 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
2496 
2497 	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
2498 	cmd->lport_num = pi->lport;
2499 	if (ena_link)
2500 		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
2501 	else
2502 		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
2503 
2504 	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
2505 }
2506 
2507 /**
2508  * ice_aq_set_event_mask
2509  * @hw: pointer to the HW struct
2510  * @port_num: port number of the physical function
2511  * @mask: event mask to be set
2512  * @cd: pointer to command details structure or NULL
2513  *
2514  * Set event mask (0x0613)
2515  */
2516 enum ice_status
2517 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
2518 		      struct ice_sq_cd *cd)
2519 {
2520 	struct ice_aqc_set_event_mask *cmd;
2521 	struct ice_aq_desc desc;
2522 
2523 	cmd = &desc.params.set_event_mask;
2524 
2525 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
2526 
2527 	cmd->lport_num = port_num;
2528 
2529 	cmd->event_mask = cpu_to_le16(mask);
2530 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2531 }
2532 
2533 /**
2534  * ice_aq_set_mac_loopback
2535  * @hw: pointer to the HW struct
2536  * @ena_lpbk: Enable or Disable loopback
2537  * @cd: pointer to command details structure or NULL
2538  *
2539  * Enable/disable loopback on a given port
2540  */
2541 enum ice_status
2542 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
2543 {
2544 	struct ice_aqc_set_mac_lb *cmd;
2545 	struct ice_aq_desc desc;
2546 
2547 	cmd = &desc.params.set_mac_lb;
2548 
2549 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
2550 	if (ena_lpbk)
2551 		cmd->lb_mode = ICE_AQ_MAC_LB_EN;
2552 
2553 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2554 }
2555 
2556 /**
2557  * ice_aq_set_port_id_led
2558  * @pi: pointer to the port information
2559  * @is_orig_mode: is this LED set to original mode (by the net-list)
2560  * @cd: pointer to command details structure or NULL
2561  *
2562  * Set LED value for the given port (0x06e9)
2563  */
2564 enum ice_status
2565 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
2566 		       struct ice_sq_cd *cd)
2567 {
2568 	struct ice_aqc_set_port_id_led *cmd;
2569 	struct ice_hw *hw = pi->hw;
2570 	struct ice_aq_desc desc;
2571 
2572 	cmd = &desc.params.set_port_id_led;
2573 
2574 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
2575 
2576 	if (is_orig_mode)
2577 		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
2578 	else
2579 		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
2580 
2581 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2582 }
2583 
2584 /**
2585  * ice_aq_sff_eeprom
2586  * @hw: pointer to the HW struct
2587  * @lport: bits [7:0] = logical port, bit [8] = logical port valid
2588  * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
2589  * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding.
2590  * @page: QSFP page
2591  * @set_page: set or ignore the page
2592  * @data: pointer to data buffer to be read/written to the I2C device.
2593  * @length: 1-16 for read, 1 for write.
2594  * @write: false for read, true for write.
2595  * @cd: pointer to command details structure or NULL
2596  *
2597  * Read/Write SFF EEPROM (0x06EE)
2598  */
2599 enum ice_status
2600 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
2601 		  u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
2602 		  bool write, struct ice_sq_cd *cd)
2603 {
2604 	struct ice_aqc_sff_eeprom *cmd;
2605 	struct ice_aq_desc desc;
2606 	enum ice_status status;
2607 
2608 	if (!data || (mem_addr & 0xff00))
2609 		return ICE_ERR_PARAM;
2610 
2611 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
2612 	cmd = &desc.params.read_write_sff_param;
2613 	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF);
2614 	cmd->lport_num = (u8)(lport & 0xff);
2615 	cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
2616 	cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) &
2617 					 ICE_AQC_SFF_I2CBUS_7BIT_M) |
2618 					((set_page <<
2619 					  ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
2620 					 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
2621 	cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
2622 	cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
2623 	if (write)
2624 		cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE);
2625 
2626 	status = ice_aq_send_cmd(hw, &desc, data, length, cd);
2627 	return status;
2628 }
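
/* Illustrative sketch only: reading the first 16 bytes of a module's lower
 * page over the typical 0xA0 bus address, with "data" being a caller-provided
 * 16-byte buffer and the page select left untouched (set_page = 0):
 *
 *	status = ice_aq_sff_eeprom(hw, 0, 0xA0, 0x00, 0, 0, data, 16,
 *				   false, NULL);
 */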
2629 
2630 /**
2631  * __ice_aq_get_set_rss_lut
2632  * @hw: pointer to the hardware structure
2633  * @vsi_id: VSI FW index
2634  * @lut_type: LUT table type
2635  * @lut: pointer to the LUT buffer provided by the caller
2636  * @lut_size: size of the LUT buffer
2637  * @glob_lut_idx: global LUT index
2638  * @set: set true to set the table, false to get the table
2639  *
2640  * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
2641  */
2642 static enum ice_status
2643 __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
2644 			 u16 lut_size, u8 glob_lut_idx, bool set)
2645 {
2646 	struct ice_aqc_get_set_rss_lut *cmd_resp;
2647 	struct ice_aq_desc desc;
2648 	enum ice_status status;
2649 	u16 flags = 0;
2650 
2651 	cmd_resp = &desc.params.get_set_rss_lut;
2652 
2653 	if (set) {
2654 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
2655 		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2656 	} else {
2657 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
2658 	}
2659 
2660 	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
2661 					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
2662 					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
2663 				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);
2664 
2665 	switch (lut_type) {
2666 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
2667 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
2668 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
2669 		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
2670 			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
2671 		break;
2672 	default:
2673 		status = ICE_ERR_PARAM;
2674 		goto ice_aq_get_set_rss_lut_exit;
2675 	}
2676 
2677 	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
2678 		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
2679 			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
2680 
2681 		if (!set)
2682 			goto ice_aq_get_set_rss_lut_send;
2683 	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
2684 		if (!set)
2685 			goto ice_aq_get_set_rss_lut_send;
2686 	} else {
2687 		goto ice_aq_get_set_rss_lut_send;
2688 	}
2689 
2690 	/* LUT size is only valid for Global and PF table types */
2691 	switch (lut_size) {
2692 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
2693 		break;
2694 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
2695 		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
2696 			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
2697 			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
2698 		break;
2699 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
2700 		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
2701 			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
2702 				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
2703 				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
2704 			break;
2705 		}
2706 		/* fall-through */
2707 	default:
2708 		status = ICE_ERR_PARAM;
2709 		goto ice_aq_get_set_rss_lut_exit;
2710 	}
2711 
2712 ice_aq_get_set_rss_lut_send:
2713 	cmd_resp->flags = cpu_to_le16(flags);
2714 	status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
2715 
2716 ice_aq_get_set_rss_lut_exit:
2717 	return status;
2718 }
2719 
2720 /**
2721  * ice_aq_get_rss_lut
2722  * @hw: pointer to the hardware structure
2723  * @vsi_handle: software VSI handle
2724  * @lut_type: LUT table type
2725  * @lut: pointer to the LUT buffer provided by the caller
2726  * @lut_size: size of the LUT buffer
2727  *
2728  * get the RSS lookup table, PF or VSI type
2729  */
2730 enum ice_status
2731 ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
2732 		   u8 *lut, u16 lut_size)
2733 {
2734 	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
2735 		return ICE_ERR_PARAM;
2736 
2737 	return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2738 					lut_type, lut, lut_size, 0, false);
2739 }
2740 
2741 /**
2742  * ice_aq_set_rss_lut
2743  * @hw: pointer to the hardware structure
2744  * @vsi_handle: software VSI handle
2745  * @lut_type: LUT table type
2746  * @lut: pointer to the LUT buffer provided by the caller
2747  * @lut_size: size of the LUT buffer
2748  *
2749  * set the RSS lookup table, PF or VSI type
2750  */
2751 enum ice_status
2752 ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
2753 		   u8 *lut, u16 lut_size)
2754 {
2755 	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
2756 		return ICE_ERR_PARAM;
2757 
2758 	return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2759 					lut_type, lut, lut_size, 0, true);
2760 }
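
/* Usage sketch (assuming the ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 define is
 * the LUT byte count): programming a 512-entry PF LUT for a valid VSI handle
 * could look like this, where "lut" is a caller-owned 512-byte array:
 *
 *	status = ice_aq_set_rss_lut(hw, vsi_handle,
 *				    ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
 *				    lut, ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512);
 */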
2761 
2762 /**
2763  * __ice_aq_get_set_rss_key
2764  * @hw: pointer to the HW struct
2765  * @vsi_id: VSI FW index
2766  * @key: pointer to key info struct
2767  * @set: set true to set the key, false to get the key
2768  *
2769  * get (0x0B04) or set (0x0B02) the RSS key per VSI
2770  */
2771 static enum ice_status
2772 __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
2773 			 struct ice_aqc_get_set_rss_keys *key,
2774 			 bool set)
2775 {
2776 	struct ice_aqc_get_set_rss_key *cmd_resp;
2777 	u16 key_size = sizeof(*key);
2778 	struct ice_aq_desc desc;
2779 
2780 	cmd_resp = &desc.params.get_set_rss_key;
2781 
2782 	if (set) {
2783 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
2784 		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2785 	} else {
2786 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
2787 	}
2788 
2789 	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
2790 					 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
2791 					ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
2792 				       ICE_AQC_GSET_RSS_KEY_VSI_VALID);
2793 
2794 	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
2795 }
2796 
2797 /**
2798  * ice_aq_get_rss_key
2799  * @hw: pointer to the HW struct
2800  * @vsi_handle: software VSI handle
2801  * @key: pointer to key info struct
2802  *
2803  * get the RSS key per VSI
2804  */
2805 enum ice_status
2806 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
2807 		   struct ice_aqc_get_set_rss_keys *key)
2808 {
2809 	if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
2810 		return ICE_ERR_PARAM;
2811 
2812 	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2813 					key, false);
2814 }
2815 
2816 /**
2817  * ice_aq_set_rss_key
2818  * @hw: pointer to the HW struct
2819  * @vsi_handle: software VSI handle
2820  * @keys: pointer to key info struct
2821  *
2822  * set the RSS key per VSI
2823  */
2824 enum ice_status
2825 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
2826 		   struct ice_aqc_get_set_rss_keys *keys)
2827 {
2828 	if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
2829 		return ICE_ERR_PARAM;
2830 
2831 	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2832 					keys, true);
2833 }
2834 
2835 /**
2836  * ice_aq_add_lan_txq
2837  * @hw: pointer to the hardware structure
2838  * @num_qgrps: Number of added queue groups
2839  * @qg_list: list of queue groups to be added
2840  * @buf_size: size of buffer for indirect command
2841  * @cd: pointer to command details structure or NULL
2842  *
2843  * Add Tx LAN queue (0x0C30)
2844  *
2845  * NOTE:
2846  * Prior to calling add Tx LAN queue:
2847  * Initialize the following as part of the Tx queue context:
2848  * Completion queue ID if the queue uses Completion queue, Quanta profile,
2849  * Cache profile and Packet shaper profile.
2850  *
2851  * After the add Tx LAN queue AQ command is completed:
2852  * Interrupts should be associated with specific queues.
2853  * Association of a Tx queue to a Doorbell queue is not part of the Add LAN
2854  * Tx queue flow.
2855  */
2856 static enum ice_status
2857 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
2858 		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
2859 		   struct ice_sq_cd *cd)
2860 {
2861 	u16 i, sum_header_size, sum_q_size = 0;
2862 	struct ice_aqc_add_tx_qgrp *list;
2863 	struct ice_aqc_add_txqs *cmd;
2864 	struct ice_aq_desc desc;
2865 
2866 	cmd = &desc.params.add_txqs;
2867 
2868 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
2869 
2870 	if (!qg_list)
2871 		return ICE_ERR_PARAM;
2872 
2873 	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
2874 		return ICE_ERR_PARAM;
2875 
2876 	sum_header_size = num_qgrps *
2877 		(sizeof(*qg_list) - sizeof(*qg_list->txqs));
2878 
2879 	list = qg_list;
2880 	for (i = 0; i < num_qgrps; i++) {
2881 		struct ice_aqc_add_txqs_perq *q = list->txqs;
2882 
2883 		sum_q_size += list->num_txqs * sizeof(*q);
2884 		list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);
2885 	}
2886 
2887 	if (buf_size != (sum_header_size + sum_q_size))
2888 		return ICE_ERR_PARAM;
2889 
2890 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2891 
2892 	cmd->num_qgrps = num_qgrps;
2893 
2894 	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
2895 }
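
/* Buffer sizing sketch (assuming struct ice_aqc_add_tx_qgrp declares room for
 * one ice_aqc_add_txqs_perq entry): for the single-group, single-queue case
 * used by ice_ena_vsi_txq() below, the size check above reduces to
 *
 *	buf_size == sizeof(struct ice_aqc_add_tx_qgrp)
 *
 * i.e. one group header plus exactly one per-queue entry.
 */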
2896 
2897 /**
2898  * ice_aq_dis_lan_txq
2899  * @hw: pointer to the hardware structure
2900  * @num_qgrps: number of groups in the list
2901  * @qg_list: the list of groups to disable
2902  * @buf_size: the total size of the qg_list buffer in bytes
2903  * @rst_src: if called due to reset, specifies the reset source
2904  * @vmvf_num: the relative VM or VF number that is undergoing the reset
2905  * @cd: pointer to command details structure or NULL
2906  *
2907  * Disable LAN Tx queue (0x0C31)
2908  */
2909 static enum ice_status
2910 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
2911 		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
2912 		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
2913 		   struct ice_sq_cd *cd)
2914 {
2915 	struct ice_aqc_dis_txqs *cmd;
2916 	struct ice_aq_desc desc;
2917 	enum ice_status status;
2918 	u16 i, sz = 0;
2919 
2920 	cmd = &desc.params.dis_txqs;
2921 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
2922 
2923 	/* qg_list can be NULL only in VM/VF reset flow */
2924 	if (!qg_list && !rst_src)
2925 		return ICE_ERR_PARAM;
2926 
2927 	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
2928 		return ICE_ERR_PARAM;
2929 
2930 	cmd->num_entries = num_qgrps;
2931 
2932 	cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
2933 					    ICE_AQC_Q_DIS_TIMEOUT_M);
2934 
2935 	switch (rst_src) {
2936 	case ICE_VM_RESET:
2937 		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
2938 		cmd->vmvf_and_timeout |=
2939 			cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
2940 		break;
2941 	case ICE_VF_RESET:
2942 		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
2943 		/* In this case, FW expects vmvf_num to be absolute VF ID */
2944 		cmd->vmvf_and_timeout |=
2945 			cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
2946 				    ICE_AQC_Q_DIS_VMVF_NUM_M);
2947 		break;
2948 	case ICE_NO_RESET:
2949 	default:
2950 		break;
2951 	}
2952 
2953 	/* flush pipe on time out */
2954 	cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
2955 	/* If no queue group info, we are in a reset flow. Issue the AQ */
2956 	if (!qg_list)
2957 		goto do_aq;
2958 
2959 	/* set RD bit to indicate that command buffer is provided by the driver
2960 	 * and it needs to be read by the firmware
2961 	 */
2962 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2963 
2964 	for (i = 0; i < num_qgrps; ++i) {
2965 		/* Calculate the size taken up by the queue IDs in this group */
2966 		sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);
2967 
2968 		/* Add the size of the group header */
2969 		sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);
2970 
2971 		/* If the num of queues is even, add 2 bytes of padding */
2972 		if ((qg_list[i].num_qs % 2) == 0)
2973 			sz += 2;
2974 	}
2975 
2976 	if (buf_size != sz)
2977 		return ICE_ERR_PARAM;
2978 
2979 do_aq:
2980 	status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
2981 	if (status) {
2982 		if (!qg_list)
2983 			ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
2984 				  vmvf_num, hw->adminq.sq_last_status);
2985 		else
2986 			ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
2987 				  le16_to_cpu(qg_list[0].q_id[0]),
2988 				  hw->adminq.sq_last_status);
2989 	}
2990 	return status;
2991 }
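
/* Sizing note (sketch, assuming the q_id array in struct ice_aqc_dis_txq_item
 * holds one entry): for a single group disabling a single queue, the loop
 * above computes sz = sizeof(struct ice_aqc_dis_txq_item), which is exactly
 * the buf_size that ice_dis_vsi_txq() passes for its on-stack qg_list.
 */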
2992 
2993 /* End of FW Admin Queue command wrappers */
2994 
2995 /**
2996  * ice_write_byte - write a byte to a packed context structure
2997  * @src_ctx:  the context structure to read from
2998  * @dest_ctx: the context to be written to
2999  * @ce_info:  a description of the struct to be filled
3000  */
3001 static void
3002 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3003 {
3004 	u8 src_byte, dest_byte, mask;
3005 	u8 *from, *dest;
3006 	u16 shift_width;
3007 
3008 	/* copy from the next struct field */
3009 	from = src_ctx + ce_info->offset;
3010 
3011 	/* prepare the bits and mask */
3012 	shift_width = ce_info->lsb % 8;
3013 	mask = (u8)(BIT(ce_info->width) - 1);
3014 
3015 	src_byte = *from;
3016 	src_byte &= mask;
3017 
3018 	/* shift to correct alignment */
3019 	mask <<= shift_width;
3020 	src_byte <<= shift_width;
3021 
3022 	/* get the current bits from the target bit string */
3023 	dest = dest_ctx + (ce_info->lsb / 8);
3024 
3025 	memcpy(&dest_byte, dest, sizeof(dest_byte));
3026 
3027 	dest_byte &= ~mask;	/* get the bits not changing */
3028 	dest_byte |= src_byte;	/* add in the new bits */
3029 
3030 	/* put it all back */
3031 	memcpy(dest, &dest_byte, sizeof(dest_byte));
3032 }
3033 
3034 /**
3035  * ice_write_word - write a word to a packed context structure
3036  * @src_ctx:  the context structure to read from
3037  * @dest_ctx: the context to be written to
3038  * @ce_info:  a description of the struct to be filled
3039  */
3040 static void
3041 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3042 {
3043 	u16 src_word, mask;
3044 	__le16 dest_word;
3045 	u8 *from, *dest;
3046 	u16 shift_width;
3047 
3048 	/* copy from the next struct field */
3049 	from = src_ctx + ce_info->offset;
3050 
3051 	/* prepare the bits and mask */
3052 	shift_width = ce_info->lsb % 8;
3053 	mask = BIT(ce_info->width) - 1;
3054 
3055 	/* don't swizzle the bits until after the mask because the mask bits
3056 	 * will be in a different bit position on big endian machines
3057 	 */
3058 	src_word = *(u16 *)from;
3059 	src_word &= mask;
3060 
3061 	/* shift to correct alignment */
3062 	mask <<= shift_width;
3063 	src_word <<= shift_width;
3064 
3065 	/* get the current bits from the target bit string */
3066 	dest = dest_ctx + (ce_info->lsb / 8);
3067 
3068 	memcpy(&dest_word, dest, sizeof(dest_word));
3069 
3070 	dest_word &= ~(cpu_to_le16(mask));	/* get the bits not changing */
3071 	dest_word |= cpu_to_le16(src_word);	/* add in the new bits */
3072 
3073 	/* put it all back */
3074 	memcpy(dest, &dest_word, sizeof(dest_word));
3075 }
3076 
3077 /**
3078  * ice_write_dword - write a dword to a packed context structure
3079  * @src_ctx:  the context structure to read from
3080  * @dest_ctx: the context to be written to
3081  * @ce_info:  a description of the struct to be filled
3082  */
3083 static void
3084 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3085 {
3086 	u32 src_dword, mask;
3087 	__le32 dest_dword;
3088 	u8 *from, *dest;
3089 	u16 shift_width;
3090 
3091 	/* copy from the next struct field */
3092 	from = src_ctx + ce_info->offset;
3093 
3094 	/* prepare the bits and mask */
3095 	shift_width = ce_info->lsb % 8;
3096 
3097 	/* if the field width is exactly 32 on an x86 machine, then the shift
3098 	 * operation will not work because the SHL instruction's count is masked
3099 	 * to 5 bits so the shift will do nothing
3100 	 */
3101 	if (ce_info->width < 32)
3102 		mask = BIT(ce_info->width) - 1;
3103 	else
3104 		mask = (u32)~0;
3105 
3106 	/* don't swizzle the bits until after the mask because the mask bits
3107 	 * will be in a different bit position on big endian machines
3108 	 */
3109 	src_dword = *(u32 *)from;
3110 	src_dword &= mask;
3111 
3112 	/* shift to correct alignment */
3113 	mask <<= shift_width;
3114 	src_dword <<= shift_width;
3115 
3116 	/* get the current bits from the target bit string */
3117 	dest = dest_ctx + (ce_info->lsb / 8);
3118 
3119 	memcpy(&dest_dword, dest, sizeof(dest_dword));
3120 
3121 	dest_dword &= ~(cpu_to_le32(mask));	/* get the bits not changing */
3122 	dest_dword |= cpu_to_le32(src_dword);	/* add in the new bits */
3123 
3124 	/* put it all back */
3125 	memcpy(dest, &dest_dword, sizeof(dest_dword));
3126 }
3127 
3128 /**
3129  * ice_write_qword - write a qword to a packed context structure
3130  * @src_ctx:  the context structure to read from
3131  * @dest_ctx: the context to be written to
3132  * @ce_info:  a description of the struct to be filled
3133  */
3134 static void
3135 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3136 {
3137 	u64 src_qword, mask;
3138 	__le64 dest_qword;
3139 	u8 *from, *dest;
3140 	u16 shift_width;
3141 
3142 	/* copy from the next struct field */
3143 	from = src_ctx + ce_info->offset;
3144 
3145 	/* prepare the bits and mask */
3146 	shift_width = ce_info->lsb % 8;
3147 
3148 	/* if the field width is exactly 64 on an x86 machine, then the shift
3149 	 * operation will not work because the SHL instruction's count is masked
3150 	 * to 6 bits so the shift will do nothing
3151 	 */
3152 	if (ce_info->width < 64)
3153 		mask = BIT_ULL(ce_info->width) - 1;
3154 	else
3155 		mask = (u64)~0;
3156 
3157 	/* don't swizzle the bits until after the mask because the mask bits
3158 	 * will be in a different bit position on big endian machines
3159 	 */
3160 	src_qword = *(u64 *)from;
3161 	src_qword &= mask;
3162 
3163 	/* shift to correct alignment */
3164 	mask <<= shift_width;
3165 	src_qword <<= shift_width;
3166 
3167 	/* get the current bits from the target bit string */
3168 	dest = dest_ctx + (ce_info->lsb / 8);
3169 
3170 	memcpy(&dest_qword, dest, sizeof(dest_qword));
3171 
3172 	dest_qword &= ~(cpu_to_le64(mask));	/* get the bits not changing */
3173 	dest_qword |= cpu_to_le64(src_qword);	/* add in the new bits */
3174 
3175 	/* put it all back */
3176 	memcpy(dest, &dest_qword, sizeof(dest_qword));
3177 }
3178 
3179 /**
3180  * ice_set_ctx - set context bits in packed structure
3181  * @src_ctx:  pointer to a generic non-packed context structure
3182  * @dest_ctx: pointer to memory for the packed structure
3183  * @ce_info:  a description of the structure to be transformed
3184  */
3185 enum ice_status
3186 ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3187 {
3188 	int f;
3189 
3190 	for (f = 0; ce_info[f].width; f++) {
3191 		/* We have to deal with each element of the context structure
3192 		 * using the correct size so that we are correct regardless
3193 		 * of the endianness of the machine.
3194 		 */
3195 		switch (ce_info[f].size_of) {
3196 		case sizeof(u8):
3197 			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
3198 			break;
3199 		case sizeof(u16):
3200 			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
3201 			break;
3202 		case sizeof(u32):
3203 			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
3204 			break;
3205 		case sizeof(u64):
3206 			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
3207 			break;
3208 		default:
3209 			return ICE_ERR_INVAL_SIZE;
3210 		}
3211 	}
3212 
3213 	return 0;
3214 }
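
/* Usage sketch: callers pair a non-packed context structure with a matching
 * ce_info descriptor table; for instance, packing a Tx LAN queue context into
 * the firmware buffer could look roughly like this (ice_tlan_ctx_info is the
 * driver's Tx queue descriptor table; the destination buffer lives in the
 * add Tx queue AQ structure):
 *
 *	status = ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
 *			     ice_tlan_ctx_info);
 */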
3215 
3216 /**
3217  * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
3218  * @hw: pointer to the HW struct
3219  * @vsi_handle: software VSI handle
3220  * @tc: TC number
3221  * @q_handle: software queue handle
3222  */
3223 struct ice_q_ctx *
3224 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
3225 {
3226 	struct ice_vsi_ctx *vsi;
3227 	struct ice_q_ctx *q_ctx;
3228 
3229 	vsi = ice_get_vsi_ctx(hw, vsi_handle);
3230 	if (!vsi)
3231 		return NULL;
3232 	if (q_handle >= vsi->num_lan_q_entries[tc])
3233 		return NULL;
3234 	if (!vsi->lan_q_ctx[tc])
3235 		return NULL;
3236 	q_ctx = vsi->lan_q_ctx[tc];
3237 	return &q_ctx[q_handle];
3238 }
3239 
3240 /**
3241  * ice_ena_vsi_txq
3242  * @pi: port information structure
3243  * @vsi_handle: software VSI handle
3244  * @tc: TC number
3245  * @q_handle: software queue handle
3246  * @num_qgrps: Number of added queue groups
3247  * @buf: list of queue groups to be added
3248  * @buf_size: size of buffer for indirect command
3249  * @cd: pointer to command details structure or NULL
3250  *
3251  * This function adds one LAN queue
3252  */
3253 enum ice_status
3254 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
3255 		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
3256 		struct ice_sq_cd *cd)
3257 {
3258 	struct ice_aqc_txsched_elem_data node = { 0 };
3259 	struct ice_sched_node *parent;
3260 	struct ice_q_ctx *q_ctx;
3261 	enum ice_status status;
3262 	struct ice_hw *hw;
3263 
3264 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
3265 		return ICE_ERR_CFG;
3266 
3267 	if (num_qgrps > 1 || buf->num_txqs > 1)
3268 		return ICE_ERR_MAX_LIMIT;
3269 
3270 	hw = pi->hw;
3271 
3272 	if (!ice_is_vsi_valid(hw, vsi_handle))
3273 		return ICE_ERR_PARAM;
3274 
3275 	mutex_lock(&pi->sched_lock);
3276 
3277 	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
3278 	if (!q_ctx) {
3279 		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
3280 			  q_handle);
3281 		status = ICE_ERR_PARAM;
3282 		goto ena_txq_exit;
3283 	}
3284 
3285 	/* find a parent node */
3286 	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
3287 					    ICE_SCHED_NODE_OWNER_LAN);
3288 	if (!parent) {
3289 		status = ICE_ERR_PARAM;
3290 		goto ena_txq_exit;
3291 	}
3292 
3293 	buf->parent_teid = parent->info.node_teid;
3294 	node.parent_teid = parent->info.node_teid;
3295 	/* Mark the values in the "generic" section as valid. The default
3296 	 * value in the "generic" section is zero. This means that:
3297 	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
3298 	 * - Priority 0 among siblings, indicated by Bits 1-3.
3299 	 * - WFQ, indicated by Bit 4.
3300 	 * - Adjustment value of 0 is used in the PSM credit update flow,
3301 	 *   indicated by Bits 5-6.
3302 	 * - Bit 7 is reserved.
3303 	 * Without setting the generic section as valid in valid_sections, the
3304 	 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
3305 	 */
3306 	buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;
3307 
3308 	/* add the LAN queue */
3309 	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
3310 	if (status) {
3311 		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
3312 			  le16_to_cpu(buf->txqs[0].txq_id),
3313 			  hw->adminq.sq_last_status);
3314 		goto ena_txq_exit;
3315 	}
3316 
3317 	node.node_teid = buf->txqs[0].q_teid;
3318 	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
3319 	q_ctx->q_handle = q_handle;
3320 	q_ctx->q_teid = le32_to_cpu(node.node_teid);
3321 
3322 	/* add a leaf node into scheduler tree queue layer */
3323 	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
3324 	if (!status)
3325 		status = ice_sched_replay_q_bw(pi, q_ctx);
3326 
3327 ena_txq_exit:
3328 	mutex_unlock(&pi->sched_lock);
3329 	return status;
3330 }
3331 
3332 /**
3333  * ice_dis_vsi_txq
3334  * @pi: port information structure
3335  * @vsi_handle: software VSI handle
3336  * @tc: TC number
3337  * @num_queues: number of queues
3338  * @q_handles: pointer to software queue handle array
3339  * @q_ids: pointer to the q_id array
3340  * @q_teids: pointer to queue node teids
3341  * @rst_src: if called due to reset, specifies the reset source
3342  * @vmvf_num: the relative VM or VF number that is undergoing the reset
3343  * @cd: pointer to command details structure or NULL
3344  *
3345  * This function removes queues and their corresponding nodes in SW DB
3346  */
3347 enum ice_status
3348 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
3349 		u16 *q_handles, u16 *q_ids, u32 *q_teids,
3350 		enum ice_disq_rst_src rst_src, u16 vmvf_num,
3351 		struct ice_sq_cd *cd)
3352 {
3353 	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
3354 	struct ice_aqc_dis_txq_item qg_list;
3355 	struct ice_q_ctx *q_ctx;
3356 	u16 i;
3357 
3358 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
3359 		return ICE_ERR_CFG;
3360 
3361 	if (!num_queues) {
3362 		/* if the queues are already disabled but the disable queue
3363 		 * command still has to be sent to complete the VF reset, then
3364 		 * call ice_aq_dis_lan_txq without any queue information
3365 		 */
3366 		if (rst_src)
3367 			return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src,
3368 						  vmvf_num, NULL);
3369 		return ICE_ERR_CFG;
3370 	}
3371 
3372 	mutex_lock(&pi->sched_lock);
3373 
3374 	for (i = 0; i < num_queues; i++) {
3375 		struct ice_sched_node *node;
3376 
3377 		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
3378 		if (!node)
3379 			continue;
3380 		q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handles[i]);
3381 		if (!q_ctx) {
3382 			ice_debug(pi->hw, ICE_DBG_SCHED, "invalid queue handle%d\n",
3383 				  q_handles[i]);
3384 			continue;
3385 		}
3386 		if (q_ctx->q_handle != q_handles[i]) {
3387 			ice_debug(pi->hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
3388 				  q_ctx->q_handle, q_handles[i]);
3389 			continue;
3390 		}
3391 		qg_list.parent_teid = node->info.parent_teid;
3392 		qg_list.num_qs = 1;
3393 		qg_list.q_id[0] = cpu_to_le16(q_ids[i]);
3394 		status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
3395 					    sizeof(qg_list), rst_src, vmvf_num,
3396 					    cd);
3397 
3398 		if (status)
3399 			break;
3400 		ice_free_sched_node(pi, node);
3401 		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
3402 	}
3403 	mutex_unlock(&pi->sched_lock);
3404 	return status;
3405 }
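
/* Illustrative usage sketch (not part of the driver): disabling one LAN Tx
 * queue outside of a reset via ice_dis_vsi_txq(). The wrapper name is
 * hypothetical; the handle, ID and TEID would normally come from the caller's
 * ring bookkeeping (the TEID saved when the queue was enabled).
 */
static enum ice_status __maybe_unused
ice_example_dis_one_txq(struct ice_port_info *pi, u16 vsi_handle, u16 q_handle,
			u16 q_id, u32 q_teid)
{
	return ice_dis_vsi_txq(pi, vsi_handle, 0 /* tc */, 1, &q_handle, &q_id,
			       &q_teid, ICE_NO_RESET, 0, NULL);
}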
3406 
3407 /**
3408  * ice_cfg_vsi_qs - configure the new/existing VSI queues
3409  * @pi: port information structure
3410  * @vsi_handle: software VSI handle
3411  * @tc_bitmap: TC bitmap
3412  * @maxqs: max queues array per TC
3413  * @owner: LAN or RDMA
3414  *
3415  * This function adds/updates the VSI queues per TC.
3416  */
3417 static enum ice_status
3418 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
3419 	       u16 *maxqs, u8 owner)
3420 {
3421 	enum ice_status status = 0;
3422 	u8 i;
3423 
3424 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
3425 		return ICE_ERR_CFG;
3426 
3427 	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
3428 		return ICE_ERR_PARAM;
3429 
3430 	mutex_lock(&pi->sched_lock);
3431 
3432 	ice_for_each_traffic_class(i) {
3433 		/* configuration is possible only if TC node is present */
3434 		if (!ice_sched_get_tc_node(pi, i))
3435 			continue;
3436 
3437 		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
3438 					   ice_is_tc_ena(tc_bitmap, i));
3439 		if (status)
3440 			break;
3441 	}
3442 
3443 	mutex_unlock(&pi->sched_lock);
3444 	return status;
3445 }
3446 
3447 /**
3448  * ice_cfg_vsi_lan - configure VSI LAN queues
3449  * @pi: port information structure
3450  * @vsi_handle: software VSI handle
3451  * @tc_bitmap: TC bitmap
3452  * @max_lanqs: max LAN queues array per TC
3453  *
3454  * This function adds/updates the VSI LAN queues per TC.
3455  */
3456 enum ice_status
3457 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
3458 		u16 *max_lanqs)
3459 {
3460 	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
3461 			      ICE_SCHED_NODE_OWNER_LAN);
3462 }
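
/* Illustrative usage sketch (not part of the driver): requesting LAN queue
 * space for a VSI on TC 0 only via ice_cfg_vsi_lan(). The queue count and
 * wrapper name are hypothetical; array entries for TCs that are not set in
 * the bitmap are ignored.
 */
static enum ice_status __maybe_unused
ice_example_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle)
{
	u16 max_lanqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };

	max_lanqs[0] = 16;	/* hypothetical queue count for TC 0 */

	return ice_cfg_vsi_lan(pi, vsi_handle, BIT(0), max_lanqs);
}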
3463 
3464 /**
3465  * ice_replay_pre_init - replay pre-initialization
3466  * @hw: pointer to the HW struct
3467  *
3468  * Initializes required config data for VSI, FD, ACL, and RSS before replay.
3469  */
3470 static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
3471 {
3472 	struct ice_switch_info *sw = hw->switch_info;
3473 	u8 i;
3474 
3475 	/* Delete old entries from replay filter list head if there is any */
3476 	ice_rm_all_sw_replay_rule_info(hw);
3477 	/* At the start of replay, move the entries onto the replay_rules
3478 	 * list; this allows the rule entries to be added back to the
3479 	 * filt_rules list, which is the operational list.
3480 	 */
3481 	for (i = 0; i < ICE_SW_LKUP_LAST; i++)
3482 		list_replace_init(&sw->recp_list[i].filt_rules,
3483 				  &sw->recp_list[i].filt_replay_rules);
3484 
3485 	return 0;
3486 }
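
/* Note on the list handling above (illustrative, following list_replace_init()
 * semantics): after ice_replay_pre_init() returns, the rules that were on the
 * operational filt_rules list are reachable from filt_replay_rules, and
 * filt_rules is an empty, reinitialized list head ready to receive the
 * replayed entries, e.g.:
 *
 *	ice_replay_pre_init(hw);
 *	// sw->recp_list[i].filt_rules is now empty
 *	// sw->recp_list[i].filt_replay_rules holds the pre-reset rules
 */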
3487 
3488 /**
3489  * ice_replay_vsi - replay VSI configuration
3490  * @hw: pointer to the HW struct
3491  * @vsi_handle: driver VSI handle
3492  *
3493  * Restore all VSI configuration after reset. This function must be called
3494  * with the main VSI first.
3495  */
3496 enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
3497 {
3498 	enum ice_status status;
3499 
3500 	if (!ice_is_vsi_valid(hw, vsi_handle))
3501 		return ICE_ERR_PARAM;
3502 
3503 	/* Replay pre-initialization if there is any */
3504 	if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
3505 		status = ice_replay_pre_init(hw);
3506 		if (status)
3507 			return status;
3508 	}
3509 	/* Replay per VSI all RSS configurations */
3510 	status = ice_replay_rss_cfg(hw, vsi_handle);
3511 	if (status)
3512 		return status;
3513 	/* Replay per VSI all filters */
3514 	status = ice_replay_vsi_all_fltr(hw, vsi_handle);
3515 	return status;
3516 }
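
/* Illustrative usage sketch (not part of the driver): replaying all VSIs
 * after a reset and then cleaning up. Iterating from handle 0 upward replays
 * the main VSI first, assuming ICE_MAIN_VSI_HANDLE is the lowest handle; the
 * wrapper name is hypothetical.
 */
static void __maybe_unused ice_example_replay_all(struct ice_hw *hw)
{
	u16 vsi_handle;

	for (vsi_handle = 0; vsi_handle < ICE_MAX_VSI; vsi_handle++) {
		if (!ice_is_vsi_valid(hw, vsi_handle))
			continue;
		if (ice_replay_vsi(hw, vsi_handle))
			break;
	}

	ice_replay_post(hw);
}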
3517 
3518 /**
3519  * ice_replay_post - post replay configuration cleanup
3520  * @hw: pointer to the HW struct
3521  *
3522  * Post replay cleanup.
3523  */
3524 void ice_replay_post(struct ice_hw *hw)
3525 {
3526 	/* Delete old entries from replay filter list head */
3527 	ice_rm_all_sw_replay_rule_info(hw);
3528 }
3529 
3530 /**
3531  * ice_stat_update40 - read 40 bit stat from the chip and update stat values
3532  * @hw: ptr to the hardware info
3533  * @reg: offset of 64 bit HW register to read from
3534  * @prev_stat_loaded: bool to specify if previous stats are loaded
3535  * @prev_stat: ptr to previous loaded stat value
3536  * @cur_stat: ptr to current stat value
3537  */
3538 void
3539 ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
3540 		  u64 *prev_stat, u64 *cur_stat)
3541 {
3542 	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
3543 
3544 	/* device stats are not reset at PFR; they likely will not be zeroed
3545 	 * when the driver starts. Thus, save the value from the first read
3546 	 * without adding to the statistic value so that we report stats which
3547 	 * count up from zero.
3548 	 */
3549 	if (!prev_stat_loaded) {
3550 		*prev_stat = new_data;
3551 		return;
3552 	}
3553 
3554 	/* Calculate the difference between the new and old values, and then
3555 	 * add it to the software stat value.
3556 	 */
3557 	if (new_data >= *prev_stat)
3558 		*cur_stat += new_data - *prev_stat;
3559 	else
3560 		/* to manage the potential roll-over */
3561 		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
3562 
3563 	/* Update the previously stored value to prepare for next read */
3564 	*prev_stat = new_data;
3565 }
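
/* Worked example of the 40-bit roll-over handling above (illustrative
 * numbers): with *prev_stat = 0xFFFFFFFF00 and a post-wrap read of
 * new_data = 0x10, new_data < *prev_stat, so the increment becomes
 * (0x10 + BIT_ULL(40)) - 0xFFFFFFFF00 = 0x110, i.e. the 0x100 counts up to
 * the wrap point plus the 0x10 counts after it.
 */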
3566 
3567 /**
3568  * ice_stat_update32 - read 32 bit stat from the chip and update stat values
3569  * @hw: ptr to the hardware info
3570  * @reg: offset of HW register to read from
3571  * @prev_stat_loaded: bool to specify if previous stats are loaded
3572  * @prev_stat: ptr to previous loaded stat value
3573  * @cur_stat: ptr to current stat value
3574  */
3575 void
3576 ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
3577 		  u64 *prev_stat, u64 *cur_stat)
3578 {
3579 	u32 new_data;
3580 
3581 	new_data = rd32(hw, reg);
3582 
3583 	/* device stats are not reset at PFR; they likely will not be zeroed
3584 	 * when the driver starts. Thus, save the value from the first read
3585 	 * without adding to the statistic value so that we report stats which
3586 	 * count up from zero.
3587 	 */
3588 	if (!prev_stat_loaded) {
3589 		*prev_stat = new_data;
3590 		return;
3591 	}
3592 
3593 	/* Calculate the difference between the new and old values, and then
3594 	 * add it to the software stat value.
3595 	 */
3596 	if (new_data >= *prev_stat)
3597 		*cur_stat += new_data - *prev_stat;
3598 	else
3599 		/* to manage the potential roll-over */
3600 		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
3601 
3602 	/* Update the previously stored value to prepare for next read */
3603 	*prev_stat = new_data;
3604 }
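
/* Illustrative usage sketch (not part of the driver): typical pattern for the
 * stat helpers above. The register offset and wrapper name are placeholders;
 * real callers keep a prev/cur pair per statistic plus a per-PF flag that
 * records whether the offsets have been loaded.
 */
static void __maybe_unused
ice_example_update_stat(struct ice_hw *hw, bool offs_loaded, u64 *prev_stat,
			u64 *cur_stat)
{
	/* 0x300000 is a hypothetical register offset, not a real one */
	ice_stat_update32(hw, 0x300000, offs_loaded, prev_stat, cur_stat);
}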
3605 
3606 /**
3607  * ice_sched_query_elem - query element information from HW
3608  * @hw: pointer to the HW struct
3609  * @node_teid: node TEID to be queried
3610  * @buf: buffer to hold the element information
3611  *
3612  * This function queries HW element information
3613  */
3614 enum ice_status
3615 ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
3616 		     struct ice_aqc_get_elem *buf)
3617 {
3618 	u16 buf_size, num_elem_ret = 0;
3619 	enum ice_status status;
3620 
3621 	buf_size = sizeof(*buf);
3622 	memset(buf, 0, buf_size);
3623 	buf->generic[0].node_teid = cpu_to_le32(node_teid);
3624 	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
3625 					  NULL);
3626 	if (status || num_elem_ret != 1)
3627 		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
3628 	return status;
3629 }
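
/* Illustrative usage sketch (not part of the driver): querying the scheduler
 * element for a node whose TEID is already known, for example the q_teid
 * saved when a queue was enabled. The wrapper name is hypothetical.
 */
static enum ice_status __maybe_unused
ice_example_query_node(struct ice_hw *hw, u32 node_teid)
{
	struct ice_aqc_get_elem buf;

	return ice_sched_query_elem(hw, node_teid, &buf);
}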
3630