1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 #include "ice_common.h"
5 #include "ice_sched.h"
6 #include "ice_adminq_cmd.h"
7 
8 #define ICE_PF_RESET_WAIT_COUNT	200
9 
10 /**
11  * ice_set_mac_type - Sets MAC type
12  * @hw: pointer to the HW structure
13  *
14  * This function sets the MAC type of the adapter based on the
15  * vendor ID and device ID stored in the hw structure.
16  */
17 static enum ice_status ice_set_mac_type(struct ice_hw *hw)
18 {
19 	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
20 		return ICE_ERR_DEVICE_NOT_SUPPORTED;
21 
22 	hw->mac_type = ICE_MAC_GENERIC;
23 	return 0;
24 }
25 
26 /**
27  * ice_clear_pf_cfg - Clear PF configuration
28  * @hw: pointer to the hardware structure
29  */
30 enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
31 {
32 	struct ice_aq_desc desc;
33 
34 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
35 
36 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
37 }
38 
/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the hw struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return per PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in user specified buffer. Please interpret user specified
 * buffer as "manage_mac_read" response.
 * Response such as various MAC addresses are stored in HW struct (port.mac)
 * ice_aq_discover_caps is expected to be called before this function is called.
 */
static enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags;

	/* cmd aliases desc.params, so it reflects whatever the send call
	 * leaves in the descriptor after completion
	 */
	cmd = &desc.params.mac_read;

	/* the response is written into @buf; it must be able to hold at
	 * least one manage_mac_read response element
	 */
	if (buf_size < sizeof(*resp))
		return ICE_ERR_BUF_TOO_SHORT;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	/* flags are read back from the (now completed) descriptor */
	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	/* a response without a valid LAN address is treated as a
	 * configuration error
	 */
	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return ICE_ERR_CFG;
	}

	/* cache the reported address as both the LAN and permanent MAC */
	ether_addr_copy(hw->port_info->mac.lan_addr, resp->mac_addr);
	ether_addr_copy(hw->port_info->mac.perm_addr, resp->mac_addr);
	return 0;
}
86 
87 /**
88  * ice_aq_get_phy_caps - returns PHY capabilities
89  * @pi: port information structure
90  * @qual_mods: report qualified modules
91  * @report_mode: report mode capabilities
92  * @pcaps: structure for PHY capabilities to be filled
93  * @cd: pointer to command details structure or NULL
94  *
95  * Returns the various PHY capabilities supported on the Port (0x0600)
96  */
97 static enum ice_status
98 ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
99 		    struct ice_aqc_get_phy_caps_data *pcaps,
100 		    struct ice_sq_cd *cd)
101 {
102 	struct ice_aqc_get_phy_caps *cmd;
103 	u16 pcaps_size = sizeof(*pcaps);
104 	struct ice_aq_desc desc;
105 	enum ice_status status;
106 
107 	cmd = &desc.params.get_phy;
108 
109 	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
110 		return ICE_ERR_PARAM;
111 
112 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
113 
114 	if (qual_mods)
115 		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);
116 
117 	cmd->param0 |= cpu_to_le16(report_mode);
118 	status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);
119 
120 	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP)
121 		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
122 
123 	return status;
124 }
125 
126 /**
127  * ice_get_media_type - Gets media type
128  * @pi: port information structure
129  */
130 static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
131 {
132 	struct ice_link_status *hw_link_info;
133 
134 	if (!pi)
135 		return ICE_MEDIA_UNKNOWN;
136 
137 	hw_link_info = &pi->phy.link_info;
138 
139 	if (hw_link_info->phy_type_low) {
140 		switch (hw_link_info->phy_type_low) {
141 		case ICE_PHY_TYPE_LOW_1000BASE_SX:
142 		case ICE_PHY_TYPE_LOW_1000BASE_LX:
143 		case ICE_PHY_TYPE_LOW_10GBASE_SR:
144 		case ICE_PHY_TYPE_LOW_10GBASE_LR:
145 		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
146 		case ICE_PHY_TYPE_LOW_25GBASE_SR:
147 		case ICE_PHY_TYPE_LOW_25GBASE_LR:
148 		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
149 		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
150 		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
151 			return ICE_MEDIA_FIBER;
152 		case ICE_PHY_TYPE_LOW_100BASE_TX:
153 		case ICE_PHY_TYPE_LOW_1000BASE_T:
154 		case ICE_PHY_TYPE_LOW_2500BASE_T:
155 		case ICE_PHY_TYPE_LOW_5GBASE_T:
156 		case ICE_PHY_TYPE_LOW_10GBASE_T:
157 		case ICE_PHY_TYPE_LOW_25GBASE_T:
158 			return ICE_MEDIA_BASET;
159 		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
160 		case ICE_PHY_TYPE_LOW_25GBASE_CR:
161 		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
162 		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
163 		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
164 			return ICE_MEDIA_DA;
165 		case ICE_PHY_TYPE_LOW_1000BASE_KX:
166 		case ICE_PHY_TYPE_LOW_2500BASE_KX:
167 		case ICE_PHY_TYPE_LOW_2500BASE_X:
168 		case ICE_PHY_TYPE_LOW_5GBASE_KR:
169 		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
170 		case ICE_PHY_TYPE_LOW_25GBASE_KR:
171 		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
172 		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
173 		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
174 			return ICE_MEDIA_BACKPLANE;
175 		}
176 	}
177 
178 	return ICE_MEDIA_UNKNOWN;
179 }
180 
/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 * On success the port's cached link/media/flow-control state is
 * refreshed (the previous link info is preserved in link_info_old),
 * and a copy of the new link status is written to @link if provided.
 */
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_link_status *hw_link_info_old, *hw_link_info;
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;
	hw_link_info_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	hw_link_info = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	/* request firmware to enable or disable link status events */
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(pi->hw, &desc, &link_data, sizeof(link_data),
				 cd);

	if (status)
		return status;

	/* save off old link status information */
	*hw_link_info_old = *hw_link_info;

	/* update current link status information */
	hw_link_info->link_speed = le16_to_cpu(link_data.link_speed);
	hw_link_info->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	/* media type is derived from the freshly stored phy_type_low */
	*hw_media_type = ice_get_media_type(pi);
	hw_link_info->link_info = link_data.link_info;
	hw_link_info->an_info = link_data.an_info;
	hw_link_info->ext_info = link_data.ext_info;
	hw_link_info->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	hw_link_info->pacing = link_data.cfg & ICE_AQ_CFG_PACING_M;

	/* update fc info: map the negotiated pause bits onto a mode */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	/* firmware echoes whether LSE actually ended up enabled */
	hw_link_info->lse_ena =
		!!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	/* save link status information */
	if (link)
		*link = *hw_link_info;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return status;
}
260 
261 /**
262  * ice_init_hw - main hardware initialization routine
263  * @hw: pointer to the hardware structure
264  */
265 enum ice_status ice_init_hw(struct ice_hw *hw)
266 {
267 	struct ice_aqc_get_phy_caps_data *pcaps;
268 	enum ice_status status;
269 	u16 mac_buf_len;
270 	void *mac_buf;
271 
272 	/* Set MAC type based on DeviceID */
273 	status = ice_set_mac_type(hw);
274 	if (status)
275 		return status;
276 
277 	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
278 			 PF_FUNC_RID_FUNC_NUM_M) >>
279 		PF_FUNC_RID_FUNC_NUM_S;
280 
281 	status = ice_reset(hw, ICE_RESET_PFR);
282 	if (status)
283 		return status;
284 
285 	status = ice_init_all_ctrlq(hw);
286 	if (status)
287 		goto err_unroll_cqinit;
288 
289 	status = ice_clear_pf_cfg(hw);
290 	if (status)
291 		goto err_unroll_cqinit;
292 
293 	ice_clear_pxe_mode(hw);
294 
295 	status = ice_init_nvm(hw);
296 	if (status)
297 		goto err_unroll_cqinit;
298 
299 	status = ice_get_caps(hw);
300 	if (status)
301 		goto err_unroll_cqinit;
302 
303 	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
304 				     sizeof(*hw->port_info), GFP_KERNEL);
305 	if (!hw->port_info) {
306 		status = ICE_ERR_NO_MEMORY;
307 		goto err_unroll_cqinit;
308 	}
309 
310 	/* set the back pointer to hw */
311 	hw->port_info->hw = hw;
312 
313 	/* Initialize port_info struct with switch configuration data */
314 	status = ice_get_initial_sw_cfg(hw);
315 	if (status)
316 		goto err_unroll_alloc;
317 
318 	/* Query the allocated resources for tx scheduler */
319 	status = ice_sched_query_res_alloc(hw);
320 	if (status) {
321 		ice_debug(hw, ICE_DBG_SCHED,
322 			  "Failed to get scheduler allocated resources\n");
323 		goto err_unroll_alloc;
324 	}
325 
326 	/* Initialize port_info struct with scheduler data */
327 	status = ice_sched_init_port(hw->port_info);
328 	if (status)
329 		goto err_unroll_sched;
330 
331 	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
332 	if (!pcaps) {
333 		status = ICE_ERR_NO_MEMORY;
334 		goto err_unroll_sched;
335 	}
336 
337 	/* Initialize port_info struct with PHY capabilities */
338 	status = ice_aq_get_phy_caps(hw->port_info, false,
339 				     ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
340 	devm_kfree(ice_hw_to_dev(hw), pcaps);
341 	if (status)
342 		goto err_unroll_sched;
343 
344 	/* Initialize port_info struct with link information */
345 	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
346 	if (status)
347 		goto err_unroll_sched;
348 
349 	/* Get port MAC information */
350 	mac_buf_len = sizeof(struct ice_aqc_manage_mac_read_resp);
351 	mac_buf = devm_kzalloc(ice_hw_to_dev(hw), mac_buf_len, GFP_KERNEL);
352 
353 	if (!mac_buf)
354 		goto err_unroll_sched;
355 
356 	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
357 	devm_kfree(ice_hw_to_dev(hw), mac_buf);
358 
359 	if (status)
360 		goto err_unroll_sched;
361 
362 	return 0;
363 
364 err_unroll_sched:
365 	ice_sched_cleanup_all(hw);
366 err_unroll_alloc:
367 	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
368 err_unroll_cqinit:
369 	ice_shutdown_all_ctrlq(hw);
370 	return status;
371 }
372 
373 /**
374  * ice_deinit_hw - unroll initialization operations done by ice_init_hw
375  * @hw: pointer to the hardware structure
376  */
377 void ice_deinit_hw(struct ice_hw *hw)
378 {
379 	ice_sched_cleanup_all(hw);
380 	ice_shutdown_all_ctrlq(hw);
381 
382 	if (hw->port_info) {
383 		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
384 		hw->port_info = NULL;
385 	}
386 }
387 
/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 *
 * Polls the device until it reports active state and until the global
 * reset done bits are set in GLNVM_ULD; returns ICE_ERR_RESET_FAILED
 * if either poll times out.
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_delay;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
		      GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_delay; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	/* loop ran to completion without the device going active */
	if (cnt == grst_delay) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

/* NOTE: this mask is also used by ice_pf_reset() below */
#define ICE_RESET_DONE_MASK	(GLNVM_ULD_CORER_DONE_M | \
				 GLNVM_ULD_GLOBR_DONE_M)

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK;
		if (reg == ICE_RESET_DONE_MASK) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}
439 
440 /**
441  * ice_pf_reset - Reset the PF
442  * @hw: pointer to the hardware structure
443  *
444  * If a global reset has been triggered, this function checks
445  * for its completion and then issues the PF reset
446  */
447 static enum ice_status ice_pf_reset(struct ice_hw *hw)
448 {
449 	u32 cnt, reg;
450 
451 	/* If at function entry a global reset was already in progress, i.e.
452 	 * state is not 'device active' or any of the reset done bits are not
453 	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
454 	 * global reset is done.
455 	 */
456 	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
457 	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
458 		/* poll on global reset currently in progress until done */
459 		if (ice_check_reset(hw))
460 			return ICE_ERR_RESET_FAILED;
461 
462 		return 0;
463 	}
464 
465 	/* Reset the PF */
466 	reg = rd32(hw, PFGEN_CTRL);
467 
468 	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
469 
470 	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
471 		reg = rd32(hw, PFGEN_CTRL);
472 		if (!(reg & PFGEN_CTRL_PFSWR_M))
473 			break;
474 
475 		mdelay(1);
476 	}
477 
478 	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
479 		ice_debug(hw, ICE_DBG_INIT,
480 			  "PF reset polling failed to complete.\n");
481 		return ICE_ERR_RESET_FAILED;
482 	}
483 
484 	return 0;
485 }
486 
487 /**
488  * ice_reset - Perform different types of reset
489  * @hw: pointer to the hardware structure
490  * @req: reset request
491  *
492  * This function triggers a reset as specified by the req parameter.
493  *
494  * Note:
495  * If anything other than a PF reset is triggered, PXE mode is restored.
496  * This has to be cleared using ice_clear_pxe_mode again, once the AQ
497  * interface has been restored in the rebuild flow.
498  */
499 enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
500 {
501 	u32 val = 0;
502 
503 	switch (req) {
504 	case ICE_RESET_PFR:
505 		return ice_pf_reset(hw);
506 	case ICE_RESET_CORER:
507 		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
508 		val = GLGEN_RTRIG_CORER_M;
509 		break;
510 	case ICE_RESET_GLOBR:
511 		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
512 		val = GLGEN_RTRIG_GLOBR_M;
513 		break;
514 	}
515 
516 	val |= rd32(hw, GLGEN_RTRIG);
517 	wr32(hw, GLGEN_RTRIG, val);
518 	ice_flush(hw);
519 
520 	/* wait for the FW to be ready */
521 	return ice_check_reset(hw);
522 }
523 
/**
 * ice_debug_cq
 * @hw: pointer to the hardware structure
 * @mask: debug mask
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer
 * @buf_len: max length of buf
 *
 * Dumps debug log about control command with descriptor contents.
 */
void ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc,
		  void *buf, u16 buf_len)
{
	struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
	u16 len;

/* without dynamic debug, filter against the driver's debug mask here;
 * with it, filtering happens inside ice_debug() itself
 */
#ifndef CONFIG_DYNAMIC_DEBUG
	if (!(mask & hw->debug_mask))
		return;
#endif

	if (!desc)
		return;

	len = le16_to_cpu(cq_desc->datalen);

	ice_debug(hw, mask,
		  "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		  le16_to_cpu(cq_desc->opcode),
		  le16_to_cpu(cq_desc->flags),
		  le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
	ice_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->cookie_high),
		  le32_to_cpu(cq_desc->cookie_low));
	ice_debug(hw, mask, "\tparam (0,1)  0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.param0),
		  le32_to_cpu(cq_desc->params.generic.param1));
	ice_debug(hw, mask, "\taddr (h,l)   0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.addr_high),
		  le32_to_cpu(cq_desc->params.generic.addr_low));
	/* dump at most buf_len bytes of the indirect command buffer */
	if (buf && cq_desc->datalen != 0) {
		ice_debug(hw, mask, "Buffer:\n");
		if (buf_len < len)
			len = buf_len;

		ice_debug_array(hw, mask, 16, 1, (u8 *)buf, len);
	}
}
572 
573 /* FW Admin Queue command wrappers */
574 
/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the hw struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 * Thin wrapper that dispatches the command on hw's admin send queue;
 * the send status is propagated unchanged to the caller.
 */
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
}
591 
592 /**
593  * ice_aq_get_fw_ver
594  * @hw: pointer to the hw struct
595  * @cd: pointer to command details structure or NULL
596  *
597  * Get the firmware version (0x0001) from the admin queue commands
598  */
599 enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
600 {
601 	struct ice_aqc_get_ver *resp;
602 	struct ice_aq_desc desc;
603 	enum ice_status status;
604 
605 	resp = &desc.params.get_ver;
606 
607 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
608 
609 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
610 
611 	if (!status) {
612 		hw->fw_branch = resp->fw_branch;
613 		hw->fw_maj_ver = resp->fw_major;
614 		hw->fw_min_ver = resp->fw_minor;
615 		hw->fw_patch = resp->fw_patch;
616 		hw->fw_build = le32_to_cpu(resp->fw_build);
617 		hw->api_branch = resp->api_branch;
618 		hw->api_maj_ver = resp->api_major;
619 		hw->api_min_ver = resp->api_minor;
620 		hw->api_patch = resp->api_patch;
621 	}
622 
623 	return status;
624 }
625 
626 /**
627  * ice_aq_q_shutdown
628  * @hw: pointer to the hw struct
629  * @unloading: is the driver unloading itself
630  *
631  * Tell the Firmware that we're shutting down the AdminQ and whether
632  * or not the driver is unloading as well (0x0003).
633  */
634 enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
635 {
636 	struct ice_aqc_q_shutdown *cmd;
637 	struct ice_aq_desc desc;
638 
639 	cmd = &desc.params.q_shutdown;
640 
641 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
642 
643 	if (unloading)
644 		cmd->driver_unloading = cpu_to_le32(ICE_AQC_DRIVER_UNLOADING);
645 
646 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
647 }
648 
649 /**
650  * ice_aq_req_res
651  * @hw: pointer to the hw struct
652  * @res: resource id
653  * @access: access type
654  * @sdp_number: resource number
655  * @timeout: the maximum time in ms that the driver may hold the resource
656  * @cd: pointer to command details structure or NULL
657  *
658  * requests common resource using the admin queue commands (0x0008)
659  */
660 static enum ice_status
661 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
662 	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
663 	       struct ice_sq_cd *cd)
664 {
665 	struct ice_aqc_req_res *cmd_resp;
666 	struct ice_aq_desc desc;
667 	enum ice_status status;
668 
669 	cmd_resp = &desc.params.res_owner;
670 
671 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
672 
673 	cmd_resp->res_id = cpu_to_le16(res);
674 	cmd_resp->access_type = cpu_to_le16(access);
675 	cmd_resp->res_number = cpu_to_le32(sdp_number);
676 
677 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
678 	/* The completion specifies the maximum time in ms that the driver
679 	 * may hold the resource in the Timeout field.
680 	 * If the resource is held by someone else, the command completes with
681 	 * busy return value and the timeout field indicates the maximum time
682 	 * the current owner of the resource has to free it.
683 	 */
684 	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
685 		*timeout = le32_to_cpu(cmd_resp->timeout);
686 
687 	return status;
688 }
689 
690 /**
691  * ice_aq_release_res
692  * @hw: pointer to the hw struct
693  * @res: resource id
694  * @sdp_number: resource number
695  * @cd: pointer to command details structure or NULL
696  *
697  * release common resource using the admin queue commands (0x0009)
698  */
699 static enum ice_status
700 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
701 		   struct ice_sq_cd *cd)
702 {
703 	struct ice_aqc_req_res *cmd;
704 	struct ice_aq_desc desc;
705 
706 	cmd = &desc.params.res_owner;
707 
708 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
709 
710 	cmd->res_id = cpu_to_le16(res);
711 	cmd->res_number = cpu_to_le32(sdp_number);
712 
713 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
714 }
715 
/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource id
 * @access: access type (read or write)
 *
 * This function will attempt to acquire the ownership of a resource.
 * If the resource is busy, it re-requests it every 10 ms until the
 * current owner's reported timeout expires. Returns ICE_ERR_AQ_NO_WORK
 * when another driver already completed the work the resource guards.
 */
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access)
{
#define ICE_RES_POLLING_DELAY_MS	10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	enum ice_status status;
	u32 time_left = 0;
	u32 timeout;

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* An admin queue return code of ICE_AQ_RC_EEXIST means that another
	 * driver has previously acquired the resource and performed any
	 * necessary updates; in this case the caller does not obtain the
	 * resource and has no further work to do.
	 */
	if (hw->adminq.sq_last_status == ICE_AQ_RC_EEXIST) {
		status = ICE_ERR_AQ_NO_WORK;
		goto ice_acquire_res_exit;
	}

	if (status)
		ice_debug(hw, ICE_DBG_RES,
			  "resource %d acquire type %d failed.\n", res, access);

	/* If necessary, poll until the current lock owner timeouts.
	 * time_left (from the busy completion) bounds how long we keep
	 * retrying; each iteration decrements our local timeout budget.
	 */
	timeout = time_left;
	while (status && timeout && time_left) {
		mdelay(delay);
		timeout = (timeout > delay) ? timeout - delay : 0;
		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

		if (hw->adminq.sq_last_status == ICE_AQ_RC_EEXIST) {
			/* lock free, but no work to do */
			status = ICE_ERR_AQ_NO_WORK;
			break;
		}

		if (!status)
			/* lock acquired */
			break;
	}
	if (status && status != ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (status == ICE_ERR_AQ_NO_WORK) {
		/* NO_WORK is expected only for write acquisitions */
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES,
				  "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES,
				  "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
	}
	return status;
}
781 
782 /**
783  * ice_release_res
784  * @hw: pointer to the HW structure
785  * @res: resource id
786  *
787  * This function will release a resource using the proper Admin Command.
788  */
789 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
790 {
791 	enum ice_status status;
792 	u32 total_delay = 0;
793 
794 	status = ice_aq_release_res(hw, res, 0, NULL);
795 
796 	/* there are some rare cases when trying to release the resource
797 	 * results in an admin Q timeout, so handle them correctly
798 	 */
799 	while ((status == ICE_ERR_AQ_TIMEOUT) &&
800 	       (total_delay < hw->adminq.sq_cmd_timeout)) {
801 		mdelay(1);
802 		status = ice_aq_release_res(hw, res, 0, NULL);
803 		total_delay++;
804 	}
805 }
806 
/**
 * ice_parse_caps - parse function/device capabilities
 * @hw: pointer to the hw struct
 * @buf: pointer to a buffer containing function/device capability records
 * @cap_count: number of capability records in the list
 * @opc: type of capabilities list to parse
 *
 * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
 * Walks the array of capability elements in @buf and stores recognized
 * values into hw->dev_caps or hw->func_caps depending on @opc;
 * unrecognized capability IDs are only logged.
 */
static void
ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
	       enum ice_adminq_opc opc)
{
	struct ice_aqc_list_caps_elem *cap_resp;
	struct ice_hw_func_caps *func_p = NULL;
	struct ice_hw_dev_caps *dev_p = NULL;
	struct ice_hw_common_caps *caps;
	u32 i;

	if (!buf)
		return;

	cap_resp = (struct ice_aqc_list_caps_elem *)buf;

	/* select device-wide or per-function destination based on opcode */
	if (opc == ice_aqc_opc_list_dev_caps) {
		dev_p = &hw->dev_caps;
		caps = &dev_p->common_cap;
	} else if (opc == ice_aqc_opc_list_func_caps) {
		func_p = &hw->func_caps;
		caps = &func_p->common_cap;
	} else {
		ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
		return;
	}

	for (i = 0; caps && i < cap_count; i++, cap_resp++) {
		/* each element carries a capability ID plus three
		 * little-endian values whose meaning depends on the ID
		 */
		u32 logical_id = le32_to_cpu(cap_resp->logical_id);
		u32 phys_id = le32_to_cpu(cap_resp->phys_id);
		u32 number = le32_to_cpu(cap_resp->number);
		u16 cap = le16_to_cpu(cap_resp->cap);

		switch (cap) {
		case ICE_AQC_CAPS_VSI:
			/* VSI count goes to whichever caps struct applies */
			if (dev_p) {
				dev_p->num_vsi_allocd_to_host = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: Dev.VSI cnt = %d\n",
					  dev_p->num_vsi_allocd_to_host);
			} else if (func_p) {
				func_p->guaranteed_num_vsi = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: Func.VSI cnt = %d\n",
					  func_p->guaranteed_num_vsi);
			}
			break;
		case ICE_AQC_CAPS_RSS:
			caps->rss_table_size = number;
			caps->rss_table_entry_width = logical_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: RSS table size = %d\n",
				  caps->rss_table_size);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: RSS table width = %d\n",
				  caps->rss_table_entry_width);
			break;
		case ICE_AQC_CAPS_RXQS:
			caps->num_rxq = number;
			caps->rxq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Num Rx Qs = %d\n", caps->num_rxq);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Rx first queue ID = %d\n",
				  caps->rxq_first_id);
			break;
		case ICE_AQC_CAPS_TXQS:
			caps->num_txq = number;
			caps->txq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Num Tx Qs = %d\n", caps->num_txq);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Tx first queue ID = %d\n",
				  caps->txq_first_id);
			break;
		case ICE_AQC_CAPS_MSIX:
			caps->num_msix_vectors = number;
			caps->msix_vector_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: MSIX vector count = %d\n",
				  caps->num_msix_vectors);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: MSIX first vector index = %d\n",
				  caps->msix_vector_first_id);
			break;
		case ICE_AQC_CAPS_MAX_MTU:
			caps->max_mtu = number;
			if (dev_p)
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: Dev.MaxMTU = %d\n",
					  caps->max_mtu);
			else if (func_p)
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: func.MaxMTU = %d\n",
					  caps->max_mtu);
			break;
		default:
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Unknown capability[%d]: 0x%x\n", i,
				  cap);
			break;
		}
	}
}
919 
920 /**
921  * ice_aq_discover_caps - query function/device capabilities
922  * @hw: pointer to the hw struct
923  * @buf: a virtual buffer to hold the capabilities
924  * @buf_size: Size of the virtual buffer
925  * @data_size: Size of the returned data, or buf size needed if AQ err==ENOMEM
926  * @opc: capabilities type to discover - pass in the command opcode
927  * @cd: pointer to command details structure or NULL
928  *
929  * Get the function(0x000a)/device(0x000b) capabilities description from
930  * the firmware.
931  */
932 static enum ice_status
933 ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u16 *data_size,
934 		     enum ice_adminq_opc opc, struct ice_sq_cd *cd)
935 {
936 	struct ice_aqc_list_caps *cmd;
937 	struct ice_aq_desc desc;
938 	enum ice_status status;
939 
940 	cmd = &desc.params.get_cap;
941 
942 	if (opc != ice_aqc_opc_list_func_caps &&
943 	    opc != ice_aqc_opc_list_dev_caps)
944 		return ICE_ERR_PARAM;
945 
946 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
947 
948 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
949 	if (!status)
950 		ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc);
951 	*data_size = le16_to_cpu(desc.datalen);
952 
953 	return status;
954 }
955 
956 /**
957  * ice_get_caps - get info about the HW
958  * @hw: pointer to the hardware structure
959  */
960 enum ice_status ice_get_caps(struct ice_hw *hw)
961 {
962 	enum ice_status status;
963 	u16 data_size = 0;
964 	u16 cbuf_len;
965 	u8 retries;
966 
967 	/* The driver doesn't know how many capabilities the device will return
968 	 * so the buffer size required isn't known ahead of time. The driver
969 	 * starts with cbuf_len and if this turns out to be insufficient, the
970 	 * device returns ICE_AQ_RC_ENOMEM and also the buffer size it needs.
971 	 * The driver then allocates the buffer of this size and retries the
972 	 * operation. So it follows that the retry count is 2.
973 	 */
974 #define ICE_GET_CAP_BUF_COUNT	40
975 #define ICE_GET_CAP_RETRY_COUNT	2
976 
977 	cbuf_len = ICE_GET_CAP_BUF_COUNT *
978 		sizeof(struct ice_aqc_list_caps_elem);
979 
980 	retries = ICE_GET_CAP_RETRY_COUNT;
981 
982 	do {
983 		void *cbuf;
984 
985 		cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL);
986 		if (!cbuf)
987 			return ICE_ERR_NO_MEMORY;
988 
989 		status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &data_size,
990 					      ice_aqc_opc_list_func_caps, NULL);
991 		devm_kfree(ice_hw_to_dev(hw), cbuf);
992 
993 		if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
994 			break;
995 
996 		/* If ENOMEM is returned, try again with bigger buffer */
997 		cbuf_len = data_size;
998 	} while (--retries);
999 
1000 	return status;
1001 }
1002 
1003 /**
1004  * ice_aq_clear_pxe_mode
1005  * @hw: pointer to the hw struct
1006  *
1007  * Tell the firmware that the driver is taking over from PXE (0x0110).
1008  */
1009 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
1010 {
1011 	struct ice_aq_desc desc;
1012 
1013 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
1014 	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
1015 
1016 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1017 }
1018 
1019 /**
1020  * ice_clear_pxe_mode - clear pxe operations mode
1021  * @hw: pointer to the hw struct
1022  *
1023  * Make sure all PXE mode settings are cleared, including things
1024  * like descriptor fetch/write-back mode.
1025  */
1026 void ice_clear_pxe_mode(struct ice_hw *hw)
1027 {
1028 	if (ice_check_sq_alive(hw, &hw->adminq))
1029 		ice_aq_clear_pxe_mode(hw);
1030 }
1031