1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 #include "ice_common.h"
5 #include "ice_sched.h"
6 #include "ice_adminq_cmd.h"
7 
8 #define ICE_PF_RESET_WAIT_COUNT	200
9 
10 /**
11  * ice_set_mac_type - Sets MAC type
12  * @hw: pointer to the HW structure
13  *
14  * This function sets the MAC type of the adapter based on the
15  * vendor ID and device ID stored in the hw structure.
16  */
17 static enum ice_status ice_set_mac_type(struct ice_hw *hw)
18 {
19 	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
20 		return ICE_ERR_DEVICE_NOT_SUPPORTED;
21 
22 	hw->mac_type = ICE_MAC_GENERIC;
23 	return 0;
24 }
25 
26 /**
27  * ice_clear_pf_cfg - Clear PF configuration
28  * @hw: pointer to the hardware structure
29  */
30 enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
31 {
32 	struct ice_aq_desc desc;
33 
34 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
35 
36 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
37 }
38 
/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the hw struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return per PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in user specified buffer. Please interpret user specified
 * buffer as "manage_mac_read" response.
 * Response such as various MAC addresses are stored in HW struct (port.mac)
 * ice_aq_discover_caps is expected to be called before this function is called.
 */
static enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags;

	/* cmd aliases the descriptor's parameter area; the flags read below
	 * comes from this same descriptor after the command completes.
	 */
	cmd = &desc.params.mac_read;

	/* The indirect buffer must be large enough for at least one
	 * manage_mac_read response element.
	 */
	if (buf_size < sizeof(*resp))
		return ICE_ERR_BUF_TOO_SHORT;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	/* buf now holds the response payload; flags are read back through
	 * cmd — this relies on the AQ layer updating desc in place with the
	 * completion descriptor (NOTE(review): confirmed by usage here).
	 */
	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	/* Without a valid LAN address there is nothing usable to cache */
	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return ICE_ERR_CFG;
	}

	/* Cache both the active and permanent MAC in the port info */
	ether_addr_copy(hw->port_info->mac.lan_addr, resp->mac_addr);
	ether_addr_copy(hw->port_info->mac.perm_addr, resp->mac_addr);
	return 0;
}
86 
87 /**
88  * ice_aq_get_phy_caps - returns PHY capabilities
89  * @pi: port information structure
90  * @qual_mods: report qualified modules
91  * @report_mode: report mode capabilities
92  * @pcaps: structure for PHY capabilities to be filled
93  * @cd: pointer to command details structure or NULL
94  *
95  * Returns the various PHY capabilities supported on the Port (0x0600)
96  */
97 static enum ice_status
98 ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
99 		    struct ice_aqc_get_phy_caps_data *pcaps,
100 		    struct ice_sq_cd *cd)
101 {
102 	struct ice_aqc_get_phy_caps *cmd;
103 	u16 pcaps_size = sizeof(*pcaps);
104 	struct ice_aq_desc desc;
105 	enum ice_status status;
106 
107 	cmd = &desc.params.get_phy;
108 
109 	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
110 		return ICE_ERR_PARAM;
111 
112 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
113 
114 	if (qual_mods)
115 		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);
116 
117 	cmd->param0 |= cpu_to_le16(report_mode);
118 	status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);
119 
120 	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP)
121 		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
122 
123 	return status;
124 }
125 
126 /**
127  * ice_get_media_type - Gets media type
128  * @pi: port information structure
129  */
130 static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
131 {
132 	struct ice_link_status *hw_link_info;
133 
134 	if (!pi)
135 		return ICE_MEDIA_UNKNOWN;
136 
137 	hw_link_info = &pi->phy.link_info;
138 
139 	if (hw_link_info->phy_type_low) {
140 		switch (hw_link_info->phy_type_low) {
141 		case ICE_PHY_TYPE_LOW_1000BASE_SX:
142 		case ICE_PHY_TYPE_LOW_1000BASE_LX:
143 		case ICE_PHY_TYPE_LOW_10GBASE_SR:
144 		case ICE_PHY_TYPE_LOW_10GBASE_LR:
145 		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
146 		case ICE_PHY_TYPE_LOW_25GBASE_SR:
147 		case ICE_PHY_TYPE_LOW_25GBASE_LR:
148 		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
149 		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
150 		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
151 			return ICE_MEDIA_FIBER;
152 		case ICE_PHY_TYPE_LOW_100BASE_TX:
153 		case ICE_PHY_TYPE_LOW_1000BASE_T:
154 		case ICE_PHY_TYPE_LOW_2500BASE_T:
155 		case ICE_PHY_TYPE_LOW_5GBASE_T:
156 		case ICE_PHY_TYPE_LOW_10GBASE_T:
157 		case ICE_PHY_TYPE_LOW_25GBASE_T:
158 			return ICE_MEDIA_BASET;
159 		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
160 		case ICE_PHY_TYPE_LOW_25GBASE_CR:
161 		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
162 		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
163 		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
164 			return ICE_MEDIA_DA;
165 		case ICE_PHY_TYPE_LOW_1000BASE_KX:
166 		case ICE_PHY_TYPE_LOW_2500BASE_KX:
167 		case ICE_PHY_TYPE_LOW_2500BASE_X:
168 		case ICE_PHY_TYPE_LOW_5GBASE_KR:
169 		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
170 		case ICE_PHY_TYPE_LOW_25GBASE_KR:
171 		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
172 		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
173 		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
174 			return ICE_MEDIA_BACKPLANE;
175 		}
176 	}
177 
178 	return ICE_MEDIA_UNKNOWN;
179 }
180 
/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 * On success the port's cached link, media, and flow-control state is
 * refreshed from the firmware response, the previous link state is saved
 * in link_info_old, and pi->phy.get_link_info is cleared.
 */
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_link_status *hw_link_info_old, *hw_link_info;
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;
	/* Shorthand pointers into the port's cached PHY/FC state */
	hw_link_info_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	hw_link_info = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	/* Also arm or disarm link status event reporting with this query */
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(pi->hw, &desc, &link_data, sizeof(link_data),
				 cd);

	if (status)
		return status;

	/* save off old link status information */
	*hw_link_info_old = *hw_link_info;

	/* update current link status information */
	hw_link_info->link_speed = le16_to_cpu(link_data.link_speed);
	hw_link_info->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	/* media type derives from the PHY type just stored above */
	*hw_media_type = ice_get_media_type(pi);
	hw_link_info->link_info = link_data.link_info;
	hw_link_info->an_info = link_data.an_info;
	hw_link_info->ext_info = link_data.ext_info;
	hw_link_info->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	hw_link_info->pacing = link_data.cfg & ICE_AQ_CFG_PACING_M;

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	/* firmware reports back (in the completed descriptor) whether LSE
	 * is actually enabled
	 */
	hw_link_info->lse_ena =
		!!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	/* save link status information */
	if (link)
		*link = *hw_link_info;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return status;
}
260 
261 /**
262  * ice_init_fltr_mgmt_struct - initializes filter management list and locks
263  * @hw: pointer to the hw struct
264  */
265 static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
266 {
267 	struct ice_switch_info *sw;
268 
269 	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
270 				       sizeof(*hw->switch_info), GFP_KERNEL);
271 	sw = hw->switch_info;
272 
273 	if (!sw)
274 		return ICE_ERR_NO_MEMORY;
275 
276 	INIT_LIST_HEAD(&sw->vsi_list_map_head);
277 
278 	mutex_init(&sw->mac_list_lock);
279 	INIT_LIST_HEAD(&sw->mac_list_head);
280 
281 	mutex_init(&sw->vlan_list_lock);
282 	INIT_LIST_HEAD(&sw->vlan_list_head);
283 
284 	mutex_init(&sw->eth_m_list_lock);
285 	INIT_LIST_HEAD(&sw->eth_m_list_head);
286 
287 	mutex_init(&sw->promisc_list_lock);
288 	INIT_LIST_HEAD(&sw->promisc_list_head);
289 
290 	mutex_init(&sw->mac_vlan_list_lock);
291 	INIT_LIST_HEAD(&sw->mac_vlan_list_head);
292 
293 	return 0;
294 }
295 
296 /**
297  * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
298  * @hw: pointer to the hw struct
299  */
300 static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
301 {
302 	struct ice_switch_info *sw = hw->switch_info;
303 	struct ice_vsi_list_map_info *v_pos_map;
304 	struct ice_vsi_list_map_info *v_tmp_map;
305 
306 	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
307 				 list_entry) {
308 		list_del(&v_pos_map->list_entry);
309 		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
310 	}
311 
312 	mutex_destroy(&sw->mac_list_lock);
313 	mutex_destroy(&sw->vlan_list_lock);
314 	mutex_destroy(&sw->eth_m_list_lock);
315 	mutex_destroy(&sw->promisc_list_lock);
316 	mutex_destroy(&sw->mac_vlan_list_lock);
317 
318 	devm_kfree(ice_hw_to_dev(hw), sw);
319 }
320 
321 /**
322  * ice_init_hw - main hardware initialization routine
323  * @hw: pointer to the hardware structure
324  */
325 enum ice_status ice_init_hw(struct ice_hw *hw)
326 {
327 	struct ice_aqc_get_phy_caps_data *pcaps;
328 	enum ice_status status;
329 	u16 mac_buf_len;
330 	void *mac_buf;
331 
332 	/* Set MAC type based on DeviceID */
333 	status = ice_set_mac_type(hw);
334 	if (status)
335 		return status;
336 
337 	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
338 			 PF_FUNC_RID_FUNC_NUM_M) >>
339 		PF_FUNC_RID_FUNC_NUM_S;
340 
341 	status = ice_reset(hw, ICE_RESET_PFR);
342 	if (status)
343 		return status;
344 
345 	/* set these values to minimum allowed */
346 	hw->itr_gran_200 = ICE_ITR_GRAN_MIN_200;
347 	hw->itr_gran_100 = ICE_ITR_GRAN_MIN_100;
348 	hw->itr_gran_50 = ICE_ITR_GRAN_MIN_50;
349 	hw->itr_gran_25 = ICE_ITR_GRAN_MIN_25;
350 
351 	status = ice_init_all_ctrlq(hw);
352 	if (status)
353 		goto err_unroll_cqinit;
354 
355 	status = ice_clear_pf_cfg(hw);
356 	if (status)
357 		goto err_unroll_cqinit;
358 
359 	ice_clear_pxe_mode(hw);
360 
361 	status = ice_init_nvm(hw);
362 	if (status)
363 		goto err_unroll_cqinit;
364 
365 	status = ice_get_caps(hw);
366 	if (status)
367 		goto err_unroll_cqinit;
368 
369 	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
370 				     sizeof(*hw->port_info), GFP_KERNEL);
371 	if (!hw->port_info) {
372 		status = ICE_ERR_NO_MEMORY;
373 		goto err_unroll_cqinit;
374 	}
375 
376 	/* set the back pointer to hw */
377 	hw->port_info->hw = hw;
378 
379 	/* Initialize port_info struct with switch configuration data */
380 	status = ice_get_initial_sw_cfg(hw);
381 	if (status)
382 		goto err_unroll_alloc;
383 
384 	hw->evb_veb = true;
385 
386 	/* Query the allocated resources for tx scheduler */
387 	status = ice_sched_query_res_alloc(hw);
388 	if (status) {
389 		ice_debug(hw, ICE_DBG_SCHED,
390 			  "Failed to get scheduler allocated resources\n");
391 		goto err_unroll_alloc;
392 	}
393 
394 	/* Initialize port_info struct with scheduler data */
395 	status = ice_sched_init_port(hw->port_info);
396 	if (status)
397 		goto err_unroll_sched;
398 
399 	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
400 	if (!pcaps) {
401 		status = ICE_ERR_NO_MEMORY;
402 		goto err_unroll_sched;
403 	}
404 
405 	/* Initialize port_info struct with PHY capabilities */
406 	status = ice_aq_get_phy_caps(hw->port_info, false,
407 				     ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
408 	devm_kfree(ice_hw_to_dev(hw), pcaps);
409 	if (status)
410 		goto err_unroll_sched;
411 
412 	/* Initialize port_info struct with link information */
413 	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
414 	if (status)
415 		goto err_unroll_sched;
416 
417 	status = ice_init_fltr_mgmt_struct(hw);
418 	if (status)
419 		goto err_unroll_sched;
420 
421 	/* Get port MAC information */
422 	mac_buf_len = sizeof(struct ice_aqc_manage_mac_read_resp);
423 	mac_buf = devm_kzalloc(ice_hw_to_dev(hw), mac_buf_len, GFP_KERNEL);
424 
425 	if (!mac_buf)
426 		goto err_unroll_fltr_mgmt_struct;
427 
428 	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
429 	devm_kfree(ice_hw_to_dev(hw), mac_buf);
430 
431 	if (status)
432 		goto err_unroll_fltr_mgmt_struct;
433 
434 	return 0;
435 
436 err_unroll_fltr_mgmt_struct:
437 	ice_cleanup_fltr_mgmt_struct(hw);
438 err_unroll_sched:
439 	ice_sched_cleanup_all(hw);
440 err_unroll_alloc:
441 	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
442 err_unroll_cqinit:
443 	ice_shutdown_all_ctrlq(hw);
444 	return status;
445 }
446 
/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * Tears down scheduler state, shuts down all control queues, frees the
 * port info allocation, and cleans up the filter management structures.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_sched_cleanup_all(hw);
	ice_shutdown_all_ctrlq(hw);

	/* free the port info and clear the pointer so later teardown code
	 * cannot reuse the stale allocation
	 */
	if (hw->port_info) {
		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
		hw->port_info = NULL;
	}

	/* NOTE(review): assumes switch_info is valid here — ice_init_hw only
	 * returns success after ice_init_fltr_mgmt_struct succeeded
	 */
	ice_cleanup_fltr_mgmt_struct(hw);
}
463 
/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 *
 * Polls first for the device to leave the reset state, then for the
 * global reset done bits in GLNVM_ULD. Returns ICE_ERR_RESET_FAILED if
 * either poll times out.
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_delay;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
		      GLGEN_RSTCTL_GRSTDEL_S) + 10;

	/* each iteration waits 100ms, so grst_delay bounds the total wait */
	for (cnt = 0; cnt < grst_delay; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_delay) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

/* also used by ice_pf_reset() below to detect an in-flight global reset */
#define ICE_RESET_DONE_MASK	(GLNVM_ULD_CORER_DONE_M | \
				 GLNVM_ULD_GLOBR_DONE_M)

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK;
		if (reg == ICE_RESET_DONE_MASK) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}
515 
516 /**
517  * ice_pf_reset - Reset the PF
518  * @hw: pointer to the hardware structure
519  *
520  * If a global reset has been triggered, this function checks
521  * for its completion and then issues the PF reset
522  */
523 static enum ice_status ice_pf_reset(struct ice_hw *hw)
524 {
525 	u32 cnt, reg;
526 
527 	/* If at function entry a global reset was already in progress, i.e.
528 	 * state is not 'device active' or any of the reset done bits are not
529 	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
530 	 * global reset is done.
531 	 */
532 	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
533 	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
534 		/* poll on global reset currently in progress until done */
535 		if (ice_check_reset(hw))
536 			return ICE_ERR_RESET_FAILED;
537 
538 		return 0;
539 	}
540 
541 	/* Reset the PF */
542 	reg = rd32(hw, PFGEN_CTRL);
543 
544 	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
545 
546 	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
547 		reg = rd32(hw, PFGEN_CTRL);
548 		if (!(reg & PFGEN_CTRL_PFSWR_M))
549 			break;
550 
551 		mdelay(1);
552 	}
553 
554 	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
555 		ice_debug(hw, ICE_DBG_INIT,
556 			  "PF reset polling failed to complete.\n");
557 		return ICE_ERR_RESET_FAILED;
558 	}
559 
560 	return 0;
561 }
562 
563 /**
564  * ice_reset - Perform different types of reset
565  * @hw: pointer to the hardware structure
566  * @req: reset request
567  *
568  * This function triggers a reset as specified by the req parameter.
569  *
570  * Note:
571  * If anything other than a PF reset is triggered, PXE mode is restored.
572  * This has to be cleared using ice_clear_pxe_mode again, once the AQ
573  * interface has been restored in the rebuild flow.
574  */
575 enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
576 {
577 	u32 val = 0;
578 
579 	switch (req) {
580 	case ICE_RESET_PFR:
581 		return ice_pf_reset(hw);
582 	case ICE_RESET_CORER:
583 		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
584 		val = GLGEN_RTRIG_CORER_M;
585 		break;
586 	case ICE_RESET_GLOBR:
587 		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
588 		val = GLGEN_RTRIG_GLOBR_M;
589 		break;
590 	}
591 
592 	val |= rd32(hw, GLGEN_RTRIG);
593 	wr32(hw, GLGEN_RTRIG, val);
594 	ice_flush(hw);
595 
596 	/* wait for the FW to be ready */
597 	return ice_check_reset(hw);
598 }
599 
600 /**
601  * ice_debug_cq
602  * @hw: pointer to the hardware structure
603  * @mask: debug mask
604  * @desc: pointer to control queue descriptor
605  * @buf: pointer to command buffer
606  * @buf_len: max length of buf
607  *
608  * Dumps debug log about control command with descriptor contents.
609  */
610 void ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc,
611 		  void *buf, u16 buf_len)
612 {
613 	struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
614 	u16 len;
615 
616 #ifndef CONFIG_DYNAMIC_DEBUG
617 	if (!(mask & hw->debug_mask))
618 		return;
619 #endif
620 
621 	if (!desc)
622 		return;
623 
624 	len = le16_to_cpu(cq_desc->datalen);
625 
626 	ice_debug(hw, mask,
627 		  "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
628 		  le16_to_cpu(cq_desc->opcode),
629 		  le16_to_cpu(cq_desc->flags),
630 		  le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
631 	ice_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
632 		  le32_to_cpu(cq_desc->cookie_high),
633 		  le32_to_cpu(cq_desc->cookie_low));
634 	ice_debug(hw, mask, "\tparam (0,1)  0x%08X 0x%08X\n",
635 		  le32_to_cpu(cq_desc->params.generic.param0),
636 		  le32_to_cpu(cq_desc->params.generic.param1));
637 	ice_debug(hw, mask, "\taddr (h,l)   0x%08X 0x%08X\n",
638 		  le32_to_cpu(cq_desc->params.generic.addr_high),
639 		  le32_to_cpu(cq_desc->params.generic.addr_low));
640 	if (buf && cq_desc->datalen != 0) {
641 		ice_debug(hw, mask, "Buffer:\n");
642 		if (buf_len < len)
643 			len = buf_len;
644 
645 		ice_debug_array(hw, mask, 16, 1, (u8 *)buf, len);
646 	}
647 }
648 
649 /* FW Admin Queue command wrappers */
650 
651 /**
652  * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
653  * @hw: pointer to the hw struct
654  * @desc: descriptor describing the command
655  * @buf: buffer to use for indirect commands (NULL for direct commands)
656  * @buf_size: size of buffer for indirect commands (0 for direct commands)
657  * @cd: pointer to command details structure
658  *
659  * Helper function to send FW Admin Queue commands to the FW Admin Queue.
660  */
661 enum ice_status
662 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
663 		u16 buf_size, struct ice_sq_cd *cd)
664 {
665 	return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
666 }
667 
668 /**
669  * ice_aq_get_fw_ver
670  * @hw: pointer to the hw struct
671  * @cd: pointer to command details structure or NULL
672  *
673  * Get the firmware version (0x0001) from the admin queue commands
674  */
675 enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
676 {
677 	struct ice_aqc_get_ver *resp;
678 	struct ice_aq_desc desc;
679 	enum ice_status status;
680 
681 	resp = &desc.params.get_ver;
682 
683 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
684 
685 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
686 
687 	if (!status) {
688 		hw->fw_branch = resp->fw_branch;
689 		hw->fw_maj_ver = resp->fw_major;
690 		hw->fw_min_ver = resp->fw_minor;
691 		hw->fw_patch = resp->fw_patch;
692 		hw->fw_build = le32_to_cpu(resp->fw_build);
693 		hw->api_branch = resp->api_branch;
694 		hw->api_maj_ver = resp->api_major;
695 		hw->api_min_ver = resp->api_minor;
696 		hw->api_patch = resp->api_patch;
697 	}
698 
699 	return status;
700 }
701 
702 /**
703  * ice_aq_q_shutdown
704  * @hw: pointer to the hw struct
705  * @unloading: is the driver unloading itself
706  *
707  * Tell the Firmware that we're shutting down the AdminQ and whether
708  * or not the driver is unloading as well (0x0003).
709  */
710 enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
711 {
712 	struct ice_aqc_q_shutdown *cmd;
713 	struct ice_aq_desc desc;
714 
715 	cmd = &desc.params.q_shutdown;
716 
717 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
718 
719 	if (unloading)
720 		cmd->driver_unloading = cpu_to_le32(ICE_AQC_DRIVER_UNLOADING);
721 
722 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
723 }
724 
725 /**
726  * ice_aq_req_res
727  * @hw: pointer to the hw struct
728  * @res: resource id
729  * @access: access type
730  * @sdp_number: resource number
731  * @timeout: the maximum time in ms that the driver may hold the resource
732  * @cd: pointer to command details structure or NULL
733  *
734  * requests common resource using the admin queue commands (0x0008)
735  */
736 static enum ice_status
737 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
738 	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
739 	       struct ice_sq_cd *cd)
740 {
741 	struct ice_aqc_req_res *cmd_resp;
742 	struct ice_aq_desc desc;
743 	enum ice_status status;
744 
745 	cmd_resp = &desc.params.res_owner;
746 
747 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
748 
749 	cmd_resp->res_id = cpu_to_le16(res);
750 	cmd_resp->access_type = cpu_to_le16(access);
751 	cmd_resp->res_number = cpu_to_le32(sdp_number);
752 
753 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
754 	/* The completion specifies the maximum time in ms that the driver
755 	 * may hold the resource in the Timeout field.
756 	 * If the resource is held by someone else, the command completes with
757 	 * busy return value and the timeout field indicates the maximum time
758 	 * the current owner of the resource has to free it.
759 	 */
760 	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
761 		*timeout = le32_to_cpu(cmd_resp->timeout);
762 
763 	return status;
764 }
765 
766 /**
767  * ice_aq_release_res
768  * @hw: pointer to the hw struct
769  * @res: resource id
770  * @sdp_number: resource number
771  * @cd: pointer to command details structure or NULL
772  *
773  * release common resource using the admin queue commands (0x0009)
774  */
775 static enum ice_status
776 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
777 		   struct ice_sq_cd *cd)
778 {
779 	struct ice_aqc_req_res *cmd;
780 	struct ice_aq_desc desc;
781 
782 	cmd = &desc.params.res_owner;
783 
784 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
785 
786 	cmd->res_id = cpu_to_le16(res);
787 	cmd->res_number = cpu_to_le32(sdp_number);
788 
789 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
790 }
791 
/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource id
 * @access: access type (read or write)
 *
 * This function will attempt to acquire the ownership of a resource.
 * If the resource is busy, the request is retried every
 * ICE_RES_POLLING_DELAY_MS until the current owner's timeout expires.
 * Returns ICE_ERR_AQ_NO_WORK when another driver already completed the
 * work the resource guards.
 */
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access)
{
#define ICE_RES_POLLING_DELAY_MS	10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	enum ice_status status;
	u32 time_left = 0;
	u32 timeout;

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* An admin queue return code of ICE_AQ_RC_EEXIST means that another
	 * driver has previously acquired the resource and performed any
	 * necessary updates; in this case the caller does not obtain the
	 * resource and has no further work to do.
	 */
	if (hw->adminq.sq_last_status == ICE_AQ_RC_EEXIST) {
		status = ICE_ERR_AQ_NO_WORK;
		goto ice_acquire_res_exit;
	}

	if (status)
		ice_debug(hw, ICE_DBG_RES,
			  "resource %d acquire type %d failed.\n", res, access);

	/* If necessary, poll until the current lock owner timeouts */
	timeout = time_left;
	while (status && timeout && time_left) {
		mdelay(delay);
		/* shrink our budget; ice_aq_req_res refreshes time_left */
		timeout = (timeout > delay) ? timeout - delay : 0;
		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

		if (hw->adminq.sq_last_status == ICE_AQ_RC_EEXIST) {
			/* lock free, but no work to do */
			status = ICE_ERR_AQ_NO_WORK;
			break;
		}

		if (!status)
			/* lock acquired */
			break;
	}
	if (status && status != ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (status == ICE_ERR_AQ_NO_WORK) {
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES,
				  "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES,
				  "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
	}
	return status;
}
857 
858 /**
859  * ice_release_res
860  * @hw: pointer to the HW structure
861  * @res: resource id
862  *
863  * This function will release a resource using the proper Admin Command.
864  */
865 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
866 {
867 	enum ice_status status;
868 	u32 total_delay = 0;
869 
870 	status = ice_aq_release_res(hw, res, 0, NULL);
871 
872 	/* there are some rare cases when trying to release the resource
873 	 * results in an admin Q timeout, so handle them correctly
874 	 */
875 	while ((status == ICE_ERR_AQ_TIMEOUT) &&
876 	       (total_delay < hw->adminq.sq_cmd_timeout)) {
877 		mdelay(1);
878 		status = ice_aq_release_res(hw, res, 0, NULL);
879 		total_delay++;
880 	}
881 }
882 
883 /**
884  * ice_parse_caps - parse function/device capabilities
885  * @hw: pointer to the hw struct
886  * @buf: pointer to a buffer containing function/device capability records
887  * @cap_count: number of capability records in the list
888  * @opc: type of capabilities list to parse
889  *
890  * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
891  */
892 static void
893 ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
894 	       enum ice_adminq_opc opc)
895 {
896 	struct ice_aqc_list_caps_elem *cap_resp;
897 	struct ice_hw_func_caps *func_p = NULL;
898 	struct ice_hw_dev_caps *dev_p = NULL;
899 	struct ice_hw_common_caps *caps;
900 	u32 i;
901 
902 	if (!buf)
903 		return;
904 
905 	cap_resp = (struct ice_aqc_list_caps_elem *)buf;
906 
907 	if (opc == ice_aqc_opc_list_dev_caps) {
908 		dev_p = &hw->dev_caps;
909 		caps = &dev_p->common_cap;
910 	} else if (opc == ice_aqc_opc_list_func_caps) {
911 		func_p = &hw->func_caps;
912 		caps = &func_p->common_cap;
913 	} else {
914 		ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
915 		return;
916 	}
917 
918 	for (i = 0; caps && i < cap_count; i++, cap_resp++) {
919 		u32 logical_id = le32_to_cpu(cap_resp->logical_id);
920 		u32 phys_id = le32_to_cpu(cap_resp->phys_id);
921 		u32 number = le32_to_cpu(cap_resp->number);
922 		u16 cap = le16_to_cpu(cap_resp->cap);
923 
924 		switch (cap) {
925 		case ICE_AQC_CAPS_VSI:
926 			if (dev_p) {
927 				dev_p->num_vsi_allocd_to_host = number;
928 				ice_debug(hw, ICE_DBG_INIT,
929 					  "HW caps: Dev.VSI cnt = %d\n",
930 					  dev_p->num_vsi_allocd_to_host);
931 			} else if (func_p) {
932 				func_p->guaranteed_num_vsi = number;
933 				ice_debug(hw, ICE_DBG_INIT,
934 					  "HW caps: Func.VSI cnt = %d\n",
935 					  func_p->guaranteed_num_vsi);
936 			}
937 			break;
938 		case ICE_AQC_CAPS_RSS:
939 			caps->rss_table_size = number;
940 			caps->rss_table_entry_width = logical_id;
941 			ice_debug(hw, ICE_DBG_INIT,
942 				  "HW caps: RSS table size = %d\n",
943 				  caps->rss_table_size);
944 			ice_debug(hw, ICE_DBG_INIT,
945 				  "HW caps: RSS table width = %d\n",
946 				  caps->rss_table_entry_width);
947 			break;
948 		case ICE_AQC_CAPS_RXQS:
949 			caps->num_rxq = number;
950 			caps->rxq_first_id = phys_id;
951 			ice_debug(hw, ICE_DBG_INIT,
952 				  "HW caps: Num Rx Qs = %d\n", caps->num_rxq);
953 			ice_debug(hw, ICE_DBG_INIT,
954 				  "HW caps: Rx first queue ID = %d\n",
955 				  caps->rxq_first_id);
956 			break;
957 		case ICE_AQC_CAPS_TXQS:
958 			caps->num_txq = number;
959 			caps->txq_first_id = phys_id;
960 			ice_debug(hw, ICE_DBG_INIT,
961 				  "HW caps: Num Tx Qs = %d\n", caps->num_txq);
962 			ice_debug(hw, ICE_DBG_INIT,
963 				  "HW caps: Tx first queue ID = %d\n",
964 				  caps->txq_first_id);
965 			break;
966 		case ICE_AQC_CAPS_MSIX:
967 			caps->num_msix_vectors = number;
968 			caps->msix_vector_first_id = phys_id;
969 			ice_debug(hw, ICE_DBG_INIT,
970 				  "HW caps: MSIX vector count = %d\n",
971 				  caps->num_msix_vectors);
972 			ice_debug(hw, ICE_DBG_INIT,
973 				  "HW caps: MSIX first vector index = %d\n",
974 				  caps->msix_vector_first_id);
975 			break;
976 		case ICE_AQC_CAPS_MAX_MTU:
977 			caps->max_mtu = number;
978 			if (dev_p)
979 				ice_debug(hw, ICE_DBG_INIT,
980 					  "HW caps: Dev.MaxMTU = %d\n",
981 					  caps->max_mtu);
982 			else if (func_p)
983 				ice_debug(hw, ICE_DBG_INIT,
984 					  "HW caps: func.MaxMTU = %d\n",
985 					  caps->max_mtu);
986 			break;
987 		default:
988 			ice_debug(hw, ICE_DBG_INIT,
989 				  "HW caps: Unknown capability[%d]: 0x%x\n", i,
990 				  cap);
991 			break;
992 		}
993 	}
994 }
995 
/**
 * ice_aq_discover_caps - query function/device capabilities
 * @hw: pointer to the hw struct
 * @buf: a virtual buffer to hold the capabilities
 * @buf_size: Size of the virtual buffer
 * @data_size: Size of the returned data, or buf size needed if AQ err==ENOMEM
 * @opc: capabilities type to discover - pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Get the function(0x000a)/device(0x000b) capabilities description from
 * the firmware.
 */
static enum ice_status
ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u16 *data_size,
		     enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_list_caps *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_cap;

	/* only the two capability-list opcodes are valid here */
	if (opc != ice_aqc_opc_list_func_caps &&
	    opc != ice_aqc_opc_list_dev_caps)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status)
		ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc);
	/* intentionally set even on failure: on an ENOMEM AQ error the
	 * datalen tells the caller (ice_get_caps) how big a buffer to retry
	 * with
	 */
	*data_size = le16_to_cpu(desc.datalen);

	return status;
}
1031 
1032 /**
1033  * ice_get_caps - get info about the HW
1034  * @hw: pointer to the hardware structure
1035  */
1036 enum ice_status ice_get_caps(struct ice_hw *hw)
1037 {
1038 	enum ice_status status;
1039 	u16 data_size = 0;
1040 	u16 cbuf_len;
1041 	u8 retries;
1042 
1043 	/* The driver doesn't know how many capabilities the device will return
1044 	 * so the buffer size required isn't known ahead of time. The driver
1045 	 * starts with cbuf_len and if this turns out to be insufficient, the
1046 	 * device returns ICE_AQ_RC_ENOMEM and also the buffer size it needs.
1047 	 * The driver then allocates the buffer of this size and retries the
1048 	 * operation. So it follows that the retry count is 2.
1049 	 */
1050 #define ICE_GET_CAP_BUF_COUNT	40
1051 #define ICE_GET_CAP_RETRY_COUNT	2
1052 
1053 	cbuf_len = ICE_GET_CAP_BUF_COUNT *
1054 		sizeof(struct ice_aqc_list_caps_elem);
1055 
1056 	retries = ICE_GET_CAP_RETRY_COUNT;
1057 
1058 	do {
1059 		void *cbuf;
1060 
1061 		cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL);
1062 		if (!cbuf)
1063 			return ICE_ERR_NO_MEMORY;
1064 
1065 		status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &data_size,
1066 					      ice_aqc_opc_list_func_caps, NULL);
1067 		devm_kfree(ice_hw_to_dev(hw), cbuf);
1068 
1069 		if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
1070 			break;
1071 
1072 		/* If ENOMEM is returned, try again with bigger buffer */
1073 		cbuf_len = data_size;
1074 	} while (--retries);
1075 
1076 	return status;
1077 }
1078 
1079 /**
1080  * ice_aq_clear_pxe_mode
1081  * @hw: pointer to the hw struct
1082  *
1083  * Tell the firmware that the driver is taking over from PXE (0x0110).
1084  */
1085 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
1086 {
1087 	struct ice_aq_desc desc;
1088 
1089 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
1090 	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
1091 
1092 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1093 }
1094 
1095 /**
1096  * ice_clear_pxe_mode - clear pxe operations mode
1097  * @hw: pointer to the hw struct
1098  *
1099  * Make sure all PXE mode settings are cleared, including things
1100  * like descriptor fetch/write-back mode.
1101  */
1102 void ice_clear_pxe_mode(struct ice_hw *hw)
1103 {
1104 	if (ice_check_sq_alive(hw, &hw->adminq))
1105 		ice_aq_clear_pxe_mode(hw);
1106 }
1107