1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
3 
4 #include "i40e_type.h"
5 #include "i40e_adminq.h"
6 #include "i40e_prototype.h"
7 #include <linux/avf/virtchnl.h>
8 
9 /**
10  * i40e_set_mac_type - Sets MAC type
11  * @hw: pointer to the HW structure
12  *
13  * This function sets the MAC type of the adapter based on the
14  * vendor ID and device ID stored in the hw structure.
15  **/
16 static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
17 {
18 	i40e_status status = 0;
19 
20 	if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
21 		switch (hw->device_id) {
22 		case I40E_DEV_ID_SFP_XL710:
23 		case I40E_DEV_ID_QEMU:
24 		case I40E_DEV_ID_KX_B:
25 		case I40E_DEV_ID_KX_C:
26 		case I40E_DEV_ID_QSFP_A:
27 		case I40E_DEV_ID_QSFP_B:
28 		case I40E_DEV_ID_QSFP_C:
29 		case I40E_DEV_ID_10G_BASE_T:
30 		case I40E_DEV_ID_10G_BASE_T4:
31 		case I40E_DEV_ID_20G_KR2:
32 		case I40E_DEV_ID_20G_KR2_A:
33 		case I40E_DEV_ID_25G_B:
34 		case I40E_DEV_ID_25G_SFP28:
35 			hw->mac.type = I40E_MAC_XL710;
36 			break;
37 		case I40E_DEV_ID_KX_X722:
38 		case I40E_DEV_ID_QSFP_X722:
39 		case I40E_DEV_ID_SFP_X722:
40 		case I40E_DEV_ID_1G_BASE_T_X722:
41 		case I40E_DEV_ID_10G_BASE_T_X722:
42 		case I40E_DEV_ID_SFP_I_X722:
43 			hw->mac.type = I40E_MAC_X722;
44 			break;
45 		default:
46 			hw->mac.type = I40E_MAC_GENERIC;
47 			break;
48 		}
49 	} else {
50 		status = I40E_ERR_DEVICE_NOT_SUPPORTED;
51 	}
52 
53 	hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
54 		  hw->mac.type, status);
55 	return status;
56 }
57 
58 /**
59  * i40e_aq_str - convert AQ err code to a string
60  * @hw: pointer to the HW structure
61  * @aq_err: the AQ error code to convert
62  **/
63 const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
64 {
65 	switch (aq_err) {
66 	case I40E_AQ_RC_OK:
67 		return "OK";
68 	case I40E_AQ_RC_EPERM:
69 		return "I40E_AQ_RC_EPERM";
70 	case I40E_AQ_RC_ENOENT:
71 		return "I40E_AQ_RC_ENOENT";
72 	case I40E_AQ_RC_ESRCH:
73 		return "I40E_AQ_RC_ESRCH";
74 	case I40E_AQ_RC_EINTR:
75 		return "I40E_AQ_RC_EINTR";
76 	case I40E_AQ_RC_EIO:
77 		return "I40E_AQ_RC_EIO";
78 	case I40E_AQ_RC_ENXIO:
79 		return "I40E_AQ_RC_ENXIO";
80 	case I40E_AQ_RC_E2BIG:
81 		return "I40E_AQ_RC_E2BIG";
82 	case I40E_AQ_RC_EAGAIN:
83 		return "I40E_AQ_RC_EAGAIN";
84 	case I40E_AQ_RC_ENOMEM:
85 		return "I40E_AQ_RC_ENOMEM";
86 	case I40E_AQ_RC_EACCES:
87 		return "I40E_AQ_RC_EACCES";
88 	case I40E_AQ_RC_EFAULT:
89 		return "I40E_AQ_RC_EFAULT";
90 	case I40E_AQ_RC_EBUSY:
91 		return "I40E_AQ_RC_EBUSY";
92 	case I40E_AQ_RC_EEXIST:
93 		return "I40E_AQ_RC_EEXIST";
94 	case I40E_AQ_RC_EINVAL:
95 		return "I40E_AQ_RC_EINVAL";
96 	case I40E_AQ_RC_ENOTTY:
97 		return "I40E_AQ_RC_ENOTTY";
98 	case I40E_AQ_RC_ENOSPC:
99 		return "I40E_AQ_RC_ENOSPC";
100 	case I40E_AQ_RC_ENOSYS:
101 		return "I40E_AQ_RC_ENOSYS";
102 	case I40E_AQ_RC_ERANGE:
103 		return "I40E_AQ_RC_ERANGE";
104 	case I40E_AQ_RC_EFLUSHED:
105 		return "I40E_AQ_RC_EFLUSHED";
106 	case I40E_AQ_RC_BAD_ADDR:
107 		return "I40E_AQ_RC_BAD_ADDR";
108 	case I40E_AQ_RC_EMODE:
109 		return "I40E_AQ_RC_EMODE";
110 	case I40E_AQ_RC_EFBIG:
111 		return "I40E_AQ_RC_EFBIG";
112 	}
113 
114 	snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
115 	return hw->err_str;
116 }
117 
118 /**
119  * i40e_stat_str - convert status err code to a string
120  * @hw: pointer to the HW structure
121  * @stat_err: the status error code to convert
122  **/
123 const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
124 {
125 	switch (stat_err) {
126 	case 0:
127 		return "OK";
128 	case I40E_ERR_NVM:
129 		return "I40E_ERR_NVM";
130 	case I40E_ERR_NVM_CHECKSUM:
131 		return "I40E_ERR_NVM_CHECKSUM";
132 	case I40E_ERR_PHY:
133 		return "I40E_ERR_PHY";
134 	case I40E_ERR_CONFIG:
135 		return "I40E_ERR_CONFIG";
136 	case I40E_ERR_PARAM:
137 		return "I40E_ERR_PARAM";
138 	case I40E_ERR_MAC_TYPE:
139 		return "I40E_ERR_MAC_TYPE";
140 	case I40E_ERR_UNKNOWN_PHY:
141 		return "I40E_ERR_UNKNOWN_PHY";
142 	case I40E_ERR_LINK_SETUP:
143 		return "I40E_ERR_LINK_SETUP";
144 	case I40E_ERR_ADAPTER_STOPPED:
145 		return "I40E_ERR_ADAPTER_STOPPED";
146 	case I40E_ERR_INVALID_MAC_ADDR:
147 		return "I40E_ERR_INVALID_MAC_ADDR";
148 	case I40E_ERR_DEVICE_NOT_SUPPORTED:
149 		return "I40E_ERR_DEVICE_NOT_SUPPORTED";
150 	case I40E_ERR_MASTER_REQUESTS_PENDING:
151 		return "I40E_ERR_MASTER_REQUESTS_PENDING";
152 	case I40E_ERR_INVALID_LINK_SETTINGS:
153 		return "I40E_ERR_INVALID_LINK_SETTINGS";
154 	case I40E_ERR_AUTONEG_NOT_COMPLETE:
155 		return "I40E_ERR_AUTONEG_NOT_COMPLETE";
156 	case I40E_ERR_RESET_FAILED:
157 		return "I40E_ERR_RESET_FAILED";
158 	case I40E_ERR_SWFW_SYNC:
159 		return "I40E_ERR_SWFW_SYNC";
160 	case I40E_ERR_NO_AVAILABLE_VSI:
161 		return "I40E_ERR_NO_AVAILABLE_VSI";
162 	case I40E_ERR_NO_MEMORY:
163 		return "I40E_ERR_NO_MEMORY";
164 	case I40E_ERR_BAD_PTR:
165 		return "I40E_ERR_BAD_PTR";
166 	case I40E_ERR_RING_FULL:
167 		return "I40E_ERR_RING_FULL";
168 	case I40E_ERR_INVALID_PD_ID:
169 		return "I40E_ERR_INVALID_PD_ID";
170 	case I40E_ERR_INVALID_QP_ID:
171 		return "I40E_ERR_INVALID_QP_ID";
172 	case I40E_ERR_INVALID_CQ_ID:
173 		return "I40E_ERR_INVALID_CQ_ID";
174 	case I40E_ERR_INVALID_CEQ_ID:
175 		return "I40E_ERR_INVALID_CEQ_ID";
176 	case I40E_ERR_INVALID_AEQ_ID:
177 		return "I40E_ERR_INVALID_AEQ_ID";
178 	case I40E_ERR_INVALID_SIZE:
179 		return "I40E_ERR_INVALID_SIZE";
180 	case I40E_ERR_INVALID_ARP_INDEX:
181 		return "I40E_ERR_INVALID_ARP_INDEX";
182 	case I40E_ERR_INVALID_FPM_FUNC_ID:
183 		return "I40E_ERR_INVALID_FPM_FUNC_ID";
184 	case I40E_ERR_QP_INVALID_MSG_SIZE:
185 		return "I40E_ERR_QP_INVALID_MSG_SIZE";
186 	case I40E_ERR_QP_TOOMANY_WRS_POSTED:
187 		return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
188 	case I40E_ERR_INVALID_FRAG_COUNT:
189 		return "I40E_ERR_INVALID_FRAG_COUNT";
190 	case I40E_ERR_QUEUE_EMPTY:
191 		return "I40E_ERR_QUEUE_EMPTY";
192 	case I40E_ERR_INVALID_ALIGNMENT:
193 		return "I40E_ERR_INVALID_ALIGNMENT";
194 	case I40E_ERR_FLUSHED_QUEUE:
195 		return "I40E_ERR_FLUSHED_QUEUE";
196 	case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
197 		return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
198 	case I40E_ERR_INVALID_IMM_DATA_SIZE:
199 		return "I40E_ERR_INVALID_IMM_DATA_SIZE";
200 	case I40E_ERR_TIMEOUT:
201 		return "I40E_ERR_TIMEOUT";
202 	case I40E_ERR_OPCODE_MISMATCH:
203 		return "I40E_ERR_OPCODE_MISMATCH";
204 	case I40E_ERR_CQP_COMPL_ERROR:
205 		return "I40E_ERR_CQP_COMPL_ERROR";
206 	case I40E_ERR_INVALID_VF_ID:
207 		return "I40E_ERR_INVALID_VF_ID";
208 	case I40E_ERR_INVALID_HMCFN_ID:
209 		return "I40E_ERR_INVALID_HMCFN_ID";
210 	case I40E_ERR_BACKING_PAGE_ERROR:
211 		return "I40E_ERR_BACKING_PAGE_ERROR";
212 	case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
213 		return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
214 	case I40E_ERR_INVALID_PBLE_INDEX:
215 		return "I40E_ERR_INVALID_PBLE_INDEX";
216 	case I40E_ERR_INVALID_SD_INDEX:
217 		return "I40E_ERR_INVALID_SD_INDEX";
218 	case I40E_ERR_INVALID_PAGE_DESC_INDEX:
219 		return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
220 	case I40E_ERR_INVALID_SD_TYPE:
221 		return "I40E_ERR_INVALID_SD_TYPE";
222 	case I40E_ERR_MEMCPY_FAILED:
223 		return "I40E_ERR_MEMCPY_FAILED";
224 	case I40E_ERR_INVALID_HMC_OBJ_INDEX:
225 		return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
226 	case I40E_ERR_INVALID_HMC_OBJ_COUNT:
227 		return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
228 	case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
229 		return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
230 	case I40E_ERR_SRQ_ENABLED:
231 		return "I40E_ERR_SRQ_ENABLED";
232 	case I40E_ERR_ADMIN_QUEUE_ERROR:
233 		return "I40E_ERR_ADMIN_QUEUE_ERROR";
234 	case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
235 		return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
236 	case I40E_ERR_BUF_TOO_SHORT:
237 		return "I40E_ERR_BUF_TOO_SHORT";
238 	case I40E_ERR_ADMIN_QUEUE_FULL:
239 		return "I40E_ERR_ADMIN_QUEUE_FULL";
240 	case I40E_ERR_ADMIN_QUEUE_NO_WORK:
241 		return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
242 	case I40E_ERR_BAD_IWARP_CQE:
243 		return "I40E_ERR_BAD_IWARP_CQE";
244 	case I40E_ERR_NVM_BLANK_MODE:
245 		return "I40E_ERR_NVM_BLANK_MODE";
246 	case I40E_ERR_NOT_IMPLEMENTED:
247 		return "I40E_ERR_NOT_IMPLEMENTED";
248 	case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
249 		return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
250 	case I40E_ERR_DIAG_TEST_FAILED:
251 		return "I40E_ERR_DIAG_TEST_FAILED";
252 	case I40E_ERR_NOT_READY:
253 		return "I40E_ERR_NOT_READY";
254 	case I40E_NOT_SUPPORTED:
255 		return "I40E_NOT_SUPPORTED";
256 	case I40E_ERR_FIRMWARE_API_VERSION:
257 		return "I40E_ERR_FIRMWARE_API_VERSION";
258 	case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
259 		return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
260 	}
261 
262 	snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
263 	return hw->err_str;
264 }
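
/* Illustrative usage sketch (comment only, not compiled into the driver):
 * callers typically pair the two helpers above when logging an AdminQ
 * failure, reporting both the driver status and the firmware's last AQ
 * return code.  The "pf", "ret" and "config" names are assumed caller
 * context and are not defined in this file:
 *
 *	ret = i40e_aq_set_phy_config(&pf->hw, &config, NULL);
 *	if (ret)
 *		dev_info(&pf->pdev->dev,
 *			 "set phy config failed, err %s aq_err %s\n",
 *			 i40e_stat_str(&pf->hw, ret),
 *			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 */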
265 
266 /**
267  * i40e_debug_aq
268  * @hw: pointer to the hw struct
269  * @mask: debug mask
270  * @desc: pointer to admin queue descriptor
271  * @buffer: pointer to command buffer
272  * @buf_len: max length of buffer
273  *
274  * Dumps a debug log of the admin queue command, including descriptor contents.
275  **/
276 void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
277 		   void *buffer, u16 buf_len)
278 {
279 	struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
280 	u16 len;
281 	u8 *buf = (u8 *)buffer;
282 
283 	if ((!(mask & hw->debug_mask)) || (desc == NULL))
284 		return;
285 
286 	len = le16_to_cpu(aq_desc->datalen);
287 
288 	i40e_debug(hw, mask,
289 		   "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
290 		   le16_to_cpu(aq_desc->opcode),
291 		   le16_to_cpu(aq_desc->flags),
292 		   le16_to_cpu(aq_desc->datalen),
293 		   le16_to_cpu(aq_desc->retval));
294 	i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
295 		   le32_to_cpu(aq_desc->cookie_high),
296 		   le32_to_cpu(aq_desc->cookie_low));
297 	i40e_debug(hw, mask, "\tparam (0,1)  0x%08X 0x%08X\n",
298 		   le32_to_cpu(aq_desc->params.internal.param0),
299 		   le32_to_cpu(aq_desc->params.internal.param1));
300 	i40e_debug(hw, mask, "\taddr (h,l)   0x%08X 0x%08X\n",
301 		   le32_to_cpu(aq_desc->params.external.addr_high),
302 		   le32_to_cpu(aq_desc->params.external.addr_low));
303 
304 	if ((buffer != NULL) && (aq_desc->datalen != 0)) {
305 		i40e_debug(hw, mask, "AQ CMD Buffer:\n");
306 		if (buf_len < len)
307 			len = buf_len;
308 		/* write the full 16-byte chunks */
309 		if (hw->debug_mask & mask) {
310 			char prefix[27];
311 
312 			snprintf(prefix, sizeof(prefix),
313 				 "i40e %02x:%02x.%x: \t0x",
314 				 hw->bus.bus_id,
315 				 hw->bus.device,
316 				 hw->bus.func);
317 
318 			print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
319 				       16, 1, buf, len, false);
320 		}
321 	}
322 }
323 
324 /**
325  * i40e_check_asq_alive
326  * @hw: pointer to the hw struct
327  *
328  * Returns true if the admin send queue is enabled, else false.
329  **/
330 bool i40e_check_asq_alive(struct i40e_hw *hw)
331 {
332 	if (hw->aq.asq.len)
333 		return !!(rd32(hw, hw->aq.asq.len) &
334 			  I40E_PF_ATQLEN_ATQENABLE_MASK);
335 	else
336 		return false;
337 }
338 
339 /**
340  * i40e_aq_queue_shutdown
341  * @hw: pointer to the hw struct
342  * @unloading: is the driver unloading itself
343  *
344  * Tell the Firmware that we're shutting down the AdminQ and whether
345  * or not the driver is unloading as well.
346  **/
347 i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
348 					     bool unloading)
349 {
350 	struct i40e_aq_desc desc;
351 	struct i40e_aqc_queue_shutdown *cmd =
352 		(struct i40e_aqc_queue_shutdown *)&desc.params.raw;
353 	i40e_status status;
354 
355 	i40e_fill_default_direct_cmd_desc(&desc,
356 					  i40e_aqc_opc_queue_shutdown);
357 
358 	if (unloading)
359 		cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
360 	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
361 
362 	return status;
363 }
364 
365 /**
366  * i40e_aq_get_set_rss_lut
367  * @hw: pointer to the hardware structure
368  * @vsi_id: vsi fw index
369  * @pf_lut: for PF table set true, for VSI table set false
370  * @lut: pointer to the lut buffer provided by the caller
371  * @lut_size: size of the lut buffer
372  * @set: set true to set the table, false to get the table
373  *
374  * Internal function to get or set the RSS lookup table
375  **/
376 static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
377 					   u16 vsi_id, bool pf_lut,
378 					   u8 *lut, u16 lut_size,
379 					   bool set)
380 {
381 	i40e_status status;
382 	struct i40e_aq_desc desc;
383 	struct i40e_aqc_get_set_rss_lut *cmd_resp =
384 		   (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
385 
386 	if (set)
387 		i40e_fill_default_direct_cmd_desc(&desc,
388 						  i40e_aqc_opc_set_rss_lut);
389 	else
390 		i40e_fill_default_direct_cmd_desc(&desc,
391 						  i40e_aqc_opc_get_rss_lut);
392 
393 	/* Indirect command */
394 	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
395 	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
396 
397 	cmd_resp->vsi_id =
398 			cpu_to_le16((u16)((vsi_id <<
399 					  I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
400 					  I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
401 	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
402 
403 	if (pf_lut)
404 		cmd_resp->flags |= cpu_to_le16((u16)
405 					((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
406 					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
407 					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
408 	else
409 		cmd_resp->flags |= cpu_to_le16((u16)
410 					((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
411 					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
412 					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
413 
414 	status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);
415 
416 	return status;
417 }
418 
419 /**
420  * i40e_aq_get_rss_lut
421  * @hw: pointer to the hardware structure
422  * @vsi_id: vsi fw index
423  * @pf_lut: for PF table set true, for VSI table set false
424  * @lut: pointer to the lut buffer provided by the caller
425  * @lut_size: size of the lut buffer
426  *
427  * get the RSS lookup table, PF or VSI type
428  **/
429 i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
430 				bool pf_lut, u8 *lut, u16 lut_size)
431 {
432 	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
433 				       false);
434 }
435 
436 /**
437  * i40e_aq_set_rss_lut
438  * @hw: pointer to the hardware structure
439  * @vsi_id: vsi fw index
440  * @pf_lut: for PF table set true, for VSI table set false
441  * @lut: pointer to the lut buffer provided by the caller
442  * @lut_size: size of the lut buffer
443  *
444  * set the RSS lookup table, PF or VSI type
445  **/
446 i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
447 				bool pf_lut, u8 *lut, u16 lut_size)
448 {
449 	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
450 }
451 
452 /**
453  * i40e_aq_get_set_rss_key
454  * @hw: pointer to the hw struct
455  * @vsi_id: vsi fw index
456  * @key: pointer to key info struct
457  * @set: set true to set the key, false to get the key
458  *
459  * Internal function to get or set the RSS key for a VSI
460  **/
461 static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
462 				      u16 vsi_id,
463 				      struct i40e_aqc_get_set_rss_key_data *key,
464 				      bool set)
465 {
466 	i40e_status status;
467 	struct i40e_aq_desc desc;
468 	struct i40e_aqc_get_set_rss_key *cmd_resp =
469 			(struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
470 	u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
471 
472 	if (set)
473 		i40e_fill_default_direct_cmd_desc(&desc,
474 						  i40e_aqc_opc_set_rss_key);
475 	else
476 		i40e_fill_default_direct_cmd_desc(&desc,
477 						  i40e_aqc_opc_get_rss_key);
478 
479 	/* Indirect command */
480 	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
481 	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
482 
483 	cmd_resp->vsi_id =
484 			cpu_to_le16((u16)((vsi_id <<
485 					  I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
486 					  I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
487 	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
488 
489 	status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);
490 
491 	return status;
492 }
493 
494 /**
495  * i40e_aq_get_rss_key
496  * @hw: pointer to the hw struct
497  * @vsi_id: vsi fw index
498  * @key: pointer to key info struct
499  *
500  **/
501 i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
502 				u16 vsi_id,
503 				struct i40e_aqc_get_set_rss_key_data *key)
504 {
505 	return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
506 }
507 
508 /**
509  * i40e_aq_set_rss_key
510  * @hw: pointer to the hw struct
511  * @vsi_id: vsi fw index
512  * @key: pointer to key info struct
513  *
514  * set the RSS key per VSI
515  **/
516 i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
517 				u16 vsi_id,
518 				struct i40e_aqc_get_set_rss_key_data *key)
519 {
520 	return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
521 }
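
/* Illustrative sketch (comment only, not compiled): a typical flow programs
 * a hash key and then a lookup table for a VSI using the wrappers above.
 * The "vsi", "ret", "lut" and "lut_size" names are assumed caller state,
 * not defined in this file:
 *
 *	struct i40e_aqc_get_set_rss_key_data key;
 *
 *	netdev_rss_key_fill(&key, sizeof(key));
 *	ret = i40e_aq_set_rss_key(hw, vsi->id, &key);
 *	if (!ret)
 *		ret = i40e_aq_set_rss_lut(hw, vsi->id, false, lut, lut_size);
 */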
522 
523 /* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
524  * hardware to a bit-field that can be used by SW to more easily determine the
525  * packet type.
526  *
527  * Macros are used to shorten the table lines and make this table human
528  * readable.
529  *
530  * We store the PTYPE in the top byte of the bit field - this is just so that
531  * we can check that the table doesn't have a row missing, as the index into
532  * the table should be the PTYPE.
533  *
534  * Typical work flow:
535  *
536  * IF NOT i40e_ptype_lookup[ptype].known
537  * THEN
538  *      Packet is unknown
539  * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
540  *      Use the rest of the fields to look at the tunnels, inner protocols, etc
541  * ELSE
542  *      Use the enum i40e_rx_l2_ptype to decode the packet type
543  * ENDIF
544  */
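
/* Illustrative sketch (comment only, not compiled) of the workflow above,
 * using the decoded field names as declared for this table in i40e_type.h;
 * "ptype" is the 8-bit value the caller takes from the Rx descriptor:
 *
 *	struct i40e_rx_ptype_decoded decoded = i40e_ptype_lookup[ptype];
 *
 *	if (!decoded.known)
 *		return;		// unknown packet type, skip offloads
 *	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP)
 *		...		// inspect outer_ip_ver, tunnel_type, inner_prot
 *	else
 *		...		// treat as an L2-only packet type
 */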
545 
546 /* macro to make the table lines short */
547 #define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
548 	{	PTYPE, \
549 		1, \
550 		I40E_RX_PTYPE_OUTER_##OUTER_IP, \
551 		I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
552 		I40E_RX_PTYPE_##OUTER_FRAG, \
553 		I40E_RX_PTYPE_TUNNEL_##T, \
554 		I40E_RX_PTYPE_TUNNEL_END_##TE, \
555 		I40E_RX_PTYPE_##TEF, \
556 		I40E_RX_PTYPE_INNER_PROT_##I, \
557 		I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
558 
559 #define I40E_PTT_UNUSED_ENTRY(PTYPE) \
560 		{ PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
561 
562 /* shorter macros make the table fit but are terse */
563 #define I40E_RX_PTYPE_NOF		I40E_RX_PTYPE_NOT_FRAG
564 #define I40E_RX_PTYPE_FRG		I40E_RX_PTYPE_FRAG
565 #define I40E_RX_PTYPE_INNER_PROT_TS	I40E_RX_PTYPE_INNER_PROT_TIMESYNC
566 
567 /* Lookup table mapping the HW PTYPE to the bit field for decoding */
568 struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
569 	/* L2 Packet types */
570 	I40E_PTT_UNUSED_ENTRY(0),
571 	I40E_PTT(1,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
572 	I40E_PTT(2,  L2, NONE, NOF, NONE, NONE, NOF, TS,   PAY2),
573 	I40E_PTT(3,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
574 	I40E_PTT_UNUSED_ENTRY(4),
575 	I40E_PTT_UNUSED_ENTRY(5),
576 	I40E_PTT(6,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
577 	I40E_PTT(7,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
578 	I40E_PTT_UNUSED_ENTRY(8),
579 	I40E_PTT_UNUSED_ENTRY(9),
580 	I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
581 	I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
582 	I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
583 	I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
584 	I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
585 	I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
586 	I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
587 	I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
588 	I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
589 	I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
590 	I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
591 	I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
592 
593 	/* Non Tunneled IPv4 */
594 	I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
595 	I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
596 	I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP,  PAY4),
597 	I40E_PTT_UNUSED_ENTRY(25),
598 	I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP,  PAY4),
599 	I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
600 	I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
601 
602 	/* IPv4 --> IPv4 */
603 	I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
604 	I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
605 	I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP,  PAY4),
606 	I40E_PTT_UNUSED_ENTRY(32),
607 	I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP,  PAY4),
608 	I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
609 	I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
610 
611 	/* IPv4 --> IPv6 */
612 	I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
613 	I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
614 	I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP,  PAY4),
615 	I40E_PTT_UNUSED_ENTRY(39),
616 	I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP,  PAY4),
617 	I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
618 	I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
619 
620 	/* IPv4 --> GRE/NAT */
621 	I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
622 
623 	/* IPv4 --> GRE/NAT --> IPv4 */
624 	I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
625 	I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
626 	I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP,  PAY4),
627 	I40E_PTT_UNUSED_ENTRY(47),
628 	I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP,  PAY4),
629 	I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
630 	I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
631 
632 	/* IPv4 --> GRE/NAT --> IPv6 */
633 	I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
634 	I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
635 	I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP,  PAY4),
636 	I40E_PTT_UNUSED_ENTRY(54),
637 	I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP,  PAY4),
638 	I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
639 	I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
640 
641 	/* IPv4 --> GRE/NAT --> MAC */
642 	I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
643 
644 	/* IPv4 --> GRE/NAT --> MAC --> IPv4 */
645 	I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
646 	I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
647 	I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP,  PAY4),
648 	I40E_PTT_UNUSED_ENTRY(62),
649 	I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP,  PAY4),
650 	I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
651 	I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
652 
653 	/* IPv4 --> GRE/NAT -> MAC --> IPv6 */
654 	I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
655 	I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
656 	I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP,  PAY4),
657 	I40E_PTT_UNUSED_ENTRY(69),
658 	I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP,  PAY4),
659 	I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
660 	I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
661 
662 	/* IPv4 --> GRE/NAT --> MAC/VLAN */
663 	I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
664 
665 	/* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
666 	I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
667 	I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
668 	I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP,  PAY4),
669 	I40E_PTT_UNUSED_ENTRY(77),
670 	I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP,  PAY4),
671 	I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
672 	I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
673 
674 	/* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
675 	I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
676 	I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
677 	I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP,  PAY4),
678 	I40E_PTT_UNUSED_ENTRY(84),
679 	I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP,  PAY4),
680 	I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
681 	I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
682 
683 	/* Non Tunneled IPv6 */
684 	I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
685 	I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
686 	I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP,  PAY4),
687 	I40E_PTT_UNUSED_ENTRY(91),
688 	I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP,  PAY4),
689 	I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
690 	I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
691 
692 	/* IPv6 --> IPv4 */
693 	I40E_PTT(95,  IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
694 	I40E_PTT(96,  IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
695 	I40E_PTT(97,  IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP,  PAY4),
696 	I40E_PTT_UNUSED_ENTRY(98),
697 	I40E_PTT(99,  IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP,  PAY4),
698 	I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
699 	I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
700 
701 	/* IPv6 --> IPv6 */
702 	I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
703 	I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
704 	I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP,  PAY4),
705 	I40E_PTT_UNUSED_ENTRY(105),
706 	I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP,  PAY4),
707 	I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
708 	I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
709 
710 	/* IPv6 --> GRE/NAT */
711 	I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
712 
713 	/* IPv6 --> GRE/NAT -> IPv4 */
714 	I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
715 	I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
716 	I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP,  PAY4),
717 	I40E_PTT_UNUSED_ENTRY(113),
718 	I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP,  PAY4),
719 	I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
720 	I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
721 
722 	/* IPv6 --> GRE/NAT -> IPv6 */
723 	I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
724 	I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
725 	I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP,  PAY4),
726 	I40E_PTT_UNUSED_ENTRY(120),
727 	I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP,  PAY4),
728 	I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
729 	I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
730 
731 	/* IPv6 --> GRE/NAT -> MAC */
732 	I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
733 
734 	/* IPv6 --> GRE/NAT -> MAC -> IPv4 */
735 	I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
736 	I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
737 	I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP,  PAY4),
738 	I40E_PTT_UNUSED_ENTRY(128),
739 	I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP,  PAY4),
740 	I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
741 	I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
742 
743 	/* IPv6 --> GRE/NAT -> MAC -> IPv6 */
744 	I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
745 	I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
746 	I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP,  PAY4),
747 	I40E_PTT_UNUSED_ENTRY(135),
748 	I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP,  PAY4),
749 	I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
750 	I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
751 
752 	/* IPv6 --> GRE/NAT -> MAC/VLAN */
753 	I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
754 
755 	/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
756 	I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
757 	I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
758 	I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP,  PAY4),
759 	I40E_PTT_UNUSED_ENTRY(143),
760 	I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP,  PAY4),
761 	I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
762 	I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
763 
764 	/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
765 	I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
766 	I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
767 	I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP,  PAY4),
768 	I40E_PTT_UNUSED_ENTRY(150),
769 	I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP,  PAY4),
770 	I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
771 	I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
772 
773 	/* unused entries */
774 	I40E_PTT_UNUSED_ENTRY(154),
775 	I40E_PTT_UNUSED_ENTRY(155),
776 	I40E_PTT_UNUSED_ENTRY(156),
777 	I40E_PTT_UNUSED_ENTRY(157),
778 	I40E_PTT_UNUSED_ENTRY(158),
779 	I40E_PTT_UNUSED_ENTRY(159),
780 
781 	I40E_PTT_UNUSED_ENTRY(160),
782 	I40E_PTT_UNUSED_ENTRY(161),
783 	I40E_PTT_UNUSED_ENTRY(162),
784 	I40E_PTT_UNUSED_ENTRY(163),
785 	I40E_PTT_UNUSED_ENTRY(164),
786 	I40E_PTT_UNUSED_ENTRY(165),
787 	I40E_PTT_UNUSED_ENTRY(166),
788 	I40E_PTT_UNUSED_ENTRY(167),
789 	I40E_PTT_UNUSED_ENTRY(168),
790 	I40E_PTT_UNUSED_ENTRY(169),
791 
792 	I40E_PTT_UNUSED_ENTRY(170),
793 	I40E_PTT_UNUSED_ENTRY(171),
794 	I40E_PTT_UNUSED_ENTRY(172),
795 	I40E_PTT_UNUSED_ENTRY(173),
796 	I40E_PTT_UNUSED_ENTRY(174),
797 	I40E_PTT_UNUSED_ENTRY(175),
798 	I40E_PTT_UNUSED_ENTRY(176),
799 	I40E_PTT_UNUSED_ENTRY(177),
800 	I40E_PTT_UNUSED_ENTRY(178),
801 	I40E_PTT_UNUSED_ENTRY(179),
802 
803 	I40E_PTT_UNUSED_ENTRY(180),
804 	I40E_PTT_UNUSED_ENTRY(181),
805 	I40E_PTT_UNUSED_ENTRY(182),
806 	I40E_PTT_UNUSED_ENTRY(183),
807 	I40E_PTT_UNUSED_ENTRY(184),
808 	I40E_PTT_UNUSED_ENTRY(185),
809 	I40E_PTT_UNUSED_ENTRY(186),
810 	I40E_PTT_UNUSED_ENTRY(187),
811 	I40E_PTT_UNUSED_ENTRY(188),
812 	I40E_PTT_UNUSED_ENTRY(189),
813 
814 	I40E_PTT_UNUSED_ENTRY(190),
815 	I40E_PTT_UNUSED_ENTRY(191),
816 	I40E_PTT_UNUSED_ENTRY(192),
817 	I40E_PTT_UNUSED_ENTRY(193),
818 	I40E_PTT_UNUSED_ENTRY(194),
819 	I40E_PTT_UNUSED_ENTRY(195),
820 	I40E_PTT_UNUSED_ENTRY(196),
821 	I40E_PTT_UNUSED_ENTRY(197),
822 	I40E_PTT_UNUSED_ENTRY(198),
823 	I40E_PTT_UNUSED_ENTRY(199),
824 
825 	I40E_PTT_UNUSED_ENTRY(200),
826 	I40E_PTT_UNUSED_ENTRY(201),
827 	I40E_PTT_UNUSED_ENTRY(202),
828 	I40E_PTT_UNUSED_ENTRY(203),
829 	I40E_PTT_UNUSED_ENTRY(204),
830 	I40E_PTT_UNUSED_ENTRY(205),
831 	I40E_PTT_UNUSED_ENTRY(206),
832 	I40E_PTT_UNUSED_ENTRY(207),
833 	I40E_PTT_UNUSED_ENTRY(208),
834 	I40E_PTT_UNUSED_ENTRY(209),
835 
836 	I40E_PTT_UNUSED_ENTRY(210),
837 	I40E_PTT_UNUSED_ENTRY(211),
838 	I40E_PTT_UNUSED_ENTRY(212),
839 	I40E_PTT_UNUSED_ENTRY(213),
840 	I40E_PTT_UNUSED_ENTRY(214),
841 	I40E_PTT_UNUSED_ENTRY(215),
842 	I40E_PTT_UNUSED_ENTRY(216),
843 	I40E_PTT_UNUSED_ENTRY(217),
844 	I40E_PTT_UNUSED_ENTRY(218),
845 	I40E_PTT_UNUSED_ENTRY(219),
846 
847 	I40E_PTT_UNUSED_ENTRY(220),
848 	I40E_PTT_UNUSED_ENTRY(221),
849 	I40E_PTT_UNUSED_ENTRY(222),
850 	I40E_PTT_UNUSED_ENTRY(223),
851 	I40E_PTT_UNUSED_ENTRY(224),
852 	I40E_PTT_UNUSED_ENTRY(225),
853 	I40E_PTT_UNUSED_ENTRY(226),
854 	I40E_PTT_UNUSED_ENTRY(227),
855 	I40E_PTT_UNUSED_ENTRY(228),
856 	I40E_PTT_UNUSED_ENTRY(229),
857 
858 	I40E_PTT_UNUSED_ENTRY(230),
859 	I40E_PTT_UNUSED_ENTRY(231),
860 	I40E_PTT_UNUSED_ENTRY(232),
861 	I40E_PTT_UNUSED_ENTRY(233),
862 	I40E_PTT_UNUSED_ENTRY(234),
863 	I40E_PTT_UNUSED_ENTRY(235),
864 	I40E_PTT_UNUSED_ENTRY(236),
865 	I40E_PTT_UNUSED_ENTRY(237),
866 	I40E_PTT_UNUSED_ENTRY(238),
867 	I40E_PTT_UNUSED_ENTRY(239),
868 
869 	I40E_PTT_UNUSED_ENTRY(240),
870 	I40E_PTT_UNUSED_ENTRY(241),
871 	I40E_PTT_UNUSED_ENTRY(242),
872 	I40E_PTT_UNUSED_ENTRY(243),
873 	I40E_PTT_UNUSED_ENTRY(244),
874 	I40E_PTT_UNUSED_ENTRY(245),
875 	I40E_PTT_UNUSED_ENTRY(246),
876 	I40E_PTT_UNUSED_ENTRY(247),
877 	I40E_PTT_UNUSED_ENTRY(248),
878 	I40E_PTT_UNUSED_ENTRY(249),
879 
880 	I40E_PTT_UNUSED_ENTRY(250),
881 	I40E_PTT_UNUSED_ENTRY(251),
882 	I40E_PTT_UNUSED_ENTRY(252),
883 	I40E_PTT_UNUSED_ENTRY(253),
884 	I40E_PTT_UNUSED_ENTRY(254),
885 	I40E_PTT_UNUSED_ENTRY(255)
886 };
887 
888 /**
889  * i40e_init_shared_code - Initialize the shared code
890  * @hw: pointer to hardware structure
891  *
892  * This assigns the MAC type and PHY code and inits the NVM. Aside from a few
893  * identification register reads, it does not touch the hardware. This function
894  * must be called prior to any other function in the shared code. The i40e_hw
895  * structure should be memset to 0 prior to calling this function. The following
896  * fields in the hw structure should be filled in prior to calling this
897  * function: hw_addr, back, device_id, vendor_id, subsystem_device_id,
898  * subsystem_vendor_id, and revision_id
899  **/
900 i40e_status i40e_init_shared_code(struct i40e_hw *hw)
901 {
902 	i40e_status status = 0;
903 	u32 port, ari, func_rid;
904 
905 	i40e_set_mac_type(hw);
906 
907 	switch (hw->mac.type) {
908 	case I40E_MAC_XL710:
909 	case I40E_MAC_X722:
910 		break;
911 	default:
912 		return I40E_ERR_DEVICE_NOT_SUPPORTED;
913 	}
914 
915 	hw->phy.get_link_info = true;
916 
917 	/* Determine port number and PF number */
918 	port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK)
919 					   >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
920 	hw->port = (u8)port;
921 	ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >>
922 						 I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
923 	func_rid = rd32(hw, I40E_PF_FUNC_RID);
924 	if (ari)
925 		hw->pf_id = (u8)(func_rid & 0xff);
926 	else
927 		hw->pf_id = (u8)(func_rid & 0x7);
928 
929 	if (hw->mac.type == I40E_MAC_X722)
930 		hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
931 			     I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
932 
933 	status = i40e_init_nvm(hw);
934 	return status;
935 }
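
/* Illustrative sketch (comment only, not compiled) of the setup described
 * above, roughly as a PCI probe routine would do it; "pf", "pdev" and "err"
 * are assumed caller context, not defined in this file:
 *
 *	struct i40e_hw *hw = &pf->hw;
 *
 *	memset(hw, 0, sizeof(*hw));
 *	hw->back = pf;
 *	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
 *			      pci_resource_len(pdev, 0));
 *	hw->vendor_id = pdev->vendor;
 *	hw->device_id = pdev->device;
 *	hw->subsystem_vendor_id = pdev->subsystem_vendor;
 *	hw->subsystem_device_id = pdev->subsystem_device;
 *	hw->revision_id = pdev->revision;
 *	err = i40e_init_shared_code(hw);
 */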
936 
937 /**
938  * i40e_aq_mac_address_read - Retrieve the MAC addresses
939  * @hw: pointer to the hw struct
940  * @flags: a return indicator of what addresses were added to the addr store
941  * @addrs: the requestor's mac addr store
942  * @cmd_details: pointer to command details structure or NULL
943  **/
944 static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw,
945 				   u16 *flags,
946 				   struct i40e_aqc_mac_address_read_data *addrs,
947 				   struct i40e_asq_cmd_details *cmd_details)
948 {
949 	struct i40e_aq_desc desc;
950 	struct i40e_aqc_mac_address_read *cmd_data =
951 		(struct i40e_aqc_mac_address_read *)&desc.params.raw;
952 	i40e_status status;
953 
954 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
955 	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);
956 
957 	status = i40e_asq_send_command(hw, &desc, addrs,
958 				       sizeof(*addrs), cmd_details);
959 	*flags = le16_to_cpu(cmd_data->command_flags);
960 
961 	return status;
962 }
963 
964 /**
965  * i40e_aq_mac_address_write - Change the MAC addresses
966  * @hw: pointer to the hw struct
967  * @flags: indicates which MAC to be written
968  * @mac_addr: address to write
969  * @cmd_details: pointer to command details structure or NULL
970  **/
971 i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
972 				    u16 flags, u8 *mac_addr,
973 				    struct i40e_asq_cmd_details *cmd_details)
974 {
975 	struct i40e_aq_desc desc;
976 	struct i40e_aqc_mac_address_write *cmd_data =
977 		(struct i40e_aqc_mac_address_write *)&desc.params.raw;
978 	i40e_status status;
979 
980 	i40e_fill_default_direct_cmd_desc(&desc,
981 					  i40e_aqc_opc_mac_address_write);
982 	cmd_data->command_flags = cpu_to_le16(flags);
983 	cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]);
984 	cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) |
985 					((u32)mac_addr[3] << 16) |
986 					((u32)mac_addr[4] << 8) |
987 					mac_addr[5]);
988 
989 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
990 
991 	return status;
992 }
993 
994 /**
995  * i40e_get_mac_addr - get MAC address
996  * @hw: pointer to the HW structure
997  * @mac_addr: pointer to MAC address
998  *
999  * Reads the adapter's LAN MAC address via the admin queue
1000  **/
1001 i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
1002 {
1003 	struct i40e_aqc_mac_address_read_data addrs;
1004 	i40e_status status;
1005 	u16 flags = 0;
1006 
1007 	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
1008 
1009 	if (flags & I40E_AQC_LAN_ADDR_VALID)
1010 		ether_addr_copy(mac_addr, addrs.pf_lan_mac);
1011 
1012 	return status;
1013 }
1014 
1015 /**
1016  * i40e_get_port_mac_addr - get Port MAC address
1017  * @hw: pointer to the HW structure
1018  * @mac_addr: pointer to Port MAC address
1019  *
1020  * Reads the adapter's Port MAC address
1021  **/
1022 i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
1023 {
1024 	struct i40e_aqc_mac_address_read_data addrs;
1025 	i40e_status status;
1026 	u16 flags = 0;
1027 
1028 	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
1029 	if (status)
1030 		return status;
1031 
1032 	if (flags & I40E_AQC_PORT_ADDR_VALID)
1033 		ether_addr_copy(mac_addr, addrs.port_mac);
1034 	else
1035 		status = I40E_ERR_INVALID_MAC_ADDR;
1036 
1037 	return status;
1038 }
1039 
1040 /**
1041  * i40e_pre_tx_queue_cfg - pre tx queue configure
1042  * @hw: pointer to the HW structure
1043  * @queue: target PF queue index
1044  * @enable: state change request
1045  *
1046  * Handles the hardware requirement to indicate the intention to enable
1047  * or disable the target queue.
1048  **/
1049 void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
1050 {
1051 	u32 abs_queue_idx = hw->func_caps.base_queue + queue;
1052 	u32 reg_block = 0;
1053 	u32 reg_val;
1054 
1055 	if (abs_queue_idx >= 128) {
1056 		reg_block = abs_queue_idx / 128;
1057 		abs_queue_idx %= 128;
1058 	}
1059 
1060 	reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
1061 	reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
1062 	reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
1063 
1064 	if (enable)
1065 		reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
1066 	else
1067 		reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
1068 
1069 	wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
1070 }
1071 
1072 /**
1073  *  i40e_read_pba_string - Reads part number string from EEPROM
1074  *  @hw: pointer to hardware structure
1075  *  @pba_num: stores the part number string from the EEPROM
1076  *  @pba_num_size: part number string buffer length
1077  *
1078  *  Reads the part number string from the EEPROM.
1079  **/
1080 i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
1081 				 u32 pba_num_size)
1082 {
1083 	i40e_status status = 0;
1084 	u16 pba_word = 0;
1085 	u16 pba_size = 0;
1086 	u16 pba_ptr = 0;
1087 	u16 i = 0;
1088 
1089 	status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
1090 	if (status || (pba_word != 0xFAFA)) {
1091 		hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n");
1092 		return status;
1093 	}
1094 
1095 	status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
1096 	if (status) {
1097 		hw_dbg(hw, "Failed to read PBA Block pointer.\n");
1098 		return status;
1099 	}
1100 
1101 	status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
1102 	if (status) {
1103 		hw_dbg(hw, "Failed to read PBA Block size.\n");
1104 		return status;
1105 	}
1106 
1107 	/* Subtract one to get PBA word count (PBA Size word is included in
1108 	 * total size)
1109 	 */
1110 	pba_size--;
1111 	if (pba_num_size < (((u32)pba_size * 2) + 1)) {
1112 		hw_dbg(hw, "Buffer to small for PBA data.\n");
1113 		return I40E_ERR_PARAM;
1114 	}
1115 
1116 	for (i = 0; i < pba_size; i++) {
1117 		status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word);
1118 		if (status) {
1119 			hw_dbg(hw, "Failed to read PBA Block word %d.\n", i);
1120 			return status;
1121 		}
1122 
1123 		pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
1124 		pba_num[(i * 2) + 1] = pba_word & 0xFF;
1125 	}
1126 	pba_num[(pba_size * 2)] = '\0';
1127 
1128 	return status;
1129 }
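
/* For reference, the Shadow RAM layout consumed above is (16-bit words):
 *
 *	I40E_SR_PBA_FLAGS	-> 0xFAFA when the PBA is stored as a block
 *	I40E_SR_PBA_BLOCK_PTR	-> pba_ptr, pointer to the PBA block
 *	pba_ptr + 0		-> block size in words (includes this word)
 *	pba_ptr + 1 ...		-> PBA string, two ASCII chars per word,
 *				   most significant byte first
 */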
1130 
1131 /**
1132  * i40e_get_media_type - Gets media type
1133  * @hw: pointer to the hardware structure
1134  **/
1135 static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
1136 {
1137 	enum i40e_media_type media;
1138 
1139 	switch (hw->phy.link_info.phy_type) {
1140 	case I40E_PHY_TYPE_10GBASE_SR:
1141 	case I40E_PHY_TYPE_10GBASE_LR:
1142 	case I40E_PHY_TYPE_1000BASE_SX:
1143 	case I40E_PHY_TYPE_1000BASE_LX:
1144 	case I40E_PHY_TYPE_40GBASE_SR4:
1145 	case I40E_PHY_TYPE_40GBASE_LR4:
1146 	case I40E_PHY_TYPE_25GBASE_LR:
1147 	case I40E_PHY_TYPE_25GBASE_SR:
1148 		media = I40E_MEDIA_TYPE_FIBER;
1149 		break;
1150 	case I40E_PHY_TYPE_100BASE_TX:
1151 	case I40E_PHY_TYPE_1000BASE_T:
1152 	case I40E_PHY_TYPE_10GBASE_T:
1153 		media = I40E_MEDIA_TYPE_BASET;
1154 		break;
1155 	case I40E_PHY_TYPE_10GBASE_CR1_CU:
1156 	case I40E_PHY_TYPE_40GBASE_CR4_CU:
1157 	case I40E_PHY_TYPE_10GBASE_CR1:
1158 	case I40E_PHY_TYPE_40GBASE_CR4:
1159 	case I40E_PHY_TYPE_10GBASE_SFPP_CU:
1160 	case I40E_PHY_TYPE_40GBASE_AOC:
1161 	case I40E_PHY_TYPE_10GBASE_AOC:
1162 	case I40E_PHY_TYPE_25GBASE_CR:
1163 	case I40E_PHY_TYPE_25GBASE_AOC:
1164 	case I40E_PHY_TYPE_25GBASE_ACC:
1165 		media = I40E_MEDIA_TYPE_DA;
1166 		break;
1167 	case I40E_PHY_TYPE_1000BASE_KX:
1168 	case I40E_PHY_TYPE_10GBASE_KX4:
1169 	case I40E_PHY_TYPE_10GBASE_KR:
1170 	case I40E_PHY_TYPE_40GBASE_KR4:
1171 	case I40E_PHY_TYPE_20GBASE_KR2:
1172 	case I40E_PHY_TYPE_25GBASE_KR:
1173 		media = I40E_MEDIA_TYPE_BACKPLANE;
1174 		break;
1175 	case I40E_PHY_TYPE_SGMII:
1176 	case I40E_PHY_TYPE_XAUI:
1177 	case I40E_PHY_TYPE_XFI:
1178 	case I40E_PHY_TYPE_XLAUI:
1179 	case I40E_PHY_TYPE_XLPPI:
1180 	default:
1181 		media = I40E_MEDIA_TYPE_UNKNOWN;
1182 		break;
1183 	}
1184 
1185 	return media;
1186 }
1187 
1188 /**
1189  * i40e_poll_globr - Poll for Global Reset completion
1190  * @hw: pointer to the hardware structure
1191  * @retry_limit: how many times to retry before failure
1192  **/
1193 static i40e_status i40e_poll_globr(struct i40e_hw *hw,
1194 				   u32 retry_limit)
1195 {
1196 	u32 cnt, reg = 0;
1197 
1198 	for (cnt = 0; cnt < retry_limit; cnt++) {
1199 		reg = rd32(hw, I40E_GLGEN_RSTAT);
1200 		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
1201 			return 0;
1202 		msleep(100);
1203 	}
1204 
1205 	hw_dbg(hw, "Global reset failed.\n");
1206 	hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg);
1207 
1208 	return I40E_ERR_RESET_FAILED;
1209 }
1210 
1211 #define I40E_PF_RESET_WAIT_COUNT_A0	200
1212 #define I40E_PF_RESET_WAIT_COUNT	200
1213 /**
1214  * i40e_pf_reset - Reset the PF
1215  * @hw: pointer to the hardware structure
1216  *
1217  * Assuming someone else has triggered a global reset,
1218  * ensure the global reset is complete and then reset the PF
1219  **/
1220 i40e_status i40e_pf_reset(struct i40e_hw *hw)
1221 {
1222 	u32 cnt = 0;
1223 	u32 cnt1 = 0;
1224 	u32 reg = 0;
1225 	u32 grst_del;
1226 
1227 	/* Poll for Global Reset steady state in case of recent GRST.
1228 	 * The grst delay value is in 100ms units, and we'll wait a
1229 	 * couple of counts longer to be sure we don't just miss the end.
1230 	 */
1231 	grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
1232 		    I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
1233 		    I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
1234 
1235 	/* It can take up to 15 secs for GRST steady state.
1236 	 * Bump it to 16 secs max to be safe.
1237 	 */
1238 	grst_del = grst_del * 20;
1239 
1240 	for (cnt = 0; cnt < grst_del; cnt++) {
1241 		reg = rd32(hw, I40E_GLGEN_RSTAT);
1242 		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
1243 			break;
1244 		msleep(100);
1245 	}
1246 	if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
1247 		hw_dbg(hw, "Global reset polling failed to complete.\n");
1248 		return I40E_ERR_RESET_FAILED;
1249 	}
1250 
1251 	/* Now Wait for the FW to be ready */
1252 	for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
1253 		reg = rd32(hw, I40E_GLNVM_ULD);
1254 		reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1255 			I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
1256 		if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1257 			    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
1258 			hw_dbg(hw, "Core and Global modules ready %d\n", cnt1);
1259 			break;
1260 		}
1261 		usleep_range(10000, 20000);
1262 	}
1263 	if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1264 		     I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
1265 		hw_dbg(hw, "wait for FW Reset complete timedout\n");
1266 		hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg);
1267 		return I40E_ERR_RESET_FAILED;
1268 	}
1269 
1270 	/* If there was a Global Reset in progress when we got here,
1271 	 * we don't need to do the PF Reset
1272 	 */
1273 	if (!cnt) {
1274 		u32 reg2 = 0;
1275 		if (hw->revision_id == 0)
1276 			cnt = I40E_PF_RESET_WAIT_COUNT_A0;
1277 		else
1278 			cnt = I40E_PF_RESET_WAIT_COUNT;
1279 		reg = rd32(hw, I40E_PFGEN_CTRL);
1280 		wr32(hw, I40E_PFGEN_CTRL,
1281 		     (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
1282 		for (; cnt; cnt--) {
1283 			reg = rd32(hw, I40E_PFGEN_CTRL);
1284 			if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
1285 				break;
1286 			reg2 = rd32(hw, I40E_GLGEN_RSTAT);
1287 			if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK)
1288 				break;
1289 			usleep_range(1000, 2000);
1290 		}
1291 		if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
1292 			if (i40e_poll_globr(hw, grst_del))
1293 				return I40E_ERR_RESET_FAILED;
1294 		} else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
1295 			hw_dbg(hw, "PF reset polling failed to complete.\n");
1296 			return I40E_ERR_RESET_FAILED;
1297 		}
1298 	}
1299 
1300 	i40e_clear_pxe_mode(hw);
1301 
1302 	return 0;
1303 }
1304 
1305 /**
1306  * i40e_clear_hw - clear out any left over hw state
1307  * @hw: pointer to the hw struct
1308  *
1309  * Clear queues and interrupts, typically called at init time,
1310  * but after the capabilities have been found so we know how many
1311  * queues and msix vectors have been allocated.
1312  **/
1313 void i40e_clear_hw(struct i40e_hw *hw)
1314 {
1315 	u32 num_queues, base_queue;
1316 	u32 num_pf_int;
1317 	u32 num_vf_int;
1318 	u32 num_vfs;
1319 	u32 i, j;
1320 	u32 val;
1321 	u32 eol = 0x7ff;
1322 
1323 	/* get number of interrupts, queues, and VFs */
1324 	val = rd32(hw, I40E_GLPCI_CNF2);
1325 	num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
1326 		     I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
1327 	num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
1328 		     I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
1329 
1330 	val = rd32(hw, I40E_PFLAN_QALLOC);
1331 	base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
1332 		     I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
1333 	j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
1334 	    I40E_PFLAN_QALLOC_LASTQ_SHIFT;
1335 	if (val & I40E_PFLAN_QALLOC_VALID_MASK)
1336 		num_queues = (j - base_queue) + 1;
1337 	else
1338 		num_queues = 0;
1339 
1340 	val = rd32(hw, I40E_PF_VT_PFALLOC);
1341 	i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
1342 	    I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
1343 	j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
1344 	    I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
1345 	if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
1346 		num_vfs = (j - i) + 1;
1347 	else
1348 		num_vfs = 0;
1349 
1350 	/* stop all the interrupts */
1351 	wr32(hw, I40E_PFINT_ICR0_ENA, 0);
1352 	val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
1353 	for (i = 0; i < num_pf_int - 2; i++)
1354 		wr32(hw, I40E_PFINT_DYN_CTLN(i), val);
1355 
1356 	/* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
1357 	val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
1358 	wr32(hw, I40E_PFINT_LNKLST0, val);
1359 	for (i = 0; i < num_pf_int - 2; i++)
1360 		wr32(hw, I40E_PFINT_LNKLSTN(i), val);
1361 	val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
1362 	for (i = 0; i < num_vfs; i++)
1363 		wr32(hw, I40E_VPINT_LNKLST0(i), val);
1364 	for (i = 0; i < num_vf_int - 2; i++)
1365 		wr32(hw, I40E_VPINT_LNKLSTN(i), val);
1366 
1367 	/* warn the HW of the coming Tx disables */
1368 	for (i = 0; i < num_queues; i++) {
1369 		u32 abs_queue_idx = base_queue + i;
1370 		u32 reg_block = 0;
1371 
1372 		if (abs_queue_idx >= 128) {
1373 			reg_block = abs_queue_idx / 128;
1374 			abs_queue_idx %= 128;
1375 		}
1376 
1377 		val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
1378 		val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
1379 		val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
1380 		val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
1381 
1382 		wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
1383 	}
1384 	udelay(400);
1385 
1386 	/* stop all the queues */
1387 	for (i = 0; i < num_queues; i++) {
1388 		wr32(hw, I40E_QINT_TQCTL(i), 0);
1389 		wr32(hw, I40E_QTX_ENA(i), 0);
1390 		wr32(hw, I40E_QINT_RQCTL(i), 0);
1391 		wr32(hw, I40E_QRX_ENA(i), 0);
1392 	}
1393 
1394 	/* short wait for all queue disables to settle */
1395 	udelay(50);
1396 }
1397 
1398 /**
1399  * i40e_clear_pxe_mode - clear pxe operations mode
1400  * @hw: pointer to the hw struct
1401  *
1402  * Make sure all PXE mode settings are cleared, including things
1403  * like descriptor fetch/write-back mode.
1404  **/
1405 void i40e_clear_pxe_mode(struct i40e_hw *hw)
1406 {
1407 	u32 reg;
1408 
1409 	if (i40e_check_asq_alive(hw))
1410 		i40e_aq_clear_pxe_mode(hw, NULL);
1411 
1412 	/* Clear single descriptor fetch/write-back mode */
1413 	reg = rd32(hw, I40E_GLLAN_RCTL_0);
1414 
1415 	if (hw->revision_id == 0) {
1416 		/* As a workaround, clear PXE_MODE instead of setting it */
1417 		wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK)));
1418 	} else {
1419 		wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
1420 	}
1421 }
1422 
1423 /**
1424  * i40e_led_is_mine - helper to find matching led
1425  * @hw: pointer to the hw struct
1426  * @idx: index into GPIO registers
1427  *
1428  * returns: 0 if no match, otherwise the value of the GPIO_CTL register
1429  **/
1430 static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
1431 {
1432 	u32 gpio_val = 0;
1433 	u32 port;
1434 
1435 	if (!hw->func_caps.led[idx])
1436 		return 0;
1437 
1438 	gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
1439 	port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
1440 		I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
1441 
1442 	/* if PRT_NUM_NA is 1 then this LED is not port specific, or if
1443 	 * the port does not match ours, then ignore it
1444 	 */
1445 	if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
1446 	    (port != hw->port))
1447 		return 0;
1448 
1449 	return gpio_val;
1450 }
1451 
1452 #define I40E_COMBINED_ACTIVITY 0xA
1453 #define I40E_FILTER_ACTIVITY 0xE
1454 #define I40E_LINK_ACTIVITY 0xC
1455 #define I40E_MAC_ACTIVITY 0xD
1456 #define I40E_LED0 22
1457 
1458 /**
1459  * i40e_led_get - return current on/off mode
1460  * @hw: pointer to the hw struct
1461  *
1462  * The value returned is the 'mode' field as defined in the
1463  * GPIO register definitions: 0x0 = off, 0xf = on, and other
1464  * values are variations of possible behaviors relating to
1465  * blink, link, and wire.
1466  **/
1467 u32 i40e_led_get(struct i40e_hw *hw)
1468 {
1469 	u32 mode = 0;
1470 	int i;
1471 
1472 	/* as per the documentation GPIO 22-29 are the LED
1473 	 * GPIO pins named LED0..LED7
1474 	 */
1475 	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
1476 		u32 gpio_val = i40e_led_is_mine(hw, i);
1477 
1478 		if (!gpio_val)
1479 			continue;
1480 
1481 		mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
1482 			I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
1483 		break;
1484 	}
1485 
1486 	return mode;
1487 }
1488 
1489 /**
1490  * i40e_led_set - set new on/off mode
1491  * @hw: pointer to the hw struct
1492  * @mode: 0=off, 0xf=on (else see manual for mode details)
1493  * @blink: true if the LED should blink when on, false if steady
1494  *
1495  * If this function is used to turn on the blink, it should also be
1496  * used to disable the blink when restoring the original state.
1497  **/
1498 void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
1499 {
1500 	int i;
1501 
1502 	if (mode & 0xfffffff0)
1503 		hw_dbg(hw, "invalid mode passed in %X\n", mode);
1504 
1505 	/* as per the documentation GPIO 22-29 are the LED
1506 	 * GPIO pins named LED0..LED7
1507 	 */
1508 	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
1509 		u32 gpio_val = i40e_led_is_mine(hw, i);
1510 
1511 		if (!gpio_val)
1512 			continue;
1513 		gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
1514 		/* this & is a bit of paranoia, but serves as a range check */
1515 		gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
1516 			     I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
1517 
1518 		if (blink)
1519 			gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
1520 		else
1521 			gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
1522 
1523 		wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
1524 		break;
1525 	}
1526 }
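
/* Illustrative sketch (comment only, not compiled): the get/set pair above
 * is typically used to blink a port LED for identification and then restore
 * its original state, e.g. from an ethtool set_phys_id handler:
 *
 *	orig_mode = i40e_led_get(hw);		// remember the current mode
 *	i40e_led_set(hw, 0xf, true);		// LED on, blinking
 *	...
 *	i40e_led_set(hw, orig_mode, false);	// restore mode, blink off
 */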
1527 
1528 /* Admin command wrappers */
1529 
1530 /**
1531  * i40e_aq_get_phy_capabilities
1532  * @hw: pointer to the hw struct
1533  * @abilities: structure for PHY capabilities to be filled
1534  * @qualified_modules: report Qualified Modules
1535  * @report_init: report init capabilities (active are default)
1536  * @cmd_details: pointer to command details structure or NULL
1537  *
1538  * Returns the various PHY abilities supported on the Port.
1539  **/
1540 i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
1541 			bool qualified_modules, bool report_init,
1542 			struct i40e_aq_get_phy_abilities_resp *abilities,
1543 			struct i40e_asq_cmd_details *cmd_details)
1544 {
1545 	struct i40e_aq_desc desc;
1546 	i40e_status status;
1547 	u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
1548 	u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
1549 
1550 	if (!abilities)
1551 		return I40E_ERR_PARAM;
1552 
1553 	do {
1554 		i40e_fill_default_direct_cmd_desc(&desc,
1555 					       i40e_aqc_opc_get_phy_abilities);
1556 
1557 		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
1558 		if (abilities_size > I40E_AQ_LARGE_BUF)
1559 			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
1560 
1561 		if (qualified_modules)
1562 			desc.params.external.param0 |=
1563 			cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);
1564 
1565 		if (report_init)
1566 			desc.params.external.param0 |=
1567 			cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);
1568 
1569 		status = i40e_asq_send_command(hw, &desc, abilities,
1570 					       abilities_size, cmd_details);
1571 
1572 		if (status)
1573 			break;
1574 
1575 		if (hw->aq.asq_last_status == I40E_AQ_RC_EIO) {
1576 			status = I40E_ERR_UNKNOWN_PHY;
1577 			break;
1578 		} else if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) {
1579 			usleep_range(1000, 2000);
1580 			total_delay++;
1581 			status = I40E_ERR_TIMEOUT;
1582 		}
1583 	} while ((hw->aq.asq_last_status != I40E_AQ_RC_OK) &&
1584 		 (total_delay < max_delay));
1585 
1586 	if (status)
1587 		return status;
1588 
1589 	if (report_init) {
1590 		if (hw->mac.type == I40E_MAC_XL710 &&
1591 		    hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
1592 		    hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
1593 			status = i40e_aq_get_link_info(hw, true, NULL, NULL);
1594 		} else {
1595 			hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
1596 			hw->phy.phy_types |=
1597 					((u64)abilities->phy_type_ext << 32);
1598 		}
1599 	}
1600 
1601 	return status;
1602 }
1603 
1604 /**
1605  * i40e_aq_set_phy_config
1606  * @hw: pointer to the hw struct
1607  * @config: structure with PHY configuration to be set
1608  * @cmd_details: pointer to command details structure or NULL
1609  *
1610  * Set the various PHY configuration parameters
1611  * supported on the Port. One or more of the Set PHY config parameters may be
1612  * ignored in an MFP mode as the PF may not have the privilege to set some
1613  * of the PHY Config parameters. This status will be indicated by the
1614  * command response.
1615  **/
1616 enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
1617 				struct i40e_aq_set_phy_config *config,
1618 				struct i40e_asq_cmd_details *cmd_details)
1619 {
1620 	struct i40e_aq_desc desc;
1621 	struct i40e_aq_set_phy_config *cmd =
1622 			(struct i40e_aq_set_phy_config *)&desc.params.raw;
1623 	enum i40e_status_code status;
1624 
1625 	if (!config)
1626 		return I40E_ERR_PARAM;
1627 
1628 	i40e_fill_default_direct_cmd_desc(&desc,
1629 					  i40e_aqc_opc_set_phy_config);
1630 
1631 	*cmd = *config;
1632 
1633 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1634 
1635 	return status;
1636 }
1637 
1638 /**
1639  * i40e_set_fc
1640  * @hw: pointer to the hw struct
1641  * @aq_failures: buffer to return AdminQ failure information
1642  * @atomic_restart: whether to enable atomic link restart
1643  *
1644  * Set the requested flow control mode using set_phy_config.
1645  **/
1646 enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
1647 				  bool atomic_restart)
1648 {
1649 	enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
1650 	struct i40e_aq_get_phy_abilities_resp abilities;
1651 	struct i40e_aq_set_phy_config config;
1652 	enum i40e_status_code status;
1653 	u8 pause_mask = 0x0;
1654 
1655 	*aq_failures = 0x0;
1656 
1657 	switch (fc_mode) {
1658 	case I40E_FC_FULL:
1659 		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
1660 		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
1661 		break;
1662 	case I40E_FC_RX_PAUSE:
1663 		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
1664 		break;
1665 	case I40E_FC_TX_PAUSE:
1666 		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
1667 		break;
1668 	default:
1669 		break;
1670 	}
1671 
1672 	/* Get the current phy config */
1673 	status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
1674 					      NULL);
1675 	if (status) {
1676 		*aq_failures |= I40E_SET_FC_AQ_FAIL_GET;
1677 		return status;
1678 	}
1679 
1680 	memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
1681 	/* clear the old pause settings */
1682 	config.abilities = abilities.abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
1683 			   ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
1684 	/* set the new abilities */
1685 	config.abilities |= pause_mask;
1686 	/* If the abilities have changed, then set the new config */
1687 	if (config.abilities != abilities.abilities) {
1688 		/* Auto restart link so settings take effect */
1689 		if (atomic_restart)
1690 			config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
1691 		/* Copy over all the old settings */
1692 		config.phy_type = abilities.phy_type;
1693 		config.phy_type_ext = abilities.phy_type_ext;
1694 		config.link_speed = abilities.link_speed;
1695 		config.eee_capability = abilities.eee_capability;
1696 		config.eeer = abilities.eeer_val;
1697 		config.low_power_ctrl = abilities.d3_lpan;
1698 		config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
1699 				    I40E_AQ_PHY_FEC_CONFIG_MASK;
1700 		status = i40e_aq_set_phy_config(hw, &config, NULL);
1701 
1702 		if (status)
1703 			*aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
1704 	}
1705 	/* Update the link info */
1706 	status = i40e_update_link_info(hw);
1707 	if (status) {
1708 		/* Wait a little bit (on 40G cards it sometimes takes a really
1709 		 * long time for link to come back from the atomic reset)
1710 		 * and try once more
1711 		 */
1712 		msleep(1000);
1713 		status = i40e_update_link_info(hw);
1714 	}
1715 	if (status)
1716 		*aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;
1717 
1718 	return status;
1719 }
1720 
1721 /**
1722  * i40e_aq_clear_pxe_mode
1723  * @hw: pointer to the hw struct
1724  * @cmd_details: pointer to command details structure or NULL
1725  *
1726  * Tell the firmware that the driver is taking over from PXE
1727  **/
1728 i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
1729 				struct i40e_asq_cmd_details *cmd_details)
1730 {
1731 	i40e_status status;
1732 	struct i40e_aq_desc desc;
1733 	struct i40e_aqc_clear_pxe *cmd =
1734 		(struct i40e_aqc_clear_pxe *)&desc.params.raw;
1735 
1736 	i40e_fill_default_direct_cmd_desc(&desc,
1737 					  i40e_aqc_opc_clear_pxe_mode);
1738 
1739 	cmd->rx_cnt = 0x2;
1740 
1741 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1742 
1743 	wr32(hw, I40E_GLLAN_RCTL_0, 0x1);
1744 
1745 	return status;
1746 }
1747 
1748 /**
1749  * i40e_aq_set_link_restart_an
1750  * @hw: pointer to the hw struct
1751  * @enable_link: if true: enable link, if false: disable link
1752  * @cmd_details: pointer to command details structure or NULL
1753  *
1754  * Sets up the link and restarts the Auto-Negotiation over the link.
1755  **/
1756 i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
1757 					bool enable_link,
1758 					struct i40e_asq_cmd_details *cmd_details)
1759 {
1760 	struct i40e_aq_desc desc;
1761 	struct i40e_aqc_set_link_restart_an *cmd =
1762 		(struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
1763 	i40e_status status;
1764 
1765 	i40e_fill_default_direct_cmd_desc(&desc,
1766 					  i40e_aqc_opc_set_link_restart_an);
1767 
1768 	cmd->command = I40E_AQ_PHY_RESTART_AN;
1769 	if (enable_link)
1770 		cmd->command |= I40E_AQ_PHY_LINK_ENABLE;
1771 	else
1772 		cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE;
1773 
1774 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1775 
1776 	return status;
1777 }
1778 
1779 /**
1780  * i40e_aq_get_link_info
1781  * @hw: pointer to the hw struct
1782  * @enable_lse: enable/disable LinkStatusEvent reporting
1783  * @link: pointer to link status structure - optional
1784  * @cmd_details: pointer to command details structure or NULL
1785  *
1786  * Returns the link status of the adapter.
1787  **/
1788 i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
1789 				bool enable_lse, struct i40e_link_status *link,
1790 				struct i40e_asq_cmd_details *cmd_details)
1791 {
1792 	struct i40e_aq_desc desc;
1793 	struct i40e_aqc_get_link_status *resp =
1794 		(struct i40e_aqc_get_link_status *)&desc.params.raw;
1795 	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
1796 	i40e_status status;
1797 	bool tx_pause, rx_pause;
1798 	u16 command_flags;
1799 
1800 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
1801 
1802 	if (enable_lse)
1803 		command_flags = I40E_AQ_LSE_ENABLE;
1804 	else
1805 		command_flags = I40E_AQ_LSE_DISABLE;
1806 	resp->command_flags = cpu_to_le16(command_flags);
1807 
1808 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1809 
1810 	if (status)
1811 		goto aq_get_link_info_exit;
1812 
1813 	/* save off old link status information */
1814 	hw->phy.link_info_old = *hw_link_info;
1815 
1816 	/* update link status */
1817 	hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
1818 	hw->phy.media_type = i40e_get_media_type(hw);
1819 	hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
1820 	hw_link_info->link_info = resp->link_info;
1821 	hw_link_info->an_info = resp->an_info;
1822 	hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA |
1823 						 I40E_AQ_CONFIG_FEC_RS_ENA);
1824 	hw_link_info->ext_info = resp->ext_info;
1825 	hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK;
1826 	hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size);
1827 	hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
1828 
1829 	/* update fc info */
1830 	tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX);
1831 	rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
1833 		hw->fc.current_mode = I40E_FC_FULL;
1834 	else if (tx_pause)
1835 		hw->fc.current_mode = I40E_FC_TX_PAUSE;
1836 	else if (rx_pause)
1837 		hw->fc.current_mode = I40E_FC_RX_PAUSE;
1838 	else
1839 		hw->fc.current_mode = I40E_FC_NONE;
1840 
1841 	if (resp->config & I40E_AQ_CONFIG_CRC_ENA)
1842 		hw_link_info->crc_enable = true;
1843 	else
1844 		hw_link_info->crc_enable = false;
1845 
1846 	if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_IS_ENABLED))
1847 		hw_link_info->lse_enable = true;
1848 	else
1849 		hw_link_info->lse_enable = false;
1850 
1851 	if ((hw->mac.type == I40E_MAC_XL710) &&
1852 	    (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
1853 	     hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
1854 		hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
1855 
1856 	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
1857 	    hw->aq.api_min_ver >= 7) {
1858 		__le32 tmp;
1859 
1860 		memcpy(&tmp, resp->link_type, sizeof(tmp));
1861 		hw->phy.phy_types = le32_to_cpu(tmp);
1862 		hw->phy.phy_types |= ((u64)resp->link_type_ext << 32);
1863 	}
1864 
1865 	/* save link status information */
1866 	if (link)
1867 		*link = *hw_link_info;
1868 
1869 	/* flag cleared so helper functions don't call AQ again */
1870 	hw->phy.get_link_info = false;
1871 
1872 aq_get_link_info_exit:
1873 	return status;
1874 }
1875 
1876 /**
1877  * i40e_aq_set_phy_int_mask
1878  * @hw: pointer to the hw struct
1879  * @mask: interrupt mask to be set
1880  * @cmd_details: pointer to command details structure or NULL
1881  *
1882  * Set link interrupt mask.
1883  **/
1884 i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
1885 				     u16 mask,
1886 				     struct i40e_asq_cmd_details *cmd_details)
1887 {
1888 	struct i40e_aq_desc desc;
1889 	struct i40e_aqc_set_phy_int_mask *cmd =
1890 		(struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
1891 	i40e_status status;
1892 
1893 	i40e_fill_default_direct_cmd_desc(&desc,
1894 					  i40e_aqc_opc_set_phy_int_mask);
1895 
1896 	cmd->event_mask = cpu_to_le16(mask);
1897 
1898 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1899 
1900 	return status;
1901 }
1902 
1903 /**
1904  * i40e_aq_set_phy_debug
1905  * @hw: pointer to the hw struct
1906  * @cmd_flags: debug command flags
1907  * @cmd_details: pointer to command details structure or NULL
1908  *
1909  * Reset the external PHY.
1910  **/
1911 i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
1912 				  struct i40e_asq_cmd_details *cmd_details)
1913 {
1914 	struct i40e_aq_desc desc;
1915 	struct i40e_aqc_set_phy_debug *cmd =
1916 		(struct i40e_aqc_set_phy_debug *)&desc.params.raw;
1917 	i40e_status status;
1918 
1919 	i40e_fill_default_direct_cmd_desc(&desc,
1920 					  i40e_aqc_opc_set_phy_debug);
1921 
1922 	cmd->command_flags = cmd_flags;
1923 
1924 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1925 
1926 	return status;
1927 }
1928 
1929 /**
1930  * i40e_aq_add_vsi
1931  * @hw: pointer to the hw struct
1932  * @vsi_ctx: pointer to a vsi context struct
1933  * @cmd_details: pointer to command details structure or NULL
1934  *
1935  * Add a VSI context to the hardware.
1936 **/
1937 i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
1938 				struct i40e_vsi_context *vsi_ctx,
1939 				struct i40e_asq_cmd_details *cmd_details)
1940 {
1941 	struct i40e_aq_desc desc;
1942 	struct i40e_aqc_add_get_update_vsi *cmd =
1943 		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
1944 	struct i40e_aqc_add_get_update_vsi_completion *resp =
1945 		(struct i40e_aqc_add_get_update_vsi_completion *)
1946 		&desc.params.raw;
1947 	i40e_status status;
1948 
1949 	i40e_fill_default_direct_cmd_desc(&desc,
1950 					  i40e_aqc_opc_add_vsi);
1951 
1952 	cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid);
1953 	cmd->connection_type = vsi_ctx->connection_type;
1954 	cmd->vf_id = vsi_ctx->vf_num;
1955 	cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
1956 
1957 	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
1958 
1959 	status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
1960 				    sizeof(vsi_ctx->info), cmd_details);
1961 
1962 	if (status)
1963 		goto aq_add_vsi_exit;
1964 
1965 	vsi_ctx->seid = le16_to_cpu(resp->seid);
1966 	vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
1967 	vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
1968 	vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
1969 
1970 aq_add_vsi_exit:
1971 	return status;
1972 }
1973 
1974 /**
1975  * i40e_aq_set_default_vsi
1976  * @hw: pointer to the hw struct
1977  * @seid: vsi number
1978  * @cmd_details: pointer to command details structure or NULL
1979  **/
1980 i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw,
1981 				    u16 seid,
1982 				    struct i40e_asq_cmd_details *cmd_details)
1983 {
1984 	struct i40e_aq_desc desc;
1985 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1986 		(struct i40e_aqc_set_vsi_promiscuous_modes *)
1987 		&desc.params.raw;
1988 	i40e_status status;
1989 
1990 	i40e_fill_default_direct_cmd_desc(&desc,
1991 					  i40e_aqc_opc_set_vsi_promiscuous_modes);
1992 
1993 	cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
1994 	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
1995 	cmd->seid = cpu_to_le16(seid);
1996 
1997 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1998 
1999 	return status;
2000 }
2001 
2002 /**
2003  * i40e_aq_clear_default_vsi
2004  * @hw: pointer to the hw struct
2005  * @seid: vsi number
2006  * @cmd_details: pointer to command details structure or NULL
2007  **/
2008 i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw,
2009 				      u16 seid,
2010 				      struct i40e_asq_cmd_details *cmd_details)
2011 {
2012 	struct i40e_aq_desc desc;
2013 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2014 		(struct i40e_aqc_set_vsi_promiscuous_modes *)
2015 		&desc.params.raw;
2016 	i40e_status status;
2017 
2018 	i40e_fill_default_direct_cmd_desc(&desc,
2019 					  i40e_aqc_opc_set_vsi_promiscuous_modes);
2020 
2021 	cmd->promiscuous_flags = cpu_to_le16(0);
2022 	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
2023 	cmd->seid = cpu_to_le16(seid);
2024 
2025 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2026 
2027 	return status;
2028 }
2029 
2030 /**
2031  * i40e_aq_set_vsi_unicast_promiscuous
2032  * @hw: pointer to the hw struct
2033  * @seid: vsi number
2034  * @set: set unicast promiscuous enable/disable
2035  * @cmd_details: pointer to command details structure or NULL
2036  * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc
2037  **/
2038 i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
2039 				u16 seid, bool set,
2040 				struct i40e_asq_cmd_details *cmd_details,
2041 				bool rx_only_promisc)
2042 {
2043 	struct i40e_aq_desc desc;
2044 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2045 		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2046 	i40e_status status;
2047 	u16 flags = 0;
2048 
2049 	i40e_fill_default_direct_cmd_desc(&desc,
2050 					i40e_aqc_opc_set_vsi_promiscuous_modes);
2051 
2052 	if (set) {
2053 		flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
2054 		if (rx_only_promisc &&
2055 		    (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
2056 		     (hw->aq.api_maj_ver > 1)))
2057 			flags |= I40E_AQC_SET_VSI_PROMISC_TX;
2058 	}
2059 
2060 	cmd->promiscuous_flags = cpu_to_le16(flags);
2061 
2062 	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
	if (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
2064 	    (hw->aq.api_maj_ver > 1))
2065 		cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_TX);
2066 
2067 	cmd->seid = cpu_to_le16(seid);
2068 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2069 
2070 	return status;
2071 }
2072 
2073 /**
2074  * i40e_aq_set_vsi_multicast_promiscuous
2075  * @hw: pointer to the hw struct
2076  * @seid: vsi number
2077  * @set: set multicast promiscuous enable/disable
2078  * @cmd_details: pointer to command details structure or NULL
2079  **/
2080 i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
				u16 seid, bool set,
				struct i40e_asq_cmd_details *cmd_details)
2082 {
2083 	struct i40e_aq_desc desc;
2084 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2085 		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2086 	i40e_status status;
2087 	u16 flags = 0;
2088 
2089 	i40e_fill_default_direct_cmd_desc(&desc,
2090 					i40e_aqc_opc_set_vsi_promiscuous_modes);
2091 
2092 	if (set)
2093 		flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
2094 
2095 	cmd->promiscuous_flags = cpu_to_le16(flags);
2096 
2097 	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
2098 
2099 	cmd->seid = cpu_to_le16(seid);
2100 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2101 
2102 	return status;
2103 }
2104 
2105 /**
2106  * i40e_aq_set_vsi_mc_promisc_on_vlan
2107  * @hw: pointer to the hw struct
2108  * @seid: vsi number
 * @enable: set MAC L2 layer multicast promiscuous enable/disable for a given VLAN
2110  * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
2111  * @cmd_details: pointer to command details structure or NULL
2112  **/
2113 enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
2114 							 u16 seid, bool enable,
2115 							 u16 vid,
2116 				struct i40e_asq_cmd_details *cmd_details)
2117 {
2118 	struct i40e_aq_desc desc;
2119 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2120 		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2121 	enum i40e_status_code status;
2122 	u16 flags = 0;
2123 
2124 	i40e_fill_default_direct_cmd_desc(&desc,
2125 					  i40e_aqc_opc_set_vsi_promiscuous_modes);
2126 
2127 	if (enable)
2128 		flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
2129 
2130 	cmd->promiscuous_flags = cpu_to_le16(flags);
2131 	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
2132 	cmd->seid = cpu_to_le16(seid);
2133 	cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
2134 
2135 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2136 
2137 	return status;
2138 }
2139 
2140 /**
2141  * i40e_aq_set_vsi_uc_promisc_on_vlan
2142  * @hw: pointer to the hw struct
2143  * @seid: vsi number
2144  * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
2145  * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
2146  * @cmd_details: pointer to command details structure or NULL
2147  **/
2148 enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
2149 							 u16 seid, bool enable,
2150 							 u16 vid,
2151 				struct i40e_asq_cmd_details *cmd_details)
2152 {
2153 	struct i40e_aq_desc desc;
2154 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2155 		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2156 	enum i40e_status_code status;
2157 	u16 flags = 0;
2158 
2159 	i40e_fill_default_direct_cmd_desc(&desc,
2160 					  i40e_aqc_opc_set_vsi_promiscuous_modes);
2161 
2162 	if (enable)
2163 		flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
2164 
2165 	cmd->promiscuous_flags = cpu_to_le16(flags);
2166 	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
2167 	cmd->seid = cpu_to_le16(seid);
2168 	cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
2169 
2170 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2171 
2172 	return status;
2173 }
2174 
2175 /**
2176  * i40e_aq_set_vsi_bc_promisc_on_vlan
2177  * @hw: pointer to the hw struct
2178  * @seid: vsi number
2179  * @enable: set broadcast promiscuous enable/disable for a given VLAN
2180  * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag
2181  * @cmd_details: pointer to command details structure or NULL
2182  **/
2183 i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
2184 				u16 seid, bool enable, u16 vid,
2185 				struct i40e_asq_cmd_details *cmd_details)
2186 {
2187 	struct i40e_aq_desc desc;
2188 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2189 		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2190 	i40e_status status;
2191 	u16 flags = 0;
2192 
2193 	i40e_fill_default_direct_cmd_desc(&desc,
2194 					i40e_aqc_opc_set_vsi_promiscuous_modes);
2195 
2196 	if (enable)
2197 		flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST;
2198 
2199 	cmd->promiscuous_flags = cpu_to_le16(flags);
2200 	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2201 	cmd->seid = cpu_to_le16(seid);
2202 	cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
2203 
2204 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2205 
2206 	return status;
2207 }
2208 
2209 /**
2210  * i40e_aq_set_vsi_broadcast
2211  * @hw: pointer to the hw struct
2212  * @seid: vsi number
2213  * @set_filter: true to set filter, false to clear filter
2214  * @cmd_details: pointer to command details structure or NULL
2215  *
2216  * Set or clear the broadcast promiscuous flag (filter) for a given VSI.
2217  **/
2218 i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
2219 				u16 seid, bool set_filter,
2220 				struct i40e_asq_cmd_details *cmd_details)
2221 {
2222 	struct i40e_aq_desc desc;
2223 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2224 		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2225 	i40e_status status;
2226 
2227 	i40e_fill_default_direct_cmd_desc(&desc,
2228 					i40e_aqc_opc_set_vsi_promiscuous_modes);
2229 
2230 	if (set_filter)
2231 		cmd->promiscuous_flags
2232 			    |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2233 	else
2234 		cmd->promiscuous_flags
2235 			    &= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2236 
2237 	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2238 	cmd->seid = cpu_to_le16(seid);
2239 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2240 
2241 	return status;
2242 }
2243 
2244 /**
2245  * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting
2246  * @hw: pointer to the hw struct
2247  * @seid: vsi number
 * @enable: set VLAN promiscuous enable/disable
2249  * @cmd_details: pointer to command details structure or NULL
2250  **/
2251 i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
2252 				       u16 seid, bool enable,
2253 				       struct i40e_asq_cmd_details *cmd_details)
2254 {
2255 	struct i40e_aq_desc desc;
2256 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2257 		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2258 	i40e_status status;
2259 	u16 flags = 0;
2260 
2261 	i40e_fill_default_direct_cmd_desc(&desc,
2262 					i40e_aqc_opc_set_vsi_promiscuous_modes);
2263 	if (enable)
2264 		flags |= I40E_AQC_SET_VSI_PROMISC_VLAN;
2265 
2266 	cmd->promiscuous_flags = cpu_to_le16(flags);
2267 	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN);
2268 	cmd->seid = cpu_to_le16(seid);
2269 
2270 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2271 
2272 	return status;
2273 }
2274 
2275 /**
 * i40e_aq_get_vsi_params - get VSI configuration info
2277  * @hw: pointer to the hw struct
2278  * @vsi_ctx: pointer to a vsi context struct
2279  * @cmd_details: pointer to command details structure or NULL
2280  **/
2281 i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
2282 				struct i40e_vsi_context *vsi_ctx,
2283 				struct i40e_asq_cmd_details *cmd_details)
2284 {
2285 	struct i40e_aq_desc desc;
2286 	struct i40e_aqc_add_get_update_vsi *cmd =
2287 		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
2288 	struct i40e_aqc_add_get_update_vsi_completion *resp =
2289 		(struct i40e_aqc_add_get_update_vsi_completion *)
2290 		&desc.params.raw;
2291 	i40e_status status;
2292 
2293 	i40e_fill_default_direct_cmd_desc(&desc,
2294 					  i40e_aqc_opc_get_vsi_parameters);
2295 
2296 	cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
2297 
2298 	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
2299 
2300 	status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
2301 				    sizeof(vsi_ctx->info), NULL);
2302 
2303 	if (status)
2304 		goto aq_get_vsi_params_exit;
2305 
2306 	vsi_ctx->seid = le16_to_cpu(resp->seid);
2307 	vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
2308 	vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
2309 	vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
2310 
2311 aq_get_vsi_params_exit:
2312 	return status;
2313 }
2314 
2315 /**
2316  * i40e_aq_update_vsi_params
2317  * @hw: pointer to the hw struct
2318  * @vsi_ctx: pointer to a vsi context struct
2319  * @cmd_details: pointer to command details structure or NULL
2320  *
2321  * Update a VSI context.
2322  **/
2323 i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
2324 				struct i40e_vsi_context *vsi_ctx,
2325 				struct i40e_asq_cmd_details *cmd_details)
2326 {
2327 	struct i40e_aq_desc desc;
2328 	struct i40e_aqc_add_get_update_vsi *cmd =
2329 		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
2330 	struct i40e_aqc_add_get_update_vsi_completion *resp =
2331 		(struct i40e_aqc_add_get_update_vsi_completion *)
2332 		&desc.params.raw;
2333 	i40e_status status;
2334 
2335 	i40e_fill_default_direct_cmd_desc(&desc,
2336 					  i40e_aqc_opc_update_vsi_parameters);
2337 	cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
2338 
2339 	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2340 
2341 	status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
2342 				    sizeof(vsi_ctx->info), cmd_details);
2343 
2344 	vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
2345 	vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
2346 
2347 	return status;
2348 }
2349 
2350 /**
2351  * i40e_aq_get_switch_config
2352  * @hw: pointer to the hardware structure
2353  * @buf: pointer to the result buffer
 * @buf_size: length of @buf in bytes
2355  * @start_seid: seid to start for the report, 0 == beginning
2356  * @cmd_details: pointer to command details structure or NULL
2357  *
2358  * Fill the buf with switch configuration returned from AdminQ command
2359  **/
2360 i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
2361 				struct i40e_aqc_get_switch_config_resp *buf,
2362 				u16 buf_size, u16 *start_seid,
2363 				struct i40e_asq_cmd_details *cmd_details)
2364 {
2365 	struct i40e_aq_desc desc;
2366 	struct i40e_aqc_switch_seid *scfg =
2367 		(struct i40e_aqc_switch_seid *)&desc.params.raw;
2368 	i40e_status status;
2369 
2370 	i40e_fill_default_direct_cmd_desc(&desc,
2371 					  i40e_aqc_opc_get_switch_config);
2372 	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
2373 	if (buf_size > I40E_AQ_LARGE_BUF)
2374 		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2375 	scfg->seid = cpu_to_le16(*start_seid);
2376 
2377 	status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
2378 	*start_seid = le16_to_cpu(scfg->seid);
2379 
2380 	return status;
2381 }
2382 
2383 /**
2384  * i40e_aq_set_switch_config
2385  * @hw: pointer to the hardware structure
 * @flags: bit flag values to set
 * @valid_flags: which bit flags to set
 * @mode: cloud filter mode
2390  * @cmd_details: pointer to command details structure or NULL
2391  *
2392  * Set switch configuration bits
2393  **/
2394 enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
2395 						u16 flags,
2396 						u16 valid_flags, u8 mode,
2397 				struct i40e_asq_cmd_details *cmd_details)
2398 {
2399 	struct i40e_aq_desc desc;
2400 	struct i40e_aqc_set_switch_config *scfg =
2401 		(struct i40e_aqc_set_switch_config *)&desc.params.raw;
2402 	enum i40e_status_code status;
2403 
2404 	i40e_fill_default_direct_cmd_desc(&desc,
2405 					  i40e_aqc_opc_set_switch_config);
2406 	scfg->flags = cpu_to_le16(flags);
2407 	scfg->valid_flags = cpu_to_le16(valid_flags);
2408 	scfg->mode = mode;
2409 	if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
2410 		scfg->switch_tag = cpu_to_le16(hw->switch_tag);
2411 		scfg->first_tag = cpu_to_le16(hw->first_tag);
2412 		scfg->second_tag = cpu_to_le16(hw->second_tag);
2413 	}
2414 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2415 
2416 	return status;
2417 }
2418 
2419 /**
2420  * i40e_aq_get_firmware_version
2421  * @hw: pointer to the hw struct
2422  * @fw_major_version: firmware major version
2423  * @fw_minor_version: firmware minor version
2424  * @fw_build: firmware build number
 * @api_major_version: admin queue API major version
 * @api_minor_version: admin queue API minor version
2427  * @cmd_details: pointer to command details structure or NULL
2428  *
2429  * Get the firmware version from the admin queue commands
2430  **/
2431 i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
2432 				u16 *fw_major_version, u16 *fw_minor_version,
2433 				u32 *fw_build,
2434 				u16 *api_major_version, u16 *api_minor_version,
2435 				struct i40e_asq_cmd_details *cmd_details)
2436 {
2437 	struct i40e_aq_desc desc;
2438 	struct i40e_aqc_get_version *resp =
2439 		(struct i40e_aqc_get_version *)&desc.params.raw;
2440 	i40e_status status;
2441 
2442 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
2443 
2444 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2445 
2446 	if (!status) {
2447 		if (fw_major_version)
2448 			*fw_major_version = le16_to_cpu(resp->fw_major);
2449 		if (fw_minor_version)
2450 			*fw_minor_version = le16_to_cpu(resp->fw_minor);
2451 		if (fw_build)
2452 			*fw_build = le32_to_cpu(resp->fw_build);
2453 		if (api_major_version)
2454 			*api_major_version = le16_to_cpu(resp->api_major);
2455 		if (api_minor_version)
2456 			*api_minor_version = le16_to_cpu(resp->api_minor);
2457 	}
2458 
2459 	return status;
2460 }
2461 
2462 /**
2463  * i40e_aq_send_driver_version
2464  * @hw: pointer to the hw struct
2465  * @dv: driver's major, minor version
2466  * @cmd_details: pointer to command details structure or NULL
2467  *
2468  * Send the driver version to the firmware
2469  **/
2470 i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
2471 				struct i40e_driver_version *dv,
2472 				struct i40e_asq_cmd_details *cmd_details)
2473 {
2474 	struct i40e_aq_desc desc;
2475 	struct i40e_aqc_driver_version *cmd =
2476 		(struct i40e_aqc_driver_version *)&desc.params.raw;
2477 	i40e_status status;
2478 	u16 len;
2479 
2480 	if (dv == NULL)
2481 		return I40E_ERR_PARAM;
2482 
2483 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
2484 
2485 	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
2486 	cmd->driver_major_ver = dv->major_version;
2487 	cmd->driver_minor_ver = dv->minor_version;
2488 	cmd->driver_build_ver = dv->build_version;
2489 	cmd->driver_subbuild_ver = dv->subbuild_version;
2490 
2491 	len = 0;
2492 	while (len < sizeof(dv->driver_string) &&
2493 	       (dv->driver_string[len] < 0x80) &&
2494 	       dv->driver_string[len])
2495 		len++;
2496 	status = i40e_asq_send_command(hw, &desc, dv->driver_string,
2497 				       len, cmd_details);
2498 
2499 	return status;
2500 }
2501 
2502 /**
2503  * i40e_get_link_status - get status of the HW network link
2504  * @hw: pointer to the hw struct
2505  * @link_up: pointer to bool (true/false = linkup/linkdown)
2506  *
 * Sets *link_up to true if the link is up and false if it is down.
 * The value of *link_up is not valid if the returned status is non-zero.
2509  *
2510  * Side effect: LinkStatusEvent reporting becomes enabled
2511  **/
2512 i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
2513 {
2514 	i40e_status status = 0;
2515 
2516 	if (hw->phy.get_link_info) {
2517 		status = i40e_update_link_info(hw);
2518 
2519 		if (status)
2520 			i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n",
2521 				   status);
2522 	}
2523 
2524 	*link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
2525 
2526 	return status;
2527 }
2528 
2529 /**
 * i40e_update_link_info - update status of the HW network link
2531  * @hw: pointer to the hw struct
2532  **/
2533 i40e_status i40e_update_link_info(struct i40e_hw *hw)
2534 {
2535 	struct i40e_aq_get_phy_abilities_resp abilities;
2536 	i40e_status status = 0;
2537 
2538 	status = i40e_aq_get_link_info(hw, true, NULL, NULL);
2539 	if (status)
2540 		return status;
2541 
2542 	/* extra checking needed to ensure link info to user is timely */
2543 	if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) &&
2544 	    ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) ||
2545 	     !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) {
2546 		status = i40e_aq_get_phy_capabilities(hw, false, false,
2547 						      &abilities, NULL);
2548 		if (status)
2549 			return status;
2550 
2551 		hw->phy.link_info.req_fec_info =
2552 			abilities.fec_cfg_curr_mod_ext_info &
2553 			(I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS);
2554 
2555 		memcpy(hw->phy.link_info.module_type, &abilities.module_type,
2556 		       sizeof(hw->phy.link_info.module_type));
2557 	}
2558 
2559 	return status;
2560 }
2561 
2562 /**
2563  * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC
2564  * @hw: pointer to the hw struct
2565  * @uplink_seid: the MAC or other gizmo SEID
2566  * @downlink_seid: the VSI SEID
2567  * @enabled_tc: bitmap of TCs to be enabled
2568  * @default_port: true for default port VSI, false for control port
2569  * @veb_seid: pointer to where to put the resulting VEB SEID
2570  * @enable_stats: true to turn on VEB stats
2571  * @cmd_details: pointer to command details structure or NULL
2572  *
2573  * This asks the FW to add a VEB between the uplink and downlink
2574  * elements.  If the uplink SEID is 0, this will be a floating VEB.
2575  **/
2576 i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
2577 				u16 downlink_seid, u8 enabled_tc,
2578 				bool default_port, u16 *veb_seid,
2579 				bool enable_stats,
2580 				struct i40e_asq_cmd_details *cmd_details)
2581 {
2582 	struct i40e_aq_desc desc;
2583 	struct i40e_aqc_add_veb *cmd =
2584 		(struct i40e_aqc_add_veb *)&desc.params.raw;
2585 	struct i40e_aqc_add_veb_completion *resp =
2586 		(struct i40e_aqc_add_veb_completion *)&desc.params.raw;
2587 	i40e_status status;
2588 	u16 veb_flags = 0;
2589 
2590 	/* SEIDs need to either both be set or both be 0 for floating VEB */
2591 	if (!!uplink_seid != !!downlink_seid)
2592 		return I40E_ERR_PARAM;
2593 
2594 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb);
2595 
2596 	cmd->uplink_seid = cpu_to_le16(uplink_seid);
2597 	cmd->downlink_seid = cpu_to_le16(downlink_seid);
2598 	cmd->enable_tcs = enabled_tc;
2599 	if (!uplink_seid)
2600 		veb_flags |= I40E_AQC_ADD_VEB_FLOATING;
2601 	if (default_port)
2602 		veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT;
2603 	else
2604 		veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA;
2605 
2606 	/* reverse logic here: set the bitflag to disable the stats */
2607 	if (!enable_stats)
2608 		veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS;
2609 
2610 	cmd->veb_flags = cpu_to_le16(veb_flags);
2611 
2612 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2613 
2614 	if (!status && veb_seid)
2615 		*veb_seid = le16_to_cpu(resp->veb_seid);
2616 
2617 	return status;
2618 }
2619 
2620 /**
2621  * i40e_aq_get_veb_parameters - Retrieve VEB parameters
2622  * @hw: pointer to the hw struct
2623  * @veb_seid: the SEID of the VEB to query
2624  * @switch_id: the uplink switch id
2625  * @floating: set to true if the VEB is floating
2626  * @statistic_index: index of the stats counter block for this VEB
 * @vebs_used: number of VEBs used by the function
 * @vebs_free: total VEBs not reserved by any function
2629  * @cmd_details: pointer to command details structure or NULL
2630  *
2631  * This retrieves the parameters for a particular VEB, specified by
2632  * uplink_seid, and returns them to the caller.
2633  **/
2634 i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
2635 				u16 veb_seid, u16 *switch_id,
2636 				bool *floating, u16 *statistic_index,
2637 				u16 *vebs_used, u16 *vebs_free,
2638 				struct i40e_asq_cmd_details *cmd_details)
2639 {
2640 	struct i40e_aq_desc desc;
2641 	struct i40e_aqc_get_veb_parameters_completion *cmd_resp =
2642 		(struct i40e_aqc_get_veb_parameters_completion *)
2643 		&desc.params.raw;
2644 	i40e_status status;
2645 
2646 	if (veb_seid == 0)
2647 		return I40E_ERR_PARAM;
2648 
2649 	i40e_fill_default_direct_cmd_desc(&desc,
2650 					  i40e_aqc_opc_get_veb_parameters);
2651 	cmd_resp->seid = cpu_to_le16(veb_seid);
2652 
2653 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2654 	if (status)
2655 		goto get_veb_exit;
2656 
2657 	if (switch_id)
2658 		*switch_id = le16_to_cpu(cmd_resp->switch_id);
2659 	if (statistic_index)
2660 		*statistic_index = le16_to_cpu(cmd_resp->statistic_index);
2661 	if (vebs_used)
2662 		*vebs_used = le16_to_cpu(cmd_resp->vebs_used);
2663 	if (vebs_free)
2664 		*vebs_free = le16_to_cpu(cmd_resp->vebs_free);
2665 	if (floating) {
2666 		u16 flags = le16_to_cpu(cmd_resp->veb_flags);
2667 
2668 		if (flags & I40E_AQC_ADD_VEB_FLOATING)
2669 			*floating = true;
2670 		else
2671 			*floating = false;
2672 	}
2673 
2674 get_veb_exit:
2675 	return status;
2676 }
2677 
2678 /**
2679  * i40e_aq_add_macvlan
2680  * @hw: pointer to the hw struct
2681  * @seid: VSI for the mac address
2682  * @mv_list: list of macvlans to be added
2683  * @count: length of the list
2684  * @cmd_details: pointer to command details structure or NULL
2685  *
2686  * Add MAC/VLAN addresses to the HW filtering
2687  **/
2688 i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
2689 			struct i40e_aqc_add_macvlan_element_data *mv_list,
2690 			u16 count, struct i40e_asq_cmd_details *cmd_details)
2691 {
2692 	struct i40e_aq_desc desc;
2693 	struct i40e_aqc_macvlan *cmd =
2694 		(struct i40e_aqc_macvlan *)&desc.params.raw;
2695 	i40e_status status;
2696 	u16 buf_size;
2697 	int i;
2698 
2699 	if (count == 0 || !mv_list || !hw)
2700 		return I40E_ERR_PARAM;
2701 
2702 	buf_size = count * sizeof(*mv_list);
2703 
2704 	/* prep the rest of the request */
2705 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan);
2706 	cmd->num_addresses = cpu_to_le16(count);
2707 	cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
2708 	cmd->seid[1] = 0;
2709 	cmd->seid[2] = 0;
2710 
2711 	for (i = 0; i < count; i++)
2712 		if (is_multicast_ether_addr(mv_list[i].mac_addr))
2713 			mv_list[i].flags |=
2714 			       cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC);
2715 
2716 	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2717 	if (buf_size > I40E_AQ_LARGE_BUF)
2718 		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2719 
2720 	status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
2721 				       cmd_details);
2722 
2723 	return status;
2724 }
2725 
2726 /**
2727  * i40e_aq_remove_macvlan
2728  * @hw: pointer to the hw struct
2729  * @seid: VSI for the mac address
2730  * @mv_list: list of macvlans to be removed
2731  * @count: length of the list
2732  * @cmd_details: pointer to command details structure or NULL
2733  *
2734  * Remove MAC/VLAN addresses from the HW filtering
2735  **/
2736 i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
2737 			struct i40e_aqc_remove_macvlan_element_data *mv_list,
2738 			u16 count, struct i40e_asq_cmd_details *cmd_details)
2739 {
2740 	struct i40e_aq_desc desc;
2741 	struct i40e_aqc_macvlan *cmd =
2742 		(struct i40e_aqc_macvlan *)&desc.params.raw;
2743 	i40e_status status;
2744 	u16 buf_size;
2745 
2746 	if (count == 0 || !mv_list || !hw)
2747 		return I40E_ERR_PARAM;
2748 
2749 	buf_size = count * sizeof(*mv_list);
2750 
2751 	/* prep the rest of the request */
2752 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
2753 	cmd->num_addresses = cpu_to_le16(count);
2754 	cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
2755 	cmd->seid[1] = 0;
2756 	cmd->seid[2] = 0;
2757 
2758 	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2759 	if (buf_size > I40E_AQ_LARGE_BUF)
2760 		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2761 
2762 	status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
2763 				       cmd_details);
2764 
2765 	return status;
2766 }
2767 
2768 /**
2769  * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule
2770  * @hw: pointer to the hw struct
2771  * @opcode: AQ opcode for add or delete mirror rule
2772  * @sw_seid: Switch SEID (to which rule refers)
2773  * @rule_type: Rule Type (ingress/egress/VLAN)
2774  * @id: Destination VSI SEID or Rule ID
2775  * @count: length of the list
2776  * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
2777  * @cmd_details: pointer to command details structure or NULL
2778  * @rule_id: Rule ID returned from FW
2779  * @rules_used: Number of rules used in internal switch
2780  * @rules_free: Number of rules free in internal switch
2781  *
 * Add/Delete a mirror rule to a specific switch. Mirror rules are supported
 * for VEBs/VEPA elements only
2784  **/
2785 static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw,
2786 				u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
2787 				u16 count, __le16 *mr_list,
2788 				struct i40e_asq_cmd_details *cmd_details,
2789 				u16 *rule_id, u16 *rules_used, u16 *rules_free)
2790 {
2791 	struct i40e_aq_desc desc;
2792 	struct i40e_aqc_add_delete_mirror_rule *cmd =
2793 		(struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw;
2794 	struct i40e_aqc_add_delete_mirror_rule_completion *resp =
2795 	(struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw;
2796 	i40e_status status;
2797 	u16 buf_size;
2798 
2799 	buf_size = count * sizeof(*mr_list);
2800 
2801 	/* prep the rest of the request */
2802 	i40e_fill_default_direct_cmd_desc(&desc, opcode);
2803 	cmd->seid = cpu_to_le16(sw_seid);
2804 	cmd->rule_type = cpu_to_le16(rule_type &
2805 				     I40E_AQC_MIRROR_RULE_TYPE_MASK);
2806 	cmd->num_entries = cpu_to_le16(count);
2807 	/* Dest VSI for add, rule_id for delete */
2808 	cmd->destination = cpu_to_le16(id);
2809 	if (mr_list) {
2810 		desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
2811 						I40E_AQ_FLAG_RD));
2812 		if (buf_size > I40E_AQ_LARGE_BUF)
2813 			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2814 	}
2815 
2816 	status = i40e_asq_send_command(hw, &desc, mr_list, buf_size,
2817 				       cmd_details);
2818 	if (!status ||
2819 	    hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) {
2820 		if (rule_id)
2821 			*rule_id = le16_to_cpu(resp->rule_id);
2822 		if (rules_used)
2823 			*rules_used = le16_to_cpu(resp->mirror_rules_used);
2824 		if (rules_free)
2825 			*rules_free = le16_to_cpu(resp->mirror_rules_free);
2826 	}
2827 	return status;
2828 }
2829 
2830 /**
2831  * i40e_aq_add_mirrorrule - add a mirror rule
2832  * @hw: pointer to the hw struct
2833  * @sw_seid: Switch SEID (to which rule refers)
2834  * @rule_type: Rule Type (ingress/egress/VLAN)
2835  * @dest_vsi: SEID of VSI to which packets will be mirrored
2836  * @count: length of the list
2837  * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
2838  * @cmd_details: pointer to command details structure or NULL
2839  * @rule_id: Rule ID returned from FW
2840  * @rules_used: Number of rules used in internal switch
2841  * @rules_free: Number of rules free in internal switch
2842  *
2843  * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
2844  **/
2845 i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
2846 			u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
2847 			struct i40e_asq_cmd_details *cmd_details,
2848 			u16 *rule_id, u16 *rules_used, u16 *rules_free)
2849 {
2850 	if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS ||
2851 	    rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) {
2852 		if (count == 0 || !mr_list)
2853 			return I40E_ERR_PARAM;
2854 	}
2855 
2856 	return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid,
2857 				  rule_type, dest_vsi, count, mr_list,
2858 				  cmd_details, rule_id, rules_used, rules_free);
2859 }
2860 
2861 /**
2862  * i40e_aq_delete_mirrorrule - delete a mirror rule
2863  * @hw: pointer to the hw struct
2864  * @sw_seid: Switch SEID (to which rule refers)
2865  * @rule_type: Rule Type (ingress/egress/VLAN)
2866  * @count: length of the list
2867  * @rule_id: Rule ID that is returned in the receive desc as part of
2868  *		add_mirrorrule.
2869  * @mr_list: list of mirrored VLAN IDs to be removed
2870  * @cmd_details: pointer to command details structure or NULL
2871  * @rules_used: Number of rules used in internal switch
2872  * @rules_free: Number of rules free in internal switch
2873  *
2874  * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
2875  **/
2876 i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
2877 			u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
2878 			struct i40e_asq_cmd_details *cmd_details,
2879 			u16 *rules_used, u16 *rules_free)
2880 {
2881 	/* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */
2882 	if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
		/* count and mr_list must be valid for rule_type INGRESS VLAN
		 * mirroring. For other rule_types, count and mr_list do not
		 * matter.
2886 		 */
2887 		if (count == 0 || !mr_list)
2888 			return I40E_ERR_PARAM;
2889 	}
2890 
2891 	return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid,
2892 				  rule_type, rule_id, count, mr_list,
2893 				  cmd_details, NULL, rules_used, rules_free);
2894 }
2895 
2896 /**
2897  * i40e_aq_send_msg_to_vf
2898  * @hw: pointer to the hardware structure
2899  * @vfid: VF id to send msg
2900  * @v_opcode: opcodes for VF-PF communication
2901  * @v_retval: return error code
2902  * @msg: pointer to the msg buffer
2903  * @msglen: msg length
2904  * @cmd_details: pointer to command details
2905  *
 * Send a message to the specified VF
2907  **/
2908 i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
2909 				u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
2910 				struct i40e_asq_cmd_details *cmd_details)
2911 {
2912 	struct i40e_aq_desc desc;
2913 	struct i40e_aqc_pf_vf_message *cmd =
2914 		(struct i40e_aqc_pf_vf_message *)&desc.params.raw;
2915 	i40e_status status;
2916 
2917 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
2918 	cmd->id = cpu_to_le32(vfid);
2919 	desc.cookie_high = cpu_to_le32(v_opcode);
2920 	desc.cookie_low = cpu_to_le32(v_retval);
2921 	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
2922 	if (msglen) {
2923 		desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
2924 						I40E_AQ_FLAG_RD));
2925 		if (msglen > I40E_AQ_LARGE_BUF)
2926 			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2927 		desc.datalen = cpu_to_le16(msglen);
2928 	}
2929 	status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details);
2930 
2931 	return status;
2932 }
2933 
2934 /**
2935  * i40e_aq_debug_read_register
2936  * @hw: pointer to the hw struct
2937  * @reg_addr: register address
2938  * @reg_val: register value
2939  * @cmd_details: pointer to command details structure or NULL
2940  *
2941  * Read the register using the admin queue commands
2942  **/
2943 i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
2944 				u32 reg_addr, u64 *reg_val,
2945 				struct i40e_asq_cmd_details *cmd_details)
2946 {
2947 	struct i40e_aq_desc desc;
2948 	struct i40e_aqc_debug_reg_read_write *cmd_resp =
2949 		(struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
2950 	i40e_status status;
2951 
2952 	if (reg_val == NULL)
2953 		return I40E_ERR_PARAM;
2954 
2955 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg);
2956 
2957 	cmd_resp->address = cpu_to_le32(reg_addr);
2958 
2959 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2960 
2961 	if (!status) {
2962 		*reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) |
2963 			   (u64)le32_to_cpu(cmd_resp->value_low);
2964 	}
2965 
2966 	return status;
2967 }
2968 
2969 /**
2970  * i40e_aq_debug_write_register
2971  * @hw: pointer to the hw struct
2972  * @reg_addr: register address
2973  * @reg_val: register value
2974  * @cmd_details: pointer to command details structure or NULL
2975  *
2976  * Write to a register using the admin queue commands
2977  **/
2978 i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
2979 					u32 reg_addr, u64 reg_val,
2980 					struct i40e_asq_cmd_details *cmd_details)
2981 {
2982 	struct i40e_aq_desc desc;
2983 	struct i40e_aqc_debug_reg_read_write *cmd =
2984 		(struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
2985 	i40e_status status;
2986 
2987 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg);
2988 
2989 	cmd->address = cpu_to_le32(reg_addr);
2990 	cmd->value_high = cpu_to_le32((u32)(reg_val >> 32));
2991 	cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF));
2992 
2993 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2994 
2995 	return status;
2996 }
2997 
2998 /**
2999  * i40e_aq_request_resource
3000  * @hw: pointer to the hw struct
3001  * @resource: resource id
3002  * @access: access type
3003  * @sdp_number: resource number
3004  * @timeout: the maximum time in ms that the driver may hold the resource
3005  * @cmd_details: pointer to command details structure or NULL
3006  *
 * Requests a common resource using the admin queue commands
3008  **/
3009 i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
3010 				enum i40e_aq_resources_ids resource,
3011 				enum i40e_aq_resource_access_type access,
3012 				u8 sdp_number, u64 *timeout,
3013 				struct i40e_asq_cmd_details *cmd_details)
3014 {
3015 	struct i40e_aq_desc desc;
3016 	struct i40e_aqc_request_resource *cmd_resp =
3017 		(struct i40e_aqc_request_resource *)&desc.params.raw;
3018 	i40e_status status;
3019 
3020 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource);
3021 
3022 	cmd_resp->resource_id = cpu_to_le16(resource);
3023 	cmd_resp->access_type = cpu_to_le16(access);
3024 	cmd_resp->resource_number = cpu_to_le32(sdp_number);
3025 
3026 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3027 	/* The completion specifies the maximum time in ms that the driver
3028 	 * may hold the resource in the Timeout field.
3029 	 * If the resource is held by someone else, the command completes with
3030 	 * busy return value and the timeout field indicates the maximum time
3031 	 * the current owner of the resource has to free it.
3032 	 */
3033 	if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
3034 		*timeout = le32_to_cpu(cmd_resp->timeout);
3035 
3036 	return status;
3037 }
3038 
3039 /**
3040  * i40e_aq_release_resource
3041  * @hw: pointer to the hw struct
3042  * @resource: resource id
3043  * @sdp_number: resource number
3044  * @cmd_details: pointer to command details structure or NULL
3045  *
 * Release a common resource using the admin queue commands
3047  **/
3048 i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
3049 				enum i40e_aq_resources_ids resource,
3050 				u8 sdp_number,
3051 				struct i40e_asq_cmd_details *cmd_details)
3052 {
3053 	struct i40e_aq_desc desc;
3054 	struct i40e_aqc_request_resource *cmd =
3055 		(struct i40e_aqc_request_resource *)&desc.params.raw;
3056 	i40e_status status;
3057 
3058 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource);
3059 
3060 	cmd->resource_id = cpu_to_le16(resource);
3061 	cmd->resource_number = cpu_to_le32(sdp_number);
3062 
3063 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3064 
3065 	return status;
3066 }
3067 
3068 /**
3069  * i40e_aq_read_nvm
3070  * @hw: pointer to the hw struct
3071  * @module_pointer: module pointer location in words from the NVM beginning
3072  * @offset: byte offset from the module beginning
3073  * @length: length of the section to be read (in bytes from the offset)
3074  * @data: command buffer (size [bytes] = length)
3075  * @last_command: tells if this is the last command in a series
3076  * @cmd_details: pointer to command details structure or NULL
3077  *
3078  * Read the NVM using the admin queue commands
3079  **/
3080 i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
3081 				u32 offset, u16 length, void *data,
3082 				bool last_command,
3083 				struct i40e_asq_cmd_details *cmd_details)
3084 {
3085 	struct i40e_aq_desc desc;
3086 	struct i40e_aqc_nvm_update *cmd =
3087 		(struct i40e_aqc_nvm_update *)&desc.params.raw;
3088 	i40e_status status;
3089 
	/* The highest byte of the offset must be zero. */
3091 	if (offset & 0xFF000000) {
3092 		status = I40E_ERR_PARAM;
3093 		goto i40e_aq_read_nvm_exit;
3094 	}
3095 
3096 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read);
3097 
3098 	/* If this is the last command in a series, set the proper flag. */
3099 	if (last_command)
3100 		cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3101 	cmd->module_pointer = module_pointer;
3102 	cmd->offset = cpu_to_le32(offset);
3103 	cmd->length = cpu_to_le16(length);
3104 
3105 	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3106 	if (length > I40E_AQ_LARGE_BUF)
3107 		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3108 
3109 	status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
3110 
3111 i40e_aq_read_nvm_exit:
3112 	return status;
3113 }
3114 
3115 /**
3116  * i40e_aq_erase_nvm
3117  * @hw: pointer to the hw struct
3118  * @module_pointer: module pointer location in words from the NVM beginning
3119  * @offset: offset in the module (expressed in 4 KB from module's beginning)
3120  * @length: length of the section to be erased (expressed in 4 KB)
3121  * @last_command: tells if this is the last command in a series
3122  * @cmd_details: pointer to command details structure or NULL
3123  *
3124  * Erase the NVM sector using the admin queue commands
3125  **/
3126 i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
3127 			      u32 offset, u16 length, bool last_command,
3128 			      struct i40e_asq_cmd_details *cmd_details)
3129 {
3130 	struct i40e_aq_desc desc;
3131 	struct i40e_aqc_nvm_update *cmd =
3132 		(struct i40e_aqc_nvm_update *)&desc.params.raw;
3133 	i40e_status status;
3134 
	/* The highest byte of the offset must be zero. */
3136 	if (offset & 0xFF000000) {
3137 		status = I40E_ERR_PARAM;
3138 		goto i40e_aq_erase_nvm_exit;
3139 	}
3140 
3141 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase);
3142 
3143 	/* If this is the last command in a series, set the proper flag. */
3144 	if (last_command)
3145 		cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3146 	cmd->module_pointer = module_pointer;
3147 	cmd->offset = cpu_to_le32(offset);
3148 	cmd->length = cpu_to_le16(length);
3149 
3150 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3151 
3152 i40e_aq_erase_nvm_exit:
3153 	return status;
3154 }
3155 
3156 /**
3157  * i40e_parse_discover_capabilities
3158  * @hw: pointer to the hw struct
3159  * @buff: pointer to a buffer containing device/function capability records
3160  * @cap_count: number of capability records in the list
3161  * @list_type_opc: type of capabilities list to parse
3162  *
3163  * Parse the device/function capabilities list.
3164  **/
3165 static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
3166 				     u32 cap_count,
3167 				     enum i40e_admin_queue_opc list_type_opc)
3168 {
3169 	struct i40e_aqc_list_capabilities_element_resp *cap;
3170 	u32 valid_functions, num_functions;
3171 	u32 number, logical_id, phys_id;
3172 	struct i40e_hw_capabilities *p;
3173 	u16 id, ocp_cfg_word0;
3174 	i40e_status status;
3175 	u8 major_rev;
3176 	u32 i = 0;
3177 
3178 	cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
3179 
3180 	if (list_type_opc == i40e_aqc_opc_list_dev_capabilities)
3181 		p = &hw->dev_caps;
3182 	else if (list_type_opc == i40e_aqc_opc_list_func_capabilities)
3183 		p = &hw->func_caps;
3184 	else
3185 		return;
3186 
3187 	for (i = 0; i < cap_count; i++, cap++) {
3188 		id = le16_to_cpu(cap->id);
3189 		number = le32_to_cpu(cap->number);
3190 		logical_id = le32_to_cpu(cap->logical_id);
3191 		phys_id = le32_to_cpu(cap->phys_id);
3192 		major_rev = cap->major_rev;
3193 
3194 		switch (id) {
3195 		case I40E_AQ_CAP_ID_SWITCH_MODE:
3196 			p->switch_mode = number;
3197 			break;
3198 		case I40E_AQ_CAP_ID_MNG_MODE:
3199 			p->management_mode = number;
3200 			if (major_rev > 1) {
3201 				p->mng_protocols_over_mctp = logical_id;
3202 				i40e_debug(hw, I40E_DEBUG_INIT,
3203 					   "HW Capability: Protocols over MCTP = %d\n",
3204 					   p->mng_protocols_over_mctp);
3205 			} else {
3206 				p->mng_protocols_over_mctp = 0;
3207 			}
3208 			break;
3209 		case I40E_AQ_CAP_ID_NPAR_ACTIVE:
3210 			p->npar_enable = number;
3211 			break;
3212 		case I40E_AQ_CAP_ID_OS2BMC_CAP:
3213 			p->os2bmc = number;
3214 			break;
3215 		case I40E_AQ_CAP_ID_FUNCTIONS_VALID:
3216 			p->valid_functions = number;
3217 			break;
3218 		case I40E_AQ_CAP_ID_SRIOV:
3219 			if (number == 1)
3220 				p->sr_iov_1_1 = true;
3221 			break;
3222 		case I40E_AQ_CAP_ID_VF:
3223 			p->num_vfs = number;
3224 			p->vf_base_id = logical_id;
3225 			break;
3226 		case I40E_AQ_CAP_ID_VMDQ:
3227 			if (number == 1)
3228 				p->vmdq = true;
3229 			break;
3230 		case I40E_AQ_CAP_ID_8021QBG:
3231 			if (number == 1)
3232 				p->evb_802_1_qbg = true;
3233 			break;
3234 		case I40E_AQ_CAP_ID_8021QBR:
3235 			if (number == 1)
3236 				p->evb_802_1_qbh = true;
3237 			break;
3238 		case I40E_AQ_CAP_ID_VSI:
3239 			p->num_vsis = number;
3240 			break;
3241 		case I40E_AQ_CAP_ID_DCB:
3242 			if (number == 1) {
3243 				p->dcb = true;
3244 				p->enabled_tcmap = logical_id;
3245 				p->maxtc = phys_id;
3246 			}
3247 			break;
3248 		case I40E_AQ_CAP_ID_FCOE:
3249 			if (number == 1)
3250 				p->fcoe = true;
3251 			break;
3252 		case I40E_AQ_CAP_ID_ISCSI:
3253 			if (number == 1)
3254 				p->iscsi = true;
3255 			break;
3256 		case I40E_AQ_CAP_ID_RSS:
3257 			p->rss = true;
3258 			p->rss_table_size = number;
3259 			p->rss_table_entry_width = logical_id;
3260 			break;
3261 		case I40E_AQ_CAP_ID_RXQ:
3262 			p->num_rx_qp = number;
3263 			p->base_queue = phys_id;
3264 			break;
3265 		case I40E_AQ_CAP_ID_TXQ:
3266 			p->num_tx_qp = number;
3267 			p->base_queue = phys_id;
3268 			break;
3269 		case I40E_AQ_CAP_ID_MSIX:
3270 			p->num_msix_vectors = number;
3271 			i40e_debug(hw, I40E_DEBUG_INIT,
3272 				   "HW Capability: MSIX vector count = %d\n",
3273 				   p->num_msix_vectors);
3274 			break;
3275 		case I40E_AQ_CAP_ID_VF_MSIX:
3276 			p->num_msix_vectors_vf = number;
3277 			break;
3278 		case I40E_AQ_CAP_ID_FLEX10:
3279 			if (major_rev == 1) {
3280 				if (number == 1) {
3281 					p->flex10_enable = true;
3282 					p->flex10_capable = true;
3283 				}
3284 			} else {
3285 				/* Capability revision >= 2 */
3286 				if (number & 1)
3287 					p->flex10_enable = true;
3288 				if (number & 2)
3289 					p->flex10_capable = true;
3290 			}
3291 			p->flex10_mode = logical_id;
3292 			p->flex10_status = phys_id;
3293 			break;
3294 		case I40E_AQ_CAP_ID_CEM:
3295 			if (number == 1)
3296 				p->mgmt_cem = true;
3297 			break;
3298 		case I40E_AQ_CAP_ID_IWARP:
3299 			if (number == 1)
3300 				p->iwarp = true;
3301 			break;
3302 		case I40E_AQ_CAP_ID_LED:
3303 			if (phys_id < I40E_HW_CAP_MAX_GPIO)
3304 				p->led[phys_id] = true;
3305 			break;
3306 		case I40E_AQ_CAP_ID_SDP:
3307 			if (phys_id < I40E_HW_CAP_MAX_GPIO)
3308 				p->sdp[phys_id] = true;
3309 			break;
3310 		case I40E_AQ_CAP_ID_MDIO:
3311 			if (number == 1) {
3312 				p->mdio_port_num = phys_id;
3313 				p->mdio_port_mode = logical_id;
3314 			}
3315 			break;
3316 		case I40E_AQ_CAP_ID_1588:
3317 			if (number == 1)
3318 				p->ieee_1588 = true;
3319 			break;
3320 		case I40E_AQ_CAP_ID_FLOW_DIRECTOR:
3321 			p->fd = true;
3322 			p->fd_filters_guaranteed = number;
3323 			p->fd_filters_best_effort = logical_id;
3324 			break;
3325 		case I40E_AQ_CAP_ID_WSR_PROT:
3326 			p->wr_csr_prot = (u64)number;
3327 			p->wr_csr_prot |= (u64)logical_id << 32;
3328 			break;
3329 		case I40E_AQ_CAP_ID_NVM_MGMT:
3330 			if (number & I40E_NVM_MGMT_SEC_REV_DISABLED)
3331 				p->sec_rev_disabled = true;
3332 			if (number & I40E_NVM_MGMT_UPDATE_DISABLED)
3333 				p->update_disabled = true;
3334 			break;
3335 		default:
3336 			break;
3337 		}
3338 	}
3339 
3340 	if (p->fcoe)
3341 		i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n");
3342 
3343 	/* Software override ensuring FCoE is disabled in NPAR or MFP
3344 	 * mode, because it is not supported in these modes.
3345 	 */
3346 	if (p->npar_enable || p->flex10_enable)
3347 		p->fcoe = false;
3348 
3349 	/* count the enabled ports (aka the "not disabled" ports) */
3350 	hw->num_ports = 0;
3351 	for (i = 0; i < 4; i++) {
3352 		u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i);
3353 		u64 port_cfg = 0;
3354 
3355 		/* use AQ read to get the physical register offset instead
3356 		 * of the port relative offset
3357 		 */
3358 		i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL);
3359 		if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK))
3360 			hw->num_ports++;
3361 	}
3362 
3363 	/* OCP cards case: if a mezzanine is removed, the Ethernet port is
3364 	 * left in a disabled state in the PRTGEN_CNF register. An additional
3365 	 * NVM read is needed to check whether we are dealing with an OCP
3366 	 * card. Those cards have 4 PFs at minimum, so using PRTGEN_CNF for
3367 	 * counting physical ports results in a wrong partition id calculation
3368 	 * and thus breaks WoL support.
3369 	 */
3370 	if (hw->mac.type == I40E_MAC_X722) {
3371 		if (!i40e_acquire_nvm(hw, I40E_RESOURCE_READ)) {
3372 			status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR,
3373 						  2 * I40E_SR_OCP_CFG_WORD0,
3374 						  sizeof(ocp_cfg_word0),
3375 						  &ocp_cfg_word0, true, NULL);
3376 			if (!status &&
3377 			    (ocp_cfg_word0 & I40E_SR_OCP_ENABLED))
3378 				hw->num_ports = 4;
3379 			i40e_release_nvm(hw);
3380 		}
3381 	}
3382 
3383 	valid_functions = p->valid_functions;
3384 	num_functions = 0;
3385 	while (valid_functions) {
3386 		if (valid_functions & 1)
3387 			num_functions++;
3388 		valid_functions >>= 1;
3389 	}
3390 
3391 	/* partition id is 1-based, and functions are evenly spread
3392 	 * across the ports as partitions
3393 	 */
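	/* Example: with 8 valid functions spread across 4 enabled ports,
	 * num_partitions is 8 / 4 = 2 and, e.g., PF 5 lands in
	 * partition (5 / 4) + 1 = 2.
	 */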
3394 	if (hw->num_ports != 0) {
3395 		hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
3396 		hw->num_partitions = num_functions / hw->num_ports;
3397 	}
3398 
3399 	/* additional HW specific goodies that might
3400 	 * someday be HW version specific
3401 	 */
3402 	p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS;
3403 }
3404 
3405 /**
3406  * i40e_aq_discover_capabilities
3407  * @hw: pointer to the hw struct
3408  * @buff: a virtual buffer to hold the capabilities
3409  * @buff_size: Size of the virtual buffer
3410  * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM
3411  * @list_type_opc: capabilities type to discover - pass in the command opcode
3412  * @cmd_details: pointer to command details structure or NULL
3413  *
3414  * Get the device capabilities descriptions from the firmware
3415  **/
3416 i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
3417 				void *buff, u16 buff_size, u16 *data_size,
3418 				enum i40e_admin_queue_opc list_type_opc,
3419 				struct i40e_asq_cmd_details *cmd_details)
3420 {
3421 	struct i40e_aqc_list_capabilites *cmd;
3422 	struct i40e_aq_desc desc;
3423 	i40e_status status = 0;
3424 
3425 	cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
3426 
3427 	if (list_type_opc != i40e_aqc_opc_list_func_capabilities &&
3428 		list_type_opc != i40e_aqc_opc_list_dev_capabilities) {
3429 		status = I40E_ERR_PARAM;
3430 		goto exit;
3431 	}
3432 
3433 	i40e_fill_default_direct_cmd_desc(&desc, list_type_opc);
3434 
3435 	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3436 	if (buff_size > I40E_AQ_LARGE_BUF)
3437 		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3438 
3439 	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3440 	*data_size = le16_to_cpu(desc.datalen);
3441 
3442 	if (status)
3443 		goto exit;
3444 
3445 	i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count),
3446 					 list_type_opc);
3447 
3448 exit:
3449 	return status;
3450 }
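
/*
 * Usage sketch (hypothetical caller): firmware reports the buffer size it
 * needs through *data_size when it returns ENOMEM, so callers typically
 * retry with a larger buffer. A minimal sketch, assuming a valid
 * struct i40e_hw *hw; the initial guess of 40 records is arbitrary:
 *
 *	u16 needed = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
 *	i40e_status status;
 *	void *buf;
 *
 *	do {
 *		buf = kzalloc(needed, GFP_KERNEL);
 *		if (!buf)
 *			break;
 *		status = i40e_aq_discover_capabilities(hw, buf, needed, &needed,
 *					i40e_aqc_opc_list_func_capabilities,
 *					NULL);
 *		kfree(buf);
 *	} while (hw->aq.asq_last_status == I40E_AQ_RC_ENOMEM);
 */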
3451 
3452 /**
3453  * i40e_aq_update_nvm
3454  * @hw: pointer to the hw struct
3455  * @module_pointer: module pointer location in words from the NVM beginning
3456  * @offset: byte offset from the module beginning
3457  * @length: length of the section to be written (in bytes from the offset)
3458  * @data: command buffer (size [bytes] = length)
3459  * @last_command: tells if this is the last command in a series
3460  * @preservation_flags: Preservation mode flags
3461  * @cmd_details: pointer to command details structure or NULL
3462  *
3463  * Update the NVM using the admin queue commands
3464  **/
3465 i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
3466 			       u32 offset, u16 length, void *data,
3467 				bool last_command, u8 preservation_flags,
3468 			       struct i40e_asq_cmd_details *cmd_details)
3469 {
3470 	struct i40e_aq_desc desc;
3471 	struct i40e_aqc_nvm_update *cmd =
3472 		(struct i40e_aqc_nvm_update *)&desc.params.raw;
3473 	i40e_status status;
3474 
3475 	/* The highest byte of the offset must be zero. */
3476 	if (offset & 0xFF000000) {
3477 		status = I40E_ERR_PARAM;
3478 		goto i40e_aq_update_nvm_exit;
3479 	}
3480 
3481 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
3482 
3483 	/* If this is the last command in a series, set the proper flag. */
3484 	if (last_command)
3485 		cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3486 	if (hw->mac.type == I40E_MAC_X722) {
3487 		if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED)
3488 			cmd->command_flags |=
3489 				(I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED <<
3490 				 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
3491 		else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL)
3492 			cmd->command_flags |=
3493 				(I40E_AQ_NVM_PRESERVATION_FLAGS_ALL <<
3494 				 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
3495 	}
3496 	cmd->module_pointer = module_pointer;
3497 	cmd->offset = cpu_to_le32(offset);
3498 	cmd->length = cpu_to_le16(length);
3499 
3500 	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
3501 	if (length > I40E_AQ_LARGE_BUF)
3502 		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3503 
3504 	status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
3505 
3506 i40e_aq_update_nvm_exit:
3507 	return status;
3508 }
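
/*
 * Usage sketch (hypothetical caller): write a small buffer at byte offset
 * 0x100 of the module selected by module_pointer 0, as the last command of a
 * series and with no preservation flags. Assumes a valid hw pointer and that
 * the NVM resource was acquired for writing beforehand; offset and data are
 * only illustrative.
 *
 *	u8 data[16] = { 0 };
 *	i40e_status status;
 *
 *	status = i40e_aq_update_nvm(hw, 0, 0x100, sizeof(data), data,
 *				    true, 0, NULL);
 */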
3509 
3510 /**
3511  * i40e_aq_rearrange_nvm
3512  * @hw: pointer to the hw struct
3513  * @rearrange_nvm: defines direction of rearrangement
3514  * @cmd_details: pointer to command details structure or NULL
3515  *
3516  * Rearrange NVM structure, available only for transition FW
3517  **/
3518 i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw,
3519 				  u8 rearrange_nvm,
3520 				  struct i40e_asq_cmd_details *cmd_details)
3521 {
3522 	struct i40e_aqc_nvm_update *cmd;
3523 	i40e_status status;
3524 	struct i40e_aq_desc desc;
3525 
3526 	cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw;
3527 
3528 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
3529 
3530 	rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT |
3531 			 I40E_AQ_NVM_REARRANGE_TO_STRUCT);
3532 
3533 	if (!rearrange_nvm) {
3534 		status = I40E_ERR_PARAM;
3535 		goto i40e_aq_rearrange_nvm_exit;
3536 	}
3537 
3538 	cmd->command_flags |= rearrange_nvm;
3539 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3540 
3541 i40e_aq_rearrange_nvm_exit:
3542 	return status;
3543 }
3544 
3545 /**
3546  * i40e_aq_get_lldp_mib
3547  * @hw: pointer to the hw struct
3548  * @bridge_type: type of bridge requested
3549  * @mib_type: Local, Remote or both Local and Remote MIBs
3550  * @buff: pointer to a user supplied buffer to store the MIB block
3551  * @buff_size: size of the buffer (in bytes)
3552  * @local_len: length of the returned Local LLDP MIB
3553  * @remote_len: length of the returned Remote LLDP MIB
3554  * @cmd_details: pointer to command details structure or NULL
3555  *
3556  * Requests the complete LLDP MIB (entire packet).
3557  **/
3558 i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
3559 				u8 mib_type, void *buff, u16 buff_size,
3560 				u16 *local_len, u16 *remote_len,
3561 				struct i40e_asq_cmd_details *cmd_details)
3562 {
3563 	struct i40e_aq_desc desc;
3564 	struct i40e_aqc_lldp_get_mib *cmd =
3565 		(struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
3566 	struct i40e_aqc_lldp_get_mib *resp =
3567 		(struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
3568 	i40e_status status;
3569 
3570 	if (buff_size == 0 || !buff)
3571 		return I40E_ERR_PARAM;
3572 
3573 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib);
3574 	/* Indirect Command */
3575 	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3576 
3577 	cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK;
3578 	cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
3579 		       I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
3580 
3581 	desc.datalen = cpu_to_le16(buff_size);
3582 
3583 	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3584 	if (buff_size > I40E_AQ_LARGE_BUF)
3585 		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3586 
3587 	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3588 	if (!status) {
3589 		if (local_len != NULL)
3590 			*local_len = le16_to_cpu(resp->local_len);
3591 		if (remote_len != NULL)
3592 			*remote_len = le16_to_cpu(resp->remote_len);
3593 	}
3594 
3595 	return status;
3596 }
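
/*
 * Usage sketch (hypothetical caller): fetch the locally stored LLDP MIB for
 * bridge type 0 into a caller-supplied buffer. Assumes a valid hw pointer and
 * the I40E_AQ_LLDP_MIB_LOCAL type define from the admin queue command
 * definitions; the buffer size is only illustrative.
 *
 *	u8 mib_buf[1024];
 *	u16 local_len = 0;
 *	i40e_status status;
 *
 *	status = i40e_aq_get_lldp_mib(hw, 0, I40E_AQ_LLDP_MIB_LOCAL,
 *				      mib_buf, sizeof(mib_buf),
 *				      &local_len, NULL, NULL);
 */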
3597 
3598 /**
3599  * i40e_aq_cfg_lldp_mib_change_event
3600  * @hw: pointer to the hw struct
3601  * @enable_update: Enable or Disable event posting
3602  * @cmd_details: pointer to command details structure or NULL
3603  *
3604  * Enable or Disable posting of an event on ARQ when LLDP MIB
3605  * associated with the interface changes
3606  **/
3607 i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
3608 				bool enable_update,
3609 				struct i40e_asq_cmd_details *cmd_details)
3610 {
3611 	struct i40e_aq_desc desc;
3612 	struct i40e_aqc_lldp_update_mib *cmd =
3613 		(struct i40e_aqc_lldp_update_mib *)&desc.params.raw;
3614 	i40e_status status;
3615 
3616 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib);
3617 
3618 	if (!enable_update)
3619 		cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE;
3620 
3621 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3622 
3623 	return status;
3624 }
3625 
3626 /**
3627  * i40e_aq_stop_lldp
3628  * @hw: pointer to the hw struct
3629  * @shutdown_agent: True if the LLDP Agent needs to be shut down
3630  * @cmd_details: pointer to command details structure or NULL
3631  *
3632  * Stop or Shutdown the embedded LLDP Agent
3633  **/
3634 i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
3635 				struct i40e_asq_cmd_details *cmd_details)
3636 {
3637 	struct i40e_aq_desc desc;
3638 	struct i40e_aqc_lldp_stop *cmd =
3639 		(struct i40e_aqc_lldp_stop *)&desc.params.raw;
3640 	i40e_status status;
3641 
3642 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop);
3643 
3644 	if (shutdown_agent)
3645 		cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
3646 
3647 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3648 
3649 	return status;
3650 }
3651 
3652 /**
3653  * i40e_aq_start_lldp
3654  * @hw: pointer to the hw struct
3657  * @cmd_details: pointer to command details structure or NULL
3658  *
3659  * Start the embedded LLDP Agent on all ports.
3660  **/
3661 i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
3662 				struct i40e_asq_cmd_details *cmd_details)
3663 {
3664 	struct i40e_aq_desc desc;
3665 	struct i40e_aqc_lldp_start *cmd =
3666 		(struct i40e_aqc_lldp_start *)&desc.params.raw;
3667 	i40e_status status;
3668 
3669 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
3670 
3671 	cmd->command = I40E_AQ_LLDP_AGENT_START;
3672 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3673 
3674 	return status;
3675 }
3676 
3677 /**
3678  * i40e_aq_set_dcb_parameters
3679  * @hw: pointer to the hw struct
3680  * @dcb_enable: True if DCB configuration needs to be applied
3681  * @cmd_details: pointer to command details structure or NULL
3682  *
3683  **/
3684 enum i40e_status_code
3685 i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
3686 			   struct i40e_asq_cmd_details *cmd_details)
3687 {
3688 	struct i40e_aq_desc desc;
3689 	struct i40e_aqc_set_dcb_parameters *cmd =
3690 		(struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
3691 	i40e_status status;
3692 
3693 	if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
3694 		return I40E_ERR_DEVICE_NOT_SUPPORTED;
3695 
3696 	i40e_fill_default_direct_cmd_desc(&desc,
3697 					  i40e_aqc_opc_set_dcb_parameters);
3698 
3699 	if (dcb_enable) {
3700 		cmd->valid_flags = I40E_DCB_VALID;
3701 		cmd->command = I40E_AQ_DCB_SET_AGENT;
3702 	}
3703 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3704 
3705 	return status;
3706 }
3707 
3708 /**
3709  * i40e_aq_get_cee_dcb_config
3710  * @hw: pointer to the hw struct
3711  * @buff: response buffer that stores CEE operational configuration
3712  * @buff_size: size of the buffer passed
3713  * @cmd_details: pointer to command details structure or NULL
3714  *
3715  * Get CEE DCBX mode operational configuration from firmware
3716  **/
3717 i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
3718 				       void *buff, u16 buff_size,
3719 				       struct i40e_asq_cmd_details *cmd_details)
3720 {
3721 	struct i40e_aq_desc desc;
3722 	i40e_status status;
3723 
3724 	if (buff_size == 0 || !buff)
3725 		return I40E_ERR_PARAM;
3726 
3727 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg);
3728 
3729 	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3730 	status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size,
3731 				       cmd_details);
3732 
3733 	return status;
3734 }
3735 
3736 /**
3737  * i40e_aq_add_udp_tunnel
3738  * @hw: pointer to the hw struct
3739  * @udp_port: the UDP port to add in Host byte order
3740  * @protocol_index: protocol index type
3741  * @filter_index: pointer to filter index
3742  * @cmd_details: pointer to command details structure or NULL
3743  *
3744  * Note: Firmware expects the udp_port value to be in Little Endian format,
3745  * and this function will call cpu_to_le16 to convert from Host byte order to
3746  * Little Endian order.
3747  **/
3748 i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
3749 				u16 udp_port, u8 protocol_index,
3750 				u8 *filter_index,
3751 				struct i40e_asq_cmd_details *cmd_details)
3752 {
3753 	struct i40e_aq_desc desc;
3754 	struct i40e_aqc_add_udp_tunnel *cmd =
3755 		(struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
3756 	struct i40e_aqc_del_udp_tunnel_completion *resp =
3757 		(struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw;
3758 	i40e_status status;
3759 
3760 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);
3761 
3762 	cmd->udp_port = cpu_to_le16(udp_port);
3763 	cmd->protocol_type = protocol_index;
3764 
3765 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3766 
3767 	if (!status && filter_index)
3768 		*filter_index = resp->index;
3769 
3770 	return status;
3771 }
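
/*
 * Usage sketch (hypothetical caller): add a UDP tunnel filter for the
 * well-known VXLAN port 4789 and keep the returned filter index so the
 * filter can be removed later with i40e_aq_del_udp_tunnel(). The
 * protocol_index value is left symbolic here; it must be one of the tunnel
 * type defines the firmware understands.
 *
 *	u8 filter_index;
 *	i40e_status status;
 *
 *	status = i40e_aq_add_udp_tunnel(hw, 4789, protocol_index,
 *					&filter_index, NULL);
 *	if (!status)
 *		status = i40e_aq_del_udp_tunnel(hw, filter_index, NULL);
 */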
3772 
3773 /**
3774  * i40e_aq_del_udp_tunnel
3775  * @hw: pointer to the hw struct
3776  * @index: filter index
3777  * @cmd_details: pointer to command details structure or NULL
3778  **/
3779 i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
3780 				struct i40e_asq_cmd_details *cmd_details)
3781 {
3782 	struct i40e_aq_desc desc;
3783 	struct i40e_aqc_remove_udp_tunnel *cmd =
3784 		(struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw;
3785 	i40e_status status;
3786 
3787 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel);
3788 
3789 	cmd->index = index;
3790 
3791 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3792 
3793 	return status;
3794 }
3795 
3796 /**
3797  * i40e_aq_delete_element - Delete switch element
3798  * @hw: pointer to the hw struct
3799  * @seid: the SEID to delete from the switch
3800  * @cmd_details: pointer to command details structure or NULL
3801  *
3802  * This deletes a switch element from the switch.
3803  **/
3804 i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
3805 				struct i40e_asq_cmd_details *cmd_details)
3806 {
3807 	struct i40e_aq_desc desc;
3808 	struct i40e_aqc_switch_seid *cmd =
3809 		(struct i40e_aqc_switch_seid *)&desc.params.raw;
3810 	i40e_status status;
3811 
3812 	if (seid == 0)
3813 		return I40E_ERR_PARAM;
3814 
3815 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element);
3816 
3817 	cmd->seid = cpu_to_le16(seid);
3818 
3819 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3820 
3821 	return status;
3822 }
3823 
3824 /**
3825  * i40e_aq_dcb_updated - DCB Updated Command
3826  * @hw: pointer to the hw struct
3827  * @cmd_details: pointer to command details structure or NULL
3828  *
3829  * EMP will return when the shared RPB settings have been
3830  * recomputed and modified. The retval field in the descriptor
3831  * will be set to 0 when RPB is modified.
3832  **/
3833 i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
3834 				struct i40e_asq_cmd_details *cmd_details)
3835 {
3836 	struct i40e_aq_desc desc;
3837 	i40e_status status;
3838 
3839 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);
3840 
3841 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3842 
3843 	return status;
3844 }
3845 
3846 /**
3847  * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler
3848  * @hw: pointer to the hw struct
3849  * @seid: seid for the physical port/switching component/vsi
3850  * @buff: Indirect buffer to hold data parameters and response
3851  * @buff_size: Indirect buffer size
3852  * @opcode: Tx scheduler AQ command opcode
3853  * @cmd_details: pointer to command details structure or NULL
3854  *
3855  * Generic command handler for Tx scheduler AQ commands
3856  **/
3857 static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
3858 				void *buff, u16 buff_size,
3859 				 enum i40e_admin_queue_opc opcode,
3860 				struct i40e_asq_cmd_details *cmd_details)
3861 {
3862 	struct i40e_aq_desc desc;
3863 	struct i40e_aqc_tx_sched_ind *cmd =
3864 		(struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
3865 	i40e_status status;
3866 	bool cmd_param_flag = false;
3867 
3868 	switch (opcode) {
3869 	case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit:
3870 	case i40e_aqc_opc_configure_vsi_tc_bw:
3871 	case i40e_aqc_opc_enable_switching_comp_ets:
3872 	case i40e_aqc_opc_modify_switching_comp_ets:
3873 	case i40e_aqc_opc_disable_switching_comp_ets:
3874 	case i40e_aqc_opc_configure_switching_comp_ets_bw_limit:
3875 	case i40e_aqc_opc_configure_switching_comp_bw_config:
3876 		cmd_param_flag = true;
3877 		break;
3878 	case i40e_aqc_opc_query_vsi_bw_config:
3879 	case i40e_aqc_opc_query_vsi_ets_sla_config:
3880 	case i40e_aqc_opc_query_switching_comp_ets_config:
3881 	case i40e_aqc_opc_query_port_ets_config:
3882 	case i40e_aqc_opc_query_switching_comp_bw_config:
3883 		cmd_param_flag = false;
3884 		break;
3885 	default:
3886 		return I40E_ERR_PARAM;
3887 	}
3888 
3889 	i40e_fill_default_direct_cmd_desc(&desc, opcode);
3890 
3891 	/* Indirect command */
3892 	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3893 	if (cmd_param_flag)
3894 		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
3895 	if (buff_size > I40E_AQ_LARGE_BUF)
3896 		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3897 
3898 	desc.datalen = cpu_to_le16(buff_size);
3899 
3900 	cmd->vsi_seid = cpu_to_le16(seid);
3901 
3902 	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3903 
3904 	return status;
3905 }
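
/*
 * The helpers below are thin wrappers around i40e_aq_tx_sched_cmd(). For
 * example, a hypothetical caller querying a VSI's BW configuration only
 * needs (assuming a valid hw pointer and VSI seid):
 *
 *	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
 *	i40e_status status;
 *
 *	status = i40e_aq_query_vsi_bw_config(hw, vsi_seid, &bw_config, NULL);
 */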
3906 
3907 /**
3908  * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit
3909  * @hw: pointer to the hw struct
3910  * @seid: VSI seid
3911  * @credit: BW limit credits (0 = disabled)
3912  * @max_credit: Max BW limit credits
3913  * @cmd_details: pointer to command details structure or NULL
3914  **/
3915 i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
3916 				u16 seid, u16 credit, u8 max_credit,
3917 				struct i40e_asq_cmd_details *cmd_details)
3918 {
3919 	struct i40e_aq_desc desc;
3920 	struct i40e_aqc_configure_vsi_bw_limit *cmd =
3921 		(struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
3922 	i40e_status status;
3923 
3924 	i40e_fill_default_direct_cmd_desc(&desc,
3925 					  i40e_aqc_opc_configure_vsi_bw_limit);
3926 
3927 	cmd->vsi_seid = cpu_to_le16(seid);
3928 	cmd->credit = cpu_to_le16(credit);
3929 	cmd->max_credit = max_credit;
3930 
3931 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3932 
3933 	return status;
3934 }
3935 
3936 /**
3937  * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC
3938  * @hw: pointer to the hw struct
3939  * @seid: VSI seid
3940  * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits
3941  * @cmd_details: pointer to command details structure or NULL
3942  **/
3943 i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
3944 			u16 seid,
3945 			struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
3946 			struct i40e_asq_cmd_details *cmd_details)
3947 {
3948 	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
3949 				    i40e_aqc_opc_configure_vsi_tc_bw,
3950 				    cmd_details);
3951 }
3952 
3953 /**
3954  * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port
3955  * @hw: pointer to the hw struct
3956  * @seid: seid of the switching component connected to Physical Port
3957  * @ets_data: Buffer holding ETS parameters
3958  * @opcode: Tx scheduler AQ command opcode
3959  * @cmd_details: pointer to command details structure or NULL
3960  **/
3961 i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
3962 		u16 seid,
3963 		struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
3964 		enum i40e_admin_queue_opc opcode,
3965 		struct i40e_asq_cmd_details *cmd_details)
3966 {
3967 	return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data,
3968 				    sizeof(*ets_data), opcode, cmd_details);
3969 }
3970 
3971 /**
3972  * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC
3973  * @hw: pointer to the hw struct
3974  * @seid: seid of the switching component
3975  * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits
3976  * @cmd_details: pointer to command details structure or NULL
3977  **/
3978 i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
3979 	u16 seid,
3980 	struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
3981 	struct i40e_asq_cmd_details *cmd_details)
3982 {
3983 	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
3984 			    i40e_aqc_opc_configure_switching_comp_bw_config,
3985 			    cmd_details);
3986 }
3987 
3988 /**
3989  * i40e_aq_query_vsi_bw_config - Query VSI BW configuration
3990  * @hw: pointer to the hw struct
3991  * @seid: seid of the VSI
3992  * @bw_data: Buffer to hold VSI BW configuration
3993  * @cmd_details: pointer to command details structure or NULL
3994  **/
3995 i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
3996 			u16 seid,
3997 			struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
3998 			struct i40e_asq_cmd_details *cmd_details)
3999 {
4000 	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4001 				    i40e_aqc_opc_query_vsi_bw_config,
4002 				    cmd_details);
4003 }
4004 
4005 /**
4006  * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC
4007  * @hw: pointer to the hw struct
4008  * @seid: seid of the VSI
4009  * @bw_data: Buffer to hold VSI BW configuration per TC
4010  * @cmd_details: pointer to command details structure or NULL
4011  **/
4012 i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
4013 			u16 seid,
4014 			struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
4015 			struct i40e_asq_cmd_details *cmd_details)
4016 {
4017 	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4018 				    i40e_aqc_opc_query_vsi_ets_sla_config,
4019 				    cmd_details);
4020 }
4021 
4022 /**
4023  * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC
4024  * @hw: pointer to the hw struct
4025  * @seid: seid of the switching component
4026  * @bw_data: Buffer to hold switching component's per TC BW config
4027  * @cmd_details: pointer to command details structure or NULL
4028  **/
4029 i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
4030 		u16 seid,
4031 		struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
4032 		struct i40e_asq_cmd_details *cmd_details)
4033 {
4034 	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4035 				   i40e_aqc_opc_query_switching_comp_ets_config,
4036 				   cmd_details);
4037 }
4038 
4039 /**
4040  * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration
4041  * @hw: pointer to the hw struct
4042  * @seid: seid of the VSI or switching component connected to Physical Port
4043  * @bw_data: Buffer to hold current ETS configuration for the Physical Port
4044  * @cmd_details: pointer to command details structure or NULL
4045  **/
4046 i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
4047 			u16 seid,
4048 			struct i40e_aqc_query_port_ets_config_resp *bw_data,
4049 			struct i40e_asq_cmd_details *cmd_details)
4050 {
4051 	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4052 				    i40e_aqc_opc_query_port_ets_config,
4053 				    cmd_details);
4054 }
4055 
4056 /**
4057  * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration
4058  * @hw: pointer to the hw struct
4059  * @seid: seid of the switching component
4060  * @bw_data: Buffer to hold switching component's BW configuration
4061  * @cmd_details: pointer to command details structure or NULL
4062  **/
4063 i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
4064 		u16 seid,
4065 		struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
4066 		struct i40e_asq_cmd_details *cmd_details)
4067 {
4068 	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4069 				    i40e_aqc_opc_query_switching_comp_bw_config,
4070 				    cmd_details);
4071 }
4072 
4073 /**
4074  * i40e_validate_filter_settings
4075  * @hw: pointer to the hardware structure
4076  * @settings: Filter control settings
4077  *
4078  * Check and validate the filter control settings passed.
4079  * The function checks for the valid filter/context sizes being
4080  * passed for FCoE and PE.
4081  *
4082  * Returns 0 if the values passed are valid and within
4083  * range, otherwise returns an error.
4084  **/
4085 static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
4086 				struct i40e_filter_control_settings *settings)
4087 {
4088 	u32 fcoe_cntx_size, fcoe_filt_size;
4089 	u32 pe_cntx_size, pe_filt_size;
4090 	u32 fcoe_fmax;
4091 	u32 val;
4092 
4093 	/* Validate FCoE settings passed */
4094 	switch (settings->fcoe_filt_num) {
4095 	case I40E_HASH_FILTER_SIZE_1K:
4096 	case I40E_HASH_FILTER_SIZE_2K:
4097 	case I40E_HASH_FILTER_SIZE_4K:
4098 	case I40E_HASH_FILTER_SIZE_8K:
4099 	case I40E_HASH_FILTER_SIZE_16K:
4100 	case I40E_HASH_FILTER_SIZE_32K:
4101 		fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
4102 		fcoe_filt_size <<= (u32)settings->fcoe_filt_num;
4103 		break;
4104 	default:
4105 		return I40E_ERR_PARAM;
4106 	}
4107 
4108 	switch (settings->fcoe_cntx_num) {
4109 	case I40E_DMA_CNTX_SIZE_512:
4110 	case I40E_DMA_CNTX_SIZE_1K:
4111 	case I40E_DMA_CNTX_SIZE_2K:
4112 	case I40E_DMA_CNTX_SIZE_4K:
4113 		fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
4114 		fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num;
4115 		break;
4116 	default:
4117 		return I40E_ERR_PARAM;
4118 	}
4119 
4120 	/* Validate PE settings passed */
4121 	switch (settings->pe_filt_num) {
4122 	case I40E_HASH_FILTER_SIZE_1K:
4123 	case I40E_HASH_FILTER_SIZE_2K:
4124 	case I40E_HASH_FILTER_SIZE_4K:
4125 	case I40E_HASH_FILTER_SIZE_8K:
4126 	case I40E_HASH_FILTER_SIZE_16K:
4127 	case I40E_HASH_FILTER_SIZE_32K:
4128 	case I40E_HASH_FILTER_SIZE_64K:
4129 	case I40E_HASH_FILTER_SIZE_128K:
4130 	case I40E_HASH_FILTER_SIZE_256K:
4131 	case I40E_HASH_FILTER_SIZE_512K:
4132 	case I40E_HASH_FILTER_SIZE_1M:
4133 		pe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
4134 		pe_filt_size <<= (u32)settings->pe_filt_num;
4135 		break;
4136 	default:
4137 		return I40E_ERR_PARAM;
4138 	}
4139 
4140 	switch (settings->pe_cntx_num) {
4141 	case I40E_DMA_CNTX_SIZE_512:
4142 	case I40E_DMA_CNTX_SIZE_1K:
4143 	case I40E_DMA_CNTX_SIZE_2K:
4144 	case I40E_DMA_CNTX_SIZE_4K:
4145 	case I40E_DMA_CNTX_SIZE_8K:
4146 	case I40E_DMA_CNTX_SIZE_16K:
4147 	case I40E_DMA_CNTX_SIZE_32K:
4148 	case I40E_DMA_CNTX_SIZE_64K:
4149 	case I40E_DMA_CNTX_SIZE_128K:
4150 	case I40E_DMA_CNTX_SIZE_256K:
4151 		pe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
4152 		pe_cntx_size <<= (u32)settings->pe_cntx_num;
4153 		break;
4154 	default:
4155 		return I40E_ERR_PARAM;
4156 	}
4157 
4158 	/* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */
4159 	val = rd32(hw, I40E_GLHMC_FCOEFMAX);
4160 	fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK)
4161 		     >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT;
4162 	if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax)
4163 		return I40E_ERR_INVALID_SIZE;
4164 
4165 	return 0;
4166 }
4167 
4168 /**
4169  * i40e_set_filter_control
4170  * @hw: pointer to the hardware structure
4171  * @settings: Filter control settings
4172  *
4173  * Set the Queue Filters for PE/FCoE and enable filters required
4174  * for a single PF. It is expected that these settings are programmed
4175  * at driver initialization time.
4176  **/
4177 i40e_status i40e_set_filter_control(struct i40e_hw *hw,
4178 				struct i40e_filter_control_settings *settings)
4179 {
4180 	i40e_status ret = 0;
4181 	u32 hash_lut_size = 0;
4182 	u32 val;
4183 
4184 	if (!settings)
4185 		return I40E_ERR_PARAM;
4186 
4187 	/* Validate the input settings */
4188 	ret = i40e_validate_filter_settings(hw, settings);
4189 	if (ret)
4190 		return ret;
4191 
4192 	/* Read the PF Queue Filter control register */
4193 	val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
4194 
4195 	/* Program required PE hash buckets for the PF */
4196 	val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK;
4197 	val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) &
4198 		I40E_PFQF_CTL_0_PEHSIZE_MASK;
4199 	/* Program required PE contexts for the PF */
4200 	val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK;
4201 	val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) &
4202 		I40E_PFQF_CTL_0_PEDSIZE_MASK;
4203 
4204 	/* Program required FCoE hash buckets for the PF */
4205 	val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
4206 	val |= ((u32)settings->fcoe_filt_num <<
4207 			I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) &
4208 		I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
4209 	/* Program required FCoE DDP contexts for the PF */
4210 	val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
4211 	val |= ((u32)settings->fcoe_cntx_num <<
4212 			I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) &
4213 		I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
4214 
4215 	/* Program Hash LUT size for the PF */
4216 	val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
4217 	if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512)
4218 		hash_lut_size = 1;
4219 	val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) &
4220 		I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
4221 
4222 	/* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */
4223 	if (settings->enable_fdir)
4224 		val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
4225 	if (settings->enable_ethtype)
4226 		val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK;
4227 	if (settings->enable_macvlan)
4228 		val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK;
4229 
4230 	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);
4231 
4232 	return 0;
4233 }
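
/*
 * Usage sketch (hypothetical caller at driver init time): program modest hash
 * filter/context sizes and enable the Flow Director, ethertype and MACVLAN
 * filters. The enum values shown are ones accepted by
 * i40e_validate_filter_settings() above; whether they suit a given design is
 * up to the caller.
 *
 *	struct i40e_filter_control_settings settings = {0};
 *	i40e_status ret;
 *
 *	settings.fcoe_filt_num = I40E_HASH_FILTER_SIZE_1K;
 *	settings.fcoe_cntx_num = I40E_DMA_CNTX_SIZE_512;
 *	settings.pe_filt_num = I40E_HASH_FILTER_SIZE_1K;
 *	settings.pe_cntx_num = I40E_DMA_CNTX_SIZE_512;
 *	settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
 *	settings.enable_fdir = true;
 *	settings.enable_ethtype = true;
 *	settings.enable_macvlan = true;
 *	ret = i40e_set_filter_control(hw, &settings);
 */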
4234 
4235 /**
4236  * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter
4237  * @hw: pointer to the hw struct
4238  * @mac_addr: MAC address to use in the filter
4239  * @ethtype: Ethertype to use in the filter
4240  * @flags: Flags that need to be applied to the filter
4241  * @vsi_seid: seid of the control VSI
4242  * @queue: VSI queue number to send the packet to
4243  * @is_add: Add the control packet filter if True, else remove it
4244  * @stats: Structure to hold information on control filter counts
4245  * @cmd_details: pointer to command details structure or NULL
4246  *
4247  * This command adds or removes a control packet filter for a control VSI.
4248  * On completion it updates the total perfect filter counts in the
4249  * stats member.
4250  **/
4251 i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
4252 				u8 *mac_addr, u16 ethtype, u16 flags,
4253 				u16 vsi_seid, u16 queue, bool is_add,
4254 				struct i40e_control_filter_stats *stats,
4255 				struct i40e_asq_cmd_details *cmd_details)
4256 {
4257 	struct i40e_aq_desc desc;
4258 	struct i40e_aqc_add_remove_control_packet_filter *cmd =
4259 		(struct i40e_aqc_add_remove_control_packet_filter *)
4260 		&desc.params.raw;
4261 	struct i40e_aqc_add_remove_control_packet_filter_completion *resp =
4262 		(struct i40e_aqc_add_remove_control_packet_filter_completion *)
4263 		&desc.params.raw;
4264 	i40e_status status;
4265 
4266 	if (vsi_seid == 0)
4267 		return I40E_ERR_PARAM;
4268 
4269 	if (is_add) {
4270 		i40e_fill_default_direct_cmd_desc(&desc,
4271 				i40e_aqc_opc_add_control_packet_filter);
4272 		cmd->queue = cpu_to_le16(queue);
4273 	} else {
4274 		i40e_fill_default_direct_cmd_desc(&desc,
4275 				i40e_aqc_opc_remove_control_packet_filter);
4276 	}
4277 
4278 	if (mac_addr)
4279 		ether_addr_copy(cmd->mac, mac_addr);
4280 
4281 	cmd->etype = cpu_to_le16(ethtype);
4282 	cmd->flags = cpu_to_le16(flags);
4283 	cmd->seid = cpu_to_le16(vsi_seid);
4284 
4285 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4286 
4287 	if (!status && stats) {
4288 		stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used);
4289 		stats->etype_used = le16_to_cpu(resp->etype_used);
4290 		stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free);
4291 		stats->etype_free = le16_to_cpu(resp->etype_free);
4292 	}
4293 
4294 	return status;
4295 }
4296 
4297 /**
4298  * i40e_add_filter_to_drop_tx_flow_control_frames - filter to drop flow control
4299  * @hw: pointer to the hw struct
4300  * @seid: VSI seid to add the ethertype filter to
4301  **/
4302 void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
4303 						    u16 seid)
4304 {
4305 #define I40E_FLOW_CONTROL_ETHTYPE 0x8808
4306 	u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
4307 		   I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
4308 		   I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
4309 	u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE;
4310 	i40e_status status;
4311 
4312 	status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag,
4313 						       seid, 0, true, NULL,
4314 						       NULL);
4315 	if (status)
4316 		hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n");
4317 }
4318 
4319 /**
4320  * i40e_aq_alternate_read
4321  * @hw: pointer to the hardware structure
4322  * @reg_addr0: address of first dword to be read
4323  * @reg_val0: pointer for data read from 'reg_addr0'
4324  * @reg_addr1: address of second dword to be read
4325  * @reg_val1: pointer for data read from 'reg_addr1'
4326  *
4327  * Read one or two dwords from alternate structure. Fields are indicated
4328  * by 'reg_addr0' and 'reg_addr1' register numbers. If the 'reg_val1'
4329  * pointer is not passed, only the register at 'reg_addr0' is read.
4330  *
4331  **/
4332 static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,
4333 					  u32 reg_addr0, u32 *reg_val0,
4334 					  u32 reg_addr1, u32 *reg_val1)
4335 {
4336 	struct i40e_aq_desc desc;
4337 	struct i40e_aqc_alternate_write *cmd_resp =
4338 		(struct i40e_aqc_alternate_write *)&desc.params.raw;
4339 	i40e_status status;
4340 
4341 	if (!reg_val0)
4342 		return I40E_ERR_PARAM;
4343 
4344 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read);
4345 	cmd_resp->address0 = cpu_to_le32(reg_addr0);
4346 	cmd_resp->address1 = cpu_to_le32(reg_addr1);
4347 
4348 	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
4349 
4350 	if (!status) {
4351 		*reg_val0 = le32_to_cpu(cmd_resp->data0);
4352 
4353 		if (reg_val1)
4354 			*reg_val1 = le32_to_cpu(cmd_resp->data1);
4355 	}
4356 
4357 	return status;
4358 }
4359 
4360 /**
4361  * i40e_aq_resume_port_tx
4362  * @hw: pointer to the hardware structure
4363  * @cmd_details: pointer to command details structure or NULL
4364  *
4365  * Resume port's Tx traffic
4366  **/
4367 i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw,
4368 				   struct i40e_asq_cmd_details *cmd_details)
4369 {
4370 	struct i40e_aq_desc desc;
4371 	i40e_status status;
4372 
4373 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx);
4374 
4375 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4376 
4377 	return status;
4378 }
4379 
4380 /**
4381  * i40e_set_pci_config_data - store PCI bus info
4382  * @hw: pointer to hardware structure
4383  * @link_status: the link status word from PCI config space
4384  *
4385  * Stores the PCI bus info (speed, width, type) within the i40e_hw structure
4386  **/
4387 void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
4388 {
4389 	hw->bus.type = i40e_bus_type_pci_express;
4390 
4391 	switch (link_status & PCI_EXP_LNKSTA_NLW) {
4392 	case PCI_EXP_LNKSTA_NLW_X1:
4393 		hw->bus.width = i40e_bus_width_pcie_x1;
4394 		break;
4395 	case PCI_EXP_LNKSTA_NLW_X2:
4396 		hw->bus.width = i40e_bus_width_pcie_x2;
4397 		break;
4398 	case PCI_EXP_LNKSTA_NLW_X4:
4399 		hw->bus.width = i40e_bus_width_pcie_x4;
4400 		break;
4401 	case PCI_EXP_LNKSTA_NLW_X8:
4402 		hw->bus.width = i40e_bus_width_pcie_x8;
4403 		break;
4404 	default:
4405 		hw->bus.width = i40e_bus_width_unknown;
4406 		break;
4407 	}
4408 
4409 	switch (link_status & PCI_EXP_LNKSTA_CLS) {
4410 	case PCI_EXP_LNKSTA_CLS_2_5GB:
4411 		hw->bus.speed = i40e_bus_speed_2500;
4412 		break;
4413 	case PCI_EXP_LNKSTA_CLS_5_0GB:
4414 		hw->bus.speed = i40e_bus_speed_5000;
4415 		break;
4416 	case PCI_EXP_LNKSTA_CLS_8_0GB:
4417 		hw->bus.speed = i40e_bus_speed_8000;
4418 		break;
4419 	default:
4420 		hw->bus.speed = i40e_bus_speed_unknown;
4421 		break;
4422 	}
4423 }
4424 
4425 /**
4426  * i40e_aq_debug_dump
4427  * @hw: pointer to the hardware structure
4428  * @cluster_id: specific cluster to dump
4429  * @table_id: table id within cluster
4430  * @start_index: index of line in the block to read
4431  * @buff_size: dump buffer size
4432  * @buff: dump buffer
4433  * @ret_buff_size: actual buffer size returned
4434  * @ret_next_table: next block to read
4435  * @ret_next_index: next index to read
4436  * @cmd_details: pointer to command details structure or NULL
4437  *
4438  * Dump internal FW/HW data for debug purposes.
4439  *
4440  **/
4441 i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
4442 			       u8 table_id, u32 start_index, u16 buff_size,
4443 			       void *buff, u16 *ret_buff_size,
4444 			       u8 *ret_next_table, u32 *ret_next_index,
4445 			       struct i40e_asq_cmd_details *cmd_details)
4446 {
4447 	struct i40e_aq_desc desc;
4448 	struct i40e_aqc_debug_dump_internals *cmd =
4449 		(struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
4450 	struct i40e_aqc_debug_dump_internals *resp =
4451 		(struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
4452 	i40e_status status;
4453 
4454 	if (buff_size == 0 || !buff)
4455 		return I40E_ERR_PARAM;
4456 
4457 	i40e_fill_default_direct_cmd_desc(&desc,
4458 					  i40e_aqc_opc_debug_dump_internals);
4459 	/* Indirect Command */
4460 	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
4461 	if (buff_size > I40E_AQ_LARGE_BUF)
4462 		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
4463 
4464 	cmd->cluster_id = cluster_id;
4465 	cmd->table_id = table_id;
4466 	cmd->idx = cpu_to_le32(start_index);
4467 
4468 	desc.datalen = cpu_to_le16(buff_size);
4469 
4470 	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
4471 	if (!status) {
4472 		if (ret_buff_size)
4473 			*ret_buff_size = le16_to_cpu(desc.datalen);
4474 		if (ret_next_table)
4475 			*ret_next_table = resp->table_id;
4476 		if (ret_next_index)
4477 			*ret_next_index = le32_to_cpu(resp->idx);
4478 	}
4479 
4480 	return status;
4481 }
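
/*
 * Usage sketch (hypothetical debug caller): dump one table and use the
 * returned "next" cookies to walk to the following block. cluster_id,
 * table_id, buf and buf_len are firmware/caller specific and left symbolic
 * here.
 *
 *	u8 next_table;
 *	u32 next_index;
 *	u16 rlen;
 *	i40e_status status;
 *
 *	status = i40e_aq_debug_dump(hw, cluster_id, table_id, 0, buf_len,
 *				    buf, &rlen, &next_table, &next_index,
 *				    NULL);
 */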
4482 
4483 /**
4484  * i40e_read_bw_from_alt_ram
4485  * @hw: pointer to the hardware structure
4486  * @max_bw: pointer for max_bw read
4487  * @min_bw: pointer for min_bw read
4488  * @min_valid: pointer for bool that is true if min_bw is a valid value
4489  * @max_valid: pointer for bool that is true if max_bw is a valid value
4490  *
4491  * Read BW from the alternate RAM for the given PF
4492  **/
4493 i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
4494 				      u32 *max_bw, u32 *min_bw,
4495 				      bool *min_valid, bool *max_valid)
4496 {
4497 	i40e_status status;
4498 	u32 max_bw_addr, min_bw_addr;
4499 
4500 	/* Calculate the address of the min/max bw registers */
4501 	max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
4502 		      I40E_ALT_STRUCT_MAX_BW_OFFSET +
4503 		      (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
4504 	min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
4505 		      I40E_ALT_STRUCT_MIN_BW_OFFSET +
4506 		      (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
4507 
4508 	/* Read the bandwidths from alt ram */
4509 	status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw,
4510 					min_bw_addr, min_bw);
4511 
4512 	if (*min_bw & I40E_ALT_BW_VALID_MASK)
4513 		*min_valid = true;
4514 	else
4515 		*min_valid = false;
4516 
4517 	if (*max_bw & I40E_ALT_BW_VALID_MASK)
4518 		*max_valid = true;
4519 	else
4520 		*max_valid = false;
4521 
4522 	return status;
4523 }
4524 
4525 /**
4526  * i40e_aq_configure_partition_bw
4527  * @hw: pointer to the hardware structure
4528  * @bw_data: Buffer holding valid PFs and BW limits
4529  * @cmd_details: pointer to command details structure or NULL
4530  *
4531  * Configure the partitions' guaranteed/max BW
4532  **/
4533 i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
4534 			struct i40e_aqc_configure_partition_bw_data *bw_data,
4535 			struct i40e_asq_cmd_details *cmd_details)
4536 {
4537 	i40e_status status;
4538 	struct i40e_aq_desc desc;
4539 	u16 bwd_size = sizeof(*bw_data);
4540 
4541 	i40e_fill_default_direct_cmd_desc(&desc,
4542 					  i40e_aqc_opc_configure_partition_bw);
4543 
4544 	/* Indirect command */
4545 	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
4546 	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
4547 
4548 	if (bwd_size > I40E_AQ_LARGE_BUF)
4549 		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
4550 
4551 	desc.datalen = cpu_to_le16(bwd_size);
4552 
4553 	status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size,
4554 				       cmd_details);
4555 
4556 	return status;
4557 }
4558 
4559 /**
4560  * i40e_read_phy_register_clause22
4561  * @hw: pointer to the HW structure
4562  * @reg: register address in the page
4563  * @phy_addr: PHY address on MDIO interface
4564  * @value: PHY register value
4565  *
4566  * Reads specified PHY register value
4567  **/
4568 i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
4569 					    u16 reg, u8 phy_addr, u16 *value)
4570 {
4571 	i40e_status status = I40E_ERR_TIMEOUT;
4572 	u8 port_num = (u8)hw->func_caps.mdio_port_num;
4573 	u32 command = 0;
4574 	u16 retry = 1000;
4575 
4576 	command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4577 		  (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4578 		  (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) |
4579 		  (I40E_MDIO_CLAUSE22_STCODE_MASK) |
4580 		  (I40E_GLGEN_MSCA_MDICMD_MASK);
4581 	wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4582 	do {
4583 		command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4584 		if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4585 			status = 0;
4586 			break;
4587 		}
4588 		udelay(10);
4589 		retry--;
4590 	} while (retry);
4591 
4592 	if (status) {
4593 		i40e_debug(hw, I40E_DEBUG_PHY,
4594 			   "PHY: Can't write command to external PHY.\n");
4595 	} else {
4596 		command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
4597 		*value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
4598 			 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
4599 	}
4600 
4601 	return status;
4602 }
4603 
4604 /**
4605  * i40e_write_phy_register_clause22
4606  * @hw: pointer to the HW structure
4607  * @reg: register address in the page
4608  * @phy_addr: PHY address on MDIO interface
4609  * @value: PHY register value
4610  *
4611  * Writes specified PHY register value
4612  **/
4613 i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
4614 					     u16 reg, u8 phy_addr, u16 value)
4615 {
4616 	i40e_status status = I40E_ERR_TIMEOUT;
4617 	u8 port_num = (u8)hw->func_caps.mdio_port_num;
4618 	u32 command  = 0;
4619 	u16 retry = 1000;
4620 
4621 	command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
4622 	wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
4623 
4624 	command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4625 		  (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4626 		  (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) |
4627 		  (I40E_MDIO_CLAUSE22_STCODE_MASK) |
4628 		  (I40E_GLGEN_MSCA_MDICMD_MASK);
4629 
4630 	wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4631 	do {
4632 		command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4633 		if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4634 			status = 0;
4635 			break;
4636 		}
4637 		udelay(10);
4638 		retry--;
4639 	} while (retry);
4640 
4641 	return status;
4642 }
4643 
4644 /**
4645  * i40e_read_phy_register_clause45
4646  * @hw: pointer to the HW structure
4647  * @page: registers page number
4648  * @reg: register address in the page
4649  * @phy_addr: PHY address on MDIO interface
4650  * @value: PHY register value
4651  *
4652  * Reads specified PHY register value
4653  **/
4654 i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw,
4655 				u8 page, u16 reg, u8 phy_addr, u16 *value)
4656 {
4657 	i40e_status status = I40E_ERR_TIMEOUT;
4658 	u32 command = 0;
4659 	u16 retry = 1000;
4660 	u8 port_num = hw->func_caps.mdio_port_num;
4661 
4662 	command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
4663 		  (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4664 		  (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4665 		  (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
4666 		  (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4667 		  (I40E_GLGEN_MSCA_MDICMD_MASK) |
4668 		  (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4669 	wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4670 	do {
4671 		command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4672 		if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4673 			status = 0;
4674 			break;
4675 		}
4676 		usleep_range(10, 20);
4677 		retry--;
4678 	} while (retry);
4679 
4680 	if (status) {
4681 		i40e_debug(hw, I40E_DEBUG_PHY,
4682 			   "PHY: Can't write command to external PHY.\n");
4683 		goto phy_read_end;
4684 	}
4685 
4686 	command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4687 		  (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4688 		  (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) |
4689 		  (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4690 		  (I40E_GLGEN_MSCA_MDICMD_MASK) |
4691 		  (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4692 	status = I40E_ERR_TIMEOUT;
4693 	retry = 1000;
4694 	wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4695 	do {
4696 		command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4697 		if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4698 			status = 0;
4699 			break;
4700 		}
4701 		usleep_range(10, 20);
4702 		retry--;
4703 	} while (retry);
4704 
4705 	if (!status) {
4706 		command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
4707 		*value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
4708 			 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
4709 	} else {
4710 		i40e_debug(hw, I40E_DEBUG_PHY,
4711 			   "PHY: Can't read register value from external PHY.\n");
4712 	}
4713 
4714 phy_read_end:
4715 	return status;
4716 }
4717 
4718 /**
4719  * i40e_write_phy_register_clause45
4720  * @hw: pointer to the HW structure
4721  * @page: registers page number
4722  * @reg: register address in the page
4723  * @phy_addr: PHY address on MDIO interface
4724  * @value: PHY register value
4725  *
4726  * Writes value to specified PHY register
4727  **/
4728 i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw,
4729 				u8 page, u16 reg, u8 phy_addr, u16 value)
4730 {
4731 	i40e_status status = I40E_ERR_TIMEOUT;
4732 	u32 command = 0;
4733 	u16 retry = 1000;
4734 	u8 port_num = hw->func_caps.mdio_port_num;
4735 
4736 	command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
4737 		  (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4738 		  (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4739 		  (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
4740 		  (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4741 		  (I40E_GLGEN_MSCA_MDICMD_MASK) |
4742 		  (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4743 	wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4744 	do {
4745 		command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4746 		if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4747 			status = 0;
4748 			break;
4749 		}
4750 		usleep_range(10, 20);
4751 		retry--;
4752 	} while (retry);
4753 	if (status) {
4754 		i40e_debug(hw, I40E_DEBUG_PHY,
4755 			   "PHY: Can't write command to external PHY.\n");
4756 		goto phy_write_end;
4757 	}
4758 
4759 	command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
4760 	wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
4761 
4762 	command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4763 		  (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4764 		  (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) |
4765 		  (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4766 		  (I40E_GLGEN_MSCA_MDICMD_MASK) |
4767 		  (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4768 	status = I40E_ERR_TIMEOUT;
4769 	retry = 1000;
4770 	wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4771 	do {
4772 		command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4773 		if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4774 			status = 0;
4775 			break;
4776 		}
4777 		usleep_range(10, 20);
4778 		retry--;
4779 	} while (retry);
4780 
4781 phy_write_end:
4782 	return status;
4783 }
4784 
4785 /**
4786  * i40e_write_phy_register
4787  * @hw: pointer to the HW structure
4788  * @page: registers page number
4789  * @reg: register address in the page
4790  * @phy_addr: PHY address on MDIO interface
4791  * @value: PHY register value
4792  *
4793  * Writes value to specified PHY register
4794  **/
4795 i40e_status i40e_write_phy_register(struct i40e_hw *hw,
4796 				    u8 page, u16 reg, u8 phy_addr, u16 value)
4797 {
4798 	i40e_status status;
4799 
4800 	switch (hw->device_id) {
4801 	case I40E_DEV_ID_1G_BASE_T_X722:
4802 		status = i40e_write_phy_register_clause22(hw, reg, phy_addr,
4803 							  value);
4804 		break;
4805 	case I40E_DEV_ID_10G_BASE_T:
4806 	case I40E_DEV_ID_10G_BASE_T4:
4807 	case I40E_DEV_ID_10G_BASE_T_X722:
4808 	case I40E_DEV_ID_25G_B:
4809 	case I40E_DEV_ID_25G_SFP28:
4810 		status = i40e_write_phy_register_clause45(hw, page, reg,
4811 							  phy_addr, value);
4812 		break;
4813 	default:
4814 		status = I40E_ERR_UNKNOWN_PHY;
4815 		break;
4816 	}
4817 
4818 	return status;
4819 }
4820 
4821 /**
4822  * i40e_read_phy_register
4823  * @hw: pointer to the HW structure
4824  * @page: registers page number
4825  * @reg: register address in the page
4826  * @phy_addr: PHY address on MDIO interface
4827  * @value: PHY register value
4828  *
4829  * Reads specified PHY register value
4830  **/
4831 i40e_status i40e_read_phy_register(struct i40e_hw *hw,
4832 				   u8 page, u16 reg, u8 phy_addr, u16 *value)
4833 {
4834 	i40e_status status;
4835 
4836 	switch (hw->device_id) {
4837 	case I40E_DEV_ID_1G_BASE_T_X722:
4838 		status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
4839 							 value);
4840 		break;
4841 	case I40E_DEV_ID_10G_BASE_T:
4842 	case I40E_DEV_ID_10G_BASE_T4:
4843 	case I40E_DEV_ID_10G_BASE_T_X722:
4844 	case I40E_DEV_ID_25G_B:
4845 	case I40E_DEV_ID_25G_SFP28:
4846 		status = i40e_read_phy_register_clause45(hw, page, reg,
4847 							 phy_addr, value);
4848 		break;
4849 	default:
4850 		status = I40E_ERR_UNKNOWN_PHY;
4851 		break;
4852 	}
4853 
4854 	return status;
4855 }
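
/*
 * Usage sketch (hypothetical caller): read a register from the external PHY
 * behind MDIO device number dev_num on the current port. page, reg and
 * dev_num are PHY specific and left symbolic here.
 *
 *	u8 phy_addr = i40e_get_phy_address(hw, dev_num);
 *	u16 reg_val;
 *	i40e_status status;
 *
 *	status = i40e_read_phy_register(hw, page, reg, phy_addr, &reg_val);
 */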
4856 
4857 /**
4858  * i40e_get_phy_address
4859  * @hw: pointer to the HW structure
4860  * @dev_num: PHY port num whose address we want
4861  *
4862  * Gets PHY address for current port
4863  **/
4864 u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
4865 {
4866 	u8 port_num = hw->func_caps.mdio_port_num;
4867 	u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num));
4868 
4869 	return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f;
4870 }
4871 
4872 /**
4873  * i40e_blink_phy_link_led
4874  * @hw: pointer to the HW structure
4875  * @time: how long the LED will blink, in seconds
4876  * @interval: gap between LED on and off, in milliseconds
4877  *
4878  * Blinks PHY link LED
4879  **/
4880 i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
4881 				    u32 time, u32 interval)
4882 {
4883 	i40e_status status = 0;
4884 	u32 i;
4885 	u16 led_ctl;
4886 	u16 gpio_led_port;
4887 	u16 led_reg;
4888 	u16 led_addr = I40E_PHY_LED_PROV_REG_1;
4889 	u8 phy_addr = 0;
4890 	u8 port_num;
4891 
4892 	i = rd32(hw, I40E_PFGEN_PORTNUM);
4893 	port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
4894 	phy_addr = i40e_get_phy_address(hw, port_num);
4895 
4896 	for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
4897 	     led_addr++) {
4898 		status = i40e_read_phy_register_clause45(hw,
4899 							 I40E_PHY_COM_REG_PAGE,
4900 							 led_addr, phy_addr,
4901 							 &led_reg);
4902 		if (status)
4903 			goto phy_blinking_end;
4904 		led_ctl = led_reg;
4905 		if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
4906 			led_reg = 0;
4907 			status = i40e_write_phy_register_clause45(hw,
4908 							 I40E_PHY_COM_REG_PAGE,
4909 							 led_addr, phy_addr,
4910 							 led_reg);
4911 			if (status)
4912 				goto phy_blinking_end;
4913 			break;
4914 		}
4915 	}
4916 
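	/* Manually toggle the LED every 'interval' msecs for 'time' secs. */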
4917 	if (time > 0 && interval > 0) {
4918 		for (i = 0; i < time * 1000; i += interval) {
4919 			status = i40e_read_phy_register_clause45(hw,
4920 						I40E_PHY_COM_REG_PAGE,
4921 						led_addr, phy_addr, &led_reg);
4922 			if (status)
4923 				goto restore_config;
4924 			if (led_reg & I40E_PHY_LED_MANUAL_ON)
4925 				led_reg = 0;
4926 			else
4927 				led_reg = I40E_PHY_LED_MANUAL_ON;
4928 			status = i40e_write_phy_register_clause45(hw,
4929 						I40E_PHY_COM_REG_PAGE,
4930 						led_addr, phy_addr, led_reg);
4931 			if (status)
4932 				goto restore_config;
4933 			msleep(interval);
4934 		}
4935 	}
4936 
4937 restore_config:
4938 	status = i40e_write_phy_register_clause45(hw,
4939 						  I40E_PHY_COM_REG_PAGE,
4940 						  led_addr, phy_addr, led_ctl);
4941 
4942 phy_blinking_end:
4943 	return status;
4944 }
4945 
4946 /**
4947  * i40e_led_get_reg - read LED register
4948  * @hw: pointer to the HW structure
4949  * @led_addr: LED register address
4950  * @reg_val: read register value
4951  **/
4952 static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
4953 					      u32 *reg_val)
4954 {
4955 	enum i40e_status_code status;
4956 	u8 phy_addr = 0;
4957 	u8 port_num;
4958 	u32 i;
4959 
4960 	*reg_val = 0;
4961 	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
4962 		status =
4963 		       i40e_aq_get_phy_register(hw,
4964 						I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
4965 						I40E_PHY_COM_REG_PAGE,
4966 						I40E_PHY_LED_PROV_REG_1,
4967 						reg_val, NULL);
4968 	} else {
4969 		i = rd32(hw, I40E_PFGEN_PORTNUM);
4970 		port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
4971 		phy_addr = i40e_get_phy_address(hw, port_num);
4972 		status = i40e_read_phy_register_clause45(hw,
4973 							 I40E_PHY_COM_REG_PAGE,
4974 							 led_addr, phy_addr,
4975 							 (u16 *)reg_val);
4976 	}
4977 	return status;
4978 }
4979 
4980 /**
4981  * i40e_led_set_reg - write LED register
4982  * @hw: pointer to the HW structure
4983  * @led_addr: LED register address
4984  * @reg_val: register value to write
4985  **/
4986 static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
4987 					      u32 reg_val)
4988 {
4989 	enum i40e_status_code status;
4990 	u8 phy_addr = 0;
4991 	u8 port_num;
4992 	u32 i;
4993 
4994 	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
4995 		status =
4996 		       i40e_aq_set_phy_register(hw,
4997 						I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
4998 						I40E_PHY_COM_REG_PAGE,
4999 						I40E_PHY_LED_PROV_REG_1,
5000 						reg_val, NULL);
5001 	} else {
5002 		i = rd32(hw, I40E_PFGEN_PORTNUM);
5003 		port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5004 		phy_addr = i40e_get_phy_address(hw, port_num);
5005 		status = i40e_write_phy_register_clause45(hw,
5006 							  I40E_PHY_COM_REG_PAGE,
5007 							  led_addr, phy_addr,
5008 							  (u16)reg_val);
5009 	}
5010 
5011 	return status;
5012 }
5013 
5014 /**
5015  * i40e_led_get_phy - return current on/off mode
5016  * @hw: pointer to the hw struct
 * @led_addr: address of the LED register in use, returned to the caller
 * @val: original value of the LED register, returned to the caller
5019  *
5020  **/
5021 i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
5022 			     u16 *val)
5023 {
5024 	i40e_status status = 0;
5025 	u16 gpio_led_port;
5026 	u8 phy_addr = 0;
5027 	u16 reg_val;
5028 	u16 temp_addr;
5029 	u8 port_num;
5030 	u32 i;
5031 	u32 reg_val_aq;
5032 
5033 	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
5034 		status =
5035 		      i40e_aq_get_phy_register(hw,
5036 					       I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
5037 					       I40E_PHY_COM_REG_PAGE,
5038 					       I40E_PHY_LED_PROV_REG_1,
5039 					       &reg_val_aq, NULL);
5040 		if (status == I40E_SUCCESS)
5041 			*val = (u16)reg_val_aq;
5042 		return status;
5043 	}
5044 	temp_addr = I40E_PHY_LED_PROV_REG_1;
5045 	i = rd32(hw, I40E_PFGEN_PORTNUM);
5046 	port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5047 	phy_addr = i40e_get_phy_address(hw, port_num);
5048 
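	/* Scan the three LED provisioning registers for the one currently
	 * driven by link activity and report its address and value.
	 */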
5049 	for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
5050 	     temp_addr++) {
5051 		status = i40e_read_phy_register_clause45(hw,
5052 							 I40E_PHY_COM_REG_PAGE,
5053 							 temp_addr, phy_addr,
5054 							 &reg_val);
5055 		if (status)
5056 			return status;
5057 		*val = reg_val;
5058 		if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) {
5059 			*led_addr = temp_addr;
5060 			break;
5061 		}
5062 	}
5063 	return status;
5064 }
5065 
5066 /**
5067  * i40e_led_set_phy
5068  * @hw: pointer to the HW structure
 * @on: true to turn the LED on, false to turn it off
 * @led_addr: address of led register to use
 * @mode: original value, plus I40E_PHY_LED_MODE_ORIG if it should be restored
 *
 * Set the LEDs on or off when controlled by the PHY
5074  *
5075  **/
5076 i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on,
5077 			     u16 led_addr, u32 mode)
5078 {
5079 	i40e_status status = 0;
5080 	u32 led_ctl = 0;
5081 	u32 led_reg = 0;
5082 
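	/* Save the current LED configuration and take the LED out of
	 * link-driven mode before forcing its state manually.
	 */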
5083 	status = i40e_led_get_reg(hw, led_addr, &led_reg);
5084 	if (status)
5085 		return status;
5086 	led_ctl = led_reg;
5087 	if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
5088 		led_reg = 0;
5089 		status = i40e_led_set_reg(hw, led_addr, led_reg);
5090 		if (status)
5091 			return status;
5092 	}
5093 	status = i40e_led_get_reg(hw, led_addr, &led_reg);
5094 	if (status)
5095 		goto restore_config;
5096 	if (on)
5097 		led_reg = I40E_PHY_LED_MANUAL_ON;
5098 	else
5099 		led_reg = 0;
5100 
5101 	status = i40e_led_set_reg(hw, led_addr, led_reg);
5102 	if (status)
5103 		goto restore_config;
5104 	if (mode & I40E_PHY_LED_MODE_ORIG) {
5105 		led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
5106 		status = i40e_led_set_reg(hw, led_addr, led_ctl);
5107 	}
5108 	return status;
5109 
5110 restore_config:
5111 	status = i40e_led_set_reg(hw, led_addr, led_ctl);
5112 	return status;
5113 }
5114 
5115 /**
5116  * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register
5117  * @hw: pointer to the hw struct
5118  * @reg_addr: register address
5119  * @reg_val: ptr to register value
5120  * @cmd_details: pointer to command details structure or NULL
5121  *
 * Use the firmware to read the Rx control register; this is
 * especially useful if the Rx unit is under heavy pressure
5124  **/
5125 i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
5126 				u32 reg_addr, u32 *reg_val,
5127 				struct i40e_asq_cmd_details *cmd_details)
5128 {
5129 	struct i40e_aq_desc desc;
5130 	struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
5131 		(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
5132 	i40e_status status;
5133 
5134 	if (!reg_val)
5135 		return I40E_ERR_PARAM;
5136 
5137 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read);
5138 
5139 	cmd_resp->address = cpu_to_le32(reg_addr);
5140 
5141 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5142 
5143 	if (status == 0)
5144 		*reg_val = le32_to_cpu(cmd_resp->value);
5145 
5146 	return status;
5147 }
5148 
5149 /**
5150  * i40e_read_rx_ctl - read from an Rx control register
5151  * @hw: pointer to the hw struct
5152  * @reg_addr: register address
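 *
 * Reads through the firmware when the AQ path is available; on failure or
 * on older firmware it falls back to a direct MMIO read. A hypothetical
 * caller (register offset assumed valid for this device) might do:
 *	val = i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));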
5153  **/
5154 u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
5155 {
5156 	i40e_status status = 0;
5157 	bool use_register;
5158 	int retry = 5;
5159 	u32 val = 0;
5160 
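	/* The AQ read is only attempted on FW API 1.5+ and non-X722 MACs;
	 * older firmware and the X722 use a direct register read instead.
	 */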
5161 	use_register = (((hw->aq.api_maj_ver == 1) &&
5162 			(hw->aq.api_min_ver < 5)) ||
5163 			(hw->mac.type == I40E_MAC_X722));
5164 	if (!use_register) {
5165 do_retry:
5166 		status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
5167 		if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
5168 			usleep_range(1000, 2000);
5169 			retry--;
5170 			goto do_retry;
5171 		}
5172 	}
5173 
5174 	/* if the AQ access failed, try the old-fashioned way */
5175 	if (status || use_register)
5176 		val = rd32(hw, reg_addr);
5177 
5178 	return val;
5179 }
5180 
5181 /**
5182  * i40e_aq_rx_ctl_write_register
5183  * @hw: pointer to the hw struct
5184  * @reg_addr: register address
5185  * @reg_val: register value
5186  * @cmd_details: pointer to command details structure or NULL
5187  *
 * Use the firmware to write to an Rx control register; this is
 * especially useful if the Rx unit is under heavy pressure
5190  **/
5191 i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
5192 				u32 reg_addr, u32 reg_val,
5193 				struct i40e_asq_cmd_details *cmd_details)
5194 {
5195 	struct i40e_aq_desc desc;
5196 	struct i40e_aqc_rx_ctl_reg_read_write *cmd =
5197 		(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
5198 	i40e_status status;
5199 
5200 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write);
5201 
5202 	cmd->address = cpu_to_le32(reg_addr);
5203 	cmd->value = cpu_to_le32(reg_val);
5204 
5205 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5206 
5207 	return status;
5208 }
5209 
5210 /**
5211  * i40e_write_rx_ctl - write to an Rx control register
5212  * @hw: pointer to the hw struct
5213  * @reg_addr: register address
5214  * @reg_val: register value
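 *
 * Writes through the firmware when the AQ path is available, otherwise
 * falls back to a direct MMIO write.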
5215  **/
5216 void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
5217 {
5218 	i40e_status status = 0;
5219 	bool use_register;
5220 	int retry = 5;
5221 
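	/* Same firmware capability check as in i40e_read_rx_ctl(). */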
5222 	use_register = (((hw->aq.api_maj_ver == 1) &&
5223 			(hw->aq.api_min_ver < 5)) ||
5224 			(hw->mac.type == I40E_MAC_X722));
5225 	if (!use_register) {
5226 do_retry:
5227 		status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
5228 						       reg_val, NULL);
5229 		if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
5230 			usleep_range(1000, 2000);
5231 			retry--;
5232 			goto do_retry;
5233 		}
5234 	}
5235 
5236 	/* if the AQ access failed, try the old-fashioned way */
5237 	if (status || use_register)
5238 		wr32(hw, reg_addr, reg_val);
5239 }
5240 
5241 /**
5242  * i40e_aq_set_phy_register
5243  * @hw: pointer to the hw struct
5244  * @phy_select: select which phy should be accessed
5245  * @dev_addr: PHY device address
5246  * @reg_addr: PHY register address
5247  * @reg_val: new register value
5248  * @cmd_details: pointer to command details structure or NULL
5249  *
5250  * Write the external PHY register.
5251  **/
5252 i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw,
5253 				     u8 phy_select, u8 dev_addr,
5254 				     u32 reg_addr, u32 reg_val,
5255 				     struct i40e_asq_cmd_details *cmd_details)
5256 {
5257 	struct i40e_aq_desc desc;
5258 	struct i40e_aqc_phy_register_access *cmd =
5259 		(struct i40e_aqc_phy_register_access *)&desc.params.raw;
5260 	i40e_status status;
5261 
5262 	i40e_fill_default_direct_cmd_desc(&desc,
5263 					  i40e_aqc_opc_set_phy_register);
5264 
5265 	cmd->phy_interface = phy_select;
5266 	cmd->dev_address = dev_addr;
5267 	cmd->reg_address = cpu_to_le32(reg_addr);
5268 	cmd->reg_value = cpu_to_le32(reg_val);
5269 
5270 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5271 
5272 	return status;
5273 }
5274 
5275 /**
5276  * i40e_aq_get_phy_register
5277  * @hw: pointer to the hw struct
5278  * @phy_select: select which phy should be accessed
5279  * @dev_addr: PHY device address
5280  * @reg_addr: PHY register address
5281  * @reg_val: read register value
5282  * @cmd_details: pointer to command details structure or NULL
5283  *
5284  * Read the external PHY register.
5285  **/
5286 i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw,
5287 				     u8 phy_select, u8 dev_addr,
5288 				     u32 reg_addr, u32 *reg_val,
5289 				     struct i40e_asq_cmd_details *cmd_details)
5290 {
5291 	struct i40e_aq_desc desc;
5292 	struct i40e_aqc_phy_register_access *cmd =
5293 		(struct i40e_aqc_phy_register_access *)&desc.params.raw;
5294 	i40e_status status;
5295 
5296 	i40e_fill_default_direct_cmd_desc(&desc,
5297 					  i40e_aqc_opc_get_phy_register);
5298 
5299 	cmd->phy_interface = phy_select;
5300 	cmd->dev_address = dev_addr;
5301 	cmd->reg_address = cpu_to_le32(reg_addr);
5302 
5303 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5304 	if (!status)
5305 		*reg_val = le32_to_cpu(cmd->reg_value);
5306 
5307 	return status;
5308 }
5309 
5310 /**
5311  * i40e_aq_write_ddp - Write dynamic device personalization (ddp)
5312  * @hw: pointer to the hw struct
5313  * @buff: command buffer (size in bytes = buff_size)
5314  * @buff_size: buffer size in bytes
5315  * @track_id: package tracking id
5316  * @error_offset: returns error offset
5317  * @error_info: returns error information
5318  * @cmd_details: pointer to command details structure or NULL
5319  **/
5320 enum
5321 i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
5322 				   u16 buff_size, u32 track_id,
5323 				   u32 *error_offset, u32 *error_info,
5324 				   struct i40e_asq_cmd_details *cmd_details)
5325 {
5326 	struct i40e_aq_desc desc;
5327 	struct i40e_aqc_write_personalization_profile *cmd =
5328 		(struct i40e_aqc_write_personalization_profile *)
5329 		&desc.params.raw;
5330 	struct i40e_aqc_write_ddp_resp *resp;
5331 	i40e_status status;
5332 
5333 	i40e_fill_default_direct_cmd_desc(&desc,
5334 					  i40e_aqc_opc_write_personalization_profile);
5335 
5336 	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
5337 	if (buff_size > I40E_AQ_LARGE_BUF)
5338 		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5339 
5340 	desc.datalen = cpu_to_le16(buff_size);
5341 
5342 	cmd->profile_track_id = cpu_to_le32(track_id);
5343 
5344 	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
5345 	if (!status) {
5346 		resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
5347 		if (error_offset)
5348 			*error_offset = le32_to_cpu(resp->error_offset);
5349 		if (error_info)
5350 			*error_info = le32_to_cpu(resp->error_info);
5351 	}
5352 
5353 	return status;
5354 }
5355 
5356 /**
5357  * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp)
5358  * @hw: pointer to the hw struct
5359  * @buff: command buffer (size in bytes = buff_size)
5360  * @buff_size: buffer size in bytes
5361  * @flags: AdminQ command flags
5362  * @cmd_details: pointer to command details structure or NULL
5363  **/
5364 enum
5365 i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
5366 				      u16 buff_size, u8 flags,
5367 				      struct i40e_asq_cmd_details *cmd_details)
5368 {
5369 	struct i40e_aq_desc desc;
5370 	struct i40e_aqc_get_applied_profiles *cmd =
5371 		(struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
5372 	i40e_status status;
5373 
5374 	i40e_fill_default_direct_cmd_desc(&desc,
5375 					  i40e_aqc_opc_get_personalization_profile_list);
5376 
5377 	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
5378 	if (buff_size > I40E_AQ_LARGE_BUF)
5379 		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5380 	desc.datalen = cpu_to_le16(buff_size);
5381 
5382 	cmd->flags = flags;
5383 
5384 	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
5385 
5386 	return status;
5387 }
5388 
5389 /**
5390  * i40e_find_segment_in_package
 * @segment_type: the segment type to search for (e.g., SEGMENT_TYPE_I40E)
5392  * @pkg_hdr: pointer to the package header to be searched
5393  *
5394  * This function searches a package file for a particular segment type. On
5395  * success it returns a pointer to the segment header, otherwise it will
5396  * return NULL.
5397  **/
5398 struct i40e_generic_seg_header *
5399 i40e_find_segment_in_package(u32 segment_type,
5400 			     struct i40e_package_header *pkg_hdr)
5401 {
5402 	struct i40e_generic_seg_header *segment;
5403 	u32 i;
5404 
5405 	/* Search all package segments for the requested segment type */
5406 	for (i = 0; i < pkg_hdr->segment_count; i++) {
5407 		segment =
5408 			(struct i40e_generic_seg_header *)((u8 *)pkg_hdr +
5409 			 pkg_hdr->segment_offset[i]);
5410 
5411 		if (segment->type == segment_type)
5412 			return segment;
5413 	}
5414 
5415 	return NULL;
5416 }
5417 
5418 /* Get section table in profile */
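/* The section table follows the device table and the NVM table; the first
 * word of the NVM table holds the number of entries that come after it.
 */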
5419 #define I40E_SECTION_TABLE(profile, sec_tbl)				\
5420 	do {								\
5421 		struct i40e_profile_segment *p = (profile);		\
5422 		u32 count;						\
5423 		u32 *nvm;						\
5424 		count = p->device_table_count;				\
5425 		nvm = (u32 *)&p->device_table[count];			\
5426 		sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \
5427 	} while (0)
5428 
5429 /* Get section header in profile */
5430 #define I40E_SECTION_HEADER(profile, offset)				\
5431 	(struct i40e_profile_section_header *)((u8 *)(profile) + (offset))
5432 
5433 /**
5434  * i40e_find_section_in_profile
 * @section_type: the section type to search for (e.g., SECTION_TYPE_NOTE)
5436  * @profile: pointer to the i40e segment header to be searched
5437  *
 * This function searches the i40e segment for a particular section type. On
5439  * success it returns a pointer to the section header, otherwise it will
5440  * return NULL.
5441  **/
5442 struct i40e_profile_section_header *
5443 i40e_find_section_in_profile(u32 section_type,
5444 			     struct i40e_profile_segment *profile)
5445 {
5446 	struct i40e_profile_section_header *sec;
5447 	struct i40e_section_table *sec_tbl;
5448 	u32 sec_off;
5449 	u32 i;
5450 
5451 	if (profile->header.type != SEGMENT_TYPE_I40E)
5452 		return NULL;
5453 
5454 	I40E_SECTION_TABLE(profile, sec_tbl);
5455 
5456 	for (i = 0; i < sec_tbl->section_count; i++) {
5457 		sec_off = sec_tbl->section_offset[i];
5458 		sec = I40E_SECTION_HEADER(profile, sec_off);
5459 		if (sec->section.type == section_type)
5460 			return sec;
5461 	}
5462 
5463 	return NULL;
5464 }
5465 
5466 /**
5467  * i40e_ddp_exec_aq_section - Execute generic AQ for DDP
5468  * @hw: pointer to the hw struct
5469  * @aq: command buffer containing all data to execute AQ
5470  **/
5471 static enum
5472 i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw,
5473 					  struct i40e_profile_aq_section *aq)
5474 {
5475 	i40e_status status;
5476 	struct i40e_aq_desc desc;
5477 	u8 *msg = NULL;
5478 	u16 msglen;
5479 
5480 	i40e_fill_default_direct_cmd_desc(&desc, aq->opcode);
5481 	desc.flags |= cpu_to_le16(aq->flags);
5482 	memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw));
5483 
5484 	msglen = aq->datalen;
5485 	if (msglen) {
5486 		desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
5487 						I40E_AQ_FLAG_RD));
5488 		if (msglen > I40E_AQ_LARGE_BUF)
5489 			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5490 		desc.datalen = cpu_to_le16(msglen);
5491 		msg = &aq->data[0];
5492 	}
5493 
5494 	status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL);
5495 
5496 	if (status) {
5497 		i40e_debug(hw, I40E_DEBUG_PACKAGE,
5498 			   "unable to exec DDP AQ opcode %u, error %d\n",
5499 			   aq->opcode, status);
5500 		return status;
5501 	}
5502 
5503 	/* copy returned desc to aq_buf */
5504 	memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw));
5505 
5506 	return 0;
5507 }
5508 
5509 /**
5510  * i40e_validate_profile
5511  * @hw: pointer to the hardware structure
5512  * @profile: pointer to the profile segment of the package to be validated
5513  * @track_id: package tracking id
 * @rollback: true if the profile is meant for a rollback.
5515  *
5516  * Validates supported devices and profile's sections.
5517  */
5518 static enum i40e_status_code
5519 i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5520 		      u32 track_id, bool rollback)
5521 {
5522 	struct i40e_profile_section_header *sec = NULL;
5523 	i40e_status status = 0;
5524 	struct i40e_section_table *sec_tbl;
5525 	u32 vendor_dev_id;
5526 	u32 dev_cnt;
5527 	u32 sec_off;
5528 	u32 i;
5529 
5530 	if (track_id == I40E_DDP_TRACKID_INVALID) {
5531 		i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n");
5532 		return I40E_NOT_SUPPORTED;
5533 	}
5534 
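	/* The profile applies only if this device ID appears in the profile's
	 * device table; an empty table matches any device.
	 */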
5535 	dev_cnt = profile->device_table_count;
5536 	for (i = 0; i < dev_cnt; i++) {
5537 		vendor_dev_id = profile->device_table[i].vendor_dev_id;
5538 		if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL &&
5539 		    hw->device_id == (vendor_dev_id & 0xFFFF))
5540 			break;
5541 	}
5542 	if (dev_cnt && i == dev_cnt) {
5543 		i40e_debug(hw, I40E_DEBUG_PACKAGE,
5544 			   "Device doesn't support DDP\n");
5545 		return I40E_ERR_DEVICE_NOT_SUPPORTED;
5546 	}
5547 
5548 	I40E_SECTION_TABLE(profile, sec_tbl);
5549 
5550 	/* Validate sections types */
5551 	for (i = 0; i < sec_tbl->section_count; i++) {
5552 		sec_off = sec_tbl->section_offset[i];
5553 		sec = I40E_SECTION_HEADER(profile, sec_off);
5554 		if (rollback) {
5555 			if (sec->section.type == SECTION_TYPE_MMIO ||
5556 			    sec->section.type == SECTION_TYPE_AQ ||
5557 			    sec->section.type == SECTION_TYPE_RB_AQ) {
5558 				i40e_debug(hw, I40E_DEBUG_PACKAGE,
5559 					   "Not a roll-back package\n");
5560 				return I40E_NOT_SUPPORTED;
5561 			}
5562 		} else {
5563 			if (sec->section.type == SECTION_TYPE_RB_AQ ||
5564 			    sec->section.type == SECTION_TYPE_RB_MMIO) {
5565 				i40e_debug(hw, I40E_DEBUG_PACKAGE,
5566 					   "Not an original package\n");
5567 				return I40E_NOT_SUPPORTED;
5568 			}
5569 		}
5570 	}
5571 
5572 	return status;
5573 }
5574 
5575 /**
5576  * i40e_write_profile
5577  * @hw: pointer to the hardware structure
5578  * @profile: pointer to the profile segment of the package to be downloaded
5579  * @track_id: package tracking id
5580  *
5581  * Handles the download of a complete package.
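 *
 * A hypothetical caller (pkg_hdr, track_id and sec_buf set up elsewhere)
 * first locates the i40e segment, then registers the profile once the
 * write succeeds:
 *	profile = (struct i40e_profile_segment *)
 *		  i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
 *	status = i40e_write_profile(hw, profile, track_id);
 *	if (!status)
 *		status = i40e_add_pinfo_to_list(hw, profile, sec_buf, track_id);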
5582  */
5583 enum i40e_status_code
5584 i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5585 		   u32 track_id)
5586 {
5587 	i40e_status status = 0;
5588 	struct i40e_section_table *sec_tbl;
5589 	struct i40e_profile_section_header *sec = NULL;
5590 	struct i40e_profile_aq_section *ddp_aq;
5591 	u32 section_size = 0;
5592 	u32 offset = 0, info = 0;
5593 	u32 sec_off;
5594 	u32 i;
5595 
5596 	status = i40e_validate_profile(hw, profile, track_id, false);
5597 	if (status)
5598 		return status;
5599 
5600 	I40E_SECTION_TABLE(profile, sec_tbl);
5601 
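	/* Walk every section: AQ sections are executed directly and then
	 * marked as roll-back AQ sections, MMIO sections are written to the
	 * device with i40e_aq_write_ddp(), and anything else is skipped.
	 */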
5602 	for (i = 0; i < sec_tbl->section_count; i++) {
5603 		sec_off = sec_tbl->section_offset[i];
5604 		sec = I40E_SECTION_HEADER(profile, sec_off);
5605 		/* Process generic admin command */
5606 		if (sec->section.type == SECTION_TYPE_AQ) {
5607 			ddp_aq = (struct i40e_profile_aq_section *)&sec[1];
5608 			status = i40e_ddp_exec_aq_section(hw, ddp_aq);
5609 			if (status) {
5610 				i40e_debug(hw, I40E_DEBUG_PACKAGE,
5611 					   "Failed to execute aq: section %d, opcode %u\n",
5612 					   i, ddp_aq->opcode);
5613 				break;
5614 			}
5615 			sec->section.type = SECTION_TYPE_RB_AQ;
5616 		}
5617 
5618 		/* Skip any non-mmio sections */
5619 		if (sec->section.type != SECTION_TYPE_MMIO)
5620 			continue;
5621 
5622 		section_size = sec->section.size +
5623 			sizeof(struct i40e_profile_section_header);
5624 
5625 		/* Write MMIO section */
5626 		status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
5627 					   track_id, &offset, &info, NULL);
5628 		if (status) {
5629 			i40e_debug(hw, I40E_DEBUG_PACKAGE,
5630 				   "Failed to write profile: section %d, offset %d, info %d\n",
5631 				   i, offset, info);
5632 			break;
5633 		}
5634 	}
5635 	return status;
5636 }
5637 
5638 /**
5639  * i40e_rollback_profile
5640  * @hw: pointer to the hardware structure
5641  * @profile: pointer to the profile segment of the package to be removed
5642  * @track_id: package tracking id
5643  *
5644  * Rolls back previously loaded package.
5645  */
5646 enum i40e_status_code
5647 i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5648 		      u32 track_id)
5649 {
5650 	struct i40e_profile_section_header *sec = NULL;
5651 	i40e_status status = 0;
5652 	struct i40e_section_table *sec_tbl;
5653 	u32 offset = 0, info = 0;
5654 	u32 section_size = 0;
5655 	u32 sec_off;
5656 	int i;
5657 
5658 	status = i40e_validate_profile(hw, profile, track_id, true);
5659 	if (status)
5660 		return status;
5661 
5662 	I40E_SECTION_TABLE(profile, sec_tbl);
5663 
5664 	/* For rollback write sections in reverse */
5665 	for (i = sec_tbl->section_count - 1; i >= 0; i--) {
5666 		sec_off = sec_tbl->section_offset[i];
5667 		sec = I40E_SECTION_HEADER(profile, sec_off);
5668 
5669 		/* Skip any non-rollback sections */
5670 		if (sec->section.type != SECTION_TYPE_RB_MMIO)
5671 			continue;
5672 
5673 		section_size = sec->section.size +
5674 			sizeof(struct i40e_profile_section_header);
5675 
5676 		/* Write roll-back MMIO section */
5677 		status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
5678 					   track_id, &offset, &info, NULL);
5679 		if (status) {
5680 			i40e_debug(hw, I40E_DEBUG_PACKAGE,
5681 				   "Failed to write profile: section %d, offset %d, info %d\n",
5682 				   i, offset, info);
5683 			break;
5684 		}
5685 	}
5686 	return status;
5687 }
5688 
5689 /**
5690  * i40e_add_pinfo_to_list
5691  * @hw: pointer to the hardware structure
5692  * @profile: pointer to the profile segment of the package
5693  * @profile_info_sec: buffer for information section
5694  * @track_id: package tracking id
5695  *
5696  * Register a profile to the list of loaded profiles.
5697  */
5698 enum i40e_status_code
5699 i40e_add_pinfo_to_list(struct i40e_hw *hw,
5700 		       struct i40e_profile_segment *profile,
5701 		       u8 *profile_info_sec, u32 track_id)
5702 {
5703 	i40e_status status = 0;
5704 	struct i40e_profile_section_header *sec = NULL;
5705 	struct i40e_profile_info *pinfo;
5706 	u32 offset = 0, info = 0;
5707 
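	/* Build a single SECTION_TYPE_INFO section in the caller-supplied
	 * buffer and write it with the I40E_DDP_ADD_TRACKID operation so the
	 * profile appears in the applied-profiles list.
	 */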
5708 	sec = (struct i40e_profile_section_header *)profile_info_sec;
5709 	sec->tbl_size = 1;
5710 	sec->data_end = sizeof(struct i40e_profile_section_header) +
5711 			sizeof(struct i40e_profile_info);
5712 	sec->section.type = SECTION_TYPE_INFO;
5713 	sec->section.offset = sizeof(struct i40e_profile_section_header);
5714 	sec->section.size = sizeof(struct i40e_profile_info);
5715 	pinfo = (struct i40e_profile_info *)(profile_info_sec +
5716 					     sec->section.offset);
5717 	pinfo->track_id = track_id;
5718 	pinfo->version = profile->version;
5719 	pinfo->op = I40E_DDP_ADD_TRACKID;
5720 	memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
5721 
5722 	status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
5723 				   track_id, &offset, &info, NULL);
5724 
5725 	return status;
5726 }
5727 
5728 /**
5729  * i40e_aq_add_cloud_filters
5730  * @hw: pointer to the hardware structure
 * @seid: VSI seid to add cloud filters to
5732  * @filters: Buffer which contains the filters to be added
5733  * @filter_count: number of filters contained in the buffer
5734  *
5735  * Set the cloud filters for a given VSI.  The contents of the
5736  * i40e_aqc_cloud_filters_element_data are filled in by the caller
5737  * of the function.
5738  *
5739  **/
5740 enum i40e_status_code
5741 i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
5742 			  struct i40e_aqc_cloud_filters_element_data *filters,
5743 			  u8 filter_count)
5744 {
5745 	struct i40e_aq_desc desc;
5746 	struct i40e_aqc_add_remove_cloud_filters *cmd =
5747 	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5748 	enum i40e_status_code status;
5749 	u16 buff_len;
5750 
5751 	i40e_fill_default_direct_cmd_desc(&desc,
5752 					  i40e_aqc_opc_add_cloud_filters);
5753 
5754 	buff_len = filter_count * sizeof(*filters);
5755 	desc.datalen = cpu_to_le16(buff_len);
5756 	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5757 	cmd->num_filters = filter_count;
5758 	cmd->seid = cpu_to_le16(seid);
5759 
5760 	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
5761 
5762 	return status;
5763 }
5764 
5765 /**
5766  * i40e_aq_add_cloud_filters_bb
5767  * @hw: pointer to the hardware structure
 * @seid: VSI seid to add cloud filters to
5769  * @filters: Buffer which contains the filters in big buffer to be added
5770  * @filter_count: number of filters contained in the buffer
5771  *
5772  * Set the big buffer cloud filters for a given VSI.  The contents of the
5773  * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
5774  * function.
5775  *
5776  **/
5777 enum i40e_status_code
5778 i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
5779 			     struct i40e_aqc_cloud_filters_element_bb *filters,
5780 			     u8 filter_count)
5781 {
5782 	struct i40e_aq_desc desc;
5783 	struct i40e_aqc_add_remove_cloud_filters *cmd =
5784 	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5785 	i40e_status status;
5786 	u16 buff_len;
5787 	int i;
5788 
5789 	i40e_fill_default_direct_cmd_desc(&desc,
5790 					  i40e_aqc_opc_add_cloud_filters);
5791 
5792 	buff_len = filter_count * sizeof(*filters);
5793 	desc.datalen = cpu_to_le16(buff_len);
5794 	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5795 	cmd->num_filters = filter_count;
5796 	cmd->seid = cpu_to_le16(seid);
5797 	cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
5798 
5799 	for (i = 0; i < filter_count; i++) {
5800 		u16 tnl_type;
5801 		u32 ti;
5802 
5803 		tnl_type = (le16_to_cpu(filters[i].element.flags) &
5804 			   I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
5805 			   I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
5806 
		/* Due to hardware eccentricities, the VNI for Geneve is shifted
		 * one byte further than the Tenant ID field used by the other
		 * tunnel types.
5810 		 */
5811 		if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
5812 			ti = le32_to_cpu(filters[i].element.tenant_id);
5813 			filters[i].element.tenant_id = cpu_to_le32(ti << 8);
5814 		}
5815 	}
5816 
5817 	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
5818 
5819 	return status;
5820 }
5821 
5822 /**
5823  * i40e_aq_rem_cloud_filters
5824  * @hw: pointer to the hardware structure
5825  * @seid: VSI seid to remove cloud filters from
5826  * @filters: Buffer which contains the filters to be removed
5827  * @filter_count: number of filters contained in the buffer
5828  *
5829  * Remove the cloud filters for a given VSI.  The contents of the
5830  * i40e_aqc_cloud_filters_element_data are filled in by the caller
5831  * of the function.
5832  *
5833  **/
5834 enum i40e_status_code
5835 i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
5836 			  struct i40e_aqc_cloud_filters_element_data *filters,
5837 			  u8 filter_count)
5838 {
5839 	struct i40e_aq_desc desc;
5840 	struct i40e_aqc_add_remove_cloud_filters *cmd =
5841 	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5842 	enum i40e_status_code status;
5843 	u16 buff_len;
5844 
5845 	i40e_fill_default_direct_cmd_desc(&desc,
5846 					  i40e_aqc_opc_remove_cloud_filters);
5847 
5848 	buff_len = filter_count * sizeof(*filters);
5849 	desc.datalen = cpu_to_le16(buff_len);
5850 	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5851 	cmd->num_filters = filter_count;
5852 	cmd->seid = cpu_to_le16(seid);
5853 
5854 	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
5855 
5856 	return status;
5857 }
5858 
5859 /**
5860  * i40e_aq_rem_cloud_filters_bb
5861  * @hw: pointer to the hardware structure
5862  * @seid: VSI seid to remove cloud filters from
5863  * @filters: Buffer which contains the filters in big buffer to be removed
5864  * @filter_count: number of filters contained in the buffer
5865  *
5866  * Remove the big buffer cloud filters for a given VSI.  The contents of the
5867  * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
5868  * function.
5869  *
5870  **/
5871 enum i40e_status_code
5872 i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
5873 			     struct i40e_aqc_cloud_filters_element_bb *filters,
5874 			     u8 filter_count)
5875 {
5876 	struct i40e_aq_desc desc;
5877 	struct i40e_aqc_add_remove_cloud_filters *cmd =
5878 	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5879 	i40e_status status;
5880 	u16 buff_len;
5881 	int i;
5882 
5883 	i40e_fill_default_direct_cmd_desc(&desc,
5884 					  i40e_aqc_opc_remove_cloud_filters);
5885 
5886 	buff_len = filter_count * sizeof(*filters);
5887 	desc.datalen = cpu_to_le16(buff_len);
5888 	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5889 	cmd->num_filters = filter_count;
5890 	cmd->seid = cpu_to_le16(seid);
5891 	cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
5892 
5893 	for (i = 0; i < filter_count; i++) {
5894 		u16 tnl_type;
5895 		u32 ti;
5896 
5897 		tnl_type = (le16_to_cpu(filters[i].element.flags) &
5898 			   I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
5899 			   I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
5900 
		/* Due to hardware eccentricities, the VNI for Geneve is shifted
		 * one byte further than the Tenant ID field used by the other
		 * tunnel types.
5904 		 */
5905 		if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
5906 			ti = le32_to_cpu(filters[i].element.tenant_id);
5907 			filters[i].element.tenant_id = cpu_to_le32(ti << 8);
5908 		}
5909 	}
5910 
5911 	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
5912 
5913 	return status;
5914 }
5915