1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2021 Intel Corporation. */
3
4 #include <linux/avf/virtchnl.h>
5 #include <linux/bitfield.h>
6 #include <linux/delay.h>
7 #include <linux/etherdevice.h>
8 #include <linux/pci.h>
9 #include "i40e_adminq_cmd.h"
10 #include "i40e_devids.h"
11 #include "i40e_prototype.h"
12 #include "i40e_register.h"
13
14 /**
15 * i40e_set_mac_type - Sets MAC type
16 * @hw: pointer to the HW structure
17 *
18 * This function sets the MAC type of the adapter based on the
19 * vendor ID and device ID stored in the hw structure.
20 **/
21 int i40e_set_mac_type(struct i40e_hw *hw)
22 {
23 int status = 0;
24
25 if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
26 switch (hw->device_id) {
27 case I40E_DEV_ID_SFP_XL710:
28 case I40E_DEV_ID_QEMU:
29 case I40E_DEV_ID_KX_B:
30 case I40E_DEV_ID_KX_C:
31 case I40E_DEV_ID_QSFP_A:
32 case I40E_DEV_ID_QSFP_B:
33 case I40E_DEV_ID_QSFP_C:
34 case I40E_DEV_ID_1G_BASE_T_BC:
35 case I40E_DEV_ID_5G_BASE_T_BC:
36 case I40E_DEV_ID_10G_BASE_T:
37 case I40E_DEV_ID_10G_BASE_T4:
38 case I40E_DEV_ID_10G_BASE_T_BC:
39 case I40E_DEV_ID_10G_B:
40 case I40E_DEV_ID_10G_SFP:
41 case I40E_DEV_ID_20G_KR2:
42 case I40E_DEV_ID_20G_KR2_A:
43 case I40E_DEV_ID_25G_B:
44 case I40E_DEV_ID_25G_SFP28:
45 case I40E_DEV_ID_X710_N3000:
46 case I40E_DEV_ID_XXV710_N3000:
47 hw->mac.type = I40E_MAC_XL710;
48 break;
49 case I40E_DEV_ID_KX_X722:
50 case I40E_DEV_ID_QSFP_X722:
51 case I40E_DEV_ID_SFP_X722:
52 case I40E_DEV_ID_1G_BASE_T_X722:
53 case I40E_DEV_ID_10G_BASE_T_X722:
54 case I40E_DEV_ID_SFP_I_X722:
55 case I40E_DEV_ID_SFP_X722_A:
56 hw->mac.type = I40E_MAC_X722;
57 break;
58 default:
59 hw->mac.type = I40E_MAC_GENERIC;
60 break;
61 }
62 } else {
63 status = -ENODEV;
64 }
65
66 hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
67 hw->mac.type, status);
68 return status;
69 }
70
71 /**
72 * i40e_aq_str - convert AQ err code to a string
73 * @hw: pointer to the HW structure
74 * @aq_err: the AQ error code to convert
75 **/
76 const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
77 {
78 switch (aq_err) {
79 case I40E_AQ_RC_OK:
80 return "OK";
81 case I40E_AQ_RC_EPERM:
82 return "I40E_AQ_RC_EPERM";
83 case I40E_AQ_RC_ENOENT:
84 return "I40E_AQ_RC_ENOENT";
85 case I40E_AQ_RC_ESRCH:
86 return "I40E_AQ_RC_ESRCH";
87 case I40E_AQ_RC_EINTR:
88 return "I40E_AQ_RC_EINTR";
89 case I40E_AQ_RC_EIO:
90 return "I40E_AQ_RC_EIO";
91 case I40E_AQ_RC_ENXIO:
92 return "I40E_AQ_RC_ENXIO";
93 case I40E_AQ_RC_E2BIG:
94 return "I40E_AQ_RC_E2BIG";
95 case I40E_AQ_RC_EAGAIN:
96 return "I40E_AQ_RC_EAGAIN";
97 case I40E_AQ_RC_ENOMEM:
98 return "I40E_AQ_RC_ENOMEM";
99 case I40E_AQ_RC_EACCES:
100 return "I40E_AQ_RC_EACCES";
101 case I40E_AQ_RC_EFAULT:
102 return "I40E_AQ_RC_EFAULT";
103 case I40E_AQ_RC_EBUSY:
104 return "I40E_AQ_RC_EBUSY";
105 case I40E_AQ_RC_EEXIST:
106 return "I40E_AQ_RC_EEXIST";
107 case I40E_AQ_RC_EINVAL:
108 return "I40E_AQ_RC_EINVAL";
109 case I40E_AQ_RC_ENOTTY:
110 return "I40E_AQ_RC_ENOTTY";
111 case I40E_AQ_RC_ENOSPC:
112 return "I40E_AQ_RC_ENOSPC";
113 case I40E_AQ_RC_ENOSYS:
114 return "I40E_AQ_RC_ENOSYS";
115 case I40E_AQ_RC_ERANGE:
116 return "I40E_AQ_RC_ERANGE";
117 case I40E_AQ_RC_EFLUSHED:
118 return "I40E_AQ_RC_EFLUSHED";
119 case I40E_AQ_RC_BAD_ADDR:
120 return "I40E_AQ_RC_BAD_ADDR";
121 case I40E_AQ_RC_EMODE:
122 return "I40E_AQ_RC_EMODE";
123 case I40E_AQ_RC_EFBIG:
124 return "I40E_AQ_RC_EFBIG";
125 }
126
127 snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
128 return hw->err_str;
129 }
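/* Illustrative sketch, not part of the driver: callers typically pair the
 * errno-style return code with i40e_aq_str() when logging an AdminQ failure.
 * The "pf" pointer and the surrounding error path are hypothetical here.
 *
 *	if (ret)
 *		dev_err(&pf->pdev->dev,
 *			"AQ command failed, err %d, aq_err %s\n",
 *			ret, i40e_aq_str(hw, hw->aq.asq_last_status));
 */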
130
131 /**
132 * i40e_debug_aq
133 * @hw: pointer to the HW structure
134 * @mask: debug mask
135 * @desc: pointer to admin queue descriptor
136 * @buffer: pointer to command buffer
137 * @buf_len: max length of buffer
138 *
139 * Dumps debug log about adminq command with descriptor contents.
140 **/
141 void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
142 void *buffer, u16 buf_len)
143 {
144 struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
145 u32 effective_mask = hw->debug_mask & mask;
146 char prefix[27];
147 u16 len;
148 u8 *buf = (u8 *)buffer;
149
150 if (!effective_mask || !desc)
151 return;
152
153 len = le16_to_cpu(aq_desc->datalen);
154
155 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
156 "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
157 le16_to_cpu(aq_desc->opcode),
158 le16_to_cpu(aq_desc->flags),
159 le16_to_cpu(aq_desc->datalen),
160 le16_to_cpu(aq_desc->retval));
161 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
162 "\tcookie (h,l) 0x%08X 0x%08X\n",
163 le32_to_cpu(aq_desc->cookie_high),
164 le32_to_cpu(aq_desc->cookie_low));
165 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
166 "\tparam (0,1) 0x%08X 0x%08X\n",
167 le32_to_cpu(aq_desc->params.internal.param0),
168 le32_to_cpu(aq_desc->params.internal.param1));
169 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
170 "\taddr (h,l) 0x%08X 0x%08X\n",
171 le32_to_cpu(aq_desc->params.external.addr_high),
172 le32_to_cpu(aq_desc->params.external.addr_low));
173
174 if (buffer && buf_len != 0 && len != 0 &&
175 (effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) {
176 i40e_debug(hw, mask, "AQ CMD Buffer:\n");
177 if (buf_len < len)
178 len = buf_len;
179
180 snprintf(prefix, sizeof(prefix),
181 "i40e %02x:%02x.%x: \t0x",
182 hw->bus.bus_id,
183 hw->bus.device,
184 hw->bus.func);
185
186 print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
187 16, 1, buf, len, false);
188 }
189 }
190
191 /**
192 * i40e_check_asq_alive
193 * @hw: pointer to the hw struct
194 *
195 * Returns true if the admin send queue is enabled, else false.
196 **/
197 bool i40e_check_asq_alive(struct i40e_hw *hw)
198 {
199 if (hw->aq.asq.len)
200 return !!(rd32(hw, hw->aq.asq.len) &
201 I40E_PF_ATQLEN_ATQENABLE_MASK);
202 else
203 return false;
204 }
205
206 /**
207 * i40e_aq_queue_shutdown
208 * @hw: pointer to the hw struct
209 * @unloading: is the driver unloading itself
210 *
211 * Tell the Firmware that we're shutting down the AdminQ and whether
212 * or not the driver is unloading as well.
213 **/
214 int i40e_aq_queue_shutdown(struct i40e_hw *hw,
215 bool unloading)
216 {
217 struct i40e_aq_desc desc;
218 struct i40e_aqc_queue_shutdown *cmd =
219 (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
220 int status;
221
222 i40e_fill_default_direct_cmd_desc(&desc,
223 i40e_aqc_opc_queue_shutdown);
224
225 if (unloading)
226 cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
227 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
228
229 return status;
230 }
231
232 /**
233 * i40e_aq_get_set_rss_lut
234 * @hw: pointer to the hardware structure
235 * @vsi_id: vsi fw index
236 * @pf_lut: for PF table set true, for VSI table set false
237 * @lut: pointer to the lut buffer provided by the caller
238 * @lut_size: size of the lut buffer
239 * @set: set true to set the table, false to get the table
240 *
241 * Internal function to get or set the RSS lookup table
242 **/
243 static int i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
244 u16 vsi_id, bool pf_lut,
245 u8 *lut, u16 lut_size,
246 bool set)
247 {
248 struct i40e_aq_desc desc;
249 struct i40e_aqc_get_set_rss_lut *cmd_resp =
250 (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
251 int status;
252
253 if (set)
254 i40e_fill_default_direct_cmd_desc(&desc,
255 i40e_aqc_opc_set_rss_lut);
256 else
257 i40e_fill_default_direct_cmd_desc(&desc,
258 i40e_aqc_opc_get_rss_lut);
259
260 /* Indirect command */
261 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
262 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
263
264 cmd_resp->vsi_id =
265 cpu_to_le16((u16)((vsi_id <<
266 I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
267 I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
268 cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
269
270 if (pf_lut)
271 cmd_resp->flags |= cpu_to_le16((u16)
272 ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
273 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
274 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
275 else
276 cmd_resp->flags |= cpu_to_le16((u16)
277 ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
278 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
279 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
280
281 status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);
282
283 return status;
284 }
285
286 /**
287 * i40e_aq_get_rss_lut
288 * @hw: pointer to the hardware structure
289 * @vsi_id: vsi fw index
290 * @pf_lut: for PF table set true, for VSI table set false
291 * @lut: pointer to the lut buffer provided by the caller
292 * @lut_size: size of the lut buffer
293 *
294 * get the RSS lookup table, PF or VSI type
295 **/
296 int i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
297 bool pf_lut, u8 *lut, u16 lut_size)
298 {
299 return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
300 false);
301 }
302
303 /**
304 * i40e_aq_set_rss_lut
305 * @hw: pointer to the hardware structure
306 * @vsi_id: vsi fw index
307 * @pf_lut: for PF table set true, for VSI table set false
308 * @lut: pointer to the lut buffer provided by the caller
309 * @lut_size: size of the lut buffer
310 *
311 * set the RSS lookup table, PF or VSI type
312 **/
313 int i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
314 bool pf_lut, u8 *lut, u16 lut_size)
315 {
316 return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
317 }
318
319 /**
320 * i40e_aq_get_set_rss_key
321 * @hw: pointer to the hw struct
322 * @vsi_id: vsi fw index
323 * @key: pointer to key info struct
324 * @set: set true to set the key, false to get the key
325 *
326 * Internal function to get or set the RSS key for a VSI
327 **/
328 static int i40e_aq_get_set_rss_key(struct i40e_hw *hw,
329 u16 vsi_id,
330 struct i40e_aqc_get_set_rss_key_data *key,
331 bool set)
332 {
333 struct i40e_aq_desc desc;
334 struct i40e_aqc_get_set_rss_key *cmd_resp =
335 (struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
336 u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
337 int status;
338
339 if (set)
340 i40e_fill_default_direct_cmd_desc(&desc,
341 i40e_aqc_opc_set_rss_key);
342 else
343 i40e_fill_default_direct_cmd_desc(&desc,
344 i40e_aqc_opc_get_rss_key);
345
346 /* Indirect command */
347 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
348 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
349
350 cmd_resp->vsi_id =
351 cpu_to_le16((u16)((vsi_id <<
352 I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
353 I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
354 cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
355
356 status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);
357
358 return status;
359 }
360
361 /**
362 * i40e_aq_get_rss_key
363 * @hw: pointer to the hw struct
364 * @vsi_id: vsi fw index
365 * @key: pointer to key info struct
366 *
367 **/
368 int i40e_aq_get_rss_key(struct i40e_hw *hw,
369 u16 vsi_id,
370 struct i40e_aqc_get_set_rss_key_data *key)
371 {
372 return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
373 }
374
375 /**
376 * i40e_aq_set_rss_key
377 * @hw: pointer to the hw struct
378 * @vsi_id: vsi fw index
379 * @key: pointer to key info struct
380 *
381 * set the RSS key per VSI
382 **/
383 int i40e_aq_set_rss_key(struct i40e_hw *hw,
384 u16 vsi_id,
385 struct i40e_aqc_get_set_rss_key_data *key)
386 {
387 return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
388 }
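/* Illustrative sketch, not part of the driver: programming RSS for a VSI
 * through the wrappers above. "vsi_id", "seed", "lut" and "lut_size" are
 * hypothetical caller-owned values; pf_lut is false for a per-VSI table.
 *
 *	ret = i40e_aq_set_rss_key(hw, vsi_id,
 *				  (struct i40e_aqc_get_set_rss_key_data *)seed);
 *	if (!ret)
 *		ret = i40e_aq_set_rss_lut(hw, vsi_id, false, lut, lut_size);
 */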
389
390 /* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
391 * hardware to a bit-field that can be used by SW to more easily determine the
392 * packet type.
393 *
394 * Macros are used to shorten the table lines and make this table human
395 * readable.
396 *
397 * We store the PTYPE in the top byte of the bit field - this is just so that
398 * we can check that the table doesn't have a row missing, as the index into
399 * the table should be the PTYPE.
400 *
401 * Typical work flow:
402 *
403 * IF NOT i40e_ptype_lookup[ptype].known
404 * THEN
405 * Packet is unknown
406 * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
407 * Use the rest of the fields to look at the tunnels, inner protocols, etc
408 * ELSE
409 * Use the enum i40e_rx_l2_ptype to decode the packet type
410 * ENDIF
411 */
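/* Illustrative sketch, not part of the driver: decoding a ptype taken from
 * an Rx descriptor by following the workflow above. "ptype" and "decoded"
 * are hypothetical locals.
 *
 *	struct i40e_rx_ptype_decoded decoded = i40e_ptype_lookup[ptype];
 *
 *	if (!decoded.known) {
 *		// unknown packet type, fall back to generic handling
 *	} else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) {
 *		// IP packet: the remaining fields describe outer IP version,
 *		// fragmentation, tunnel type and inner protocol
 *	} else {
 *		// L2-only packet: classify via enum i40e_rx_l2_ptype
 *	}
 */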
412
413 /* macro to make the table lines short, use explicit indexing with [PTYPE] */
414 #define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
415 [PTYPE] = { \
416 1, \
417 I40E_RX_PTYPE_OUTER_##OUTER_IP, \
418 I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
419 I40E_RX_PTYPE_##OUTER_FRAG, \
420 I40E_RX_PTYPE_TUNNEL_##T, \
421 I40E_RX_PTYPE_TUNNEL_END_##TE, \
422 I40E_RX_PTYPE_##TEF, \
423 I40E_RX_PTYPE_INNER_PROT_##I, \
424 I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
425
426 #define I40E_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
427
428 /* shorter macros makes the table fit but are terse */
429 #define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG
430 #define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG
431 #define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC
432
433 /* Lookup table mapping in the 8-bit HW PTYPE to the bit field for decoding */
434 struct i40e_rx_ptype_decoded i40e_ptype_lookup[BIT(8)] = {
435 /* L2 Packet types */
436 I40E_PTT_UNUSED_ENTRY(0),
437 I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
438 I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
439 I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
440 I40E_PTT_UNUSED_ENTRY(4),
441 I40E_PTT_UNUSED_ENTRY(5),
442 I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
443 I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
444 I40E_PTT_UNUSED_ENTRY(8),
445 I40E_PTT_UNUSED_ENTRY(9),
446 I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
447 I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
448 I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
449 I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
450 I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
451 I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
452 I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
453 I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
454 I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
455 I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
456 I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
457 I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
458
459 /* Non Tunneled IPv4 */
460 I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
461 I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
462 I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
463 I40E_PTT_UNUSED_ENTRY(25),
464 I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
465 I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
466 I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
467
468 /* IPv4 --> IPv4 */
469 I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
470 I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
471 I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
472 I40E_PTT_UNUSED_ENTRY(32),
473 I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
474 I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
475 I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
476
477 /* IPv4 --> IPv6 */
478 I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
479 I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
480 I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
481 I40E_PTT_UNUSED_ENTRY(39),
482 I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
483 I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
484 I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
485
486 /* IPv4 --> GRE/NAT */
487 I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
488
489 /* IPv4 --> GRE/NAT --> IPv4 */
490 I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
491 I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
492 I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
493 I40E_PTT_UNUSED_ENTRY(47),
494 I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
495 I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
496 I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
497
498 /* IPv4 --> GRE/NAT --> IPv6 */
499 I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
500 I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
501 I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
502 I40E_PTT_UNUSED_ENTRY(54),
503 I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
504 I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
505 I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
506
507 /* IPv4 --> GRE/NAT --> MAC */
508 I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
509
510 /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
511 I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
512 I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
513 I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
514 I40E_PTT_UNUSED_ENTRY(62),
515 I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
516 I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
517 I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
518
519 /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
520 I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
521 I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
522 I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
523 I40E_PTT_UNUSED_ENTRY(69),
524 I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
525 I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
526 I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
527
528 /* IPv4 --> GRE/NAT --> MAC/VLAN */
529 I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
530
531 /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
532 I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
533 I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
534 I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
535 I40E_PTT_UNUSED_ENTRY(77),
536 I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
537 I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
538 I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
539
540 /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
541 I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
542 I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
543 I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
544 I40E_PTT_UNUSED_ENTRY(84),
545 I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
546 I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
547 I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
548
549 /* Non Tunneled IPv6 */
550 I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
551 I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
552 I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
553 I40E_PTT_UNUSED_ENTRY(91),
554 I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
555 I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
556 I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
557
558 /* IPv6 --> IPv4 */
559 I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
560 I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
561 I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
562 I40E_PTT_UNUSED_ENTRY(98),
563 I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
564 I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
565 I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
566
567 /* IPv6 --> IPv6 */
568 I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
569 I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
570 I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
571 I40E_PTT_UNUSED_ENTRY(105),
572 I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
573 I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
574 I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
575
576 /* IPv6 --> GRE/NAT */
577 I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
578
579 /* IPv6 --> GRE/NAT -> IPv4 */
580 I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
581 I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
582 I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
583 I40E_PTT_UNUSED_ENTRY(113),
584 I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
585 I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
586 I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
587
588 /* IPv6 --> GRE/NAT -> IPv6 */
589 I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
590 I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
591 I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
592 I40E_PTT_UNUSED_ENTRY(120),
593 I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
594 I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
595 I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
596
597 /* IPv6 --> GRE/NAT -> MAC */
598 I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
599
600 /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
601 I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
602 I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
603 I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
604 I40E_PTT_UNUSED_ENTRY(128),
605 I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
606 I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
607 I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
608
609 /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
610 I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
611 I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
612 I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
613 I40E_PTT_UNUSED_ENTRY(135),
614 I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
615 I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
616 I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
617
618 /* IPv6 --> GRE/NAT -> MAC/VLAN */
619 I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
620
621 /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
622 I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
623 I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
624 I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
625 I40E_PTT_UNUSED_ENTRY(143),
626 I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
627 I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
628 I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
629
630 /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
631 I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
632 I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
633 I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
634 I40E_PTT_UNUSED_ENTRY(150),
635 I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
636 I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
637 I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
638
639 /* unused entries */
640 [154 ... 255] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
641 };
642
643 /**
644 * i40e_init_shared_code - Initialize the shared code
645 * @hw: pointer to hardware structure
646 *
647 * This assigns the MAC type and PHY code and inits the NVM.
648 * Does not touch the hardware. This function must be called prior to any
649 * other function in the shared code. The i40e_hw structure should be
650 * memset to 0 prior to calling this function. The following fields in
651 * hw structure should be filled in prior to calling this function:
652 * hw_addr, back, device_id, vendor_id, subsystem_device_id,
653 * subsystem_vendor_id, and revision_id
654 **/
655 int i40e_init_shared_code(struct i40e_hw *hw)
656 {
657 u32 port, ari, func_rid;
658 int status = 0;
659
660 i40e_set_mac_type(hw);
661
662 switch (hw->mac.type) {
663 case I40E_MAC_XL710:
664 case I40E_MAC_X722:
665 break;
666 default:
667 return -ENODEV;
668 }
669
670 hw->phy.get_link_info = true;
671
672 /* Determine port number and PF number */
673 port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK)
674 >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
675 hw->port = (u8)port;
676 ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >>
677 I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
678 func_rid = rd32(hw, I40E_PF_FUNC_RID);
679 if (ari)
680 hw->pf_id = (u8)(func_rid & 0xff);
681 else
682 hw->pf_id = (u8)(func_rid & 0x7);
683
684 status = i40e_init_nvm(hw);
685 return status;
686 }
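/* Illustrative sketch, not part of the driver: the minimal setup a caller is
 * expected to perform before i40e_init_shared_code(), per the comment above.
 * "pf", "pdev" and the mapped BAR address are hypothetical and simplified.
 *
 *	memset(hw, 0, sizeof(*hw));
 *	hw->hw_addr = bar0_va;
 *	hw->back = pf;
 *	hw->vendor_id = pdev->vendor;
 *	hw->device_id = pdev->device;
 *	hw->subsystem_vendor_id = pdev->subsystem_vendor;
 *	hw->subsystem_device_id = pdev->subsystem_device;
 *	hw->revision_id = pdev->revision;
 *
 *	err = i40e_init_shared_code(hw);  // sets MAC type, PF id, inits NVM
 */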
687
688 /**
689 * i40e_aq_mac_address_read - Retrieve the MAC addresses
690 * @hw: pointer to the hw struct
691 * @flags: a return indicator of what addresses were added to the addr store
692 * @addrs: the requestor's mac addr store
693 * @cmd_details: pointer to command details structure or NULL
694 **/
695 static int
696 i40e_aq_mac_address_read(struct i40e_hw *hw,
697 u16 *flags,
698 struct i40e_aqc_mac_address_read_data *addrs,
699 struct i40e_asq_cmd_details *cmd_details)
700 {
701 struct i40e_aq_desc desc;
702 struct i40e_aqc_mac_address_read *cmd_data =
703 (struct i40e_aqc_mac_address_read *)&desc.params.raw;
704 int status;
705
706 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
707 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);
708
709 status = i40e_asq_send_command(hw, &desc, addrs,
710 sizeof(*addrs), cmd_details);
711 *flags = le16_to_cpu(cmd_data->command_flags);
712
713 return status;
714 }
715
716 /**
717 * i40e_aq_mac_address_write - Change the MAC addresses
718 * @hw: pointer to the hw struct
719 * @flags: indicates which MAC to be written
720 * @mac_addr: address to write
721 * @cmd_details: pointer to command details structure or NULL
722 **/
723 int i40e_aq_mac_address_write(struct i40e_hw *hw,
724 u16 flags, u8 *mac_addr,
725 struct i40e_asq_cmd_details *cmd_details)
726 {
727 struct i40e_aq_desc desc;
728 struct i40e_aqc_mac_address_write *cmd_data =
729 (struct i40e_aqc_mac_address_write *)&desc.params.raw;
730 int status;
731
732 i40e_fill_default_direct_cmd_desc(&desc,
733 i40e_aqc_opc_mac_address_write);
734 cmd_data->command_flags = cpu_to_le16(flags);
735 cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]);
736 cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) |
737 ((u32)mac_addr[3] << 16) |
738 ((u32)mac_addr[4] << 8) |
739 mac_addr[5]);
740
741 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
742
743 return status;
744 }
745
746 /**
747 * i40e_get_mac_addr - get MAC address
748 * @hw: pointer to the HW structure
749 * @mac_addr: pointer to MAC address
750 *
751 * Reads the adapter's MAC address from register
752 **/
753 int i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
754 {
755 struct i40e_aqc_mac_address_read_data addrs;
756 u16 flags = 0;
757 int status;
758
759 status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
760
761 if (flags & I40E_AQC_LAN_ADDR_VALID)
762 ether_addr_copy(mac_addr, addrs.pf_lan_mac);
763
764 return status;
765 }
766
767 /**
768 * i40e_get_port_mac_addr - get Port MAC address
769 * @hw: pointer to the HW structure
770 * @mac_addr: pointer to Port MAC address
771 *
772 * Reads the adapter's Port MAC address
773 **/
774 int i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
775 {
776 struct i40e_aqc_mac_address_read_data addrs;
777 u16 flags = 0;
778 int status;
779
780 status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
781 if (status)
782 return status;
783
784 if (flags & I40E_AQC_PORT_ADDR_VALID)
785 ether_addr_copy(mac_addr, addrs.port_mac);
786 else
787 status = -EINVAL;
788
789 return status;
790 }
791
792 /**
793 * i40e_pre_tx_queue_cfg - pre tx queue configure
794 * @hw: pointer to the HW structure
795 * @queue: target PF queue index
796 * @enable: state change request
797 *
798 * Handles hw requirement to indicate intention to enable
799 * or disable target queue.
800 **/
801 void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
802 {
803 u32 abs_queue_idx = hw->func_caps.base_queue + queue;
804 u32 reg_block = 0;
805 u32 reg_val;
806
807 if (abs_queue_idx >= 128) {
808 reg_block = abs_queue_idx / 128;
809 abs_queue_idx %= 128;
810 }
811
812 reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
813 reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
814 reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
815
816 if (enable)
817 reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
818 else
819 reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
820
821 wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
822 }
823
824 /**
825 * i40e_read_pba_string - Reads part number string from EEPROM
826 * @hw: pointer to hardware structure
827 * @pba_num: stores the part number string from the EEPROM
828 * @pba_num_size: part number string buffer length
829 *
830 * Reads the part number string from the EEPROM.
831 **/
832 int i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
833 u32 pba_num_size)
834 {
835 u16 pba_word = 0;
836 u16 pba_size = 0;
837 u16 pba_ptr = 0;
838 int status = 0;
839 u16 i = 0;
840
841 status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
842 if (status || (pba_word != 0xFAFA)) {
843 hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n");
844 return status;
845 }
846
847 status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
848 if (status) {
849 hw_dbg(hw, "Failed to read PBA Block pointer.\n");
850 return status;
851 }
852
853 status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
854 if (status) {
855 hw_dbg(hw, "Failed to read PBA Block size.\n");
856 return status;
857 }
858
859 /* Subtract one to get PBA word count (PBA Size word is included in
860 * total size)
861 */
862 pba_size--;
863 if (pba_num_size < (((u32)pba_size * 2) + 1)) {
864 hw_dbg(hw, "Buffer too small for PBA data.\n");
865 return -EINVAL;
866 }
867
868 for (i = 0; i < pba_size; i++) {
869 status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word);
870 if (status) {
871 hw_dbg(hw, "Failed to read PBA Block word %d.\n", i);
872 return status;
873 }
874
875 pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
876 pba_num[(i * 2) + 1] = pba_word & 0xFF;
877 }
878 pba_num[(pba_size * 2)] = '\0';
879
880 return status;
881 }
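/* Illustrative sketch, not part of the driver: the PBA string comes back as
 * two characters per NVM data word plus a terminating NUL, so the caller's
 * buffer must be at least (pba_size * 2) + 1 bytes. "I40E_PBANUM_LEN" and
 * "pdev" are hypothetical.
 *
 *	u8 pba[I40E_PBANUM_LEN];
 *
 *	if (!i40e_read_pba_string(hw, pba, sizeof(pba)))
 *		dev_info(&pdev->dev, "PBA: %s\n", pba);
 */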
882
883 /**
884 * i40e_get_media_type - Gets media type
885 * @hw: pointer to the hardware structure
886 **/
887 static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
888 {
889 enum i40e_media_type media;
890
891 switch (hw->phy.link_info.phy_type) {
892 case I40E_PHY_TYPE_10GBASE_SR:
893 case I40E_PHY_TYPE_10GBASE_LR:
894 case I40E_PHY_TYPE_1000BASE_SX:
895 case I40E_PHY_TYPE_1000BASE_LX:
896 case I40E_PHY_TYPE_40GBASE_SR4:
897 case I40E_PHY_TYPE_40GBASE_LR4:
898 case I40E_PHY_TYPE_25GBASE_LR:
899 case I40E_PHY_TYPE_25GBASE_SR:
900 media = I40E_MEDIA_TYPE_FIBER;
901 break;
902 case I40E_PHY_TYPE_100BASE_TX:
903 case I40E_PHY_TYPE_1000BASE_T:
904 case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
905 case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
906 case I40E_PHY_TYPE_10GBASE_T:
907 media = I40E_MEDIA_TYPE_BASET;
908 break;
909 case I40E_PHY_TYPE_10GBASE_CR1_CU:
910 case I40E_PHY_TYPE_40GBASE_CR4_CU:
911 case I40E_PHY_TYPE_10GBASE_CR1:
912 case I40E_PHY_TYPE_40GBASE_CR4:
913 case I40E_PHY_TYPE_10GBASE_SFPP_CU:
914 case I40E_PHY_TYPE_40GBASE_AOC:
915 case I40E_PHY_TYPE_10GBASE_AOC:
916 case I40E_PHY_TYPE_25GBASE_CR:
917 case I40E_PHY_TYPE_25GBASE_AOC:
918 case I40E_PHY_TYPE_25GBASE_ACC:
919 media = I40E_MEDIA_TYPE_DA;
920 break;
921 case I40E_PHY_TYPE_1000BASE_KX:
922 case I40E_PHY_TYPE_10GBASE_KX4:
923 case I40E_PHY_TYPE_10GBASE_KR:
924 case I40E_PHY_TYPE_40GBASE_KR4:
925 case I40E_PHY_TYPE_20GBASE_KR2:
926 case I40E_PHY_TYPE_25GBASE_KR:
927 media = I40E_MEDIA_TYPE_BACKPLANE;
928 break;
929 case I40E_PHY_TYPE_SGMII:
930 case I40E_PHY_TYPE_XAUI:
931 case I40E_PHY_TYPE_XFI:
932 case I40E_PHY_TYPE_XLAUI:
933 case I40E_PHY_TYPE_XLPPI:
934 default:
935 media = I40E_MEDIA_TYPE_UNKNOWN;
936 break;
937 }
938
939 return media;
940 }
941
942 /**
943 * i40e_poll_globr - Poll for Global Reset completion
944 * @hw: pointer to the hardware structure
945 * @retry_limit: how many times to retry before failure
946 **/
947 static int i40e_poll_globr(struct i40e_hw *hw,
948 u32 retry_limit)
949 {
950 u32 cnt, reg = 0;
951
952 for (cnt = 0; cnt < retry_limit; cnt++) {
953 reg = rd32(hw, I40E_GLGEN_RSTAT);
954 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
955 return 0;
956 msleep(100);
957 }
958
959 hw_dbg(hw, "Global reset failed.\n");
960 hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg);
961
962 return -EIO;
963 }
964
965 #define I40E_PF_RESET_WAIT_COUNT_A0 200
966 #define I40E_PF_RESET_WAIT_COUNT 200
967 /**
968 * i40e_pf_reset - Reset the PF
969 * @hw: pointer to the hardware structure
970 *
971 * Assuming someone else has triggered a global reset,
972 * assure the global reset is complete and then reset the PF
973 **/
974 int i40e_pf_reset(struct i40e_hw *hw)
975 {
976 u32 cnt = 0;
977 u32 cnt1 = 0;
978 u32 reg = 0;
979 u32 grst_del;
980
981 /* Poll for Global Reset steady state in case of recent GRST.
982 * The grst delay value is in 100ms units, and we'll wait a
983 * couple counts longer to be sure we don't just miss the end.
984 */
985 grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
986 I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
987 I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
988
989 /* It can take up to 15 secs for GRST steady state.
990 * Bump it to 16 secs max to be safe.
991 */
992 grst_del = grst_del * 20;
993
994 for (cnt = 0; cnt < grst_del; cnt++) {
995 reg = rd32(hw, I40E_GLGEN_RSTAT);
996 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
997 break;
998 msleep(100);
999 }
1000 if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
1001 hw_dbg(hw, "Global reset polling failed to complete.\n");
1002 return -EIO;
1003 }
1004
1005 /* Now wait for the FW to be ready */
1006 for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
1007 reg = rd32(hw, I40E_GLNVM_ULD);
1008 reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1009 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
1010 if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1011 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
1012 hw_dbg(hw, "Core and Global modules ready %d\n", cnt1);
1013 break;
1014 }
1015 usleep_range(10000, 20000);
1016 }
1017 if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1018 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
1019 hw_dbg(hw, "wait for FW Reset complete timedout\n");
1020 hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg);
1021 return -EIO;
1022 }
1023
1024 /* If there was a Global Reset in progress when we got here,
1025 * we don't need to do the PF Reset
1026 */
1027 if (!cnt) {
1028 u32 reg2 = 0;
1029 if (hw->revision_id == 0)
1030 cnt = I40E_PF_RESET_WAIT_COUNT_A0;
1031 else
1032 cnt = I40E_PF_RESET_WAIT_COUNT;
1033 reg = rd32(hw, I40E_PFGEN_CTRL);
1034 wr32(hw, I40E_PFGEN_CTRL,
1035 (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
1036 for (; cnt; cnt--) {
1037 reg = rd32(hw, I40E_PFGEN_CTRL);
1038 if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
1039 break;
1040 reg2 = rd32(hw, I40E_GLGEN_RSTAT);
1041 if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK)
1042 break;
1043 usleep_range(1000, 2000);
1044 }
1045 if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
1046 if (i40e_poll_globr(hw, grst_del))
1047 return -EIO;
1048 } else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
1049 hw_dbg(hw, "PF reset polling failed to complete.\n");
1050 return -EIO;
1051 }
1052 }
1053
1054 i40e_clear_pxe_mode(hw);
1055
1056 return 0;
1057 }
1058
1059 /**
1060 * i40e_clear_hw - clear out any left over hw state
1061 * @hw: pointer to the hw struct
1062 *
1063 * Clear queues and interrupts, typically called at init time,
1064 * but after the capabilities have been found so we know how many
1065 * queues and msix vectors have been allocated.
1066 **/
1067 void i40e_clear_hw(struct i40e_hw *hw)
1068 {
1069 u32 num_queues, base_queue;
1070 u32 num_pf_int;
1071 u32 num_vf_int;
1072 u32 num_vfs;
1073 u32 i, j;
1074 u32 val;
1075 u32 eol = 0x7ff;
1076
1077 /* get number of interrupts, queues, and VFs */
1078 val = rd32(hw, I40E_GLPCI_CNF2);
1079 num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
1080 I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
1081 num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
1082 I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
1083
1084 val = rd32(hw, I40E_PFLAN_QALLOC);
1085 base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
1086 I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
1087 j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
1088 I40E_PFLAN_QALLOC_LASTQ_SHIFT;
1089 if (val & I40E_PFLAN_QALLOC_VALID_MASK && j >= base_queue)
1090 num_queues = (j - base_queue) + 1;
1091 else
1092 num_queues = 0;
1093
1094 val = rd32(hw, I40E_PF_VT_PFALLOC);
1095 i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
1096 I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
1097 j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
1098 I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
1099 if (val & I40E_PF_VT_PFALLOC_VALID_MASK && j >= i)
1100 num_vfs = (j - i) + 1;
1101 else
1102 num_vfs = 0;
1103
1104 /* stop all the interrupts */
1105 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
1106 val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
1107 for (i = 0; i < num_pf_int - 2; i++)
1108 wr32(hw, I40E_PFINT_DYN_CTLN(i), val);
1109
1110 /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
1111 val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
1112 wr32(hw, I40E_PFINT_LNKLST0, val);
1113 for (i = 0; i < num_pf_int - 2; i++)
1114 wr32(hw, I40E_PFINT_LNKLSTN(i), val);
1115 val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
1116 for (i = 0; i < num_vfs; i++)
1117 wr32(hw, I40E_VPINT_LNKLST0(i), val);
1118 for (i = 0; i < num_vf_int - 2; i++)
1119 wr32(hw, I40E_VPINT_LNKLSTN(i), val);
1120
1121 /* warn the HW of the coming Tx disables */
1122 for (i = 0; i < num_queues; i++) {
1123 u32 abs_queue_idx = base_queue + i;
1124 u32 reg_block = 0;
1125
1126 if (abs_queue_idx >= 128) {
1127 reg_block = abs_queue_idx / 128;
1128 abs_queue_idx %= 128;
1129 }
1130
1131 val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
1132 val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
1133 val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
1134 val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
1135
1136 wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
1137 }
1138 udelay(400);
1139
1140 /* stop all the queues */
1141 for (i = 0; i < num_queues; i++) {
1142 wr32(hw, I40E_QINT_TQCTL(i), 0);
1143 wr32(hw, I40E_QTX_ENA(i), 0);
1144 wr32(hw, I40E_QINT_RQCTL(i), 0);
1145 wr32(hw, I40E_QRX_ENA(i), 0);
1146 }
1147
1148 /* short wait for all queue disables to settle */
1149 udelay(50);
1150 }
1151
1152 /**
1153 * i40e_clear_pxe_mode - clear pxe operations mode
1154 * @hw: pointer to the hw struct
1155 *
1156 * Make sure all PXE mode settings are cleared, including things
1157 * like descriptor fetch/write-back mode.
1158 **/
1159 void i40e_clear_pxe_mode(struct i40e_hw *hw)
1160 {
1161 u32 reg;
1162
1163 if (i40e_check_asq_alive(hw))
1164 i40e_aq_clear_pxe_mode(hw, NULL);
1165
1166 /* Clear single descriptor fetch/write-back mode */
1167 reg = rd32(hw, I40E_GLLAN_RCTL_0);
1168
1169 if (hw->revision_id == 0) {
1170 /* As a workaround, clear PXE_MODE instead of setting it */
1171 wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK)));
1172 } else {
1173 wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
1174 }
1175 }
1176
1177 /**
1178 * i40e_led_is_mine - helper to find matching led
1179 * @hw: pointer to the hw struct
1180 * @idx: index into GPIO registers
1181 *
1182 * returns: 0 if no match, otherwise the value of the GPIO_CTL register
1183 */
1184 static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
1185 {
1186 u32 gpio_val = 0;
1187 u32 port;
1188
1189 if (!I40E_IS_X710TL_DEVICE(hw->device_id) &&
1190 !hw->func_caps.led[idx])
1191 return 0;
1192 gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
1193 port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
1194 I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
1195
1196 /* if PRT_NUM_NA is 1 then this LED is not port specific, OR
1197 * if it is not our port then ignore
1198 */
1199 if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
1200 (port != hw->port))
1201 return 0;
1202
1203 return gpio_val;
1204 }
1205
1206 #define I40E_FW_LED BIT(4)
1207 #define I40E_LED_MODE_VALID (I40E_GLGEN_GPIO_CTL_LED_MODE_MASK >> \
1208 I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
1209
1210 #define I40E_LED0 22
1211
1212 #define I40E_PIN_FUNC_SDP 0x0
1213 #define I40E_PIN_FUNC_LED 0x1
1214
1215 /**
1216 * i40e_led_get - return current on/off mode
1217 * @hw: pointer to the hw struct
1218 *
1219 * The value returned is the 'mode' field as defined in the
1220 * GPIO register definitions: 0x0 = off, 0xf = on, and other
1221 * values are variations of possible behaviors relating to
1222 * blink, link, and wire.
1223 **/
1224 u32 i40e_led_get(struct i40e_hw *hw)
1225 {
1226 u32 mode = 0;
1227 int i;
1228
1229 /* as per the documentation GPIO 22-29 are the LED
1230 * GPIO pins named LED0..LED7
1231 */
1232 for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
1233 u32 gpio_val = i40e_led_is_mine(hw, i);
1234
1235 if (!gpio_val)
1236 continue;
1237
1238 mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
1239 I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
1240 break;
1241 }
1242
1243 return mode;
1244 }
1245
1246 /**
1247 * i40e_led_set - set new on/off mode
1248 * @hw: pointer to the hw struct
1249 * @mode: 0=off, 0xf=on (else see manual for mode details)
1250 * @blink: true if the LED should blink when on, false if steady
1251 *
1252 * If this function is used to turn on the blink, it should also be
1253 * used to turn the blink off when restoring the original state.
1254 **/
1255 void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
1256 {
1257 int i;
1258
1259 if (mode & ~I40E_LED_MODE_VALID) {
1260 hw_dbg(hw, "invalid mode passed in %X\n", mode);
1261 return;
1262 }
1263
1264 /* as per the documentation GPIO 22-29 are the LED
1265 * GPIO pins named LED0..LED7
1266 */
1267 for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
1268 u32 gpio_val = i40e_led_is_mine(hw, i);
1269
1270 if (!gpio_val)
1271 continue;
1272
1273 if (I40E_IS_X710TL_DEVICE(hw->device_id)) {
1274 u32 pin_func = 0;
1275
1276 if (mode & I40E_FW_LED)
1277 pin_func = I40E_PIN_FUNC_SDP;
1278 else
1279 pin_func = I40E_PIN_FUNC_LED;
1280
1281 gpio_val &= ~I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK;
1282 gpio_val |= ((pin_func <<
1283 I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) &
1284 I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK);
1285 }
1286 gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
1287 /* this & is a bit of paranoia, but serves as a range check */
1288 gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
1289 I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
1290
1291 if (blink)
1292 gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
1293 else
1294 gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
1295
1296 wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
1297 break;
1298 }
1299 }
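/* Illustrative sketch, not part of the driver: the blink-then-restore
 * pattern described above, as used for port identification. "orig_mode"
 * is a hypothetical local.
 *
 *	orig_mode = i40e_led_get(hw);		// remember the current mode
 *	i40e_led_set(hw, 0xf, true);		// LED on, blinking
 *	// ... identify the port ...
 *	i40e_led_set(hw, orig_mode, false);	// restore mode, blink off
 */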
1300
1301 /* Admin command wrappers */
1302
1303 /**
1304 * i40e_aq_get_phy_capabilities
1305 * @hw: pointer to the hw struct
1306 * @abilities: structure for PHY capabilities to be filled
1307 * @qualified_modules: report Qualified Modules
1308 * @report_init: report init capabilities (active are default)
1309 * @cmd_details: pointer to command details structure or NULL
1310 *
1311 * Returns the various PHY abilities supported on the Port.
1312 **/
1313 int
1314 i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
1315 bool qualified_modules, bool report_init,
1316 struct i40e_aq_get_phy_abilities_resp *abilities,
1317 struct i40e_asq_cmd_details *cmd_details)
1318 {
1319 u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
1320 u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
1321 struct i40e_aq_desc desc;
1322 int status;
1323
1324 if (!abilities)
1325 return -EINVAL;
1326
1327 do {
1328 i40e_fill_default_direct_cmd_desc(&desc,
1329 i40e_aqc_opc_get_phy_abilities);
1330
1331 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
1332 if (abilities_size > I40E_AQ_LARGE_BUF)
1333 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
1334
1335 if (qualified_modules)
1336 desc.params.external.param0 |=
1337 cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);
1338
1339 if (report_init)
1340 desc.params.external.param0 |=
1341 cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);
1342
1343 status = i40e_asq_send_command(hw, &desc, abilities,
1344 abilities_size, cmd_details);
1345
1346 switch (hw->aq.asq_last_status) {
1347 case I40E_AQ_RC_EIO:
1348 status = -EIO;
1349 break;
1350 case I40E_AQ_RC_EAGAIN:
1351 usleep_range(1000, 2000);
1352 total_delay++;
1353 status = -EIO;
1354 break;
1355 /* also covers I40E_AQ_RC_OK */
1356 default:
1357 break;
1358 }
1359
1360 } while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) &&
1361 (total_delay < max_delay));
1362
1363 if (status)
1364 return status;
1365
1366 if (report_init) {
1367 if (hw->mac.type == I40E_MAC_XL710 &&
1368 hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
1369 hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
1370 status = i40e_aq_get_link_info(hw, true, NULL, NULL);
1371 } else {
1372 hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
1373 hw->phy.phy_types |=
1374 ((u64)abilities->phy_type_ext << 32);
1375 }
1376 }
1377
1378 return status;
1379 }
1380
1381 /**
1382 * i40e_aq_set_phy_config
1383 * @hw: pointer to the hw struct
1384 * @config: structure with PHY configuration to be set
1385 * @cmd_details: pointer to command details structure or NULL
1386 *
1387 * Set the various PHY configuration parameters
1388 * supported on the Port. One or more of the Set PHY config parameters may be
1389 * ignored in an MFP mode as the PF may not have the privilege to set some
1390 * of the PHY Config parameters. This status will be indicated by the
1391 * command response.
1392 **/
1393 int i40e_aq_set_phy_config(struct i40e_hw *hw,
1394 struct i40e_aq_set_phy_config *config,
1395 struct i40e_asq_cmd_details *cmd_details)
1396 {
1397 struct i40e_aq_desc desc;
1398 struct i40e_aq_set_phy_config *cmd =
1399 (struct i40e_aq_set_phy_config *)&desc.params.raw;
1400 int status;
1401
1402 if (!config)
1403 return -EINVAL;
1404
1405 i40e_fill_default_direct_cmd_desc(&desc,
1406 i40e_aqc_opc_set_phy_config);
1407
1408 *cmd = *config;
1409
1410 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1411
1412 return status;
1413 }
1414
1415 static noinline_for_stack int
1416 i40e_set_fc_status(struct i40e_hw *hw,
1417 struct i40e_aq_get_phy_abilities_resp *abilities,
1418 bool atomic_restart)
1419 {
1420 struct i40e_aq_set_phy_config config;
1421 enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
1422 u8 pause_mask = 0x0;
1423
1424 switch (fc_mode) {
1425 case I40E_FC_FULL:
1426 pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
1427 pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
1428 break;
1429 case I40E_FC_RX_PAUSE:
1430 pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
1431 break;
1432 case I40E_FC_TX_PAUSE:
1433 pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
1434 break;
1435 default:
1436 break;
1437 }
1438
1439 memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
1440 /* clear the old pause settings */
1441 config.abilities = abilities->abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
1442 ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
1443 /* set the new abilities */
1444 config.abilities |= pause_mask;
1445 /* If the abilities have changed, then set the new config */
1446 if (config.abilities == abilities->abilities)
1447 return 0;
1448
1449 /* Auto restart link so settings take effect */
1450 if (atomic_restart)
1451 config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
1452 /* Copy over all the old settings */
1453 config.phy_type = abilities->phy_type;
1454 config.phy_type_ext = abilities->phy_type_ext;
1455 config.link_speed = abilities->link_speed;
1456 config.eee_capability = abilities->eee_capability;
1457 config.eeer = abilities->eeer_val;
1458 config.low_power_ctrl = abilities->d3_lpan;
1459 config.fec_config = abilities->fec_cfg_curr_mod_ext_info &
1460 I40E_AQ_PHY_FEC_CONFIG_MASK;
1461
1462 return i40e_aq_set_phy_config(hw, &config, NULL);
1463 }
1464
1465 /**
1466 * i40e_set_fc
1467 * @hw: pointer to the hw struct
1468 * @aq_failures: buffer to return AdminQ failure information
1469 * @atomic_restart: whether to enable atomic link restart
1470 *
1471 * Set the requested flow control mode using set_phy_config.
1472 **/
1473 int i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
1474 bool atomic_restart)
1475 {
1476 struct i40e_aq_get_phy_abilities_resp abilities;
1477 int status;
1478
1479 *aq_failures = 0x0;
1480
1481 /* Get the current phy config */
1482 status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
1483 NULL);
1484 if (status) {
1485 *aq_failures |= I40E_SET_FC_AQ_FAIL_GET;
1486 return status;
1487 }
1488
1489 status = i40e_set_fc_status(hw, &abilities, atomic_restart);
1490 if (status)
1491 *aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
1492
1493 /* Update the link info */
1494 status = i40e_update_link_info(hw);
1495 if (status) {
1496 /* Wait a little bit (on 40G cards it sometimes takes a really
1497 * long time for link to come back from the atomic reset)
1498 * and try once more
1499 */
1500 msleep(1000);
1501 status = i40e_update_link_info(hw);
1502 }
1503 if (status)
1504 *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;
1505
1506 return status;
1507 }
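/* Illustrative sketch, not part of the driver: a caller can tell which step
 * of i40e_set_fc() failed from the aq_failures bitmask, independently of the
 * returned status. The surrounding error handling is hypothetical.
 *
 *	err = i40e_set_fc(hw, &aq_failures, true);
 *	if (aq_failures & I40E_SET_FC_AQ_FAIL_GET)
 *		// reading the current PHY abilities failed
 *	if (aq_failures & I40E_SET_FC_AQ_FAIL_SET)
 *		// writing the new PHY config failed
 *	if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE)
 *		// refreshing the link info afterwards failed
 */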
1508
1509 /**
1510 * i40e_aq_clear_pxe_mode
1511 * @hw: pointer to the hw struct
1512 * @cmd_details: pointer to command details structure or NULL
1513 *
1514 * Tell the firmware that the driver is taking over from PXE
1515 **/
1516 int i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
1517 struct i40e_asq_cmd_details *cmd_details)
1518 {
1519 struct i40e_aq_desc desc;
1520 struct i40e_aqc_clear_pxe *cmd =
1521 (struct i40e_aqc_clear_pxe *)&desc.params.raw;
1522 int status;
1523
1524 i40e_fill_default_direct_cmd_desc(&desc,
1525 i40e_aqc_opc_clear_pxe_mode);
1526
1527 cmd->rx_cnt = 0x2;
1528
1529 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1530
1531 wr32(hw, I40E_GLLAN_RCTL_0, 0x1);
1532
1533 return status;
1534 }
1535
1536 /**
1537 * i40e_aq_set_link_restart_an
1538 * @hw: pointer to the hw struct
1539 * @enable_link: if true: enable link, if false: disable link
1540 * @cmd_details: pointer to command details structure or NULL
1541 *
1542 * Sets up the link and restarts the Auto-Negotiation over the link.
1543 **/
1544 int i40e_aq_set_link_restart_an(struct i40e_hw *hw,
1545 bool enable_link,
1546 struct i40e_asq_cmd_details *cmd_details)
1547 {
1548 struct i40e_aq_desc desc;
1549 struct i40e_aqc_set_link_restart_an *cmd =
1550 (struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
1551 int status;
1552
1553 i40e_fill_default_direct_cmd_desc(&desc,
1554 i40e_aqc_opc_set_link_restart_an);
1555
1556 cmd->command = I40E_AQ_PHY_RESTART_AN;
1557 if (enable_link)
1558 cmd->command |= I40E_AQ_PHY_LINK_ENABLE;
1559 else
1560 cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE;
1561
1562 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1563
1564 return status;
1565 }
1566
1567 /**
1568 * i40e_aq_get_link_info
1569 * @hw: pointer to the hw struct
1570 * @enable_lse: enable/disable LinkStatusEvent reporting
1571 * @link: pointer to link status structure - optional
1572 * @cmd_details: pointer to command details structure or NULL
1573 *
1574 * Returns the link status of the adapter.
1575 **/
1576 int i40e_aq_get_link_info(struct i40e_hw *hw,
1577 bool enable_lse, struct i40e_link_status *link,
1578 struct i40e_asq_cmd_details *cmd_details)
1579 {
1580 struct i40e_aq_desc desc;
1581 struct i40e_aqc_get_link_status *resp =
1582 (struct i40e_aqc_get_link_status *)&desc.params.raw;
1583 struct i40e_link_status *hw_link_info = &hw->phy.link_info;
1584 bool tx_pause, rx_pause;
1585 u16 command_flags;
1586 int status;
1587
1588 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
1589
1590 if (enable_lse)
1591 command_flags = I40E_AQ_LSE_ENABLE;
1592 else
1593 command_flags = I40E_AQ_LSE_DISABLE;
1594 resp->command_flags = cpu_to_le16(command_flags);
1595
1596 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1597
1598 if (status)
1599 goto aq_get_link_info_exit;
1600
1601 /* save off old link status information */
1602 hw->phy.link_info_old = *hw_link_info;
1603
1604 /* update link status */
1605 hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
1606 hw->phy.media_type = i40e_get_media_type(hw);
1607 hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
1608 hw_link_info->link_info = resp->link_info;
1609 hw_link_info->an_info = resp->an_info;
1610 hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA |
1611 I40E_AQ_CONFIG_FEC_RS_ENA);
1612 hw_link_info->ext_info = resp->ext_info;
1613 hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK;
1614 hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size);
1615 hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
1616
1617 /* update fc info */
1618 tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX);
1619 rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX);
1620 if (tx_pause & rx_pause)
1621 hw->fc.current_mode = I40E_FC_FULL;
1622 else if (tx_pause)
1623 hw->fc.current_mode = I40E_FC_TX_PAUSE;
1624 else if (rx_pause)
1625 hw->fc.current_mode = I40E_FC_RX_PAUSE;
1626 else
1627 hw->fc.current_mode = I40E_FC_NONE;
1628
1629 if (resp->config & I40E_AQ_CONFIG_CRC_ENA)
1630 hw_link_info->crc_enable = true;
1631 else
1632 hw_link_info->crc_enable = false;
1633
1634 if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_IS_ENABLED))
1635 hw_link_info->lse_enable = true;
1636 else
1637 hw_link_info->lse_enable = false;
1638
1639 if ((hw->mac.type == I40E_MAC_XL710) &&
1640 (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
1641 hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
1642 hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
1643
1644 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE &&
1645 hw->mac.type != I40E_MAC_X722) {
1646 __le32 tmp;
1647
1648 memcpy(&tmp, resp->link_type, sizeof(tmp));
1649 hw->phy.phy_types = le32_to_cpu(tmp);
1650 hw->phy.phy_types |= ((u64)resp->link_type_ext << 32);
1651 }
1652
1653 /* save link status information */
1654 if (link)
1655 *link = *hw_link_info;
1656
1657 /* flag cleared so helper functions don't call AQ again */
1658 hw->phy.get_link_info = false;
1659
1660 aq_get_link_info_exit:
1661 return status;
1662 }
1663
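/* Editor's illustrative sketch (not part of the upstream driver): a caller
 * that only needs the current link state could wrap the AQ call like this.
 * The helper name is hypothetical; I40E_AQ_LINK_UP is the same bit tested
 * by i40e_get_link_status() further below.
 */
static inline bool i40e_example_link_is_up(struct i40e_hw *hw)
{
        struct i40e_link_status link = {};

        /* passing true also enables Link Status Event reporting */
        if (i40e_aq_get_link_info(hw, true, &link, NULL))
                return false;

        return !!(link.link_info & I40E_AQ_LINK_UP);
}
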
1664 /**
1665 * i40e_aq_set_phy_int_mask
1666 * @hw: pointer to the hw struct
1667 * @mask: interrupt mask to be set
1668 * @cmd_details: pointer to command details structure or NULL
1669 *
1670 * Set link interrupt mask.
1671 **/
1672 int i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
1673 u16 mask,
1674 struct i40e_asq_cmd_details *cmd_details)
1675 {
1676 struct i40e_aq_desc desc;
1677 struct i40e_aqc_set_phy_int_mask *cmd =
1678 (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
1679 int status;
1680
1681 i40e_fill_default_direct_cmd_desc(&desc,
1682 i40e_aqc_opc_set_phy_int_mask);
1683
1684 cmd->event_mask = cpu_to_le16(mask);
1685
1686 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1687
1688 return status;
1689 }
1690
1691 /**
1692 * i40e_aq_set_mac_loopback
1693 * @hw: pointer to the HW struct
1694 * @ena_lpbk: Enable or Disable loopback
1695 * @cmd_details: pointer to command details structure or NULL
1696 *
1697 * Enable/disable loopback on a given port
1698 */
1699 int i40e_aq_set_mac_loopback(struct i40e_hw *hw, bool ena_lpbk,
1700 struct i40e_asq_cmd_details *cmd_details)
1701 {
1702 struct i40e_aq_desc desc;
1703 struct i40e_aqc_set_lb_mode *cmd =
1704 (struct i40e_aqc_set_lb_mode *)&desc.params.raw;
1705
1706 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_lb_modes);
1707 if (ena_lpbk) {
1708 if (hw->nvm.version <= I40E_LEGACY_LOOPBACK_NVM_VER)
1709 cmd->lb_mode = cpu_to_le16(I40E_AQ_LB_MAC_LOCAL_LEGACY);
1710 else
1711 cmd->lb_mode = cpu_to_le16(I40E_AQ_LB_MAC_LOCAL);
1712 }
1713
1714 return i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1715 }
1716
1717 /**
1718 * i40e_aq_set_phy_debug
1719 * @hw: pointer to the hw struct
1720 * @cmd_flags: debug command flags
1721 * @cmd_details: pointer to command details structure or NULL
1722 *
1723 * Set PHY debug command flags, e.g. to reset the external PHY.
1724 **/
1725 int i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
1726 struct i40e_asq_cmd_details *cmd_details)
1727 {
1728 struct i40e_aq_desc desc;
1729 struct i40e_aqc_set_phy_debug *cmd =
1730 (struct i40e_aqc_set_phy_debug *)&desc.params.raw;
1731 int status;
1732
1733 i40e_fill_default_direct_cmd_desc(&desc,
1734 i40e_aqc_opc_set_phy_debug);
1735
1736 cmd->command_flags = cmd_flags;
1737
1738 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1739
1740 return status;
1741 }
1742
1743 /**
1744 * i40e_is_aq_api_ver_ge
1745 * @aq: pointer to AdminQ info containing HW API version to compare
1746 * @maj: API major value
1747 * @min: API minor value
1748 *
1749 * Return true if the current HW API version is greater than or equal to the provided one.
1750 **/
1751 static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj,
1752 u16 min)
1753 {
1754 return (aq->api_maj_ver > maj ||
1755 (aq->api_maj_ver == maj && aq->api_min_ver >= min));
1756 }
1757
1758 /**
1759 * i40e_aq_add_vsi
1760 * @hw: pointer to the hw struct
1761 * @vsi_ctx: pointer to a vsi context struct
1762 * @cmd_details: pointer to command details structure or NULL
1763 *
1764 * Add a VSI context to the hardware.
1765 **/
1766 int i40e_aq_add_vsi(struct i40e_hw *hw,
1767 struct i40e_vsi_context *vsi_ctx,
1768 struct i40e_asq_cmd_details *cmd_details)
1769 {
1770 struct i40e_aq_desc desc;
1771 struct i40e_aqc_add_get_update_vsi *cmd =
1772 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
1773 struct i40e_aqc_add_get_update_vsi_completion *resp =
1774 (struct i40e_aqc_add_get_update_vsi_completion *)
1775 &desc.params.raw;
1776 int status;
1777
1778 i40e_fill_default_direct_cmd_desc(&desc,
1779 i40e_aqc_opc_add_vsi);
1780
1781 cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid);
1782 cmd->connection_type = vsi_ctx->connection_type;
1783 cmd->vf_id = vsi_ctx->vf_num;
1784 cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
1785
1786 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
1787
1788 status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
1789 sizeof(vsi_ctx->info),
1790 cmd_details, true);
1791
1792 if (status)
1793 goto aq_add_vsi_exit;
1794
1795 vsi_ctx->seid = le16_to_cpu(resp->seid);
1796 vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
1797 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
1798 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
1799
1800 aq_add_vsi_exit:
1801 return status;
1802 }
1803
1804 /**
1805 * i40e_aq_set_default_vsi
1806 * @hw: pointer to the hw struct
1807 * @seid: vsi number
1808 * @cmd_details: pointer to command details structure or NULL
1809 **/
1810 int i40e_aq_set_default_vsi(struct i40e_hw *hw,
1811 u16 seid,
1812 struct i40e_asq_cmd_details *cmd_details)
1813 {
1814 struct i40e_aq_desc desc;
1815 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1816 (struct i40e_aqc_set_vsi_promiscuous_modes *)
1817 &desc.params.raw;
1818 int status;
1819
1820 i40e_fill_default_direct_cmd_desc(&desc,
1821 i40e_aqc_opc_set_vsi_promiscuous_modes);
1822
1823 cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
1824 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
1825 cmd->seid = cpu_to_le16(seid);
1826
1827 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1828
1829 return status;
1830 }
1831
1832 /**
1833 * i40e_aq_clear_default_vsi
1834 * @hw: pointer to the hw struct
1835 * @seid: vsi number
1836 * @cmd_details: pointer to command details structure or NULL
1837 **/
1838 int i40e_aq_clear_default_vsi(struct i40e_hw *hw,
1839 u16 seid,
1840 struct i40e_asq_cmd_details *cmd_details)
1841 {
1842 struct i40e_aq_desc desc;
1843 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1844 (struct i40e_aqc_set_vsi_promiscuous_modes *)
1845 &desc.params.raw;
1846 int status;
1847
1848 i40e_fill_default_direct_cmd_desc(&desc,
1849 i40e_aqc_opc_set_vsi_promiscuous_modes);
1850
1851 cmd->promiscuous_flags = cpu_to_le16(0);
1852 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
1853 cmd->seid = cpu_to_le16(seid);
1854
1855 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1856
1857 return status;
1858 }
1859
1860 /**
1861 * i40e_aq_set_vsi_unicast_promiscuous
1862 * @hw: pointer to the hw struct
1863 * @seid: vsi number
1864 * @set: set unicast promiscuous enable/disable
1865 * @cmd_details: pointer to command details structure or NULL
1866 * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc
1867 **/
1868 int i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
1869 u16 seid, bool set,
1870 struct i40e_asq_cmd_details *cmd_details,
1871 bool rx_only_promisc)
1872 {
1873 struct i40e_aq_desc desc;
1874 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1875 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
1876 u16 flags = 0;
1877 int status;
1878
1879 i40e_fill_default_direct_cmd_desc(&desc,
1880 i40e_aqc_opc_set_vsi_promiscuous_modes);
1881
1882 if (set) {
1883 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
1884 if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
1885 flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
1886 }
1887
1888 cmd->promiscuous_flags = cpu_to_le16(flags);
1889
1890 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
1891 if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
1892 cmd->valid_flags |=
1893 cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
1894
1895 cmd->seid = cpu_to_le16(seid);
1896 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1897
1898 return status;
1899 }
1900
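/* Editor's illustrative sketch (not part of the upstream driver): enabling
 * unicast promiscuous mode on a VSI. The helper name and seid are
 * hypothetical; real callers pass vsi->seid from the PF state.
 */
static inline int i40e_example_enable_uc_promisc(struct i40e_hw *hw, u16 vsi_seid)
{
        /* rx_only_promisc = true: on FW API >= 1.5 only received traffic is
         * mirrored; on older FW the extra flag is simply not set above.
         */
        return i40e_aq_set_vsi_unicast_promiscuous(hw, vsi_seid, true, NULL, true);
}
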
1901 /**
1902 * i40e_aq_set_vsi_multicast_promiscuous
1903 * @hw: pointer to the hw struct
1904 * @seid: vsi number
1905 * @set: set multicast promiscuous enable/disable
1906 * @cmd_details: pointer to command details structure or NULL
1907 **/
1908 int i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
1909 u16 seid, bool set,
1910 struct i40e_asq_cmd_details *cmd_details)
1911 {
1912 struct i40e_aq_desc desc;
1913 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1914 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
1915 u16 flags = 0;
1916 int status;
1917
1918 i40e_fill_default_direct_cmd_desc(&desc,
1919 i40e_aqc_opc_set_vsi_promiscuous_modes);
1920
1921 if (set)
1922 flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
1923
1924 cmd->promiscuous_flags = cpu_to_le16(flags);
1925
1926 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
1927
1928 cmd->seid = cpu_to_le16(seid);
1929 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1930
1931 return status;
1932 }
1933
1934 /**
1935 * i40e_aq_set_vsi_mc_promisc_on_vlan
1936 * @hw: pointer to the hw struct
1937 * @seid: vsi number
1938 * @enable: set MAC L2 layer multicast promiscuous enable/disable for a given VLAN
1939 * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
1940 * @cmd_details: pointer to command details structure or NULL
1941 **/
1942 int i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
1943 u16 seid, bool enable,
1944 u16 vid,
1945 struct i40e_asq_cmd_details *cmd_details)
1946 {
1947 struct i40e_aq_desc desc;
1948 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1949 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
1950 u16 flags = 0;
1951 int status;
1952
1953 i40e_fill_default_direct_cmd_desc(&desc,
1954 i40e_aqc_opc_set_vsi_promiscuous_modes);
1955
1956 if (enable)
1957 flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
1958
1959 cmd->promiscuous_flags = cpu_to_le16(flags);
1960 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
1961 cmd->seid = cpu_to_le16(seid);
1962 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
1963
1964 status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
1965 cmd_details, true);
1966
1967 return status;
1968 }
1969
1970 /**
1971 * i40e_aq_set_vsi_uc_promisc_on_vlan
1972 * @hw: pointer to the hw struct
1973 * @seid: vsi number
1974 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
1975 * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
1976 * @cmd_details: pointer to command details structure or NULL
1977 **/
1978 int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
1979 u16 seid, bool enable,
1980 u16 vid,
1981 struct i40e_asq_cmd_details *cmd_details)
1982 {
1983 struct i40e_aq_desc desc;
1984 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1985 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
1986 u16 flags = 0;
1987 int status;
1988
1989 i40e_fill_default_direct_cmd_desc(&desc,
1990 i40e_aqc_opc_set_vsi_promiscuous_modes);
1991
1992 if (enable) {
1993 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
1994 if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
1995 flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
1996 }
1997
1998 cmd->promiscuous_flags = cpu_to_le16(flags);
1999 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
2000 if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
2001 cmd->valid_flags |=
2002 cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
2003 cmd->seid = cpu_to_le16(seid);
2004 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
2005
2006 status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
2007 cmd_details, true);
2008
2009 return status;
2010 }
2011
2012 /**
2013 * i40e_aq_set_vsi_bc_promisc_on_vlan
2014 * @hw: pointer to the hw struct
2015 * @seid: vsi number
2016 * @enable: set broadcast promiscuous enable/disable for a given VLAN
2017 * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag
2018 * @cmd_details: pointer to command details structure or NULL
2019 **/
2020 int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
2021 u16 seid, bool enable, u16 vid,
2022 struct i40e_asq_cmd_details *cmd_details)
2023 {
2024 struct i40e_aq_desc desc;
2025 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2026 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2027 u16 flags = 0;
2028 int status;
2029
2030 i40e_fill_default_direct_cmd_desc(&desc,
2031 i40e_aqc_opc_set_vsi_promiscuous_modes);
2032
2033 if (enable)
2034 flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST;
2035
2036 cmd->promiscuous_flags = cpu_to_le16(flags);
2037 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2038 cmd->seid = cpu_to_le16(seid);
2039 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
2040
2041 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2042
2043 return status;
2044 }
2045
2046 /**
2047 * i40e_aq_set_vsi_broadcast
2048 * @hw: pointer to the hw struct
2049 * @seid: vsi number
2050 * @set_filter: true to set filter, false to clear filter
2051 * @cmd_details: pointer to command details structure or NULL
2052 *
2053 * Set or clear the broadcast promiscuous flag (filter) for a given VSI.
2054 **/
2055 int i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
2056 u16 seid, bool set_filter,
2057 struct i40e_asq_cmd_details *cmd_details)
2058 {
2059 struct i40e_aq_desc desc;
2060 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2061 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2062 int status;
2063
2064 i40e_fill_default_direct_cmd_desc(&desc,
2065 i40e_aqc_opc_set_vsi_promiscuous_modes);
2066
2067 if (set_filter)
2068 cmd->promiscuous_flags
2069 |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2070 else
2071 cmd->promiscuous_flags
2072 &= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2073
2074 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2075 cmd->seid = cpu_to_le16(seid);
2076 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2077
2078 return status;
2079 }
2080
2081 /**
2082 * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting
2083 * @hw: pointer to the hw struct
2084 * @seid: vsi number
2085 * @enable: enable/disable VLAN promiscuous mode for this VSI
2086 * @cmd_details: pointer to command details structure or NULL
2087 **/
2088 int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
2089 u16 seid, bool enable,
2090 struct i40e_asq_cmd_details *cmd_details)
2091 {
2092 struct i40e_aq_desc desc;
2093 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2094 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2095 u16 flags = 0;
2096 int status;
2097
2098 i40e_fill_default_direct_cmd_desc(&desc,
2099 i40e_aqc_opc_set_vsi_promiscuous_modes);
2100 if (enable)
2101 flags |= I40E_AQC_SET_VSI_PROMISC_VLAN;
2102
2103 cmd->promiscuous_flags = cpu_to_le16(flags);
2104 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN);
2105 cmd->seid = cpu_to_le16(seid);
2106
2107 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2108
2109 return status;
2110 }
2111
2112 /**
2113 * i40e_aq_get_vsi_params - get VSI configuration info
2114 * @hw: pointer to the hw struct
2115 * @vsi_ctx: pointer to a vsi context struct
2116 * @cmd_details: pointer to command details structure or NULL
2117 **/
2118 int i40e_aq_get_vsi_params(struct i40e_hw *hw,
2119 struct i40e_vsi_context *vsi_ctx,
2120 struct i40e_asq_cmd_details *cmd_details)
2121 {
2122 struct i40e_aq_desc desc;
2123 struct i40e_aqc_add_get_update_vsi *cmd =
2124 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
2125 struct i40e_aqc_add_get_update_vsi_completion *resp =
2126 (struct i40e_aqc_add_get_update_vsi_completion *)
2127 &desc.params.raw;
2128 int status;
2129
2130 i40e_fill_default_direct_cmd_desc(&desc,
2131 i40e_aqc_opc_get_vsi_parameters);
2132
2133 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
2134
2135 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
2136
2137 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
2138 sizeof(vsi_ctx->info), NULL);
2139
2140 if (status)
2141 goto aq_get_vsi_params_exit;
2142
2143 vsi_ctx->seid = le16_to_cpu(resp->seid);
2144 vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
2145 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
2146 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
2147
2148 aq_get_vsi_params_exit:
2149 return status;
2150 }
2151
2152 /**
2153 * i40e_aq_update_vsi_params
2154 * @hw: pointer to the hw struct
2155 * @vsi_ctx: pointer to a vsi context struct
2156 * @cmd_details: pointer to command details structure or NULL
2157 *
2158 * Update a VSI context.
2159 **/
2160 int i40e_aq_update_vsi_params(struct i40e_hw *hw,
2161 struct i40e_vsi_context *vsi_ctx,
2162 struct i40e_asq_cmd_details *cmd_details)
2163 {
2164 struct i40e_aq_desc desc;
2165 struct i40e_aqc_add_get_update_vsi *cmd =
2166 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
2167 struct i40e_aqc_add_get_update_vsi_completion *resp =
2168 (struct i40e_aqc_add_get_update_vsi_completion *)
2169 &desc.params.raw;
2170 int status;
2171
2172 i40e_fill_default_direct_cmd_desc(&desc,
2173 i40e_aqc_opc_update_vsi_parameters);
2174 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
2175
2176 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2177
2178 status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
2179 sizeof(vsi_ctx->info),
2180 cmd_details, true);
2181
2182 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
2183 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
2184
2185 return status;
2186 }
2187
2188 /**
2189 * i40e_aq_get_switch_config
2190 * @hw: pointer to the hardware structure
2191 * @buf: pointer to the result buffer
2192 * @buf_size: length of input buffer
2193 * @start_seid: seid to start for the report, 0 == beginning
2194 * @cmd_details: pointer to command details structure or NULL
2195 *
2196 * Fill the buf with switch configuration returned from AdminQ command
2197 **/
2198 int i40e_aq_get_switch_config(struct i40e_hw *hw,
2199 struct i40e_aqc_get_switch_config_resp *buf,
2200 u16 buf_size, u16 *start_seid,
2201 struct i40e_asq_cmd_details *cmd_details)
2202 {
2203 struct i40e_aq_desc desc;
2204 struct i40e_aqc_switch_seid *scfg =
2205 (struct i40e_aqc_switch_seid *)&desc.params.raw;
2206 int status;
2207
2208 i40e_fill_default_direct_cmd_desc(&desc,
2209 i40e_aqc_opc_get_switch_config);
2210 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
2211 if (buf_size > I40E_AQ_LARGE_BUF)
2212 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2213 scfg->seid = cpu_to_le16(*start_seid);
2214
2215 status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
2216 *start_seid = le16_to_cpu(scfg->seid);
2217
2218 return status;
2219 }
2220
2221 /**
2222 * i40e_aq_set_switch_config
2223 * @hw: pointer to the hardware structure
2224 * @flags: bit flag values to set
2225 * @valid_flags: which bit flags to set
2226 * @mode: cloud filter mode
2228 * @cmd_details: pointer to command details structure or NULL
2229 *
2230 * Set switch configuration bits
2231 **/
2232 int i40e_aq_set_switch_config(struct i40e_hw *hw,
2233 u16 flags,
2234 u16 valid_flags, u8 mode,
2235 struct i40e_asq_cmd_details *cmd_details)
2236 {
2237 struct i40e_aq_desc desc;
2238 struct i40e_aqc_set_switch_config *scfg =
2239 (struct i40e_aqc_set_switch_config *)&desc.params.raw;
2240 int status;
2241
2242 i40e_fill_default_direct_cmd_desc(&desc,
2243 i40e_aqc_opc_set_switch_config);
2244 scfg->flags = cpu_to_le16(flags);
2245 scfg->valid_flags = cpu_to_le16(valid_flags);
2246 scfg->mode = mode;
2247 if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
2248 scfg->switch_tag = cpu_to_le16(hw->switch_tag);
2249 scfg->first_tag = cpu_to_le16(hw->first_tag);
2250 scfg->second_tag = cpu_to_le16(hw->second_tag);
2251 }
2252 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2253
2254 return status;
2255 }
2256
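/* Editor's illustrative sketch (not part of the upstream driver): clearing
 * the switch-level promiscuous flag. The helper name is hypothetical and
 * I40E_AQ_SET_SWITCH_CFG_PROMISC is assumed from the AQ command
 * definitions; mode 0 is what callers pass when the cloud filter mode is
 * not being changed.
 */
static inline int i40e_example_clear_switch_promisc(struct i40e_hw *hw)
{
        u16 valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;

        /* flags = 0 with the same bit in valid_flags clears the setting */
        return i40e_aq_set_switch_config(hw, 0, valid_flags, 0, NULL);
}
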
2257 /**
2258 * i40e_aq_get_firmware_version
2259 * @hw: pointer to the hw struct
2260 * @fw_major_version: firmware major version
2261 * @fw_minor_version: firmware minor version
2262 * @fw_build: firmware build number
2263 * @api_major_version: AdminQ API major version
2264 * @api_minor_version: AdminQ API minor version
2265 * @cmd_details: pointer to command details structure or NULL
2266 *
2267 * Get the firmware version from the admin queue commands
2268 **/
2269 int i40e_aq_get_firmware_version(struct i40e_hw *hw,
2270 u16 *fw_major_version, u16 *fw_minor_version,
2271 u32 *fw_build,
2272 u16 *api_major_version, u16 *api_minor_version,
2273 struct i40e_asq_cmd_details *cmd_details)
2274 {
2275 struct i40e_aq_desc desc;
2276 struct i40e_aqc_get_version *resp =
2277 (struct i40e_aqc_get_version *)&desc.params.raw;
2278 int status;
2279
2280 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
2281
2282 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2283
2284 if (!status) {
2285 if (fw_major_version)
2286 *fw_major_version = le16_to_cpu(resp->fw_major);
2287 if (fw_minor_version)
2288 *fw_minor_version = le16_to_cpu(resp->fw_minor);
2289 if (fw_build)
2290 *fw_build = le32_to_cpu(resp->fw_build);
2291 if (api_major_version)
2292 *api_major_version = le16_to_cpu(resp->api_major);
2293 if (api_minor_version)
2294 *api_minor_version = le16_to_cpu(resp->api_minor);
2295 }
2296
2297 return status;
2298 }
2299
2300 /**
2301 * i40e_aq_send_driver_version
2302 * @hw: pointer to the hw struct
2303 * @dv: driver's major, minor version
2304 * @cmd_details: pointer to command details structure or NULL
2305 *
2306 * Send the driver version to the firmware
2307 **/
2308 int i40e_aq_send_driver_version(struct i40e_hw *hw,
2309 struct i40e_driver_version *dv,
2310 struct i40e_asq_cmd_details *cmd_details)
2311 {
2312 struct i40e_aq_desc desc;
2313 struct i40e_aqc_driver_version *cmd =
2314 (struct i40e_aqc_driver_version *)&desc.params.raw;
2315 int status;
2316 u16 len;
2317
2318 if (dv == NULL)
2319 return -EINVAL;
2320
2321 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
2322
2323 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
2324 cmd->driver_major_ver = dv->major_version;
2325 cmd->driver_minor_ver = dv->minor_version;
2326 cmd->driver_build_ver = dv->build_version;
2327 cmd->driver_subbuild_ver = dv->subbuild_version;
2328
2329 len = 0;
2330 while (len < sizeof(dv->driver_string) &&
2331 (dv->driver_string[len] < 0x80) &&
2332 dv->driver_string[len])
2333 len++;
2334 status = i40e_asq_send_command(hw, &desc, dv->driver_string,
2335 len, cmd_details);
2336
2337 return status;
2338 }
2339
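/* Editor's illustrative sketch (not part of the upstream driver): reporting
 * a driver version to firmware. The helper name and version numbers are
 * placeholders; the core driver fills these fields from its real version
 * macros and also copies a version string into dv.driver_string.
 */
static inline int i40e_example_report_drv_ver(struct i40e_hw *hw)
{
        struct i40e_driver_version dv = {
                .major_version = 2,
                .minor_version = 0,
                .build_version = 0,
                .subbuild_version = 0,
        };

        /* driver_string left zeroed, so the command carries no buffer */
        return i40e_aq_send_driver_version(hw, &dv, NULL);
}
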
2340 /**
2341 * i40e_get_link_status - get status of the HW network link
2342 * @hw: pointer to the hw struct
2343 * @link_up: pointer to bool (true/false = linkup/linkdown)
2344 *
2345 * Sets *link_up to true if the link is up and false if it is down.
2346 * The value in *link_up is valid only if the returned status is 0.
2347 *
2348 * Side effect: LinkStatusEvent reporting becomes enabled
2349 **/
2350 int i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
2351 {
2352 int status = 0;
2353
2354 if (hw->phy.get_link_info) {
2355 status = i40e_update_link_info(hw);
2356
2357 if (status)
2358 i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n",
2359 status);
2360 }
2361
2362 *link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
2363
2364 return status;
2365 }
2366
2367 /**
2368 * i40e_update_link_info - update status of the HW network link
2369 * @hw: pointer to the hw struct
2370 **/
2371 noinline_for_stack int i40e_update_link_info(struct i40e_hw *hw)
2372 {
2373 struct i40e_aq_get_phy_abilities_resp abilities;
2374 int status = 0;
2375
2376 status = i40e_aq_get_link_info(hw, true, NULL, NULL);
2377 if (status)
2378 return status;
2379
2380 /* extra checking needed to ensure link info to user is timely */
2381 if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) &&
2382 ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) ||
2383 !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) {
2384 status = i40e_aq_get_phy_capabilities(hw, false, false,
2385 &abilities, NULL);
2386 if (status)
2387 return status;
2388
2389 if (abilities.fec_cfg_curr_mod_ext_info &
2390 I40E_AQ_ENABLE_FEC_AUTO)
2391 hw->phy.link_info.req_fec_info =
2392 (I40E_AQ_REQUEST_FEC_KR |
2393 I40E_AQ_REQUEST_FEC_RS);
2394 else
2395 hw->phy.link_info.req_fec_info =
2396 abilities.fec_cfg_curr_mod_ext_info &
2397 (I40E_AQ_REQUEST_FEC_KR |
2398 I40E_AQ_REQUEST_FEC_RS);
2399
2400 memcpy(hw->phy.link_info.module_type, &abilities.module_type,
2401 sizeof(hw->phy.link_info.module_type));
2402 }
2403
2404 return status;
2405 }
2406
2407 /**
2408 * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC
2409 * @hw: pointer to the hw struct
2410 * @uplink_seid: the MAC or other gizmo SEID
2411 * @downlink_seid: the VSI SEID
2412 * @enabled_tc: bitmap of TCs to be enabled
2413 * @default_port: true for default port VSI, false for control port
2414 * @veb_seid: pointer to where to put the resulting VEB SEID
2415 * @enable_stats: true to turn on VEB stats
2416 * @cmd_details: pointer to command details structure or NULL
2417 *
2418 * This asks the FW to add a VEB between the uplink and downlink
2419 * elements. If the uplink SEID is 0, this will be a floating VEB.
2420 **/
2421 int i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
2422 u16 downlink_seid, u8 enabled_tc,
2423 bool default_port, u16 *veb_seid,
2424 bool enable_stats,
2425 struct i40e_asq_cmd_details *cmd_details)
2426 {
2427 struct i40e_aq_desc desc;
2428 struct i40e_aqc_add_veb *cmd =
2429 (struct i40e_aqc_add_veb *)&desc.params.raw;
2430 struct i40e_aqc_add_veb_completion *resp =
2431 (struct i40e_aqc_add_veb_completion *)&desc.params.raw;
2432 u16 veb_flags = 0;
2433 int status;
2434
2435 /* SEIDs need to either both be set or both be 0 for floating VEB */
2436 if (!!uplink_seid != !!downlink_seid)
2437 return -EINVAL;
2438
2439 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb);
2440
2441 cmd->uplink_seid = cpu_to_le16(uplink_seid);
2442 cmd->downlink_seid = cpu_to_le16(downlink_seid);
2443 cmd->enable_tcs = enabled_tc;
2444 if (!uplink_seid)
2445 veb_flags |= I40E_AQC_ADD_VEB_FLOATING;
2446 if (default_port)
2447 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT;
2448 else
2449 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA;
2450
2451 /* reverse logic here: set the bitflag to disable the stats */
2452 if (!enable_stats)
2453 veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS;
2454
2455 cmd->veb_flags = cpu_to_le16(veb_flags);
2456
2457 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2458
2459 if (!status && veb_seid)
2460 *veb_seid = le16_to_cpu(resp->veb_seid);
2461
2462 return status;
2463 }
2464
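/* Editor's illustrative sketch (not part of the upstream driver): adding a
 * default-port VEB between the MAC uplink and a VSI. The helper name and
 * SEID values are hypothetical; enabled_tc 0x1 enables TC0 only and stats
 * stay enabled.
 */
static inline int i40e_example_add_default_veb(struct i40e_hw *hw,
                                               u16 mac_seid, u16 vsi_seid,
                                               u16 *veb_seid)
{
        return i40e_aq_add_veb(hw, mac_seid, vsi_seid, 0x1, true, veb_seid,
                               true, NULL);
}
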
2465 /**
2466 * i40e_aq_get_veb_parameters - Retrieve VEB parameters
2467 * @hw: pointer to the hw struct
2468 * @veb_seid: the SEID of the VEB to query
2469 * @switch_id: the uplink switch id
2470 * @floating: set to true if the VEB is floating
2471 * @statistic_index: index of the stats counter block for this VEB
2472 * @vebs_used: number of VEBs used by the function
2473 * @vebs_free: total VEBs not reserved by any function
2474 * @cmd_details: pointer to command details structure or NULL
2475 *
2476 * This retrieves the parameters for a particular VEB, specified by
2477 * uplink_seid, and returns them to the caller.
2478 **/
2479 int i40e_aq_get_veb_parameters(struct i40e_hw *hw,
2480 u16 veb_seid, u16 *switch_id,
2481 bool *floating, u16 *statistic_index,
2482 u16 *vebs_used, u16 *vebs_free,
2483 struct i40e_asq_cmd_details *cmd_details)
2484 {
2485 struct i40e_aq_desc desc;
2486 struct i40e_aqc_get_veb_parameters_completion *cmd_resp =
2487 (struct i40e_aqc_get_veb_parameters_completion *)
2488 &desc.params.raw;
2489 int status;
2490
2491 if (veb_seid == 0)
2492 return -EINVAL;
2493
2494 i40e_fill_default_direct_cmd_desc(&desc,
2495 i40e_aqc_opc_get_veb_parameters);
2496 cmd_resp->seid = cpu_to_le16(veb_seid);
2497
2498 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2499 if (status)
2500 goto get_veb_exit;
2501
2502 if (switch_id)
2503 *switch_id = le16_to_cpu(cmd_resp->switch_id);
2504 if (statistic_index)
2505 *statistic_index = le16_to_cpu(cmd_resp->statistic_index);
2506 if (vebs_used)
2507 *vebs_used = le16_to_cpu(cmd_resp->vebs_used);
2508 if (vebs_free)
2509 *vebs_free = le16_to_cpu(cmd_resp->vebs_free);
2510 if (floating) {
2511 u16 flags = le16_to_cpu(cmd_resp->veb_flags);
2512
2513 if (flags & I40E_AQC_ADD_VEB_FLOATING)
2514 *floating = true;
2515 else
2516 *floating = false;
2517 }
2518
2519 get_veb_exit:
2520 return status;
2521 }
2522
2523 /**
2524 * i40e_prepare_add_macvlan
2525 * @mv_list: list of macvlans to be added
2526 * @desc: pointer to AQ descriptor structure
2527 * @count: length of the list
2528 * @seid: VSI for the mac address
2529 *
2530 * Internal helper function that prepares the add macvlan request
2531 * and returns the buffer size.
2532 **/
2533 static u16
2534 i40e_prepare_add_macvlan(struct i40e_aqc_add_macvlan_element_data *mv_list,
2535 struct i40e_aq_desc *desc, u16 count, u16 seid)
2536 {
2537 struct i40e_aqc_macvlan *cmd =
2538 (struct i40e_aqc_macvlan *)&desc->params.raw;
2539 u16 buf_size;
2540 int i;
2541
2542 buf_size = count * sizeof(*mv_list);
2543
2544 /* prep the rest of the request */
2545 i40e_fill_default_direct_cmd_desc(desc, i40e_aqc_opc_add_macvlan);
2546 cmd->num_addresses = cpu_to_le16(count);
2547 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
2548 cmd->seid[1] = 0;
2549 cmd->seid[2] = 0;
2550
2551 for (i = 0; i < count; i++)
2552 if (is_multicast_ether_addr(mv_list[i].mac_addr))
2553 mv_list[i].flags |=
2554 cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC);
2555
2556 desc->flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2557 if (buf_size > I40E_AQ_LARGE_BUF)
2558 desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2559
2560 return buf_size;
2561 }
2562
2563 /**
2564 * i40e_aq_add_macvlan
2565 * @hw: pointer to the hw struct
2566 * @seid: VSI for the mac address
2567 * @mv_list: list of macvlans to be added
2568 * @count: length of the list
2569 * @cmd_details: pointer to command details structure or NULL
2570 *
2571 * Add MAC/VLAN addresses to the HW filtering
2572 **/
2573 int
2574 i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
2575 struct i40e_aqc_add_macvlan_element_data *mv_list,
2576 u16 count, struct i40e_asq_cmd_details *cmd_details)
2577 {
2578 struct i40e_aq_desc desc;
2579 u16 buf_size;
2580
2581 if (count == 0 || !mv_list || !hw)
2582 return -EINVAL;
2583
2584 buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid);
2585
2586 return i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size,
2587 cmd_details, true);
2588 }
2589
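/* Editor's illustrative sketch (not part of the upstream driver): adding a
 * single unicast MAC filter to a VSI. The helper name is hypothetical and
 * I40E_AQC_MACVLAN_ADD_PERFECT_MATCH is assumed from the AQ command
 * definitions; real callers batch many elements per command.
 */
static inline int i40e_example_add_mac_filter(struct i40e_hw *hw, u16 vsi_seid,
                                              const u8 *mac)
{
        struct i40e_aqc_add_macvlan_element_data elem = {};

        ether_addr_copy(elem.mac_addr, mac);
        elem.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);

        return i40e_aq_add_macvlan(hw, vsi_seid, &elem, 1, NULL);
}
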
2590 /**
2591 * i40e_aq_add_macvlan_v2
2592 * @hw: pointer to the hw struct
2593 * @seid: VSI for the mac address
2594 * @mv_list: list of macvlans to be added
2595 * @count: length of the list
2596 * @cmd_details: pointer to command details structure or NULL
2597 * @aq_status: pointer to Admin Queue status return value
2598 *
2599 * Add MAC/VLAN addresses to the HW filtering.
2600 * The _v2 version returns the last Admin Queue status in aq_status
2601 * to avoid race conditions in access to hw->aq.asq_last_status.
2602 * It also calls _v2 versions of asq_send_command functions to
2603 * get the aq_status on the stack.
2604 **/
2605 int
2606 i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
2607 struct i40e_aqc_add_macvlan_element_data *mv_list,
2608 u16 count, struct i40e_asq_cmd_details *cmd_details,
2609 enum i40e_admin_queue_err *aq_status)
2610 {
2611 struct i40e_aq_desc desc;
2612 u16 buf_size;
2613
2614 if (count == 0 || !mv_list || !hw)
2615 return -EINVAL;
2616
2617 buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid);
2618
2619 return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size,
2620 cmd_details, true, aq_status);
2621 }
2622
2623 /**
2624 * i40e_aq_remove_macvlan
2625 * @hw: pointer to the hw struct
2626 * @seid: VSI for the mac address
2627 * @mv_list: list of macvlans to be removed
2628 * @count: length of the list
2629 * @cmd_details: pointer to command details structure or NULL
2630 *
2631 * Remove MAC/VLAN addresses from the HW filtering
2632 **/
2633 int
2634 i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
2635 struct i40e_aqc_remove_macvlan_element_data *mv_list,
2636 u16 count, struct i40e_asq_cmd_details *cmd_details)
2637 {
2638 struct i40e_aq_desc desc;
2639 struct i40e_aqc_macvlan *cmd =
2640 (struct i40e_aqc_macvlan *)&desc.params.raw;
2641 u16 buf_size;
2642 int status;
2643
2644 if (count == 0 || !mv_list || !hw)
2645 return -EINVAL;
2646
2647 buf_size = count * sizeof(*mv_list);
2648
2649 /* prep the rest of the request */
2650 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
2651 cmd->num_addresses = cpu_to_le16(count);
2652 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
2653 cmd->seid[1] = 0;
2654 cmd->seid[2] = 0;
2655
2656 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2657 if (buf_size > I40E_AQ_LARGE_BUF)
2658 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2659
2660 status = i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size,
2661 cmd_details, true);
2662
2663 return status;
2664 }
2665
2666 /**
2667 * i40e_aq_remove_macvlan_v2
2668 * @hw: pointer to the hw struct
2669 * @seid: VSI for the mac address
2670 * @mv_list: list of macvlans to be removed
2671 * @count: length of the list
2672 * @cmd_details: pointer to command details structure or NULL
2673 * @aq_status: pointer to Admin Queue status return value
2674 *
2675 * Remove MAC/VLAN addresses from the HW filtering.
2676 * The _v2 version returns the last Admin Queue status in aq_status
2677 * to avoid race conditions in access to hw->aq.asq_last_status.
2678 * It also calls _v2 versions of asq_send_command functions to
2679 * get the aq_status on the stack.
2680 **/
2681 int
2682 i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
2683 struct i40e_aqc_remove_macvlan_element_data *mv_list,
2684 u16 count, struct i40e_asq_cmd_details *cmd_details,
2685 enum i40e_admin_queue_err *aq_status)
2686 {
2687 struct i40e_aqc_macvlan *cmd;
2688 struct i40e_aq_desc desc;
2689 u16 buf_size;
2690
2691 if (count == 0 || !mv_list || !hw)
2692 return -EINVAL;
2693
2694 buf_size = count * sizeof(*mv_list);
2695
2696 /* prep the rest of the request */
2697 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
2698 cmd = (struct i40e_aqc_macvlan *)&desc.params.raw;
2699 cmd->num_addresses = cpu_to_le16(count);
2700 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
2701 cmd->seid[1] = 0;
2702 cmd->seid[2] = 0;
2703
2704 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2705 if (buf_size > I40E_AQ_LARGE_BUF)
2706 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2707
2708 return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size,
2709 cmd_details, true, aq_status);
2710 }
2711
2712 /**
2713 * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule
2714 * @hw: pointer to the hw struct
2715 * @opcode: AQ opcode for add or delete mirror rule
2716 * @sw_seid: Switch SEID (to which rule refers)
2717 * @rule_type: Rule Type (ingress/egress/VLAN)
2718 * @id: Destination VSI SEID or Rule ID
2719 * @count: length of the list
2720 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
2721 * @cmd_details: pointer to command details structure or NULL
2722 * @rule_id: Rule ID returned from FW
2723 * @rules_used: Number of rules used in internal switch
2724 * @rules_free: Number of rules free in internal switch
2725 *
2726 * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for
2727 * VEBs/VEPA elements only
2728 **/
2729 static int i40e_mirrorrule_op(struct i40e_hw *hw,
2730 u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
2731 u16 count, __le16 *mr_list,
2732 struct i40e_asq_cmd_details *cmd_details,
2733 u16 *rule_id, u16 *rules_used, u16 *rules_free)
2734 {
2735 struct i40e_aq_desc desc;
2736 struct i40e_aqc_add_delete_mirror_rule *cmd =
2737 (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw;
2738 struct i40e_aqc_add_delete_mirror_rule_completion *resp =
2739 (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw;
2740 u16 buf_size;
2741 int status;
2742
2743 buf_size = count * sizeof(*mr_list);
2744
2745 /* prep the rest of the request */
2746 i40e_fill_default_direct_cmd_desc(&desc, opcode);
2747 cmd->seid = cpu_to_le16(sw_seid);
2748 cmd->rule_type = cpu_to_le16(rule_type &
2749 I40E_AQC_MIRROR_RULE_TYPE_MASK);
2750 cmd->num_entries = cpu_to_le16(count);
2751 /* Dest VSI for add, rule_id for delete */
2752 cmd->destination = cpu_to_le16(id);
2753 if (mr_list) {
2754 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
2755 I40E_AQ_FLAG_RD));
2756 if (buf_size > I40E_AQ_LARGE_BUF)
2757 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2758 }
2759
2760 status = i40e_asq_send_command(hw, &desc, mr_list, buf_size,
2761 cmd_details);
2762 if (!status ||
2763 hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) {
2764 if (rule_id)
2765 *rule_id = le16_to_cpu(resp->rule_id);
2766 if (rules_used)
2767 *rules_used = le16_to_cpu(resp->mirror_rules_used);
2768 if (rules_free)
2769 *rules_free = le16_to_cpu(resp->mirror_rules_free);
2770 }
2771 return status;
2772 }
2773
2774 /**
2775 * i40e_aq_add_mirrorrule - add a mirror rule
2776 * @hw: pointer to the hw struct
2777 * @sw_seid: Switch SEID (to which rule refers)
2778 * @rule_type: Rule Type (ingress/egress/VLAN)
2779 * @dest_vsi: SEID of VSI to which packets will be mirrored
2780 * @count: length of the list
2781 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
2782 * @cmd_details: pointer to command details structure or NULL
2783 * @rule_id: Rule ID returned from FW
2784 * @rules_used: Number of rules used in internal switch
2785 * @rules_free: Number of rules free in internal switch
2786 *
2787 * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
2788 **/
2789 int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
2790 u16 rule_type, u16 dest_vsi, u16 count,
2791 __le16 *mr_list,
2792 struct i40e_asq_cmd_details *cmd_details,
2793 u16 *rule_id, u16 *rules_used, u16 *rules_free)
2794 {
2795 if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS ||
2796 rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) {
2797 if (count == 0 || !mr_list)
2798 return -EINVAL;
2799 }
2800
2801 return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid,
2802 rule_type, dest_vsi, count, mr_list,
2803 cmd_details, rule_id, rules_used, rules_free);
2804 }
2805
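/* Editor's illustrative sketch (not part of the upstream driver): mirroring
 * all ingress traffic of a switch element to a destination VSI. The helper
 * name is hypothetical; for the ALL_INGRESS rule type no VSI/VLAN list is
 * required, so count is 0 and mr_list is NULL.
 */
static inline int i40e_example_mirror_all_ingress(struct i40e_hw *hw,
                                                  u16 sw_seid, u16 dst_vsi_seid,
                                                  u16 *rule_id)
{
        return i40e_aq_add_mirrorrule(hw, sw_seid,
                                      I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS,
                                      dst_vsi_seid, 0, NULL, NULL,
                                      rule_id, NULL, NULL);
}
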
2806 /**
2807 * i40e_aq_delete_mirrorrule - delete a mirror rule
2808 * @hw: pointer to the hw struct
2809 * @sw_seid: Switch SEID (to which rule refers)
2810 * @rule_type: Rule Type (ingress/egress/VLAN)
2811 * @count: length of the list
2812 * @rule_id: Rule ID that is returned in the receive desc as part of
2813 * add_mirrorrule.
2814 * @mr_list: list of mirrored VLAN IDs to be removed
2815 * @cmd_details: pointer to command details structure or NULL
2816 * @rules_used: Number of rules used in internal switch
2817 * @rules_free: Number of rules free in internal switch
2818 *
2819 * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
2820 **/
2821 int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
2822 u16 rule_type, u16 rule_id, u16 count,
2823 __le16 *mr_list,
2824 struct i40e_asq_cmd_details *cmd_details,
2825 u16 *rules_used, u16 *rules_free)
2826 {
2827 /* Rule ID has to be valid except for rule_type INGRESS VLAN mirroring */
2828 if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
2829 /* count and mr_list shall be valid for rule_type INGRESS VLAN
2830 * mirroring. For other rule_type values, count and mr_list do
2831 * not matter.
2832 */
2833 if (count == 0 || !mr_list)
2834 return -EINVAL;
2835 }
2836
2837 return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid,
2838 rule_type, rule_id, count, mr_list,
2839 cmd_details, NULL, rules_used, rules_free);
2840 }
2841
2842 /**
2843 * i40e_aq_send_msg_to_vf
2844 * @hw: pointer to the hardware structure
2845 * @vfid: VF id to send msg
2846 * @v_opcode: opcodes for VF-PF communication
2847 * @v_retval: return error code
2848 * @msg: pointer to the msg buffer
2849 * @msglen: msg length
2850 * @cmd_details: pointer to command details
2851 *
2852 * send msg to vf
2853 **/
2854 int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
2855 u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
2856 struct i40e_asq_cmd_details *cmd_details)
2857 {
2858 struct i40e_aq_desc desc;
2859 struct i40e_aqc_pf_vf_message *cmd =
2860 (struct i40e_aqc_pf_vf_message *)&desc.params.raw;
2861 int status;
2862
2863 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
2864 cmd->id = cpu_to_le32(vfid);
2865 desc.cookie_high = cpu_to_le32(v_opcode);
2866 desc.cookie_low = cpu_to_le32(v_retval);
2867 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
2868 if (msglen) {
2869 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
2870 I40E_AQ_FLAG_RD));
2871 if (msglen > I40E_AQ_LARGE_BUF)
2872 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2873 desc.datalen = cpu_to_le16(msglen);
2874 }
2875 status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details);
2876
2877 return status;
2878 }
2879
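/* Editor's illustrative sketch (not part of the upstream driver): sending a
 * zero-length success reply to a VF. The helper name and opcode choice are
 * hypothetical; VIRTCHNL_OP_VERSION comes from <linux/avf/virtchnl.h> and
 * 0 is the virtchnl success status.
 */
static inline int i40e_example_ack_vf_version(struct i40e_hw *hw, u16 vf_id)
{
        return i40e_aq_send_msg_to_vf(hw, vf_id, VIRTCHNL_OP_VERSION, 0,
                                      NULL, 0, NULL);
}
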
2880 /**
2881 * i40e_aq_debug_read_register
2882 * @hw: pointer to the hw struct
2883 * @reg_addr: register address
2884 * @reg_val: register value
2885 * @cmd_details: pointer to command details structure or NULL
2886 *
2887 * Read the register using the admin queue commands
2888 **/
2889 int i40e_aq_debug_read_register(struct i40e_hw *hw,
2890 u32 reg_addr, u64 *reg_val,
2891 struct i40e_asq_cmd_details *cmd_details)
2892 {
2893 struct i40e_aq_desc desc;
2894 struct i40e_aqc_debug_reg_read_write *cmd_resp =
2895 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
2896 int status;
2897
2898 if (reg_val == NULL)
2899 return -EINVAL;
2900
2901 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg);
2902
2903 cmd_resp->address = cpu_to_le32(reg_addr);
2904
2905 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2906
2907 if (!status) {
2908 *reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) |
2909 (u64)le32_to_cpu(cmd_resp->value_low);
2910 }
2911
2912 return status;
2913 }
2914
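/* Editor's illustrative sketch (not part of the upstream driver): reading a
 * per-port config register through the AQ so the absolute (not
 * port-relative) offset is used, as the capability parser below does. The
 * helper name is hypothetical.
 */
static inline u64 i40e_example_read_prtgen_cnf(struct i40e_hw *hw, u32 port)
{
        u64 val = 0;

        i40e_aq_debug_read_register(hw, I40E_PRTGEN_CNF + 4 * port, &val, NULL);
        return val;
}
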
2915 /**
2916 * i40e_aq_debug_write_register
2917 * @hw: pointer to the hw struct
2918 * @reg_addr: register address
2919 * @reg_val: register value
2920 * @cmd_details: pointer to command details structure or NULL
2921 *
2922 * Write to a register using the admin queue commands
2923 **/
2924 int i40e_aq_debug_write_register(struct i40e_hw *hw,
2925 u32 reg_addr, u64 reg_val,
2926 struct i40e_asq_cmd_details *cmd_details)
2927 {
2928 struct i40e_aq_desc desc;
2929 struct i40e_aqc_debug_reg_read_write *cmd =
2930 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
2931 int status;
2932
2933 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg);
2934
2935 cmd->address = cpu_to_le32(reg_addr);
2936 cmd->value_high = cpu_to_le32((u32)(reg_val >> 32));
2937 cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF));
2938
2939 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2940
2941 return status;
2942 }
2943
2944 /**
2945 * i40e_aq_request_resource
2946 * @hw: pointer to the hw struct
2947 * @resource: resource id
2948 * @access: access type
2949 * @sdp_number: resource number
2950 * @timeout: the maximum time in ms that the driver may hold the resource
2951 * @cmd_details: pointer to command details structure or NULL
2952 *
2953 * requests common resource using the admin queue commands
2954 **/
2955 int i40e_aq_request_resource(struct i40e_hw *hw,
2956 enum i40e_aq_resources_ids resource,
2957 enum i40e_aq_resource_access_type access,
2958 u8 sdp_number, u64 *timeout,
2959 struct i40e_asq_cmd_details *cmd_details)
2960 {
2961 struct i40e_aq_desc desc;
2962 struct i40e_aqc_request_resource *cmd_resp =
2963 (struct i40e_aqc_request_resource *)&desc.params.raw;
2964 int status;
2965
2966 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource);
2967
2968 cmd_resp->resource_id = cpu_to_le16(resource);
2969 cmd_resp->access_type = cpu_to_le16(access);
2970 cmd_resp->resource_number = cpu_to_le32(sdp_number);
2971
2972 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2973 /* The completion specifies the maximum time in ms that the driver
2974 * may hold the resource in the Timeout field.
2975 * If the resource is held by someone else, the command completes with
2976 * busy return value and the timeout field indicates the maximum time
2977 * the current owner of the resource has to free it.
2978 */
2979 if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
2980 *timeout = le32_to_cpu(cmd_resp->timeout);
2981
2982 return status;
2983 }
2984
2985 /**
2986 * i40e_aq_release_resource
2987 * @hw: pointer to the hw struct
2988 * @resource: resource id
2989 * @sdp_number: resource number
2990 * @cmd_details: pointer to command details structure or NULL
2991 *
2992 * release common resource using the admin queue commands
2993 **/
2994 int i40e_aq_release_resource(struct i40e_hw *hw,
2995 enum i40e_aq_resources_ids resource,
2996 u8 sdp_number,
2997 struct i40e_asq_cmd_details *cmd_details)
2998 {
2999 struct i40e_aq_desc desc;
3000 struct i40e_aqc_request_resource *cmd =
3001 (struct i40e_aqc_request_resource *)&desc.params.raw;
3002 int status;
3003
3004 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource);
3005
3006 cmd->resource_id = cpu_to_le16(resource);
3007 cmd->resource_number = cpu_to_le32(sdp_number);
3008
3009 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3010
3011 return status;
3012 }
3013
3014 /**
3015 * i40e_aq_read_nvm
3016 * @hw: pointer to the hw struct
3017 * @module_pointer: module pointer location in words from the NVM beginning
3018 * @offset: byte offset from the module beginning
3019 * @length: length of the section to be read (in bytes from the offset)
3020 * @data: command buffer (size [bytes] = length)
3021 * @last_command: tells if this is the last command in a series
3022 * @cmd_details: pointer to command details structure or NULL
3023 *
3024 * Read the NVM using the admin queue commands
3025 **/
3026 int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
3027 u32 offset, u16 length, void *data,
3028 bool last_command,
3029 struct i40e_asq_cmd_details *cmd_details)
3030 {
3031 struct i40e_aq_desc desc;
3032 struct i40e_aqc_nvm_update *cmd =
3033 (struct i40e_aqc_nvm_update *)&desc.params.raw;
3034 int status;
3035
3036 /* The highest byte of the offset must be zero. */
3037 if (offset & 0xFF000000) {
3038 status = -EINVAL;
3039 goto i40e_aq_read_nvm_exit;
3040 }
3041
3042 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read);
3043
3044 /* If this is the last command in a series, set the proper flag. */
3045 if (last_command)
3046 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3047 cmd->module_pointer = module_pointer;
3048 cmd->offset = cpu_to_le32(offset);
3049 cmd->length = cpu_to_le16(length);
3050
3051 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3052 if (length > I40E_AQ_LARGE_BUF)
3053 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3054
3055 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
3056
3057 i40e_aq_read_nvm_exit:
3058 return status;
3059 }
3060
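/* Editor's illustrative sketch (not part of the upstream driver): reading a
 * few NVM bytes while holding the NVM resource. The helper name is
 * hypothetical; I40E_NVM_RESOURCE_ID and I40E_RESOURCE_READ are assumed
 * from the shared i40e type definitions.
 */
static inline int i40e_example_read_nvm_bytes(struct i40e_hw *hw, u32 offset,
                                              u16 length, void *data)
{
        u64 timeout = 0;
        int status;

        status = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID,
                                          I40E_RESOURCE_READ, 0, &timeout, NULL);
        if (status)
                return status;

        status = i40e_aq_read_nvm(hw, 0, offset, length, data, true, NULL);

        i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
        return status;
}
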
3061 /**
3062 * i40e_aq_erase_nvm
3063 * @hw: pointer to the hw struct
3064 * @module_pointer: module pointer location in words from the NVM beginning
3065 * @offset: offset in the module (expressed in 4 KB from module's beginning)
3066 * @length: length of the section to be erased (expressed in 4 KB)
3067 * @last_command: tells if this is the last command in a series
3068 * @cmd_details: pointer to command details structure or NULL
3069 *
3070 * Erase the NVM sector using the admin queue commands
3071 **/
3072 int i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
3073 u32 offset, u16 length, bool last_command,
3074 struct i40e_asq_cmd_details *cmd_details)
3075 {
3076 struct i40e_aq_desc desc;
3077 struct i40e_aqc_nvm_update *cmd =
3078 (struct i40e_aqc_nvm_update *)&desc.params.raw;
3079 int status;
3080
3081 /* The highest byte of the offset must be zero. */
3082 if (offset & 0xFF000000) {
3083 status = -EINVAL;
3084 goto i40e_aq_erase_nvm_exit;
3085 }
3086
3087 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase);
3088
3089 /* If this is the last command in a series, set the proper flag. */
3090 if (last_command)
3091 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3092 cmd->module_pointer = module_pointer;
3093 cmd->offset = cpu_to_le32(offset);
3094 cmd->length = cpu_to_le16(length);
3095
3096 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3097
3098 i40e_aq_erase_nvm_exit:
3099 return status;
3100 }
3101
3102 /**
3103 * i40e_parse_discover_capabilities
3104 * @hw: pointer to the hw struct
3105 * @buff: pointer to a buffer containing device/function capability records
3106 * @cap_count: number of capability records in the list
3107 * @list_type_opc: type of capabilities list to parse
3108 *
3109 * Parse the device/function capabilities list.
3110 **/
3111 static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
3112 u32 cap_count,
3113 enum i40e_admin_queue_opc list_type_opc)
3114 {
3115 struct i40e_aqc_list_capabilities_element_resp *cap;
3116 u32 valid_functions, num_functions;
3117 u32 number, logical_id, phys_id;
3118 struct i40e_hw_capabilities *p;
3119 u16 id, ocp_cfg_word0;
3120 u8 major_rev;
3121 int status;
3122 u32 i = 0;
3123
3124 cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
3125
3126 if (list_type_opc == i40e_aqc_opc_list_dev_capabilities)
3127 p = &hw->dev_caps;
3128 else if (list_type_opc == i40e_aqc_opc_list_func_capabilities)
3129 p = &hw->func_caps;
3130 else
3131 return;
3132
3133 for (i = 0; i < cap_count; i++, cap++) {
3134 id = le16_to_cpu(cap->id);
3135 number = le32_to_cpu(cap->number);
3136 logical_id = le32_to_cpu(cap->logical_id);
3137 phys_id = le32_to_cpu(cap->phys_id);
3138 major_rev = cap->major_rev;
3139
3140 switch (id) {
3141 case I40E_AQ_CAP_ID_SWITCH_MODE:
3142 p->switch_mode = number;
3143 break;
3144 case I40E_AQ_CAP_ID_MNG_MODE:
3145 p->management_mode = number;
3146 if (major_rev > 1) {
3147 p->mng_protocols_over_mctp = logical_id;
3148 i40e_debug(hw, I40E_DEBUG_INIT,
3149 "HW Capability: Protocols over MCTP = %d\n",
3150 p->mng_protocols_over_mctp);
3151 } else {
3152 p->mng_protocols_over_mctp = 0;
3153 }
3154 break;
3155 case I40E_AQ_CAP_ID_NPAR_ACTIVE:
3156 p->npar_enable = number;
3157 break;
3158 case I40E_AQ_CAP_ID_OS2BMC_CAP:
3159 p->os2bmc = number;
3160 break;
3161 case I40E_AQ_CAP_ID_FUNCTIONS_VALID:
3162 p->valid_functions = number;
3163 break;
3164 case I40E_AQ_CAP_ID_SRIOV:
3165 if (number == 1)
3166 p->sr_iov_1_1 = true;
3167 break;
3168 case I40E_AQ_CAP_ID_VF:
3169 p->num_vfs = number;
3170 p->vf_base_id = logical_id;
3171 break;
3172 case I40E_AQ_CAP_ID_VMDQ:
3173 if (number == 1)
3174 p->vmdq = true;
3175 break;
3176 case I40E_AQ_CAP_ID_8021QBG:
3177 if (number == 1)
3178 p->evb_802_1_qbg = true;
3179 break;
3180 case I40E_AQ_CAP_ID_8021QBR:
3181 if (number == 1)
3182 p->evb_802_1_qbh = true;
3183 break;
3184 case I40E_AQ_CAP_ID_VSI:
3185 p->num_vsis = number;
3186 break;
3187 case I40E_AQ_CAP_ID_DCB:
3188 if (number == 1) {
3189 p->dcb = true;
3190 p->enabled_tcmap = logical_id;
3191 p->maxtc = phys_id;
3192 }
3193 break;
3194 case I40E_AQ_CAP_ID_FCOE:
3195 if (number == 1)
3196 p->fcoe = true;
3197 break;
3198 case I40E_AQ_CAP_ID_ISCSI:
3199 if (number == 1)
3200 p->iscsi = true;
3201 break;
3202 case I40E_AQ_CAP_ID_RSS:
3203 p->rss = true;
3204 p->rss_table_size = number;
3205 p->rss_table_entry_width = logical_id;
3206 break;
3207 case I40E_AQ_CAP_ID_RXQ:
3208 p->num_rx_qp = number;
3209 p->base_queue = phys_id;
3210 break;
3211 case I40E_AQ_CAP_ID_TXQ:
3212 p->num_tx_qp = number;
3213 p->base_queue = phys_id;
3214 break;
3215 case I40E_AQ_CAP_ID_MSIX:
3216 p->num_msix_vectors = number;
3217 i40e_debug(hw, I40E_DEBUG_INIT,
3218 "HW Capability: MSIX vector count = %d\n",
3219 p->num_msix_vectors);
3220 break;
3221 case I40E_AQ_CAP_ID_VF_MSIX:
3222 p->num_msix_vectors_vf = number;
3223 break;
3224 case I40E_AQ_CAP_ID_FLEX10:
3225 if (major_rev == 1) {
3226 if (number == 1) {
3227 p->flex10_enable = true;
3228 p->flex10_capable = true;
3229 }
3230 } else {
3231 /* Capability revision >= 2 */
3232 if (number & 1)
3233 p->flex10_enable = true;
3234 if (number & 2)
3235 p->flex10_capable = true;
3236 }
3237 p->flex10_mode = logical_id;
3238 p->flex10_status = phys_id;
3239 break;
3240 case I40E_AQ_CAP_ID_CEM:
3241 if (number == 1)
3242 p->mgmt_cem = true;
3243 break;
3244 case I40E_AQ_CAP_ID_IWARP:
3245 if (number == 1)
3246 p->iwarp = true;
3247 break;
3248 case I40E_AQ_CAP_ID_LED:
3249 if (phys_id < I40E_HW_CAP_MAX_GPIO)
3250 p->led[phys_id] = true;
3251 break;
3252 case I40E_AQ_CAP_ID_SDP:
3253 if (phys_id < I40E_HW_CAP_MAX_GPIO)
3254 p->sdp[phys_id] = true;
3255 break;
3256 case I40E_AQ_CAP_ID_MDIO:
3257 if (number == 1) {
3258 p->mdio_port_num = phys_id;
3259 p->mdio_port_mode = logical_id;
3260 }
3261 break;
3262 case I40E_AQ_CAP_ID_1588:
3263 if (number == 1)
3264 p->ieee_1588 = true;
3265 break;
3266 case I40E_AQ_CAP_ID_FLOW_DIRECTOR:
3267 p->fd = true;
3268 p->fd_filters_guaranteed = number;
3269 p->fd_filters_best_effort = logical_id;
3270 break;
3271 case I40E_AQ_CAP_ID_WSR_PROT:
3272 p->wr_csr_prot = (u64)number;
3273 p->wr_csr_prot |= (u64)logical_id << 32;
3274 break;
3275 case I40E_AQ_CAP_ID_NVM_MGMT:
3276 if (number & I40E_NVM_MGMT_SEC_REV_DISABLED)
3277 p->sec_rev_disabled = true;
3278 if (number & I40E_NVM_MGMT_UPDATE_DISABLED)
3279 p->update_disabled = true;
3280 break;
3281 default:
3282 break;
3283 }
3284 }
3285
3286 if (p->fcoe)
3287 i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n");
3288
3289 /* Software override: force FCoE off in NPAR or MFP (Flex10) mode,
3290 * since FCoE is not supported in those modes.
3291 */
3292 if (p->npar_enable || p->flex10_enable)
3293 p->fcoe = false;
3294
3295 /* count the enabled ports (aka the "not disabled" ports) */
3296 hw->num_ports = 0;
3297 for (i = 0; i < 4; i++) {
3298 u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i);
3299 u64 port_cfg = 0;
3300
3301 /* use AQ read to get the physical register offset instead
3302 * of the port relative offset
3303 */
3304 i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL);
3305 if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK))
3306 hw->num_ports++;
3307 }
3308
3309 /* OCP cards case: if a mezz is removed, the Ethernet port is reported
3310 * as disabled in the PRTGEN_CNF register. An additional NVM read is
3311 * needed to determine whether this is an OCP card.
3312 * Such cards have at least 4 PFs, so counting physical ports via
3313 * PRTGEN_CNF would yield a wrong partition id calculation and thus
3314 * break WoL support.
3315 */
3316 if (hw->mac.type == I40E_MAC_X722) {
3317 if (!i40e_acquire_nvm(hw, I40E_RESOURCE_READ)) {
3318 status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR,
3319 2 * I40E_SR_OCP_CFG_WORD0,
3320 sizeof(ocp_cfg_word0),
3321 &ocp_cfg_word0, true, NULL);
3322 if (!status &&
3323 (ocp_cfg_word0 & I40E_SR_OCP_ENABLED))
3324 hw->num_ports = 4;
3325 i40e_release_nvm(hw);
3326 }
3327 }
3328
3329 valid_functions = p->valid_functions;
3330 num_functions = 0;
3331 while (valid_functions) {
3332 if (valid_functions & 1)
3333 num_functions++;
3334 valid_functions >>= 1;
3335 }
3336
3337 /* partition id is 1-based, and functions are evenly spread
3338 * across the ports as partitions
3339 */
3340 if (hw->num_ports != 0) {
3341 hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
3342 hw->num_partitions = num_functions / hw->num_ports;
3343 }
3344
3345 /* additional HW specific goodies that might
3346 * someday be HW version specific
3347 */
3348 p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS;
3349 }
3350
3351 /**
3352 * i40e_aq_discover_capabilities
3353 * @hw: pointer to the hw struct
3354 * @buff: a virtual buffer to hold the capabilities
3355 * @buff_size: Size of the virtual buffer
3356 * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM
3357 * @list_type_opc: capabilities type to discover - pass in the command opcode
3358 * @cmd_details: pointer to command details structure or NULL
3359 *
3360 * Get the device capabilities descriptions from the firmware
3361 **/
3362 int i40e_aq_discover_capabilities(struct i40e_hw *hw,
3363 void *buff, u16 buff_size, u16 *data_size,
3364 enum i40e_admin_queue_opc list_type_opc,
3365 struct i40e_asq_cmd_details *cmd_details)
3366 {
3367 struct i40e_aqc_list_capabilites *cmd;
3368 struct i40e_aq_desc desc;
3369 int status = 0;
3370
3371 cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
3372
3373 if (list_type_opc != i40e_aqc_opc_list_func_capabilities &&
3374 list_type_opc != i40e_aqc_opc_list_dev_capabilities) {
3375 status = -EINVAL;
3376 goto exit;
3377 }
3378
3379 i40e_fill_default_direct_cmd_desc(&desc, list_type_opc);
3380
3381 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3382 if (buff_size > I40E_AQ_LARGE_BUF)
3383 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3384
3385 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3386 *data_size = le16_to_cpu(desc.datalen);
3387
3388 if (status)
3389 goto exit;
3390
3391 i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count),
3392 list_type_opc);
3393
3394 exit:
3395 return status;
3396 }
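
/*
 * Illustrative sketch (not part of the driver): a typical caller does not
 * know the capability list size up front, so it probes with an initial
 * buffer and, when firmware answers with I40E_AQ_RC_ENOMEM, retries with
 * the size reported back in *data_size. The helper name
 * i40e_example_get_func_caps() is hypothetical and <linux/slab.h> is
 * assumed for kzalloc()/kfree().
 */
static int __maybe_unused i40e_example_get_func_caps(struct i40e_hw *hw)
{
	u16 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
	u16 data_size = 0;
	void *buf;
	int err;

	do {
		buf = kzalloc(buf_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* on success the records are parsed into hw->func_caps */
		err = i40e_aq_discover_capabilities(hw, buf, buf_len,
						    &data_size,
						    i40e_aqc_opc_list_func_capabilities,
						    NULL);
		kfree(buf);

		if (hw->aq.asq_last_status != I40E_AQ_RC_ENOMEM)
			break;

		/* firmware told us how large the buffer must be */
		buf_len = data_size;
	} while (true);

	return err;
}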
3397
3398 /**
3399 * i40e_aq_update_nvm
3400 * @hw: pointer to the hw struct
3401 * @module_pointer: module pointer location in words from the NVM beginning
3402 * @offset: byte offset from the module beginning
3403 * @length: length of the section to be written (in bytes from the offset)
3404 * @data: command buffer (size [bytes] = length)
3405 * @last_command: tells if this is the last command in a series
3406 * @preservation_flags: Preservation mode flags
3407 * @cmd_details: pointer to command details structure or NULL
3408 *
3409 * Update the NVM using the admin queue commands
3410 **/
3411 int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
3412 u32 offset, u16 length, void *data,
3413 bool last_command, u8 preservation_flags,
3414 struct i40e_asq_cmd_details *cmd_details)
3415 {
3416 struct i40e_aq_desc desc;
3417 struct i40e_aqc_nvm_update *cmd =
3418 (struct i40e_aqc_nvm_update *)&desc.params.raw;
3419 int status;
3420
3421 /* In offset the highest byte must be zeroed. */
3422 if (offset & 0xFF000000) {
3423 status = -EINVAL;
3424 goto i40e_aq_update_nvm_exit;
3425 }
3426
3427 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
3428
3429 /* If this is the last command in a series, set the proper flag. */
3430 if (last_command)
3431 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3432 if (hw->mac.type == I40E_MAC_X722) {
3433 if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED)
3434 cmd->command_flags |=
3435 (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED <<
3436 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
3437 else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL)
3438 cmd->command_flags |=
3439 (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL <<
3440 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
3441 }
3442 cmd->module_pointer = module_pointer;
3443 cmd->offset = cpu_to_le32(offset);
3444 cmd->length = cpu_to_le16(length);
3445
3446 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
3447 if (length > I40E_AQ_LARGE_BUF)
3448 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3449
3450 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
3451
3452 i40e_aq_update_nvm_exit:
3453 return status;
3454 }
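
/*
 * Illustrative sketch (not part of the driver): a single-shot NVM write
 * takes the NVM resource for writing, issues one update marked as the
 * last command of the series, and releases the resource again. The
 * helper name is hypothetical; module_pointer, offset and length are
 * whatever the caller's update flow dictates.
 */
static int __maybe_unused i40e_example_nvm_write(struct i40e_hw *hw,
						 u8 module_pointer, u32 offset,
						 u16 length, void *data)
{
	int status;

	status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
	if (status)
		return status;

	/* last_command == true: no further updates follow in this series;
	 * the preservation flags only take effect on X722, as handled above
	 */
	status = i40e_aq_update_nvm(hw, module_pointer, offset, length, data,
				    true, I40E_NVM_PRESERVATION_FLAGS_ALL,
				    NULL);

	i40e_release_nvm(hw);
	return status;
}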
3455
3456 /**
3457 * i40e_aq_rearrange_nvm
3458 * @hw: pointer to the hw struct
3459 * @rearrange_nvm: defines direction of rearrangement
3460 * @cmd_details: pointer to command details structure or NULL
3461 *
3462 * Rearrange NVM structure, available only for transition FW
3463 **/
3464 int i40e_aq_rearrange_nvm(struct i40e_hw *hw,
3465 u8 rearrange_nvm,
3466 struct i40e_asq_cmd_details *cmd_details)
3467 {
3468 struct i40e_aqc_nvm_update *cmd;
3469 struct i40e_aq_desc desc;
3470 int status;
3471
3472 cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw;
3473
3474 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
3475
3476 rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT |
3477 I40E_AQ_NVM_REARRANGE_TO_STRUCT);
3478
3479 if (!rearrange_nvm) {
3480 status = -EINVAL;
3481 goto i40e_aq_rearrange_nvm_exit;
3482 }
3483
3484 cmd->command_flags |= rearrange_nvm;
3485 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3486
3487 i40e_aq_rearrange_nvm_exit:
3488 return status;
3489 }
3490
3491 /**
3492 * i40e_aq_get_lldp_mib
3493 * @hw: pointer to the hw struct
3494 * @bridge_type: type of bridge requested
3495 * @mib_type: Local, Remote or both Local and Remote MIBs
3496 * @buff: pointer to a user supplied buffer to store the MIB block
3497 * @buff_size: size of the buffer (in bytes)
3498 * @local_len : length of the returned Local LLDP MIB
3499 * @remote_len: length of the returned Remote LLDP MIB
3500 * @cmd_details: pointer to command details structure or NULL
3501 *
3502 * Requests the complete LLDP MIB (entire packet).
3503 **/
3504 int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
3505 u8 mib_type, void *buff, u16 buff_size,
3506 u16 *local_len, u16 *remote_len,
3507 struct i40e_asq_cmd_details *cmd_details)
3508 {
3509 struct i40e_aq_desc desc;
3510 struct i40e_aqc_lldp_get_mib *cmd =
3511 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
3512 struct i40e_aqc_lldp_get_mib *resp =
3513 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
3514 int status;
3515
3516 if (buff_size == 0 || !buff)
3517 return -EINVAL;
3518
3519 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib);
3520 /* Indirect Command */
3521 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3522
3523 cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK;
3524 cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
3525 I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
3526
3527 desc.datalen = cpu_to_le16(buff_size);
3528
3529 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3530 if (buff_size > I40E_AQ_LARGE_BUF)
3531 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3532
3533 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3534 if (!status) {
3535 if (local_len != NULL)
3536 *local_len = le16_to_cpu(resp->local_len);
3537 if (remote_len != NULL)
3538 *remote_len = le16_to_cpu(resp->remote_len);
3539 }
3540
3541 return status;
3542 }
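
/*
 * Illustrative sketch (not part of the driver): fetch the local LLDP MIB
 * from the nearest bridge into a caller-supplied buffer; on success
 * *local_len tells how many bytes of the buffer are valid. The helper
 * name is hypothetical; the MIB and bridge type constants come from
 * i40e_adminq_cmd.h.
 */
static int __maybe_unused i40e_example_get_local_mib(struct i40e_hw *hw,
						     void *buf, u16 buf_size,
						     u16 *local_len)
{
	return i40e_aq_get_lldp_mib(hw,
				    I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
				    I40E_AQ_LLDP_MIB_LOCAL,
				    buf, buf_size, local_len, NULL, NULL);
}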
3543
3544 /**
3545 * i40e_aq_set_lldp_mib - Set the LLDP MIB
3546 * @hw: pointer to the hw struct
3547 * @mib_type: Local, Remote or both Local and Remote MIBs
3548 * @buff: pointer to a user supplied buffer holding the MIB block to set
3549 * @buff_size: size of the buffer (in bytes)
3550 * @cmd_details: pointer to command details structure or NULL
3551 *
3552 * Set the LLDP MIB.
3553 **/
3554 int
3555 i40e_aq_set_lldp_mib(struct i40e_hw *hw,
3556 u8 mib_type, void *buff, u16 buff_size,
3557 struct i40e_asq_cmd_details *cmd_details)
3558 {
3559 struct i40e_aqc_lldp_set_local_mib *cmd;
3560 struct i40e_aq_desc desc;
3561 int status;
3562
3563 cmd = (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw;
3564 if (buff_size == 0 || !buff)
3565 return -EINVAL;
3566
3567 i40e_fill_default_direct_cmd_desc(&desc,
3568 i40e_aqc_opc_lldp_set_local_mib);
3569 /* Indirect Command */
3570 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
3571 if (buff_size > I40E_AQ_LARGE_BUF)
3572 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3573 desc.datalen = cpu_to_le16(buff_size);
3574
3575 cmd->type = mib_type;
3576 cmd->length = cpu_to_le16(buff_size);
3577 cmd->address_high = cpu_to_le32(upper_32_bits((uintptr_t)buff));
3578 cmd->address_low = cpu_to_le32(lower_32_bits((uintptr_t)buff));
3579
3580 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3581 return status;
3582 }
3583
3584 /**
3585 * i40e_aq_cfg_lldp_mib_change_event
3586 * @hw: pointer to the hw struct
3587 * @enable_update: Enable or Disable event posting
3588 * @cmd_details: pointer to command details structure or NULL
3589 *
3590 * Enable or Disable posting of an event on ARQ when LLDP MIB
3591 * associated with the interface changes
3592 **/
3593 int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
3594 bool enable_update,
3595 struct i40e_asq_cmd_details *cmd_details)
3596 {
3597 struct i40e_aq_desc desc;
3598 struct i40e_aqc_lldp_update_mib *cmd =
3599 (struct i40e_aqc_lldp_update_mib *)&desc.params.raw;
3600 int status;
3601
3602 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib);
3603
3604 if (!enable_update)
3605 cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE;
3606
3607 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3608
3609 return status;
3610 }
3611
3612 /**
3613 * i40e_aq_restore_lldp
3614 * @hw: pointer to the hw struct
3615 * @setting: pointer to factory setting variable or NULL
3616 * @restore: True if factory settings should be restored
3617 * @cmd_details: pointer to command details structure or NULL
3618 *
3619 * Restore the LLDP Agent factory settings if @restore is true. Otherwise
3620 * only report the factory setting in the AQ response.
3621 **/
3622 int
3623 i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
3624 struct i40e_asq_cmd_details *cmd_details)
3625 {
3626 struct i40e_aq_desc desc;
3627 struct i40e_aqc_lldp_restore *cmd =
3628 (struct i40e_aqc_lldp_restore *)&desc.params.raw;
3629 int status;
3630
3631 if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) {
3632 i40e_debug(hw, I40E_DEBUG_ALL,
3633 "Restore LLDP not supported by current FW version.\n");
3634 return -ENODEV;
3635 }
3636
3637 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore);
3638
3639 if (restore)
3640 cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE;
3641
3642 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3643
3644 if (setting)
3645 *setting = cmd->command & 1;
3646
3647 return status;
3648 }
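
/*
 * Illustrative sketch (not part of the driver): read back the LLDP agent
 * factory default without touching the current state by passing
 * restore == false; only the reported setting is of interest. The helper
 * name is hypothetical.
 */
static int __maybe_unused i40e_example_lldp_factory_default(struct i40e_hw *hw,
							     u8 *setting)
{
	return i40e_aq_restore_lldp(hw, setting, false, NULL);
}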
3649
3650 /**
3651 * i40e_aq_stop_lldp
3652 * @hw: pointer to the hw struct
3653 * @shutdown_agent: True if LLDP Agent needs to be Shutdown
3654 * @persist: True if stop of LLDP should be persistent across power cycles
3655 * @cmd_details: pointer to command details structure or NULL
3656 *
3657 * Stop or Shutdown the embedded LLDP Agent
3658 **/
3659 int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
3660 bool persist,
3661 struct i40e_asq_cmd_details *cmd_details)
3662 {
3663 struct i40e_aq_desc desc;
3664 struct i40e_aqc_lldp_stop *cmd =
3665 (struct i40e_aqc_lldp_stop *)&desc.params.raw;
3666 int status;
3667
3668 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop);
3669
3670 if (shutdown_agent)
3671 cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
3672
3673 if (persist) {
3674 if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
3675 cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST;
3676 else
3677 i40e_debug(hw, I40E_DEBUG_ALL,
3678 "Persistent Stop LLDP not supported by current FW version.\n");
3679 }
3680
3681 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3682
3683 return status;
3684 }
3685
3686 /**
3687 * i40e_aq_start_lldp
3688 * @hw: pointer to the hw struct
3689 * @persist: True if start of LLDP should be persistent across power cycles
3690 * @cmd_details: pointer to command details structure or NULL
3691 *
3692 * Start the embedded LLDP Agent on all ports.
3693 **/
3694 int i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
3695 struct i40e_asq_cmd_details *cmd_details)
3696 {
3697 struct i40e_aq_desc desc;
3698 struct i40e_aqc_lldp_start *cmd =
3699 (struct i40e_aqc_lldp_start *)&desc.params.raw;
3700 int status;
3701
3702 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
3703
3704 cmd->command = I40E_AQ_LLDP_AGENT_START;
3705
3706 if (persist) {
3707 if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
3708 cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST;
3709 else
3710 i40e_debug(hw, I40E_DEBUG_ALL,
3711 "Persistent Start LLDP not supported by current FW version.\n");
3712 }
3713
3714 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3715
3716 return status;
3717 }
3718
3719 /**
3720 * i40e_aq_set_dcb_parameters
3721 * @hw: pointer to the hw struct
3722 * @cmd_details: pointer to command details structure or NULL
3723 * @dcb_enable: True if DCB configuration needs to be applied
3724 *
3725 **/
3726 int
3727 i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
3728 struct i40e_asq_cmd_details *cmd_details)
3729 {
3730 struct i40e_aq_desc desc;
3731 struct i40e_aqc_set_dcb_parameters *cmd =
3732 (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
3733 int status;
3734
3735 if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
3736 return -ENODEV;
3737
3738 i40e_fill_default_direct_cmd_desc(&desc,
3739 i40e_aqc_opc_set_dcb_parameters);
3740
3741 if (dcb_enable) {
3742 cmd->valid_flags = I40E_DCB_VALID;
3743 cmd->command = I40E_AQ_DCB_SET_AGENT;
3744 }
3745 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3746
3747 return status;
3748 }
3749
3750 /**
3751 * i40e_aq_get_cee_dcb_config
3752 * @hw: pointer to the hw struct
3753 * @buff: response buffer that stores CEE operational configuration
3754 * @buff_size: size of the buffer passed
3755 * @cmd_details: pointer to command details structure or NULL
3756 *
3757 * Get CEE DCBX mode operational configuration from firmware
3758 **/
3759 int i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
3760 void *buff, u16 buff_size,
3761 struct i40e_asq_cmd_details *cmd_details)
3762 {
3763 struct i40e_aq_desc desc;
3764 int status;
3765
3766 if (buff_size == 0 || !buff)
3767 return -EINVAL;
3768
3769 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg);
3770
3771 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3772 status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size,
3773 cmd_details);
3774
3775 return status;
3776 }
3777
3778 /**
3779 * i40e_aq_add_udp_tunnel
3780 * @hw: pointer to the hw struct
3781 * @udp_port: the UDP port to add in Host byte order
3782 * @protocol_index: protocol index type
3783 * @filter_index: pointer to filter index
3784 * @cmd_details: pointer to command details structure or NULL
3785 *
3786 * Note: Firmware expects the udp_port value to be in Little Endian format,
3787 * and this function will call cpu_to_le16 to convert from Host byte order to
3788 * Little Endian order.
3789 **/
3790 int i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
3791 u16 udp_port, u8 protocol_index,
3792 u8 *filter_index,
3793 struct i40e_asq_cmd_details *cmd_details)
3794 {
3795 struct i40e_aq_desc desc;
3796 struct i40e_aqc_add_udp_tunnel *cmd =
3797 (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
3798 struct i40e_aqc_del_udp_tunnel_completion *resp =
3799 (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw;
3800 int status;
3801
3802 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);
3803
3804 cmd->udp_port = cpu_to_le16(udp_port);
3805 cmd->protocol_type = protocol_index;
3806
3807 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3808
3809 if (!status && filter_index)
3810 *filter_index = resp->index;
3811
3812 return status;
3813 }
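
/*
 * Illustrative sketch (not part of the driver): add a VXLAN UDP port
 * filter. Ports usually arrive from the stack in network byte order,
 * while i40e_aq_add_udp_tunnel() wants host byte order, hence the
 * ntohs(). The helper name is hypothetical; I40E_AQC_TUNNEL_TYPE_VXLAN
 * comes from i40e_adminq_cmd.h.
 */
static int __maybe_unused i40e_example_add_vxlan_port(struct i40e_hw *hw,
						      __be16 port,
						      u8 *filter_index)
{
	return i40e_aq_add_udp_tunnel(hw, ntohs(port),
				      I40E_AQC_TUNNEL_TYPE_VXLAN,
				      filter_index, NULL);
}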
3814
3815 /**
3816 * i40e_aq_del_udp_tunnel
3817 * @hw: pointer to the hw struct
3818 * @index: filter index
3819 * @cmd_details: pointer to command details structure or NULL
3820 **/
3821 int i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
3822 struct i40e_asq_cmd_details *cmd_details)
3823 {
3824 struct i40e_aq_desc desc;
3825 struct i40e_aqc_remove_udp_tunnel *cmd =
3826 (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw;
3827 int status;
3828
3829 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel);
3830
3831 cmd->index = index;
3832
3833 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3834
3835 return status;
3836 }
3837
3838 /**
3839 * i40e_aq_delete_element - Delete switch element
3840 * @hw: pointer to the hw struct
3841 * @seid: the SEID to delete from the switch
3842 * @cmd_details: pointer to command details structure or NULL
3843 *
3844 * This deletes a switch element from the switch.
3845 **/
3846 int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
3847 struct i40e_asq_cmd_details *cmd_details)
3848 {
3849 struct i40e_aq_desc desc;
3850 struct i40e_aqc_switch_seid *cmd =
3851 (struct i40e_aqc_switch_seid *)&desc.params.raw;
3852 int status;
3853
3854 if (seid == 0)
3855 return -EINVAL;
3856
3857 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element);
3858
3859 cmd->seid = cpu_to_le16(seid);
3860
3861 status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
3862 cmd_details, true);
3863
3864 return status;
3865 }
3866
3867 /**
3868 * i40e_aq_dcb_updated - DCB Updated Command
3869 * @hw: pointer to the hw struct
3870 * @cmd_details: pointer to command details structure or NULL
3871 *
3872 * EMP will return when the shared RPB settings have been
3873 * recomputed and modified. The retval field in the descriptor
3874 * will be set to 0 when RPB is modified.
3875 **/
3876 int i40e_aq_dcb_updated(struct i40e_hw *hw,
3877 struct i40e_asq_cmd_details *cmd_details)
3878 {
3879 struct i40e_aq_desc desc;
3880 int status;
3881
3882 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);
3883
3884 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3885
3886 return status;
3887 }
3888
3889 /**
3890 * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler
3891 * @hw: pointer to the hw struct
3892 * @seid: seid for the physical port/switching component/vsi
3893 * @buff: Indirect buffer to hold data parameters and response
3894 * @buff_size: Indirect buffer size
3895 * @opcode: Tx scheduler AQ command opcode
3896 * @cmd_details: pointer to command details structure or NULL
3897 *
3898 * Generic command handler for Tx scheduler AQ commands
3899 **/
3900 static int i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
3901 void *buff, u16 buff_size,
3902 enum i40e_admin_queue_opc opcode,
3903 struct i40e_asq_cmd_details *cmd_details)
3904 {
3905 struct i40e_aq_desc desc;
3906 struct i40e_aqc_tx_sched_ind *cmd =
3907 (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
3908 int status;
3909 bool cmd_param_flag = false;
3910
3911 switch (opcode) {
3912 case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit:
3913 case i40e_aqc_opc_configure_vsi_tc_bw:
3914 case i40e_aqc_opc_enable_switching_comp_ets:
3915 case i40e_aqc_opc_modify_switching_comp_ets:
3916 case i40e_aqc_opc_disable_switching_comp_ets:
3917 case i40e_aqc_opc_configure_switching_comp_ets_bw_limit:
3918 case i40e_aqc_opc_configure_switching_comp_bw_config:
3919 cmd_param_flag = true;
3920 break;
3921 case i40e_aqc_opc_query_vsi_bw_config:
3922 case i40e_aqc_opc_query_vsi_ets_sla_config:
3923 case i40e_aqc_opc_query_switching_comp_ets_config:
3924 case i40e_aqc_opc_query_port_ets_config:
3925 case i40e_aqc_opc_query_switching_comp_bw_config:
3926 cmd_param_flag = false;
3927 break;
3928 default:
3929 return -EINVAL;
3930 }
3931
3932 i40e_fill_default_direct_cmd_desc(&desc, opcode);
3933
3934 /* Indirect command */
3935 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3936 if (cmd_param_flag)
3937 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
3938 if (buff_size > I40E_AQ_LARGE_BUF)
3939 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3940
3941 desc.datalen = cpu_to_le16(buff_size);
3942
3943 cmd->vsi_seid = cpu_to_le16(seid);
3944
3945 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3946
3947 return status;
3948 }
3949
3950 /**
3951 * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit
3952 * @hw: pointer to the hw struct
3953 * @seid: VSI seid
3954 * @credit: BW limit credits (0 = disabled)
3955 * @max_credit: Max BW limit credits
3956 * @cmd_details: pointer to command details structure or NULL
3957 **/
3958 int i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
3959 u16 seid, u16 credit, u8 max_credit,
3960 struct i40e_asq_cmd_details *cmd_details)
3961 {
3962 struct i40e_aq_desc desc;
3963 struct i40e_aqc_configure_vsi_bw_limit *cmd =
3964 (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
3965 int status;
3966
3967 i40e_fill_default_direct_cmd_desc(&desc,
3968 i40e_aqc_opc_configure_vsi_bw_limit);
3969
3970 cmd->vsi_seid = cpu_to_le16(seid);
3971 cmd->credit = cpu_to_le16(credit);
3972 cmd->max_credit = max_credit;
3973
3974 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3975
3976 return status;
3977 }
3978
3979 /**
3980 * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC
3981 * @hw: pointer to the hw struct
3982 * @seid: VSI seid
3983 * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits
3984 * @cmd_details: pointer to command details structure or NULL
3985 **/
3986 int i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
3987 u16 seid,
3988 struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
3989 struct i40e_asq_cmd_details *cmd_details)
3990 {
3991 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
3992 i40e_aqc_opc_configure_vsi_tc_bw,
3993 cmd_details);
3994 }
3995
3996 /**
3997 * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port
3998 * @hw: pointer to the hw struct
3999 * @seid: seid of the switching component connected to Physical Port
4000 * @ets_data: Buffer holding ETS parameters
4001 * @opcode: Tx scheduler AQ command opcode
4002 * @cmd_details: pointer to command details structure or NULL
4003 **/
4004 int
4005 i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
4006 u16 seid,
4007 struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
4008 enum i40e_admin_queue_opc opcode,
4009 struct i40e_asq_cmd_details *cmd_details)
4010 {
4011 return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data,
4012 sizeof(*ets_data), opcode, cmd_details);
4013 }
4014
4015 /**
4016 * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC
4017 * @hw: pointer to the hw struct
4018 * @seid: seid of the switching component
4019 * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits
4020 * @cmd_details: pointer to command details structure or NULL
4021 **/
4022 int
4023 i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
4024 u16 seid,
4025 struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
4026 struct i40e_asq_cmd_details *cmd_details)
4027 {
4028 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4029 i40e_aqc_opc_configure_switching_comp_bw_config,
4030 cmd_details);
4031 }
4032
4033 /**
4034 * i40e_aq_query_vsi_bw_config - Query VSI BW configuration
4035 * @hw: pointer to the hw struct
4036 * @seid: seid of the VSI
4037 * @bw_data: Buffer to hold VSI BW configuration
4038 * @cmd_details: pointer to command details structure or NULL
4039 **/
4040 int
4041 i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
4042 u16 seid,
4043 struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
4044 struct i40e_asq_cmd_details *cmd_details)
4045 {
4046 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4047 i40e_aqc_opc_query_vsi_bw_config,
4048 cmd_details);
4049 }
4050
4051 /**
4052 * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC
4053 * @hw: pointer to the hw struct
4054 * @seid: seid of the VSI
4055 * @bw_data: Buffer to hold VSI BW configuration per TC
4056 * @cmd_details: pointer to command details structure or NULL
4057 **/
4058 int
4059 i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
4060 u16 seid,
4061 struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
4062 struct i40e_asq_cmd_details *cmd_details)
4063 {
4064 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4065 i40e_aqc_opc_query_vsi_ets_sla_config,
4066 cmd_details);
4067 }
4068
4069 /**
4070 * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC
4071 * @hw: pointer to the hw struct
4072 * @seid: seid of the switching component
4073 * @bw_data: Buffer to hold switching component's per TC BW config
4074 * @cmd_details: pointer to command details structure or NULL
4075 **/
4076 int
4077 i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
4078 u16 seid,
4079 struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
4080 struct i40e_asq_cmd_details *cmd_details)
4081 {
4082 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4083 i40e_aqc_opc_query_switching_comp_ets_config,
4084 cmd_details);
4085 }
4086
4087 /**
4088 * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration
4089 * @hw: pointer to the hw struct
4090 * @seid: seid of the VSI or switching component connected to Physical Port
4091 * @bw_data: Buffer to hold current ETS configuration for the Physical Port
4092 * @cmd_details: pointer to command details structure or NULL
4093 **/
4094 int
4095 i40e_aq_query_port_ets_config(struct i40e_hw *hw,
4096 u16 seid,
4097 struct i40e_aqc_query_port_ets_config_resp *bw_data,
4098 struct i40e_asq_cmd_details *cmd_details)
4099 {
4100 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4101 i40e_aqc_opc_query_port_ets_config,
4102 cmd_details);
4103 }
4104
4105 /**
4106 * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration
4107 * @hw: pointer to the hw struct
4108 * @seid: seid of the switching component
4109 * @bw_data: Buffer to hold switching component's BW configuration
4110 * @cmd_details: pointer to command details structure or NULL
4111 **/
4112 int
4113 i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
4114 u16 seid,
4115 struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
4116 struct i40e_asq_cmd_details *cmd_details)
4117 {
4118 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4119 i40e_aqc_opc_query_switching_comp_bw_config,
4120 cmd_details);
4121 }
4122
4123 /**
4124 * i40e_validate_filter_settings
4125 * @hw: pointer to the hardware structure
4126 * @settings: Filter control settings
4127 *
4128 * Check and validate the filter control settings passed.
4129 * The function checks that valid filter and context sizes are
4130 * passed for FCoE and PE.
4131 *
4132 * Returns 0 if the values passed are valid and within
4133 * range else returns an error.
4134 **/
4135 static int
4136 i40e_validate_filter_settings(struct i40e_hw *hw,
4137 struct i40e_filter_control_settings *settings)
4138 {
4139 u32 fcoe_cntx_size, fcoe_filt_size;
4140 u32 fcoe_fmax;
4141 u32 val;
4142
4143 /* Validate FCoE settings passed */
4144 switch (settings->fcoe_filt_num) {
4145 case I40E_HASH_FILTER_SIZE_1K:
4146 case I40E_HASH_FILTER_SIZE_2K:
4147 case I40E_HASH_FILTER_SIZE_4K:
4148 case I40E_HASH_FILTER_SIZE_8K:
4149 case I40E_HASH_FILTER_SIZE_16K:
4150 case I40E_HASH_FILTER_SIZE_32K:
4151 fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
4152 fcoe_filt_size <<= (u32)settings->fcoe_filt_num;
4153 break;
4154 default:
4155 return -EINVAL;
4156 }
4157
4158 switch (settings->fcoe_cntx_num) {
4159 case I40E_DMA_CNTX_SIZE_512:
4160 case I40E_DMA_CNTX_SIZE_1K:
4161 case I40E_DMA_CNTX_SIZE_2K:
4162 case I40E_DMA_CNTX_SIZE_4K:
4163 fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
4164 fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num;
4165 break;
4166 default:
4167 return -EINVAL;
4168 }
4169
4170 /* Validate PE settings passed */
4171 switch (settings->pe_filt_num) {
4172 case I40E_HASH_FILTER_SIZE_1K:
4173 case I40E_HASH_FILTER_SIZE_2K:
4174 case I40E_HASH_FILTER_SIZE_4K:
4175 case I40E_HASH_FILTER_SIZE_8K:
4176 case I40E_HASH_FILTER_SIZE_16K:
4177 case I40E_HASH_FILTER_SIZE_32K:
4178 case I40E_HASH_FILTER_SIZE_64K:
4179 case I40E_HASH_FILTER_SIZE_128K:
4180 case I40E_HASH_FILTER_SIZE_256K:
4181 case I40E_HASH_FILTER_SIZE_512K:
4182 case I40E_HASH_FILTER_SIZE_1M:
4183 break;
4184 default:
4185 return -EINVAL;
4186 }
4187
4188 switch (settings->pe_cntx_num) {
4189 case I40E_DMA_CNTX_SIZE_512:
4190 case I40E_DMA_CNTX_SIZE_1K:
4191 case I40E_DMA_CNTX_SIZE_2K:
4192 case I40E_DMA_CNTX_SIZE_4K:
4193 case I40E_DMA_CNTX_SIZE_8K:
4194 case I40E_DMA_CNTX_SIZE_16K:
4195 case I40E_DMA_CNTX_SIZE_32K:
4196 case I40E_DMA_CNTX_SIZE_64K:
4197 case I40E_DMA_CNTX_SIZE_128K:
4198 case I40E_DMA_CNTX_SIZE_256K:
4199 break;
4200 default:
4201 return -EINVAL;
4202 }
4203
4204 /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */
4205 val = rd32(hw, I40E_GLHMC_FCOEFMAX);
4206 fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK)
4207 >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT;
4208 if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax)
4209 return -EINVAL;
4210
4211 return 0;
4212 }
4213
4214 /**
4215 * i40e_set_filter_control
4216 * @hw: pointer to the hardware structure
4217 * @settings: Filter control settings
4218 *
4219 * Set the Queue Filters for PE/FCoE and enable filters required
4220 * for a single PF. It is expected that these settings are programmed
4221 * at the driver initialization time.
4222 **/
4223 int i40e_set_filter_control(struct i40e_hw *hw,
4224 struct i40e_filter_control_settings *settings)
4225 {
4226 u32 hash_lut_size = 0;
4227 int ret = 0;
4228 u32 val;
4229
4230 if (!settings)
4231 return -EINVAL;
4232
4233 /* Validate the input settings */
4234 ret = i40e_validate_filter_settings(hw, settings);
4235 if (ret)
4236 return ret;
4237
4238 /* Read the PF Queue Filter control register */
4239 val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
4240
4241 /* Program required PE hash buckets for the PF */
4242 val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK;
4243 val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) &
4244 I40E_PFQF_CTL_0_PEHSIZE_MASK;
4245 /* Program required PE contexts for the PF */
4246 val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK;
4247 val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) &
4248 I40E_PFQF_CTL_0_PEDSIZE_MASK;
4249
4250 /* Program required FCoE hash buckets for the PF */
4251 val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
4252 val |= ((u32)settings->fcoe_filt_num <<
4253 I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) &
4254 I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
4255 /* Program required FCoE DDP contexts for the PF */
4256 val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
4257 val |= ((u32)settings->fcoe_cntx_num <<
4258 I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) &
4259 I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
4260
4261 /* Program Hash LUT size for the PF */
4262 val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
4263 if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512)
4264 hash_lut_size = 1;
4265 val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) &
4266 I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
4267
4268 /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */
4269 if (settings->enable_fdir)
4270 val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
4271 if (settings->enable_ethtype)
4272 val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK;
4273 if (settings->enable_macvlan)
4274 val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK;
4275
4276 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);
4277
4278 return 0;
4279 }
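
/*
 * Illustrative sketch (not part of the driver): one plausible way to
 * program the PF queue filter control at init time. The smallest
 * FCoE/PE table sizes are chosen so the validation above passes as long
 * as they fit within PMFCOEFMAX; the helper name and the particular
 * choices are hypothetical.
 */
static int __maybe_unused i40e_example_filter_control(struct i40e_hw *hw)
{
	struct i40e_filter_control_settings settings = {};

	settings.fcoe_filt_num = I40E_HASH_FILTER_SIZE_1K;
	settings.fcoe_cntx_num = I40E_DMA_CNTX_SIZE_512;
	settings.pe_filt_num = I40E_HASH_FILTER_SIZE_1K;
	settings.pe_cntx_num = I40E_DMA_CNTX_SIZE_512;
	settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;

	settings.enable_fdir = true;
	settings.enable_ethtype = true;
	settings.enable_macvlan = true;

	return i40e_set_filter_control(hw, &settings);
}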
4280
4281 /**
4282 * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter
4283 * @hw: pointer to the hw struct
4284 * @mac_addr: MAC address to use in the filter
4285 * @ethtype: Ethertype to use in the filter
4286 * @flags: Flags that needs to be applied to the filter
4287 * @vsi_seid: seid of the control VSI
4288 * @queue: VSI queue number to send the packet to
4289 * @is_add: Add control packet filter if True else remove
4290 * @stats: Structure to hold information on control filter counts
4291 * @cmd_details: pointer to command details structure or NULL
4292 *
4293 * This command adds or removes a control packet filter for a control VSI.
4294 * On completion it updates the perfect filter counts reported in the
4295 * stats member.
4296 **/
4297 int i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
4298 u8 *mac_addr, u16 ethtype, u16 flags,
4299 u16 vsi_seid, u16 queue, bool is_add,
4300 struct i40e_control_filter_stats *stats,
4301 struct i40e_asq_cmd_details *cmd_details)
4302 {
4303 struct i40e_aq_desc desc;
4304 struct i40e_aqc_add_remove_control_packet_filter *cmd =
4305 (struct i40e_aqc_add_remove_control_packet_filter *)
4306 &desc.params.raw;
4307 struct i40e_aqc_add_remove_control_packet_filter_completion *resp =
4308 (struct i40e_aqc_add_remove_control_packet_filter_completion *)
4309 &desc.params.raw;
4310 int status;
4311
4312 if (vsi_seid == 0)
4313 return -EINVAL;
4314
4315 if (is_add) {
4316 i40e_fill_default_direct_cmd_desc(&desc,
4317 i40e_aqc_opc_add_control_packet_filter);
4318 cmd->queue = cpu_to_le16(queue);
4319 } else {
4320 i40e_fill_default_direct_cmd_desc(&desc,
4321 i40e_aqc_opc_remove_control_packet_filter);
4322 }
4323
4324 if (mac_addr)
4325 ether_addr_copy(cmd->mac, mac_addr);
4326
4327 cmd->etype = cpu_to_le16(ethtype);
4328 cmd->flags = cpu_to_le16(flags);
4329 cmd->seid = cpu_to_le16(vsi_seid);
4330
4331 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4332
4333 if (!status && stats) {
4334 stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used);
4335 stats->etype_used = le16_to_cpu(resp->etype_used);
4336 stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free);
4337 stats->etype_free = le16_to_cpu(resp->etype_free);
4338 }
4339
4340 return status;
4341 }
4342
4343 /**
4344 * i40e_add_filter_to_drop_tx_flow_control_frames- filter to drop flow control
4345 * @hw: pointer to the hw struct
4346 * @seid: VSI seid to add ethertype filter from
4347 **/
4348 void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
4349 u16 seid)
4350 {
4351 #define I40E_FLOW_CONTROL_ETHTYPE 0x8808
4352 u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
4353 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
4354 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
4355 u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE;
4356 int status;
4357
4358 status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag,
4359 seid, 0, true, NULL,
4360 NULL);
4361 if (status)
4362 hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n");
4363 }
4364
4365 /**
4366 * i40e_aq_alternate_read
4367 * @hw: pointer to the hardware structure
4368 * @reg_addr0: address of first dword to be read
4369 * @reg_val0: pointer for data read from 'reg_addr0'
4370 * @reg_addr1: address of second dword to be read
4371 * @reg_val1: pointer for data read from 'reg_addr1'
4372 *
4373 * Read one or two dwords from alternate structure. Fields are indicated
4374 * by 'reg_addr0' and 'reg_addr1' register numbers. If the 'reg_val1' pointer
4375 * is not passed, only the register at 'reg_addr0' is read.
4376 *
4377 **/
4378 static int i40e_aq_alternate_read(struct i40e_hw *hw,
4379 u32 reg_addr0, u32 *reg_val0,
4380 u32 reg_addr1, u32 *reg_val1)
4381 {
4382 struct i40e_aq_desc desc;
4383 struct i40e_aqc_alternate_write *cmd_resp =
4384 (struct i40e_aqc_alternate_write *)&desc.params.raw;
4385 int status;
4386
4387 if (!reg_val0)
4388 return -EINVAL;
4389
4390 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read);
4391 cmd_resp->address0 = cpu_to_le32(reg_addr0);
4392 cmd_resp->address1 = cpu_to_le32(reg_addr1);
4393
4394 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
4395
4396 if (!status) {
4397 *reg_val0 = le32_to_cpu(cmd_resp->data0);
4398
4399 if (reg_val1)
4400 *reg_val1 = le32_to_cpu(cmd_resp->data1);
4401 }
4402
4403 return status;
4404 }
4405
4406 /**
4407 * i40e_aq_suspend_port_tx
4408 * @hw: pointer to the hardware structure
4409 * @seid: port seid
4410 * @cmd_details: pointer to command details structure or NULL
4411 *
4412 * Suspend port's Tx traffic
4413 **/
4414 int i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
4415 struct i40e_asq_cmd_details *cmd_details)
4416 {
4417 struct i40e_aqc_tx_sched_ind *cmd;
4418 struct i40e_aq_desc desc;
4419 int status;
4420
4421 cmd = (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
4422 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_suspend_port_tx);
4423 cmd->vsi_seid = cpu_to_le16(seid);
4424 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4425
4426 return status;
4427 }
4428
4429 /**
4430 * i40e_aq_resume_port_tx
4431 * @hw: pointer to the hardware structure
4432 * @cmd_details: pointer to command details structure or NULL
4433 *
4434 * Resume port's Tx traffic
4435 **/
4436 int i40e_aq_resume_port_tx(struct i40e_hw *hw,
4437 struct i40e_asq_cmd_details *cmd_details)
4438 {
4439 struct i40e_aq_desc desc;
4440 int status;
4441
4442 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx);
4443
4444 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4445
4446 return status;
4447 }
4448
4449 /**
4450 * i40e_set_pci_config_data - store PCI bus info
4451 * @hw: pointer to hardware structure
4452 * @link_status: the link status word from PCI config space
4453 *
4454 * Stores the PCI bus info (speed, width, type) within the i40e_hw structure
4455 **/
4456 void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
4457 {
4458 hw->bus.type = i40e_bus_type_pci_express;
4459
4460 switch (link_status & PCI_EXP_LNKSTA_NLW) {
4461 case PCI_EXP_LNKSTA_NLW_X1:
4462 hw->bus.width = i40e_bus_width_pcie_x1;
4463 break;
4464 case PCI_EXP_LNKSTA_NLW_X2:
4465 hw->bus.width = i40e_bus_width_pcie_x2;
4466 break;
4467 case PCI_EXP_LNKSTA_NLW_X4:
4468 hw->bus.width = i40e_bus_width_pcie_x4;
4469 break;
4470 case PCI_EXP_LNKSTA_NLW_X8:
4471 hw->bus.width = i40e_bus_width_pcie_x8;
4472 break;
4473 default:
4474 hw->bus.width = i40e_bus_width_unknown;
4475 break;
4476 }
4477
4478 switch (link_status & PCI_EXP_LNKSTA_CLS) {
4479 case PCI_EXP_LNKSTA_CLS_2_5GB:
4480 hw->bus.speed = i40e_bus_speed_2500;
4481 break;
4482 case PCI_EXP_LNKSTA_CLS_5_0GB:
4483 hw->bus.speed = i40e_bus_speed_5000;
4484 break;
4485 case PCI_EXP_LNKSTA_CLS_8_0GB:
4486 hw->bus.speed = i40e_bus_speed_8000;
4487 break;
4488 default:
4489 hw->bus.speed = i40e_bus_speed_unknown;
4490 break;
4491 }
4492 }
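
/*
 * Illustrative sketch (not part of the driver): at probe time the PCIe
 * Link Status word is read from config space and handed to
 * i40e_set_pci_config_data() so hw->bus reflects the negotiated link
 * width and speed. The helper name is hypothetical.
 */
static void __maybe_unused i40e_example_cache_bus_info(struct i40e_hw *hw,
							struct pci_dev *pdev)
{
	u16 link_status;

	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &link_status);
	i40e_set_pci_config_data(hw, link_status);
}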
4493
4494 /**
4495 * i40e_aq_debug_dump
4496 * @hw: pointer to the hardware structure
4497 * @cluster_id: specific cluster to dump
4498 * @table_id: table id within cluster
4499 * @start_index: index of line in the block to read
4500 * @buff_size: dump buffer size
4501 * @buff: dump buffer
4502 * @ret_buff_size: actual buffer size returned
4503 * @ret_next_table: next block to read
4504 * @ret_next_index: next index to read
4505 * @cmd_details: pointer to command details structure or NULL
4506 *
4507 * Dump internal FW/HW data for debug purposes.
4508 *
4509 **/
4510 int i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
4511 u8 table_id, u32 start_index, u16 buff_size,
4512 void *buff, u16 *ret_buff_size,
4513 u8 *ret_next_table, u32 *ret_next_index,
4514 struct i40e_asq_cmd_details *cmd_details)
4515 {
4516 struct i40e_aq_desc desc;
4517 struct i40e_aqc_debug_dump_internals *cmd =
4518 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
4519 struct i40e_aqc_debug_dump_internals *resp =
4520 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
4521 int status;
4522
4523 if (buff_size == 0 || !buff)
4524 return -EINVAL;
4525
4526 i40e_fill_default_direct_cmd_desc(&desc,
4527 i40e_aqc_opc_debug_dump_internals);
4528 /* Indirect Command */
4529 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
4530 if (buff_size > I40E_AQ_LARGE_BUF)
4531 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
4532
4533 cmd->cluster_id = cluster_id;
4534 cmd->table_id = table_id;
4535 cmd->idx = cpu_to_le32(start_index);
4536
4537 desc.datalen = cpu_to_le16(buff_size);
4538
4539 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
4540 if (!status) {
4541 if (ret_buff_size)
4542 *ret_buff_size = le16_to_cpu(desc.datalen);
4543 if (ret_next_table)
4544 *ret_next_table = resp->table_id;
4545 if (ret_next_index)
4546 *ret_next_index = le32_to_cpu(resp->idx);
4547 }
4548
4549 return status;
4550 }
4551
4552 /**
4553 * i40e_read_bw_from_alt_ram
4554 * @hw: pointer to the hardware structure
4555 * @max_bw: pointer for max_bw read
4556 * @min_bw: pointer for min_bw read
4557 * @min_valid: pointer for bool that is true if min_bw is a valid value
4558 * @max_valid: pointer for bool that is true if max_bw is a valid value
4559 *
4560 * Read bw from the alternate ram for the given pf
4561 **/
4562 int i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
4563 u32 *max_bw, u32 *min_bw,
4564 bool *min_valid, bool *max_valid)
4565 {
4566 u32 max_bw_addr, min_bw_addr;
4567 int status;
4568
4569 /* Calculate the address of the min/max bw registers */
4570 max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
4571 I40E_ALT_STRUCT_MAX_BW_OFFSET +
4572 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
4573 min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
4574 I40E_ALT_STRUCT_MIN_BW_OFFSET +
4575 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
4576
4577 /* Read the bandwidths from alt ram */
4578 status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw,
4579 min_bw_addr, min_bw);
4580
4581 if (*min_bw & I40E_ALT_BW_VALID_MASK)
4582 *min_valid = true;
4583 else
4584 *min_valid = false;
4585
4586 if (*max_bw & I40E_ALT_BW_VALID_MASK)
4587 *max_valid = true;
4588 else
4589 *max_valid = false;
4590
4591 return status;
4592 }
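
/*
 * Illustrative sketch (not part of the driver): read the per-PF min/max
 * bandwidth words from alternate RAM and only trust a value when its
 * corresponding valid flag is set. The helper name and out-parameters
 * are hypothetical.
 */
static int __maybe_unused i40e_example_get_npar_bw(struct i40e_hw *hw,
						   u32 *min_bw, u32 *max_bw)
{
	bool min_valid = false, max_valid = false;
	u32 min_raw = 0, max_raw = 0;
	int status;

	status = i40e_read_bw_from_alt_ram(hw, &max_raw, &min_raw,
					   &min_valid, &max_valid);
	if (status)
		return status;

	if (min_valid)
		*min_bw = min_raw;
	if (max_valid)
		*max_bw = max_raw;

	return 0;
}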
4593
4594 /**
4595 * i40e_aq_configure_partition_bw
4596 * @hw: pointer to the hardware structure
4597 * @bw_data: Buffer holding valid pfs and bw limits
4598 * @cmd_details: pointer to command details
4599 *
4600 * Configure partitions guaranteed/max bw
4601 **/
4602 int
4603 i40e_aq_configure_partition_bw(struct i40e_hw *hw,
4604 struct i40e_aqc_configure_partition_bw_data *bw_data,
4605 struct i40e_asq_cmd_details *cmd_details)
4606 {
4607 u16 bwd_size = sizeof(*bw_data);
4608 struct i40e_aq_desc desc;
4609 int status;
4610
4611 i40e_fill_default_direct_cmd_desc(&desc,
4612 i40e_aqc_opc_configure_partition_bw);
4613
4614 /* Indirect command */
4615 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
4616 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
4617
4618 if (bwd_size > I40E_AQ_LARGE_BUF)
4619 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
4620
4621 desc.datalen = cpu_to_le16(bwd_size);
4622
4623 status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size,
4624 cmd_details);
4625
4626 return status;
4627 }
4628
4629 /**
4630 * i40e_read_phy_register_clause22
4631 * @hw: pointer to the HW structure
4632 * @reg: register address in the page
4633 * @phy_addr: PHY address on MDIO interface
4634 * @value: PHY register value
4635 *
4636 * Reads specified PHY register value
4637 **/
4638 int i40e_read_phy_register_clause22(struct i40e_hw *hw,
4639 u16 reg, u8 phy_addr, u16 *value)
4640 {
4641 u8 port_num = (u8)hw->func_caps.mdio_port_num;
4642 int status = -EIO;
4643 u32 command = 0;
4644 u16 retry = 1000;
4645
4646 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4647 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4648 (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) |
4649 (I40E_MDIO_CLAUSE22_STCODE_MASK) |
4650 (I40E_GLGEN_MSCA_MDICMD_MASK);
4651 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4652 do {
4653 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4654 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4655 status = 0;
4656 break;
4657 }
4658 udelay(10);
4659 retry--;
4660 } while (retry);
4661
4662 if (status) {
4663 i40e_debug(hw, I40E_DEBUG_PHY,
4664 "PHY: Can't write command to external PHY.\n");
4665 } else {
4666 command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
4667 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
4668 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
4669 }
4670
4671 return status;
4672 }
4673
4674 /**
4675 * i40e_write_phy_register_clause22
4676 * @hw: pointer to the HW structure
4677 * @reg: register address in the page
4678 * @phy_addr: PHY address on MDIO interface
4679 * @value: PHY register value
4680 *
4681 * Writes specified PHY register value
4682 **/
4683 int i40e_write_phy_register_clause22(struct i40e_hw *hw,
4684 u16 reg, u8 phy_addr, u16 value)
4685 {
4686 u8 port_num = (u8)hw->func_caps.mdio_port_num;
4687 int status = -EIO;
4688 u32 command = 0;
4689 u16 retry = 1000;
4690
4691 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
4692 wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
4693
4694 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4695 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4696 (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) |
4697 (I40E_MDIO_CLAUSE22_STCODE_MASK) |
4698 (I40E_GLGEN_MSCA_MDICMD_MASK);
4699
4700 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4701 do {
4702 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4703 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4704 status = 0;
4705 break;
4706 }
4707 udelay(10);
4708 retry--;
4709 } while (retry);
4710
4711 return status;
4712 }
4713
4714 /**
4715 * i40e_read_phy_register_clause45
4716 * @hw: pointer to the HW structure
4717 * @page: registers page number
4718 * @reg: register address in the page
4719 * @phy_addr: PHY address on MDIO interface
4720 * @value: PHY register value
4721 *
4722 * Reads specified PHY register value
4723 **/
4724 int i40e_read_phy_register_clause45(struct i40e_hw *hw,
4725 u8 page, u16 reg, u8 phy_addr, u16 *value)
4726 {
4727 u8 port_num = hw->func_caps.mdio_port_num;
4728 int status = -EIO;
4729 u32 command = 0;
4730 u16 retry = 1000;
4731
4732 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
4733 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4734 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4735 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
4736 (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4737 (I40E_GLGEN_MSCA_MDICMD_MASK) |
4738 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4739 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4740 do {
4741 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4742 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4743 status = 0;
4744 break;
4745 }
4746 usleep_range(10, 20);
4747 retry--;
4748 } while (retry);
4749
4750 if (status) {
4751 i40e_debug(hw, I40E_DEBUG_PHY,
4752 "PHY: Can't write command to external PHY.\n");
4753 goto phy_read_end;
4754 }
4755
4756 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4757 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4758 (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) |
4759 (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4760 (I40E_GLGEN_MSCA_MDICMD_MASK) |
4761 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4762 status = -EIO;
4763 retry = 1000;
4764 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4765 do {
4766 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4767 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4768 status = 0;
4769 break;
4770 }
4771 usleep_range(10, 20);
4772 retry--;
4773 } while (retry);
4774
4775 if (!status) {
4776 command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
4777 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
4778 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
4779 } else {
4780 i40e_debug(hw, I40E_DEBUG_PHY,
4781 "PHY: Can't read register value from external PHY.\n");
4782 }
4783
4784 phy_read_end:
4785 return status;
4786 }
4787
4788 /**
4789 * i40e_write_phy_register_clause45
4790 * @hw: pointer to the HW structure
4791 * @page: registers page number
4792 * @reg: register address in the page
4793 * @phy_addr: PHY address on MDIO interface
4794 * @value: PHY register value
4795 *
4796 * Writes value to specified PHY register
4797 **/
4798 int i40e_write_phy_register_clause45(struct i40e_hw *hw,
4799 u8 page, u16 reg, u8 phy_addr, u16 value)
4800 {
4801 u8 port_num = hw->func_caps.mdio_port_num;
4802 int status = -EIO;
4803 u16 retry = 1000;
4804 u32 command = 0;
4805
4806 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
4807 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4808 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4809 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
4810 (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4811 (I40E_GLGEN_MSCA_MDICMD_MASK) |
4812 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4813 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4814 do {
4815 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4816 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4817 status = 0;
4818 break;
4819 }
4820 usleep_range(10, 20);
4821 retry--;
4822 } while (retry);
4823 if (status) {
4824 i40e_debug(hw, I40E_DEBUG_PHY,
4825 "PHY: Can't write command to external PHY.\n");
4826 goto phy_write_end;
4827 }
4828
4829 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
4830 wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
4831
4832 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4833 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4834 (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) |
4835 (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4836 (I40E_GLGEN_MSCA_MDICMD_MASK) |
4837 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4838 status = -EIO;
4839 retry = 1000;
4840 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4841 do {
4842 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4843 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4844 status = 0;
4845 break;
4846 }
4847 usleep_range(10, 20);
4848 retry--;
4849 } while (retry);
4850
4851 phy_write_end:
4852 return status;
4853 }
4854
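/*
 * Note on the clause-45 helpers above: every access takes two MDIO cycles.
 * The first cycle uses I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK to latch the
 * register address (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) into the PHY; the
 * second issues the actual READ or WRITE opcode against the same page and
 * PHY address.  After each cycle the MDICMD bit in GLGEN_MSCA is polled until
 * the hardware clears it, bounded by 1000 iterations of a 10-20 us sleep.
 */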
4855 /**
4856 * i40e_write_phy_register
4857 * @hw: pointer to the HW structure
4858 * @page: registers page number
4859 * @reg: register address in the page
4860 * @phy_addr: PHY address on MDIO interface
4861 * @value: PHY register value
4862 *
4863 * Writes value to specified PHY register
4864 **/
4865 int i40e_write_phy_register(struct i40e_hw *hw,
4866 u8 page, u16 reg, u8 phy_addr, u16 value)
4867 {
4868 int status;
4869
4870 switch (hw->device_id) {
4871 case I40E_DEV_ID_1G_BASE_T_X722:
4872 status = i40e_write_phy_register_clause22(hw, reg, phy_addr,
4873 value);
4874 break;
4875 case I40E_DEV_ID_1G_BASE_T_BC:
4876 case I40E_DEV_ID_5G_BASE_T_BC:
4877 case I40E_DEV_ID_10G_BASE_T:
4878 case I40E_DEV_ID_10G_BASE_T4:
4879 case I40E_DEV_ID_10G_BASE_T_BC:
4880 case I40E_DEV_ID_10G_BASE_T_X722:
4881 case I40E_DEV_ID_25G_B:
4882 case I40E_DEV_ID_25G_SFP28:
4883 status = i40e_write_phy_register_clause45(hw, page, reg,
4884 phy_addr, value);
4885 break;
4886 default:
4887 status = -EIO;
4888 break;
4889 }
4890
4891 return status;
4892 }
4893
4894 /**
4895 * i40e_read_phy_register
4896 * @hw: pointer to the HW structure
4897 * @page: registers page number
4898 * @reg: register address in the page
4899 * @phy_addr: PHY address on MDIO interface
4900 * @value: PHY register value
4901 *
4902 * Reads specified PHY register value
4903 **/
4904 int i40e_read_phy_register(struct i40e_hw *hw,
4905 u8 page, u16 reg, u8 phy_addr, u16 *value)
4906 {
4907 int status;
4908
4909 switch (hw->device_id) {
4910 case I40E_DEV_ID_1G_BASE_T_X722:
4911 status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
4912 value);
4913 break;
4914 case I40E_DEV_ID_1G_BASE_T_BC:
4915 case I40E_DEV_ID_5G_BASE_T_BC:
4916 case I40E_DEV_ID_10G_BASE_T:
4917 case I40E_DEV_ID_10G_BASE_T4:
4918 case I40E_DEV_ID_10G_BASE_T_BC:
4919 case I40E_DEV_ID_10G_BASE_T_X722:
4920 case I40E_DEV_ID_25G_B:
4921 case I40E_DEV_ID_25G_SFP28:
4922 status = i40e_read_phy_register_clause45(hw, page, reg,
4923 phy_addr, value);
4924 break;
4925 default:
4926 status = -EIO;
4927 break;
4928 }
4929
4930 return status;
4931 }
4932
4933 /**
4934 * i40e_get_phy_address
4935 * @hw: pointer to the HW structure
4936 * @dev_num: PHY port num whose address we want
4937 *
4938 * Gets PHY address for current port
4939 **/
4940 u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
4941 {
4942 u8 port_num = hw->func_caps.mdio_port_num;
4943 u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num));
4944
4945 return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f;
4946 }
4947
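/*
 * Usage sketch (illustrative): resolve the PHY address for the current port
 * and read a register through the device-appropriate clause, the same way
 * i40e_blink_phy_link_led() below resolves its PHY address.  The page and
 * register used here are placeholders.
 *
 *	u32 portnum = rd32(hw, I40E_PFGEN_PORTNUM);
 *	u8 port = (u8)(portnum & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
 *	u8 phy = i40e_get_phy_address(hw, port);
 *	u16 val;
 *
 *	if (!i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE, reg, phy, &val))
 *		(use val)
 */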
4948 /**
4949 * i40e_blink_phy_link_led
4950 * @hw: pointer to the HW structure
4951 * @time: how long the LED will blink, in seconds
4952 * @interval: gap between LED on and off in msecs
4953 *
4954 * Blinks PHY link LED
4955 **/
4956 int i40e_blink_phy_link_led(struct i40e_hw *hw,
4957 u32 time, u32 interval)
4958 {
4959 u16 led_addr = I40E_PHY_LED_PROV_REG_1;
4960 u16 gpio_led_port;
4961 u8 phy_addr = 0;
4962 int status = 0;
4963 u16 led_ctl;
4964 u8 port_num;
4965 u16 led_reg;
4966 u32 i;
4967
4968 i = rd32(hw, I40E_PFGEN_PORTNUM);
4969 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
4970 phy_addr = i40e_get_phy_address(hw, port_num);
4971
4972 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
4973 led_addr++) {
4974 status = i40e_read_phy_register_clause45(hw,
4975 I40E_PHY_COM_REG_PAGE,
4976 led_addr, phy_addr,
4977 &led_reg);
4978 if (status)
4979 goto phy_blinking_end;
4980 led_ctl = led_reg;
4981 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
4982 led_reg = 0;
4983 status = i40e_write_phy_register_clause45(hw,
4984 I40E_PHY_COM_REG_PAGE,
4985 led_addr, phy_addr,
4986 led_reg);
4987 if (status)
4988 goto phy_blinking_end;
4989 break;
4990 }
4991 }
4992
4993 if (time > 0 && interval > 0) {
4994 for (i = 0; i < time * 1000; i += interval) {
4995 status = i40e_read_phy_register_clause45(hw,
4996 I40E_PHY_COM_REG_PAGE,
4997 led_addr, phy_addr, &led_reg);
4998 if (status)
4999 goto restore_config;
5000 if (led_reg & I40E_PHY_LED_MANUAL_ON)
5001 led_reg = 0;
5002 else
5003 led_reg = I40E_PHY_LED_MANUAL_ON;
5004 status = i40e_write_phy_register_clause45(hw,
5005 I40E_PHY_COM_REG_PAGE,
5006 led_addr, phy_addr, led_reg);
5007 if (status)
5008 goto restore_config;
5009 msleep(interval);
5010 }
5011 }
5012
5013 restore_config:
5014 status = i40e_write_phy_register_clause45(hw,
5015 I40E_PHY_COM_REG_PAGE,
5016 led_addr, phy_addr, led_ctl);
5017
5018 phy_blinking_end:
5019 return status;
5020 }
5021
5022 /**
5023 * i40e_led_get_reg - read LED register
5024 * @hw: pointer to the HW structure
5025 * @led_addr: LED register address
5026 * @reg_val: read register value
5027 **/
5028 static int i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
5029 u32 *reg_val)
5030 {
5031 u8 phy_addr = 0;
5032 u8 port_num;
5033 int status;
5034 u32 i;
5035
5036 *reg_val = 0;
5037 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
5038 status =
5039 i40e_aq_get_phy_register(hw,
5040 I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
5041 I40E_PHY_COM_REG_PAGE, true,
5042 I40E_PHY_LED_PROV_REG_1,
5043 reg_val, NULL);
5044 } else {
5045 i = rd32(hw, I40E_PFGEN_PORTNUM);
5046 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5047 phy_addr = i40e_get_phy_address(hw, port_num);
5048 status = i40e_read_phy_register_clause45(hw,
5049 I40E_PHY_COM_REG_PAGE,
5050 led_addr, phy_addr,
5051 (u16 *)reg_val);
5052 }
5053 return status;
5054 }
5055
5056 /**
5057 * i40e_led_set_reg - write LED register
5058 * @hw: pointer to the HW structure
5059 * @led_addr: LED register address
5060 * @reg_val: register value to write
5061 **/
5062 static int i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
5063 u32 reg_val)
5064 {
5065 u8 phy_addr = 0;
5066 u8 port_num;
5067 int status;
5068 u32 i;
5069
5070 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
5071 status =
5072 i40e_aq_set_phy_register(hw,
5073 I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
5074 I40E_PHY_COM_REG_PAGE, true,
5075 I40E_PHY_LED_PROV_REG_1,
5076 reg_val, NULL);
5077 } else {
5078 i = rd32(hw, I40E_PFGEN_PORTNUM);
5079 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5080 phy_addr = i40e_get_phy_address(hw, port_num);
5081 status = i40e_write_phy_register_clause45(hw,
5082 I40E_PHY_COM_REG_PAGE,
5083 led_addr, phy_addr,
5084 (u16)reg_val);
5085 }
5086
5087 return status;
5088 }
5089
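/*
 * i40e_led_get_reg() and i40e_led_set_reg() above select the access method
 * from I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE: firmware that supports the
 * Get/Set PHY Register admin queue commands is used when available, otherwise
 * the LED register is reached by a direct clause-45 MDIO access to the PHY
 * address resolved from I40E_PFGEN_PORTNUM.
 */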
5090 /**
5091 * i40e_led_get_phy - return current on/off mode
5092 * @hw: pointer to the hw struct
5093 * @led_addr: address of led register to use
5094 * @val: original value of register to use
5095 *
5096 **/
5097 int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
5098 u16 *val)
5099 {
5100 u16 gpio_led_port;
5101 u8 phy_addr = 0;
5102 u32 reg_val_aq;
5103 int status = 0;
5104 u16 temp_addr;
5105 u16 reg_val;
5106 u8 port_num;
5107 u32 i;
5108
5109 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
5110 status =
5111 i40e_aq_get_phy_register(hw,
5112 I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
5113 I40E_PHY_COM_REG_PAGE, true,
5114 I40E_PHY_LED_PROV_REG_1,
5115 &reg_val_aq, NULL);
5116 if (status == 0)
5117 *val = (u16)reg_val_aq;
5118 return status;
5119 }
5120 temp_addr = I40E_PHY_LED_PROV_REG_1;
5121 i = rd32(hw, I40E_PFGEN_PORTNUM);
5122 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5123 phy_addr = i40e_get_phy_address(hw, port_num);
5124
5125 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
5126 temp_addr++) {
5127 status = i40e_read_phy_register_clause45(hw,
5128 I40E_PHY_COM_REG_PAGE,
5129 temp_addr, phy_addr,
5130 &reg_val);
5131 if (status)
5132 return status;
5133 *val = reg_val;
5134 if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) {
5135 *led_addr = temp_addr;
5136 break;
5137 }
5138 }
5139 return status;
5140 }
5141
5142 /**
5143 * i40e_led_set_phy
5144 * @hw: pointer to the HW structure
5145 * @on: true to turn the LED on, false to turn it off
5146 * @led_addr: address of led register to use
5147 * @mode: original register value, plus I40E_PHY_LED_MODE_ORIG when it should be restored
5148 *
5149 * Set the LED on or off when it is controlled by the PHY
5150 *
5151 **/
5152 int i40e_led_set_phy(struct i40e_hw *hw, bool on,
5153 u16 led_addr, u32 mode)
5154 {
5155 u32 led_ctl = 0;
5156 u32 led_reg = 0;
5157 int status = 0;
5158
5159 status = i40e_led_get_reg(hw, led_addr, &led_reg);
5160 if (status)
5161 return status;
5162 led_ctl = led_reg;
5163 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
5164 led_reg = 0;
5165 status = i40e_led_set_reg(hw, led_addr, led_reg);
5166 if (status)
5167 return status;
5168 }
5169 status = i40e_led_get_reg(hw, led_addr, &led_reg);
5170 if (status)
5171 goto restore_config;
5172 if (on)
5173 led_reg = I40E_PHY_LED_MANUAL_ON;
5174 else
5175 led_reg = 0;
5176
5177 status = i40e_led_set_reg(hw, led_addr, led_reg);
5178 if (status)
5179 goto restore_config;
5180 if (mode & I40E_PHY_LED_MODE_ORIG) {
5181 led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
5182 status = i40e_led_set_reg(hw, led_addr, led_ctl);
5183 }
5184 return status;
5185
5186 restore_config:
5187 status = i40e_led_set_reg(hw, led_addr, led_ctl);
5188 return status;
5189 }
5190
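/*
 * Usage sketch (illustrative): force the port LED on and later hand control
 * back to the PHY.  led_addr and the original value come from
 * i40e_led_get_phy(); passing that value back with I40E_PHY_LED_MODE_ORIG set
 * asks i40e_led_set_phy() to restore it.
 *
 *	u16 led_addr = 0, orig = 0;
 *
 *	if (!i40e_led_get_phy(hw, &led_addr, &orig)) {
 *		i40e_led_set_phy(hw, true, led_addr, 0);
 *		...
 *		i40e_led_set_phy(hw, false, led_addr,
 *				 orig | I40E_PHY_LED_MODE_ORIG);
 *	}
 */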
5191 /**
5192 * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register
5193 * @hw: pointer to the hw struct
5194 * @reg_addr: register address
5195 * @reg_val: ptr to register value
5196 * @cmd_details: pointer to command details structure or NULL
5197 *
5198 * Use the firmware to read the Rx control register,
5199 * especially useful if the Rx unit is under heavy pressure
5200 **/
5201 int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
5202 u32 reg_addr, u32 *reg_val,
5203 struct i40e_asq_cmd_details *cmd_details)
5204 {
5205 struct i40e_aq_desc desc;
5206 struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
5207 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
5208 int status;
5209
5210 if (!reg_val)
5211 return -EINVAL;
5212
5213 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read);
5214
5215 cmd_resp->address = cpu_to_le32(reg_addr);
5216
5217 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5218
5219 if (status == 0)
5220 *reg_val = le32_to_cpu(cmd_resp->value);
5221
5222 return status;
5223 }
5224
5225 /**
5226 * i40e_read_rx_ctl - read from an Rx control register
5227 * @hw: pointer to the hw struct
5228 * @reg_addr: register address
5229 **/
5230 u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
5231 {
5232 bool use_register;
5233 int status = 0;
5234 int retry = 5;
5235 u32 val = 0;
5236
5237 use_register = (((hw->aq.api_maj_ver == 1) &&
5238 (hw->aq.api_min_ver < 5)) ||
5239 (hw->mac.type == I40E_MAC_X722));
5240 if (!use_register) {
5241 do_retry:
5242 status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
5243 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
5244 usleep_range(1000, 2000);
5245 retry--;
5246 goto do_retry;
5247 }
5248 }
5249
5250 /* if the AQ access failed, try the old-fashioned way */
5251 if (status || use_register)
5252 val = rd32(hw, reg_addr);
5253
5254 return val;
5255 }
5256
5257 /**
5258 * i40e_aq_rx_ctl_write_register
5259 * @hw: pointer to the hw struct
5260 * @reg_addr: register address
5261 * @reg_val: register value
5262 * @cmd_details: pointer to command details structure or NULL
5263 *
5264 * Use the firmware to write to an Rx control register,
5265 * especially useful if the Rx unit is under heavy pressure
5266 **/
5267 int i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
5268 u32 reg_addr, u32 reg_val,
5269 struct i40e_asq_cmd_details *cmd_details)
5270 {
5271 struct i40e_aq_desc desc;
5272 struct i40e_aqc_rx_ctl_reg_read_write *cmd =
5273 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
5274 int status;
5275
5276 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write);
5277
5278 cmd->address = cpu_to_le32(reg_addr);
5279 cmd->value = cpu_to_le32(reg_val);
5280
5281 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5282
5283 return status;
5284 }
5285
5286 /**
5287 * i40e_write_rx_ctl - write to an Rx control register
5288 * @hw: pointer to the hw struct
5289 * @reg_addr: register address
5290 * @reg_val: register value
5291 **/
5292 void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
5293 {
5294 bool use_register;
5295 int status = 0;
5296 int retry = 5;
5297
5298 use_register = (((hw->aq.api_maj_ver == 1) &&
5299 (hw->aq.api_min_ver < 5)) ||
5300 (hw->mac.type == I40E_MAC_X722));
5301 if (!use_register) {
5302 do_retry:
5303 status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
5304 reg_val, NULL);
5305 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
5306 usleep_range(1000, 2000);
5307 retry--;
5308 goto do_retry;
5309 }
5310 }
5311
5312 /* if the AQ access failed, try the old-fashioned way */
5313 if (status || use_register)
5314 wr32(hw, reg_addr, reg_val);
5315 }
5316
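/*
 * i40e_read_rx_ctl() and i40e_write_rx_ctl() above prefer the admin queue
 * commands when the firmware AQ API is 1.5 or newer and the MAC is not X722,
 * retrying up to five times with a 1-2 ms sleep when the firmware answers
 * I40E_AQ_RC_EAGAIN.  If the AQ path is unavailable or the command fails,
 * they fall back to a plain rd32()/wr32() of the register.
 */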
5317 /**
5318 * i40e_mdio_if_number_selection - MDIO I/F number selection
5319 * @hw: pointer to the hw struct
5320 * @set_mdio: use MDIO I/F number specified by mdio_num
5321 * @mdio_num: MDIO I/F number
5322 * @cmd: pointer to PHY Register command structure
5323 **/
5324 static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio,
5325 u8 mdio_num,
5326 struct i40e_aqc_phy_register_access *cmd)
5327 {
5328 if (set_mdio && cmd->phy_interface == I40E_AQ_PHY_REG_ACCESS_EXTERNAL) {
5329 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED)
5330 cmd->cmd_flags |=
5331 I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER |
5332 ((mdio_num <<
5333 I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT) &
5334 I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK);
5335 else
5336 i40e_debug(hw, I40E_DEBUG_PHY,
5337 "MDIO I/F number selection not supported by current FW version.\n");
5338 }
5339 }
5340
5341 /**
5342 * i40e_aq_set_phy_register_ext
5343 * @hw: pointer to the hw struct
5344 * @phy_select: select which phy should be accessed
5345 * @dev_addr: PHY device address
5346 * @page_change: flag to indicate if phy page should be updated
5347 * @set_mdio: use MDIO I/F number specified by mdio_num
5348 * @mdio_num: MDIO I/F number
5349 * @reg_addr: PHY register address
5350 * @reg_val: new register value
5351 * @cmd_details: pointer to command details structure or NULL
5352 *
5353 * Write the external PHY register.
5354 * NOTE: In common cases the MDIO I/F number should not be changed; that's why
5355 * you may use the simple wrapper i40e_aq_set_phy_register.
5356 **/
5357 int i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
5358 u8 phy_select, u8 dev_addr, bool page_change,
5359 bool set_mdio, u8 mdio_num,
5360 u32 reg_addr, u32 reg_val,
5361 struct i40e_asq_cmd_details *cmd_details)
5362 {
5363 struct i40e_aq_desc desc;
5364 struct i40e_aqc_phy_register_access *cmd =
5365 (struct i40e_aqc_phy_register_access *)&desc.params.raw;
5366 int status;
5367
5368 i40e_fill_default_direct_cmd_desc(&desc,
5369 i40e_aqc_opc_set_phy_register);
5370
5371 cmd->phy_interface = phy_select;
5372 cmd->dev_address = dev_addr;
5373 cmd->reg_address = cpu_to_le32(reg_addr);
5374 cmd->reg_value = cpu_to_le32(reg_val);
5375
5376 i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
5377
5378 if (!page_change)
5379 cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
5380
5381 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5382
5383 return status;
5384 }
5385
5386 /**
5387 * i40e_aq_get_phy_register_ext
5388 * @hw: pointer to the hw struct
5389 * @phy_select: select which phy should be accessed
5390 * @dev_addr: PHY device address
5391 * @page_change: flag to indicate if phy page should be updated
5392 * @set_mdio: use MDIO I/F number specified by mdio_num
5393 * @mdio_num: MDIO I/F number
5394 * @reg_addr: PHY register address
5395 * @reg_val: read register value
5396 * @cmd_details: pointer to command details structure or NULL
5397 *
5398 * Read the external PHY register.
5399 * NOTE: In common cases the MDIO I/F number should not be changed; that's why
5400 * you may use the simple wrapper i40e_aq_get_phy_register.
5401 **/
5402 int i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
5403 u8 phy_select, u8 dev_addr, bool page_change,
5404 bool set_mdio, u8 mdio_num,
5405 u32 reg_addr, u32 *reg_val,
5406 struct i40e_asq_cmd_details *cmd_details)
5407 {
5408 struct i40e_aq_desc desc;
5409 struct i40e_aqc_phy_register_access *cmd =
5410 (struct i40e_aqc_phy_register_access *)&desc.params.raw;
5411 int status;
5412
5413 i40e_fill_default_direct_cmd_desc(&desc,
5414 i40e_aqc_opc_get_phy_register);
5415
5416 cmd->phy_interface = phy_select;
5417 cmd->dev_address = dev_addr;
5418 cmd->reg_address = cpu_to_le32(reg_addr);
5419
5420 i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
5421
5422 if (!page_change)
5423 cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
5424
5425 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5426 if (!status)
5427 *reg_val = le32_to_cpu(cmd->reg_value);
5428
5429 return status;
5430 }
5431
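/*
 * Usage sketch (illustrative): read an external PHY register without changing
 * the MDIO I/F number (set_mdio = false, mdio_num = 0), the common case the
 * simple i40e_aq_get_phy_register() wrapper is meant for.  reg_addr is a
 * placeholder.
 *
 *	u32 val;
 *	int err;
 *
 *	err = i40e_aq_get_phy_register_ext(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
 *					   I40E_PHY_COM_REG_PAGE, true,
 *					   false, 0, reg_addr, &val, NULL);
 */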
5432 /**
5433 * i40e_aq_write_ddp - Write dynamic device personalization (ddp)
5434 * @hw: pointer to the hw struct
5435 * @buff: command buffer (size in bytes = buff_size)
5436 * @buff_size: buffer size in bytes
5437 * @track_id: package tracking id
5438 * @error_offset: returns error offset
5439 * @error_info: returns error information
5440 * @cmd_details: pointer to command details structure or NULL
5441 **/
5442 int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
5443 u16 buff_size, u32 track_id,
5444 u32 *error_offset, u32 *error_info,
5445 struct i40e_asq_cmd_details *cmd_details)
5446 {
5447 struct i40e_aq_desc desc;
5448 struct i40e_aqc_write_personalization_profile *cmd =
5449 (struct i40e_aqc_write_personalization_profile *)
5450 &desc.params.raw;
5451 struct i40e_aqc_write_ddp_resp *resp;
5452 int status;
5453
5454 i40e_fill_default_direct_cmd_desc(&desc,
5455 i40e_aqc_opc_write_personalization_profile);
5456
5457 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
5458 if (buff_size > I40E_AQ_LARGE_BUF)
5459 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5460
5461 desc.datalen = cpu_to_le16(buff_size);
5462
5463 cmd->profile_track_id = cpu_to_le32(track_id);
5464
5465 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
5466 if (!status) {
5467 resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
5468 if (error_offset)
5469 *error_offset = le32_to_cpu(resp->error_offset);
5470 if (error_info)
5471 *error_info = le32_to_cpu(resp->error_info);
5472 }
5473
5474 return status;
5475 }
5476
5477 /**
5478 * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp)
5479 * @hw: pointer to the hw struct
5480 * @buff: command buffer (size in bytes = buff_size)
5481 * @buff_size: buffer size in bytes
5482 * @flags: AdminQ command flags
5483 * @cmd_details: pointer to command details structure or NULL
5484 **/
5485 int i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
5486 u16 buff_size, u8 flags,
5487 struct i40e_asq_cmd_details *cmd_details)
5488 {
5489 struct i40e_aq_desc desc;
5490 struct i40e_aqc_get_applied_profiles *cmd =
5491 (struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
5492 int status;
5493
5494 i40e_fill_default_direct_cmd_desc(&desc,
5495 i40e_aqc_opc_get_personalization_profile_list);
5496
5497 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
5498 if (buff_size > I40E_AQ_LARGE_BUF)
5499 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5500 desc.datalen = cpu_to_le16(buff_size);
5501
5502 cmd->flags = flags;
5503
5504 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
5505
5506 return status;
5507 }
5508
5509 /**
5510 * i40e_find_segment_in_package
5511 * @segment_type: the segment type to search for (e.g., SEGMENT_TYPE_I40E)
5512 * @pkg_hdr: pointer to the package header to be searched
5513 *
5514 * This function searches a package file for a particular segment type. On
5515 * success it returns a pointer to the segment header, otherwise it will
5516 * return NULL.
5517 **/
5518 struct i40e_generic_seg_header *
5519 i40e_find_segment_in_package(u32 segment_type,
5520 struct i40e_package_header *pkg_hdr)
5521 {
5522 struct i40e_generic_seg_header *segment;
5523 u32 i;
5524
5525 /* Search all package segments for the requested segment type */
5526 for (i = 0; i < pkg_hdr->segment_count; i++) {
5527 segment =
5528 (struct i40e_generic_seg_header *)((u8 *)pkg_hdr +
5529 pkg_hdr->segment_offset[i]);
5530
5531 if (segment->type == segment_type)
5532 return segment;
5533 }
5534
5535 return NULL;
5536 }
5537
5538 /* Get section table in profile */
5539 #define I40E_SECTION_TABLE(profile, sec_tbl) \
5540 do { \
5541 struct i40e_profile_segment *p = (profile); \
5542 u32 count; \
5543 u32 *nvm; \
5544 count = p->device_table_count; \
5545 nvm = (u32 *)&p->device_table[count]; \
5546 sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \
5547 } while (0)
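/*
 * Layout assumed by I40E_SECTION_TABLE(): the profile segment header is
 * followed by device_table_count device entries, then by a u32 array whose
 * first element holds the number of u32 words that follow it; the section
 * table starts immediately after that array.
 */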
5548
5549 /* Get section header in profile */
5550 #define I40E_SECTION_HEADER(profile, offset) \
5551 (struct i40e_profile_section_header *)((u8 *)(profile) + (offset))
5552
5553 /**
5554 * i40e_find_section_in_profile
5555 * @section_type: the section type to search for (e.g., SECTION_TYPE_NOTE)
5556 * @profile: pointer to the i40e segment header to be searched
5557 *
5558 * This function searches i40e segment for a particular section type. On
5559 * success it returns a pointer to the section header, otherwise it will
5560 * return NULL.
5561 **/
5562 struct i40e_profile_section_header *
5563 i40e_find_section_in_profile(u32 section_type,
5564 struct i40e_profile_segment *profile)
5565 {
5566 struct i40e_profile_section_header *sec;
5567 struct i40e_section_table *sec_tbl;
5568 u32 sec_off;
5569 u32 i;
5570
5571 if (profile->header.type != SEGMENT_TYPE_I40E)
5572 return NULL;
5573
5574 I40E_SECTION_TABLE(profile, sec_tbl);
5575
5576 for (i = 0; i < sec_tbl->section_count; i++) {
5577 sec_off = sec_tbl->section_offset[i];
5578 sec = I40E_SECTION_HEADER(profile, sec_off);
5579 if (sec->section.type == section_type)
5580 return sec;
5581 }
5582
5583 return NULL;
5584 }
5585
5586 /**
5587 * i40e_ddp_exec_aq_section - Execute generic AQ for DDP
5588 * @hw: pointer to the hw struct
5589 * @aq: command buffer containing all data to execute AQ
5590 **/
5591 static int i40e_ddp_exec_aq_section(struct i40e_hw *hw,
5592 struct i40e_profile_aq_section *aq)
5593 {
5594 struct i40e_aq_desc desc;
5595 u8 *msg = NULL;
5596 u16 msglen;
5597 int status;
5598
5599 i40e_fill_default_direct_cmd_desc(&desc, aq->opcode);
5600 desc.flags |= cpu_to_le16(aq->flags);
5601 memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw));
5602
5603 msglen = aq->datalen;
5604 if (msglen) {
5605 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
5606 I40E_AQ_FLAG_RD));
5607 if (msglen > I40E_AQ_LARGE_BUF)
5608 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5609 desc.datalen = cpu_to_le16(msglen);
5610 msg = &aq->data[0];
5611 }
5612
5613 status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL);
5614
5615 if (status) {
5616 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5617 "unable to exec DDP AQ opcode %u, error %d\n",
5618 aq->opcode, status);
5619 return status;
5620 }
5621
5622 /* copy returned desc to aq_buf */
5623 memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw));
5624
5625 return 0;
5626 }
5627
5628 /**
5629 * i40e_validate_profile
5630 * @hw: pointer to the hardware structure
5631 * @profile: pointer to the profile segment of the package to be validated
5632 * @track_id: package tracking id
5633 * @rollback: flag if the profile is for rollback.
5634 *
5635 * Validates supported devices and profile's sections.
5636 */
5637 static int
5638 i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5639 u32 track_id, bool rollback)
5640 {
5641 struct i40e_profile_section_header *sec = NULL;
5642 struct i40e_section_table *sec_tbl;
5643 u32 vendor_dev_id;
5644 int status = 0;
5645 u32 dev_cnt;
5646 u32 sec_off;
5647 u32 i;
5648
5649 if (track_id == I40E_DDP_TRACKID_INVALID) {
5650 i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n");
5651 return -EOPNOTSUPP;
5652 }
5653
5654 dev_cnt = profile->device_table_count;
5655 for (i = 0; i < dev_cnt; i++) {
5656 vendor_dev_id = profile->device_table[i].vendor_dev_id;
5657 if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL &&
5658 hw->device_id == (vendor_dev_id & 0xFFFF))
5659 break;
5660 }
5661 if (dev_cnt && i == dev_cnt) {
5662 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5663 "Device doesn't support DDP\n");
5664 return -ENODEV;
5665 }
5666
5667 I40E_SECTION_TABLE(profile, sec_tbl);
5668
5669 /* Validate sections types */
5670 for (i = 0; i < sec_tbl->section_count; i++) {
5671 sec_off = sec_tbl->section_offset[i];
5672 sec = I40E_SECTION_HEADER(profile, sec_off);
5673 if (rollback) {
5674 if (sec->section.type == SECTION_TYPE_MMIO ||
5675 sec->section.type == SECTION_TYPE_AQ ||
5676 sec->section.type == SECTION_TYPE_RB_AQ) {
5677 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5678 "Not a roll-back package\n");
5679 return -EOPNOTSUPP;
5680 }
5681 } else {
5682 if (sec->section.type == SECTION_TYPE_RB_AQ ||
5683 sec->section.type == SECTION_TYPE_RB_MMIO) {
5684 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5685 "Not an original package\n");
5686 return -EOPNOTSUPP;
5687 }
5688 }
5689 }
5690
5691 return status;
5692 }
5693
5694 /**
5695 * i40e_write_profile
5696 * @hw: pointer to the hardware structure
5697 * @profile: pointer to the profile segment of the package to be downloaded
5698 * @track_id: package tracking id
5699 *
5700 * Handles the download of a complete package.
5701 */
5702 int
5703 i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5704 u32 track_id)
5705 {
5706 struct i40e_profile_section_header *sec = NULL;
5707 struct i40e_profile_aq_section *ddp_aq;
5708 struct i40e_section_table *sec_tbl;
5709 u32 offset = 0, info = 0;
5710 u32 section_size = 0;
5711 int status = 0;
5712 u32 sec_off;
5713 u32 i;
5714
5715 status = i40e_validate_profile(hw, profile, track_id, false);
5716 if (status)
5717 return status;
5718
5719 I40E_SECTION_TABLE(profile, sec_tbl);
5720
5721 for (i = 0; i < sec_tbl->section_count; i++) {
5722 sec_off = sec_tbl->section_offset[i];
5723 sec = I40E_SECTION_HEADER(profile, sec_off);
5724 /* Process generic admin command */
5725 if (sec->section.type == SECTION_TYPE_AQ) {
5726 ddp_aq = (struct i40e_profile_aq_section *)&sec[1];
5727 status = i40e_ddp_exec_aq_section(hw, ddp_aq);
5728 if (status) {
5729 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5730 "Failed to execute aq: section %d, opcode %u\n",
5731 i, ddp_aq->opcode);
5732 break;
5733 }
5734 sec->section.type = SECTION_TYPE_RB_AQ;
5735 }
5736
5737 /* Skip any non-mmio sections */
5738 if (sec->section.type != SECTION_TYPE_MMIO)
5739 continue;
5740
5741 section_size = sec->section.size +
5742 sizeof(struct i40e_profile_section_header);
5743
5744 /* Write MMIO section */
5745 status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
5746 track_id, &offset, &info, NULL);
5747 if (status) {
5748 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5749 "Failed to write profile: section %d, offset %d, info %d\n",
5750 i, offset, info);
5751 break;
5752 }
5753 }
5754 return status;
5755 }
5756
5757 /**
5758 * i40e_rollback_profile
5759 * @hw: pointer to the hardware structure
5760 * @profile: pointer to the profile segment of the package to be removed
5761 * @track_id: package tracking id
5762 *
5763 * Rolls back previously loaded package.
5764 */
5765 int
5766 i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5767 u32 track_id)
5768 {
5769 struct i40e_profile_section_header *sec = NULL;
5770 struct i40e_section_table *sec_tbl;
5771 u32 offset = 0, info = 0;
5772 u32 section_size = 0;
5773 int status = 0;
5774 u32 sec_off;
5775 int i;
5776
5777 status = i40e_validate_profile(hw, profile, track_id, true);
5778 if (status)
5779 return status;
5780
5781 I40E_SECTION_TABLE(profile, sec_tbl);
5782
5783 /* For rollback write sections in reverse */
5784 for (i = sec_tbl->section_count - 1; i >= 0; i--) {
5785 sec_off = sec_tbl->section_offset[i];
5786 sec = I40E_SECTION_HEADER(profile, sec_off);
5787
5788 /* Skip any non-rollback sections */
5789 if (sec->section.type != SECTION_TYPE_RB_MMIO)
5790 continue;
5791
5792 section_size = sec->section.size +
5793 sizeof(struct i40e_profile_section_header);
5794
5795 /* Write roll-back MMIO section */
5796 status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
5797 track_id, &offset, &info, NULL);
5798 if (status) {
5799 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5800 "Failed to write profile: section %d, offset %d, info %d\n",
5801 i, offset, info);
5802 break;
5803 }
5804 }
5805 return status;
5806 }
5807
5808 /**
5809 * i40e_add_pinfo_to_list
5810 * @hw: pointer to the hardware structure
5811 * @profile: pointer to the profile segment of the package
5812 * @profile_info_sec: buffer for information section
5813 * @track_id: package tracking id
5814 *
5815 * Register a profile to the list of loaded profiles.
5816 */
5817 int
5818 i40e_add_pinfo_to_list(struct i40e_hw *hw,
5819 struct i40e_profile_segment *profile,
5820 u8 *profile_info_sec, u32 track_id)
5821 {
5822 struct i40e_profile_section_header *sec = NULL;
5823 struct i40e_profile_info *pinfo;
5824 u32 offset = 0, info = 0;
5825 int status = 0;
5826
5827 sec = (struct i40e_profile_section_header *)profile_info_sec;
5828 sec->tbl_size = 1;
5829 sec->data_end = sizeof(struct i40e_profile_section_header) +
5830 sizeof(struct i40e_profile_info);
5831 sec->section.type = SECTION_TYPE_INFO;
5832 sec->section.offset = sizeof(struct i40e_profile_section_header);
5833 sec->section.size = sizeof(struct i40e_profile_info);
5834 pinfo = (struct i40e_profile_info *)(profile_info_sec +
5835 sec->section.offset);
5836 pinfo->track_id = track_id;
5837 pinfo->version = profile->version;
5838 pinfo->op = I40E_DDP_ADD_TRACKID;
5839 memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
5840
5841 status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
5842 track_id, &offset, &info, NULL);
5843
5844 return status;
5845 }
5846
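/*
 * Usage sketch (illustrative): the DDP download path chains the helpers above.
 * "pkg" stands for a package image already read into memory, "pinfo_sec" for a
 * caller-allocated buffer for the profile-info section, and track_id for the
 * package tracking id; all three are placeholders.
 *
 *	struct i40e_profile_segment *profile;
 *
 *	profile = (struct i40e_profile_segment *)
 *		  i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg);
 *	if (profile && !i40e_write_profile(hw, profile, track_id))
 *		i40e_add_pinfo_to_list(hw, profile, pinfo_sec, track_id);
 */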
5847 /**
5848 * i40e_aq_add_cloud_filters
5849 * @hw: pointer to the hardware structure
5850 * @seid: VSI seid to add cloud filters to
5851 * @filters: Buffer which contains the filters to be added
5852 * @filter_count: number of filters contained in the buffer
5853 *
5854 * Set the cloud filters for a given VSI. The contents of the
5855 * i40e_aqc_cloud_filters_element_data are filled in by the caller
5856 * of the function.
5857 *
5858 **/
5859 int
5860 i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
5861 struct i40e_aqc_cloud_filters_element_data *filters,
5862 u8 filter_count)
5863 {
5864 struct i40e_aq_desc desc;
5865 struct i40e_aqc_add_remove_cloud_filters *cmd =
5866 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5867 u16 buff_len;
5868 int status;
5869
5870 i40e_fill_default_direct_cmd_desc(&desc,
5871 i40e_aqc_opc_add_cloud_filters);
5872
5873 buff_len = filter_count * sizeof(*filters);
5874 desc.datalen = cpu_to_le16(buff_len);
5875 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5876 cmd->num_filters = filter_count;
5877 cmd->seid = cpu_to_le16(seid);
5878
5879 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
5880
5881 return status;
5882 }
5883
5884 /**
5885 * i40e_aq_add_cloud_filters_bb
5886 * @hw: pointer to the hardware structure
5887 * @seid: VSI seid to add cloud filters to
5888 * @filters: Buffer which contains the filters in big buffer to be added
5889 * @filter_count: number of filters contained in the buffer
5890 *
5891 * Set the big buffer cloud filters for a given VSI. The contents of the
5892 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
5893 * function.
5894 *
5895 **/
5896 int
5897 i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
5898 struct i40e_aqc_cloud_filters_element_bb *filters,
5899 u8 filter_count)
5900 {
5901 struct i40e_aq_desc desc;
5902 struct i40e_aqc_add_remove_cloud_filters *cmd =
5903 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5904 u16 buff_len;
5905 int status;
5906 int i;
5907
5908 i40e_fill_default_direct_cmd_desc(&desc,
5909 i40e_aqc_opc_add_cloud_filters);
5910
5911 buff_len = filter_count * sizeof(*filters);
5912 desc.datalen = cpu_to_le16(buff_len);
5913 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5914 cmd->num_filters = filter_count;
5915 cmd->seid = cpu_to_le16(seid);
5916 cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
5917
5918 for (i = 0; i < filter_count; i++) {
5919 u16 tnl_type;
5920 u32 ti;
5921
5922 tnl_type = (le16_to_cpu(filters[i].element.flags) &
5923 I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
5924 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
5925
5926 /* Due to hardware eccentricities, the VNI for Geneve is shifted
5927 * one more byte further than normally used for Tenant ID in
5928 * other tunnel types.
5929 */
5930 if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
5931 ti = le32_to_cpu(filters[i].element.tenant_id);
5932 filters[i].element.tenant_id = cpu_to_le32(ti << 8);
5933 }
5934 }
5935
5936 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
5937
5938 return status;
5939 }
5940
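/*
 * Note that i40e_aq_add_cloud_filters_bb() above and
 * i40e_aq_rem_cloud_filters_bb() below rewrite the Geneve tenant_id field in
 * the caller-supplied filter array in place, shifting the VNI left by one
 * byte before the buffer is handed to the firmware.
 */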
5941 /**
5942 * i40e_aq_rem_cloud_filters
5943 * @hw: pointer to the hardware structure
5944 * @seid: VSI seid to remove cloud filters from
5945 * @filters: Buffer which contains the filters to be removed
5946 * @filter_count: number of filters contained in the buffer
5947 *
5948 * Remove the cloud filters for a given VSI. The contents of the
5949 * i40e_aqc_cloud_filters_element_data are filled in by the caller
5950 * of the function.
5951 *
5952 **/
5953 int
5954 i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
5955 struct i40e_aqc_cloud_filters_element_data *filters,
5956 u8 filter_count)
5957 {
5958 struct i40e_aq_desc desc;
5959 struct i40e_aqc_add_remove_cloud_filters *cmd =
5960 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5961 u16 buff_len;
5962 int status;
5963
5964 i40e_fill_default_direct_cmd_desc(&desc,
5965 i40e_aqc_opc_remove_cloud_filters);
5966
5967 buff_len = filter_count * sizeof(*filters);
5968 desc.datalen = cpu_to_le16(buff_len);
5969 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5970 cmd->num_filters = filter_count;
5971 cmd->seid = cpu_to_le16(seid);
5972
5973 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
5974
5975 return status;
5976 }
5977
5978 /**
5979 * i40e_aq_rem_cloud_filters_bb
5980 * @hw: pointer to the hardware structure
5981 * @seid: VSI seid to remove cloud filters from
5982 * @filters: Buffer which contains the filters in big buffer to be removed
5983 * @filter_count: number of filters contained in the buffer
5984 *
5985 * Remove the big buffer cloud filters for a given VSI. The contents of the
5986 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
5987 * function.
5988 *
5989 **/
5990 int
5991 i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
5992 struct i40e_aqc_cloud_filters_element_bb *filters,
5993 u8 filter_count)
5994 {
5995 struct i40e_aq_desc desc;
5996 struct i40e_aqc_add_remove_cloud_filters *cmd =
5997 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5998 u16 buff_len;
5999 int status;
6000 int i;
6001
6002 i40e_fill_default_direct_cmd_desc(&desc,
6003 i40e_aqc_opc_remove_cloud_filters);
6004
6005 buff_len = filter_count * sizeof(*filters);
6006 desc.datalen = cpu_to_le16(buff_len);
6007 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
6008 cmd->num_filters = filter_count;
6009 cmd->seid = cpu_to_le16(seid);
6010 cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
6011
6012 for (i = 0; i < filter_count; i++) {
6013 u16 tnl_type;
6014 u32 ti;
6015
6016 tnl_type = (le16_to_cpu(filters[i].element.flags) &
6017 I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
6018 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
6019
6020 /* Due to hardware eccentricities, the VNI for Geneve is shifted
6021 * one more byte further than normally used for Tenant ID in
6022 * other tunnel types.
6023 */
6024 if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
6025 ti = le32_to_cpu(filters[i].element.tenant_id);
6026 filters[i].element.tenant_id = cpu_to_le32(ti << 8);
6027 }
6028 }
6029
6030 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
6031
6032 return status;
6033 }
6034