1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /* Copyright (c) 2013-2022, Intel Corporation. */
3
4 #ifndef _VIRTCHNL_H_
5 #define _VIRTCHNL_H_
6
7 #include <linux/bitops.h>
8 #include <linux/bits.h>
9 #include <linux/overflow.h>
10 #include <uapi/linux/if_ether.h>
11
12 /* Description:
13 * This header file describes the Virtual Function (VF) - Physical Function
14 * (PF) communication protocol used by the drivers for all devices starting
15 * from our 40G product line
16 *
17 * Admin queue buffer usage:
18 * desc->opcode is always aqc_opc_send_msg_to_pf
19 * flags, retval, datalen, and data addr are all used normally.
20 * The Firmware copies the cookie fields when sending messages between the
21 * PF and VF, but uses all other fields internally. Due to this limitation,
22 * we must send all messages as "indirect", i.e. using an external buffer.
23 *
24 * All the VSI indexes are relative to the VF. Each VF can have a maximum of
25 * three VSIs. All the queue indexes are relative to the VSI. Each VF can
26 * have a maximum of sixteen queues for all of its VSIs.
27 *
28 * The PF is required to return a status code in v_retval for all messages
29 * except RESET_VF, which does not require any response. The returned value
30 * is of virtchnl_status_code type, defined here.
31 *
32 * In general, VF driver initialization should roughly follow the order of
33 * these opcodes. The VF driver must first validate the API version of the
34 * PF driver, then request a reset, then get resources, then configure
35 * queues and interrupts. After these operations are complete, the VF
36 * driver may start its queues, optionally add MAC and VLAN filters, and
37 * process traffic.
38 */
39
40 /* START GENERIC DEFINES
41 * Need to ensure the following enums and defines hold the same meaning and
42 * value in current and future projects
43 */
44
45 /* Error Codes */
46 enum virtchnl_status_code {
47 VIRTCHNL_STATUS_SUCCESS = 0,
48 VIRTCHNL_STATUS_ERR_PARAM = -5,
49 VIRTCHNL_STATUS_ERR_NO_MEMORY = -18,
50 VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38,
51 VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39,
52 VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40,
53 VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR = -53,
54 VIRTCHNL_STATUS_ERR_NOT_SUPPORTED = -64,
55 };
56
57 /* Backward compatibility */
58 #define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM
59 #define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED
60
61 #define VIRTCHNL_LINK_SPEED_2_5GB_SHIFT 0x0
62 #define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1
63 #define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2
64 #define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3
65 #define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4
66 #define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5
67 #define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6
68 #define VIRTCHNL_LINK_SPEED_5GB_SHIFT 0x7
69
70 enum virtchnl_link_speed {
71 VIRTCHNL_LINK_SPEED_UNKNOWN = 0,
72 VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT),
73 VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT),
74 VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT),
75 VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),
76 VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),
77 VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),
78 VIRTCHNL_LINK_SPEED_2_5GB = BIT(VIRTCHNL_LINK_SPEED_2_5GB_SHIFT),
79 VIRTCHNL_LINK_SPEED_5GB = BIT(VIRTCHNL_LINK_SPEED_5GB_SHIFT),
80 };
81
82 /* for hsplit_0 field of Rx HMC context */
83 /* deprecated with AVF 1.0 */
84 enum virtchnl_rx_hsplit {
85 VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0,
86 VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1,
87 VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2,
88 VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4,
89 VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8,
90 };
91
92 /* END GENERIC DEFINES */
93
94 /* Opcodes for VF-PF communication. These are placed in the v_opcode field
95 * of the virtchnl_msg structure.
96 */
97 enum virtchnl_ops {
98 /* The PF sends status change events to VFs using
99 * the VIRTCHNL_OP_EVENT opcode.
100 * VFs send requests to the PF using the other ops.
101 * Use of "advanced opcode" features must be negotiated as part of the
102 * capabilities exchange and is not considered part of the base mode feature set.
103 */
104 VIRTCHNL_OP_UNKNOWN = 0,
105 VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
106 VIRTCHNL_OP_RESET_VF = 2,
107 VIRTCHNL_OP_GET_VF_RESOURCES = 3,
108 VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
109 VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
110 VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
111 VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
112 VIRTCHNL_OP_ENABLE_QUEUES = 8,
113 VIRTCHNL_OP_DISABLE_QUEUES = 9,
114 VIRTCHNL_OP_ADD_ETH_ADDR = 10,
115 VIRTCHNL_OP_DEL_ETH_ADDR = 11,
116 VIRTCHNL_OP_ADD_VLAN = 12,
117 VIRTCHNL_OP_DEL_VLAN = 13,
118 VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
119 VIRTCHNL_OP_GET_STATS = 15,
120 VIRTCHNL_OP_RSVD = 16,
121 VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
122 /* opcodes 18 and 19 are reserved */
123 VIRTCHNL_OP_IWARP = 20, /* advanced opcode */
124 VIRTCHNL_OP_RDMA = VIRTCHNL_OP_IWARP,
125 VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */
126 VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP = VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP,
127 VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */
128 VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP = VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
129 VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
130 VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
131 VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
132 VIRTCHNL_OP_SET_RSS_HENA = 26,
133 VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
134 VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
135 VIRTCHNL_OP_REQUEST_QUEUES = 29,
136 VIRTCHNL_OP_ENABLE_CHANNELS = 30,
137 VIRTCHNL_OP_DISABLE_CHANNELS = 31,
138 VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
139 VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
140 /* opcodes 34 - 43 are reserved */
141 VIRTCHNL_OP_GET_SUPPORTED_RXDIDS = 44,
142 VIRTCHNL_OP_ADD_RSS_CFG = 45,
143 VIRTCHNL_OP_DEL_RSS_CFG = 46,
144 VIRTCHNL_OP_ADD_FDIR_FILTER = 47,
145 VIRTCHNL_OP_DEL_FDIR_FILTER = 48,
146 VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS = 51,
147 VIRTCHNL_OP_ADD_VLAN_V2 = 52,
148 VIRTCHNL_OP_DEL_VLAN_V2 = 53,
149 VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 = 54,
150 VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 = 55,
151 VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 = 56,
152 VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2 = 57,
153 VIRTCHNL_OP_MAX,
154 };
155
156 /* These macros are used to generate compilation errors if a structure/union
157 * is not exactly the correct length. It gives a divide by zero error if the
158 * structure/union is not of the correct size, otherwise it creates an enum
159 * that is never used.
160 */
161 #define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
162 { virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
163 #define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_assert_enum_##X \
164 { virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) }
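/* Illustrative sketch (not part of the ABI) of how the size check works:
 * VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info) expands roughly to
 *
 *	enum virtchnl_static_assert_enum_virtchnl_version_info {
 *		virtchnl_static_assert_virtchnl_version_info =
 *			(8) / ((sizeof(struct virtchnl_version_info) == (8)) ? 1 : 0)
 *	};
 *
 * If the structure is not exactly 8 bytes, the divisor evaluates to 0 and the
 * division in the constant expression fails to compile.
 */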
165
166 /* Message descriptions and data structures. */
167
168 /* VIRTCHNL_OP_VERSION
169 * VF posts its version number to the PF. PF responds with its version number
170 * in the same format, along with a return code.
171 * Reply from PF has its major/minor versions also in param0 and param1.
172 * If there is a major version mismatch, then the VF cannot operate.
173 * If there is a minor version mismatch, then the VF can operate but should
174 * add a warning to the system log.
175 *
176 * This enum element MUST always be specified as == 1, regardless of other
177 * changes in the API. The PF must always respond to this message without
178 * error regardless of version mismatch.
179 */
180 #define VIRTCHNL_VERSION_MAJOR 1
181 #define VIRTCHNL_VERSION_MINOR 1
182 #define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
183
184 struct virtchnl_version_info {
185 u32 major;
186 u32 minor;
187 };
188
189 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info);
190
191 #define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0))
192 #define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1))
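/* Illustrative sketch, not part of this header: a VF driver might negotiate
 * the API version as follows. vf_send_pf_msg() is a hypothetical helper that
 * posts an indirect admin queue message to the PF, 'pf_ver' is the
 * virtchnl_version_info returned by the PF, and 'dev' is the driver's device.
 *
 *	struct virtchnl_version_info ver = {
 *		.major = VIRTCHNL_VERSION_MAJOR,
 *		.minor = VIRTCHNL_VERSION_MINOR,
 *	};
 *
 *	vf_send_pf_msg(VIRTCHNL_OP_VERSION, (u8 *)&ver, sizeof(ver));
 *
 *	if (pf_ver.major != VIRTCHNL_VERSION_MAJOR)
 *		return -EOPNOTSUPP;	// major mismatch: VF cannot operate
 *	else if (pf_ver.minor != VIRTCHNL_VERSION_MINOR)
 *		dev_warn(dev, "PF API minor version mismatch\n");
 */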
193
194 /* VIRTCHNL_OP_RESET_VF
195 * VF sends this request to PF with no parameters
196 * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
197 * until reset completion is indicated. The admin queue must be reinitialized
198 * after this operation.
199 *
200 * When reset is complete, PF must ensure that all queues in all VSIs associated
201 * with the VF are stopped, all queue configurations in the HMC are set to 0,
202 * and all MAC and VLAN filters (except the default MAC address) on all VSIs
203 * are cleared.
204 */
205
206 /* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV
207 * vsi_type should always be 6 for backward compatibility. Add other fields
208 * as needed.
209 */
210 enum virtchnl_vsi_type {
211 VIRTCHNL_VSI_TYPE_INVALID = 0,
212 VIRTCHNL_VSI_SRIOV = 6,
213 };
214
215 /* VIRTCHNL_OP_GET_VF_RESOURCES
216 * Version 1.0 VF sends this request to PF with no parameters
217 * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
218 * PF responds with an indirect message containing
219 * virtchnl_vf_resource and one or more
220 * virtchnl_vsi_resource structures.
221 */
222
223 struct virtchnl_vsi_resource {
224 u16 vsi_id;
225 u16 num_queue_pairs;
226
227 /* see enum virtchnl_vsi_type */
228 s32 vsi_type;
229 u16 qset_handle;
230 u8 default_mac_addr[ETH_ALEN];
231 };
232
233 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
234
235 /* VF capability flags
236 * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
237 * TX/RX Checksum offloading and TSO for non-tunnelled packets.
238 */
239 #define VIRTCHNL_VF_OFFLOAD_L2 BIT(0)
240 #define VIRTCHNL_VF_OFFLOAD_RDMA BIT(1)
241 #define VIRTCHNL_VF_CAP_RDMA VIRTCHNL_VF_OFFLOAD_RDMA
242 #define VIRTCHNL_VF_OFFLOAD_RSS_AQ BIT(3)
243 #define VIRTCHNL_VF_OFFLOAD_RSS_REG BIT(4)
244 #define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR BIT(5)
245 #define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES BIT(6)
246 /* used to negotiate communicating link speeds in Mbps */
247 #define VIRTCHNL_VF_CAP_ADV_LINK_SPEED BIT(7)
248 #define VIRTCHNL_VF_OFFLOAD_CRC BIT(10)
249 #define VIRTCHNL_VF_OFFLOAD_VLAN_V2 BIT(15)
250 #define VIRTCHNL_VF_OFFLOAD_VLAN BIT(16)
251 #define VIRTCHNL_VF_OFFLOAD_RX_POLLING BIT(17)
252 #define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 BIT(18)
253 #define VIRTCHNL_VF_OFFLOAD_RSS_PF BIT(19)
254 #define VIRTCHNL_VF_OFFLOAD_ENCAP BIT(20)
255 #define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM BIT(21)
256 #define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM BIT(22)
257 #define VIRTCHNL_VF_OFFLOAD_ADQ BIT(23)
258 #define VIRTCHNL_VF_OFFLOAD_USO BIT(25)
259 #define VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC BIT(26)
260 #define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF BIT(27)
261 #define VIRTCHNL_VF_OFFLOAD_FDIR_PF BIT(28)
262
263 #define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
264 VIRTCHNL_VF_OFFLOAD_VLAN | \
265 VIRTCHNL_VF_OFFLOAD_RSS_PF)
266
267 struct virtchnl_vf_resource {
268 u16 num_vsis;
269 u16 num_queue_pairs;
270 u16 max_vectors;
271 u16 max_mtu;
272
273 u32 vf_cap_flags;
274 u32 rss_key_size;
275 u32 rss_lut_size;
276
277 struct virtchnl_vsi_resource vsi_res[];
278 };
279
280 VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_vf_resource);
281 #define virtchnl_vf_resource_LEGACY_SIZEOF 36
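/* Illustrative sketch, not part of this header: a version 1.1 VF requests
 * resources by sending a u32 bitmap of the capabilities it would like to use
 * (vf_send_pf_msg() is a hypothetical send helper):
 *
 *	u32 caps = VIRTCHNL_VF_OFFLOAD_L2 |
 *		   VIRTCHNL_VF_OFFLOAD_VLAN |
 *		   VIRTCHNL_VF_OFFLOAD_RSS_PF |
 *		   VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
 *
 *	vf_send_pf_msg(VIRTCHNL_OP_GET_VF_RESOURCES, (u8 *)&caps, sizeof(caps));
 *
 * The PF replies with a virtchnl_vf_resource whose vf_cap_flags holds the
 * subset of the requested capabilities it actually grants, followed by
 * num_vsis virtchnl_vsi_resource entries.
 */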
282
283 /* VIRTCHNL_OP_CONFIG_TX_QUEUE
284 * VF sends this message to set up parameters for one TX queue.
285 * External data buffer contains one instance of virtchnl_txq_info.
286 * PF configures requested queue and returns a status code.
287 */
288
289 /* Tx queue config info */
290 struct virtchnl_txq_info {
291 u16 vsi_id;
292 u16 queue_id;
293 u16 ring_len; /* number of descriptors, multiple of 8 */
294 u16 headwb_enabled; /* deprecated with AVF 1.0 */
295 u64 dma_ring_addr;
296 u64 dma_headwb_addr; /* deprecated with AVF 1.0 */
297 };
298
299 VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
300
301 /* VIRTCHNL_OP_CONFIG_RX_QUEUE
302 * VF sends this message to set up parameters for one RX queue.
303 * External data buffer contains one instance of virtchnl_rxq_info.
304 * PF configures requested queue and returns a status code. The
305 * crc_disable flag disables CRC stripping on the VF. Setting
306 * the crc_disable flag to 1 will disable CRC stripping for each
307 * queue in the VF where the flag is set. The VIRTCHNL_VF_OFFLOAD_CRC
308 * offload must have been set prior to sending this info or the PF
309 * will ignore the request. This flag should be set the same for
310 * all of the queues for a VF.
311 */
312
313 /* Rx queue config info */
314 struct virtchnl_rxq_info {
315 u16 vsi_id;
316 u16 queue_id;
317 u32 ring_len; /* number of descriptors, multiple of 32 */
318 u16 hdr_size;
319 u16 splithdr_enabled; /* deprecated with AVF 1.0 */
320 u32 databuffer_size;
321 u32 max_pkt_size;
322 u8 crc_disable;
323 u8 rxdid;
324 u8 pad1[2];
325 u64 dma_ring_addr;
326
327 /* see enum virtchnl_rx_hsplit; deprecated with AVF 1.0 */
328 s32 rx_split_pos;
329 u32 pad2;
330 };
331
332 VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);
333
334 /* VIRTCHNL_OP_CONFIG_VSI_QUEUES
335 * VF sends this message to set parameters for all active TX and RX queues
336 * associated with the specified VSI.
337 * PF configures queues and returns status.
338 * If the number of queues specified is greater than the number of queues
339 * associated with the VSI, an error is returned and no queues are configured.
340 * NOTE: The VF is not required to configure all queues in a single request.
341 * It may send multiple messages. PF drivers must correctly handle all VF
342 * requests.
343 */
344 struct virtchnl_queue_pair_info {
345 /* NOTE: vsi_id and queue_id should be identical for both queues. */
346 struct virtchnl_txq_info txq;
347 struct virtchnl_rxq_info rxq;
348 };
349
350 VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info);
351
352 struct virtchnl_vsi_queue_config_info {
353 u16 vsi_id;
354 u16 num_queue_pairs;
355 u32 pad;
356 struct virtchnl_queue_pair_info qpair[];
357 };
358
359 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vsi_queue_config_info);
360 #define virtchnl_vsi_queue_config_info_LEGACY_SIZEOF 72
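/* Illustrative sketch, not part of this header: configuring two queue pairs.
 * virtchnl_struct_size() is defined near the end of this header;
 * vf_send_pf_msg() is a hypothetical send helper, and vsi_id, tx_dma[] and
 * rx_dma[] are assumed to come from the driver.
 *
 *	struct virtchnl_vsi_queue_config_info *vqci;
 *	size_t len = virtchnl_struct_size(vqci, qpair, 2);
 *	int i;
 *
 *	vqci = kzalloc(len, GFP_KERNEL);
 *	vqci->vsi_id = vsi_id;
 *	vqci->num_queue_pairs = 2;
 *	for (i = 0; i < 2; i++) {
 *		vqci->qpair[i].txq.vsi_id = vsi_id;
 *		vqci->qpair[i].txq.queue_id = i;
 *		vqci->qpair[i].txq.ring_len = 512;		// multiple of 8
 *		vqci->qpair[i].txq.dma_ring_addr = tx_dma[i];
 *		vqci->qpair[i].rxq.vsi_id = vsi_id;
 *		vqci->qpair[i].rxq.queue_id = i;
 *		vqci->qpair[i].rxq.ring_len = 512;		// multiple of 32
 *		vqci->qpair[i].rxq.databuffer_size = 2048;
 *		vqci->qpair[i].rxq.max_pkt_size = 1522;
 *		vqci->qpair[i].rxq.dma_ring_addr = rx_dma[i];
 *	}
 *	vf_send_pf_msg(VIRTCHNL_OP_CONFIG_VSI_QUEUES, (u8 *)vqci, len);
 */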
361
362 /* VIRTCHNL_OP_REQUEST_QUEUES
363 * VF sends this message to request the PF to allocate additional queues to
364 * this VF. Each VF gets a guaranteed number of queues on init but asking for
365 * additional queues must be negotiated. This is a best effort request as it
366 * is possible the PF does not have enough queues left to support the request.
367 * If the PF cannot support the number requested it will respond with the
368 * maximum number it is able to support. If the request is successful, PF will
369 * then reset the VF to institute required changes.
370 */
371
372 /* VF resource request */
373 struct virtchnl_vf_res_request {
374 u16 num_queue_pairs;
375 };
376
377 /* VIRTCHNL_OP_CONFIG_IRQ_MAP
378 * VF uses this message to map vectors to queues.
379 * The rxq_map and txq_map fields are bitmaps used to indicate which queues
380 * are to be associated with the specified vector.
381 * The "other" causes are always mapped to vector 0. The VF may not request
382 * that vector 0 be used for traffic.
383 * PF configures interrupt mapping and returns status.
384 * NOTE: due to hardware requirements, all active queues (both TX and RX)
385 * should be mapped to interrupts, even if the driver intends to operate
386 * only in polling mode. In this case the interrupt may be disabled, but
387 * the ITR timer will still run to trigger writebacks.
388 */
389 struct virtchnl_vector_map {
390 u16 vsi_id;
391 u16 vector_id;
392 u16 rxq_map;
393 u16 txq_map;
394 u16 rxitr_idx;
395 u16 txitr_idx;
396 };
397
398 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);
399
400 struct virtchnl_irq_map_info {
401 u16 num_vectors;
402 struct virtchnl_vector_map vecmap[];
403 };
404
405 VIRTCHNL_CHECK_STRUCT_LEN(2, virtchnl_irq_map_info);
406 #define virtchnl_irq_map_info_LEGACY_SIZEOF 14
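/* Illustrative sketch, not part of this header: mapping Tx/Rx queue 0 to
 * vector 1 (vector 0 is reserved for the "other" causes). rxq_map/txq_map
 * are queue bitmaps, so BIT(queue_id) selects a queue; vf_send_pf_msg() is a
 * hypothetical send helper.
 *
 *	struct virtchnl_irq_map_info *vimi;
 *	size_t len = virtchnl_struct_size(vimi, vecmap, 1);
 *
 *	vimi = kzalloc(len, GFP_KERNEL);
 *	vimi->num_vectors = 1;
 *	vimi->vecmap[0].vsi_id = vsi_id;
 *	vimi->vecmap[0].vector_id = 1;
 *	vimi->vecmap[0].rxq_map = BIT(0);
 *	vimi->vecmap[0].txq_map = BIT(0);
 *	vf_send_pf_msg(VIRTCHNL_OP_CONFIG_IRQ_MAP, (u8 *)vimi, len);
 */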
407
408 /* VIRTCHNL_OP_ENABLE_QUEUES
409 * VIRTCHNL_OP_DISABLE_QUEUES
410 * VF sends these messages to enable or disable TX/RX queue pairs.
411 * The queues fields are bitmaps indicating which queues to act upon.
412 * (Currently, we only support 16 queues per VF, but we make the field
413 * u32 to allow for expansion.)
414 * PF performs requested action and returns status.
415 * NOTE: The VF is not required to enable/disable all queues in a single
416 * request. It may send multiple messages.
417 * PF drivers must correctly handle all VF requests.
418 */
419 struct virtchnl_queue_select {
420 u16 vsi_id;
421 u16 pad;
422 u32 rx_queues;
423 u32 tx_queues;
424 };
425
426 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
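/* Illustrative sketch, not part of this header: enabling the first four
 * Tx/Rx queue pairs of a VSI (bit i of each bitmap selects queue i;
 * vf_send_pf_msg() is a hypothetical send helper):
 *
 *	struct virtchnl_queue_select vqs = {
 *		.vsi_id = vsi_id,
 *		.rx_queues = GENMASK(3, 0),
 *		.tx_queues = GENMASK(3, 0),
 *	};
 *
 *	vf_send_pf_msg(VIRTCHNL_OP_ENABLE_QUEUES, (u8 *)&vqs, sizeof(vqs));
 */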
427
428 /* VIRTCHNL_OP_ADD_ETH_ADDR
429 * VF sends this message in order to add one or more unicast or multicast
430 * address filters for the specified VSI.
431 * PF adds the filters and returns status.
432 */
433
434 /* VIRTCHNL_OP_DEL_ETH_ADDR
435 * VF sends this message in order to remove one or more unicast or multicast
436 * filters for the specified VSI.
437 * PF removes the filters and returns status.
438 */
439
440 /* VIRTCHNL_ETHER_ADDR_LEGACY
441 * Prior to adding the @type member to virtchnl_ether_addr, there were 2 pad
442 * bytes. Moving forward all VF drivers should not set type to
443 * VIRTCHNL_ETHER_ADDR_LEGACY. This is only here to not break previous/legacy
444 * behavior. The control plane function (i.e. PF) can use a best effort method
445 * of tracking the primary/device unicast in this case, but there is no
446 * guarantee and functionality depends on the implementation of the PF.
447 */
448
449 /* VIRTCHNL_ETHER_ADDR_PRIMARY
450 * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_PRIMARY for the
451 * primary/device unicast MAC address filter for VIRTCHNL_OP_ADD_ETH_ADDR and
452 * VIRTCHNL_OP_DEL_ETH_ADDR. This allows for the underlying control plane
453 * function (i.e. PF) to accurately track and use this MAC address for
454 * displaying on the host and for VM/function reset.
455 */
456
457 /* VIRTCHNL_ETHER_ADDR_EXTRA
458 * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_EXTRA for any extra
459 * unicast and/or multicast filters that are being added/deleted via
460 * VIRTCHNL_OP_DEL_ETH_ADDR/VIRTCHNL_OP_ADD_ETH_ADDR respectively.
461 */
462 struct virtchnl_ether_addr {
463 u8 addr[ETH_ALEN];
464 u8 type;
465 #define VIRTCHNL_ETHER_ADDR_LEGACY 0
466 #define VIRTCHNL_ETHER_ADDR_PRIMARY 1
467 #define VIRTCHNL_ETHER_ADDR_EXTRA 2
468 #define VIRTCHNL_ETHER_ADDR_TYPE_MASK 3 /* first two bits of type are valid */
469 u8 pad;
470 };
471
472 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
473
474 struct virtchnl_ether_addr_list {
475 u16 vsi_id;
476 u16 num_elements;
477 struct virtchnl_ether_addr list[];
478 };
479
480 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_ether_addr_list);
481 #define virtchnl_ether_addr_list_LEGACY_SIZEOF 12
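/* Illustrative sketch, not part of this header: adding the VF's primary
 * unicast MAC address. ether_addr_copy() is from <linux/etherdevice.h>,
 * 'netdev' is the VF's net_device and vf_send_pf_msg() is a hypothetical
 * send helper.
 *
 *	struct virtchnl_ether_addr_list *veal;
 *	size_t len = virtchnl_struct_size(veal, list, 1);
 *
 *	veal = kzalloc(len, GFP_KERNEL);
 *	veal->vsi_id = vsi_id;
 *	veal->num_elements = 1;
 *	ether_addr_copy(veal->list[0].addr, netdev->dev_addr);
 *	veal->list[0].type = VIRTCHNL_ETHER_ADDR_PRIMARY;
 *	vf_send_pf_msg(VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)veal, len);
 */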
482
483 /* VIRTCHNL_OP_ADD_VLAN
484 * VF sends this message to add one or more VLAN tag filters for receives.
485 * PF adds the filters and returns status.
486 * If a port VLAN is configured by the PF, this operation will return an
487 * error to the VF.
488 */
489
490 /* VIRTCHNL_OP_DEL_VLAN
491 * VF sends this message to remove one or more VLAN tag filters for receives.
492 * PF removes the filters and returns status.
493 * If a port VLAN is configured by the PF, this operation will return an
494 * error to the VF.
495 */
496
497 struct virtchnl_vlan_filter_list {
498 u16 vsi_id;
499 u16 num_elements;
500 u16 vlan_id[];
501 };
502
503 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_vlan_filter_list);
504 #define virtchnl_vlan_filter_list_LEGACY_SIZEOF 6
505
506 /* This enum is used for all of the VIRTCHNL_VF_OFFLOAD_VLAN_V2_CAPS related
507 * structures and opcodes.
508 *
509 * VIRTCHNL_VLAN_UNSUPPORTED - This field is not supported and if a VF driver
510 * populates it the PF should return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED.
511 *
512 * VIRTCHNL_VLAN_ETHERTYPE_8100 - This field supports 0x8100 ethertype.
513 * VIRTCHNL_VLAN_ETHERTYPE_88A8 - This field supports 0x88A8 ethertype.
514 * VIRTCHNL_VLAN_ETHERTYPE_9100 - This field supports 0x9100 ethertype.
515 *
516 * VIRTCHNL_VLAN_ETHERTYPE_AND - Used when multiple ethertypes can be supported
517 * by the PF concurrently. For example, if the PF can support
518 * VIRTCHNL_VLAN_ETHERTYPE_8100 AND VIRTCHNL_VLAN_ETHERTYPE_88A8 filters it
519 * would OR the following bits:
520 *
521 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
522 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
523 * VIRTCHNL_VLAN_ETHERTYPE_AND;
524 *
525 * The VF would interpret this as VLAN filtering can be supported on both 0x8100
526 * and 0x88A8 VLAN ethertypes.
527 *
528 * VIRTCHNL_VLAN_ETHERTYPE_XOR - Used when only a single ethertype can be supported
529 * by the PF concurrently. For example if the PF can support
530 * VIRTCHNL_VLAN_ETHERTYPE_8100 XOR VIRTCHNL_VLAN_ETHERTYPE_88A8 stripping
531 * offload it would OR the following bits:
532 *
533 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
534 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
535 * VIRTCHNL_VLAN_ETHERTYPE_XOR;
536 *
537 * The VF would interpret this as VLAN stripping can be supported on either
538 * 0x8100 or 0x88a8 VLAN ethertypes. So when requesting VLAN stripping via
539 * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 the specified ethertype will override
540 * the previously set value.
541 *
542 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 - Used to tell the VF to insert and/or
543 * strip the VLAN tag using the L2TAG1 field of the Tx/Rx descriptors.
544 *
545 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 - Used to tell the VF to insert hardware
546 * offloaded VLAN tags using the L2TAG2 field of the Tx descriptor.
547 *
548 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 - Used to tell the VF to strip hardware
549 * offloaded VLAN tags using the L2TAG2_2 field of the Rx descriptor.
550 *
551 * VIRTCHNL_VLAN_PRIO - This field supports VLAN priority bits. This is used for
552 * VLAN filtering if the underlying PF supports it.
553 *
554 * VIRTCHNL_VLAN_TOGGLE - This field is used to say whether a
555 * certain VLAN capability can be toggled. For example if the underlying PF/CP
556 * allows the VF to toggle VLAN filtering, stripping, and/or insertion it should
557 * set this bit along with the supported ethertypes.
558 */
559 enum virtchnl_vlan_support {
560 VIRTCHNL_VLAN_UNSUPPORTED = 0,
561 VIRTCHNL_VLAN_ETHERTYPE_8100 = BIT(0),
562 VIRTCHNL_VLAN_ETHERTYPE_88A8 = BIT(1),
563 VIRTCHNL_VLAN_ETHERTYPE_9100 = BIT(2),
564 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 = BIT(8),
565 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 = BIT(9),
566 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 = BIT(10),
567 VIRTCHNL_VLAN_PRIO = BIT(24),
568 VIRTCHNL_VLAN_FILTER_MASK = BIT(28),
569 VIRTCHNL_VLAN_ETHERTYPE_AND = BIT(29),
570 VIRTCHNL_VLAN_ETHERTYPE_XOR = BIT(30),
571 VIRTCHNL_VLAN_TOGGLE = BIT(31),
572 };
573
574 /* This structure is used as part of the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
575 * for filtering, insertion, and stripping capabilities.
576 *
577 * If only outer capabilities are supported (for filtering, insertion, and/or
578 * stripping) then this refers to the outer most or single VLAN from the VF's
579 * perspective.
580 *
581 * If only inner capabilities are supported (for filtering, insertion, and/or
582 * stripping) then this refers to the outer most or single VLAN from the VF's
583 * perspective. Functionally this is the same as if only outer capabilities are
584 * supported. The VF driver is just forced to use the inner fields when
585 * adding/deleting filters and enabling/disabling offloads (if supported).
586 *
587 * If both outer and inner capabilities are supported (for filtering, insertion,
588 * and/or stripping) then outer refers to the outer most or single VLAN and
589 * inner refers to the second VLAN, if it exists, in the packet.
590 *
591 * There is no support for tunneled VLAN offloads, so outer or inner are never
592 * referring to a tunneled packet from the VF's perspective.
593 */
594 struct virtchnl_vlan_supported_caps {
595 u32 outer;
596 u32 inner;
597 };
598
599 /* The PF populates these fields based on the supported VLAN filtering. If a
600 * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
601 * reject any VIRTCHNL_OP_ADD_VLAN_V2 or VIRTCHNL_OP_DEL_VLAN_V2 messages using
602 * the unsupported fields.
603 *
604 * Also, a VF is only allowed to toggle its VLAN filtering setting if the
605 * VIRTCHNL_VLAN_TOGGLE bit is set.
606 *
607 * The ethertype(s) specified in the ethertype_init field are the ethertypes
608 * enabled for VLAN filtering. VLAN filtering in this case refers to the outer
609 * most VLAN from the VF's perspective. If both inner and outer filtering are
610 * allowed then ethertype_init only refers to the outer most VLAN, as the only
611 * VLAN ethertype supported for inner VLAN filtering is
612 * VIRTCHNL_VLAN_ETHERTYPE_8100. By default, inner VLAN filtering is disabled
613 * when both inner and outer filtering are allowed.
614 *
615 * The max_filters field tells the VF how many VLAN filters it's allowed to have
616 * at any one time. If it exceeds this amount and tries to add another filter,
617 * then the request will be rejected by the PF. To prevent failures, the VF
618 * should keep track of how many VLAN filters it has added and not attempt to
619 * add more than max_filters.
620 */
621 struct virtchnl_vlan_filtering_caps {
622 struct virtchnl_vlan_supported_caps filtering_support;
623 u32 ethertype_init;
624 u16 max_filters;
625 u8 pad[2];
626 };
627
628 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_filtering_caps);
629
630 /* This enum is used for the virtchnl_vlan_offload_caps structure to specify
631 * if the PF supports a different ethertype for stripping and insertion.
632 *
633 * VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION - The ethertype(s) specified
634 * for stripping affect the ethertype(s) specified for insertion and vice versa
635 * as well. If the VF tries to configure VLAN stripping via
636 * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 with VIRTCHNL_VLAN_ETHERTYPE_8100 then
637 * that will be the ethertype for both stripping and insertion.
638 *
639 * VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED - The ethertype(s) specified for
640 * stripping do not affect the ethertype(s) specified for insertion and vice
641 * versa.
642 */
643 enum virtchnl_vlan_ethertype_match {
644 VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION = 0,
645 VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED = 1,
646 };
647
648 /* The PF populates these fields based on the supported VLAN offloads. If a
649 * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
650 * reject any VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 or
651 * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 messages using the unsupported fields.
652 *
653 * Also, a VF is only allowed to toggle its VLAN offload setting if the
654 * VIRTCHNL_VLAN_TOGGLE bit is set.
655 *
656 * The VF driver needs to be aware of how the tags are stripped by hardware and
657 * inserted by the VF driver based on the level of offload support. The PF will
658 * populate these fields based on where the VLAN tags are expected to be
659 * offloaded via the VIRTCHNL_VLAN_TAG_LOCATION_* bits. The VF will need to
660 * interpret these fields. See the definition of the
661 * VIRTCHNL_VLAN_TAG_LOCATION_* bits above the virtchnl_vlan_support
662 * enumeration.
663 */
664 struct virtchnl_vlan_offload_caps {
665 struct virtchnl_vlan_supported_caps stripping_support;
666 struct virtchnl_vlan_supported_caps insertion_support;
667 u32 ethertype_init;
668 u8 ethertype_match;
669 u8 pad[3];
670 };
671
672 VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_vlan_offload_caps);
673
674 /* VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
675 * VF sends this message to determine its VLAN capabilities.
676 *
677 * PF will mark which capabilities it supports based on hardware support and
678 * current configuration. For example, if a port VLAN is configured the PF will
679 * not allow outer VLAN filtering, stripping, or insertion to be configured so
680 * it will block these features from the VF.
681 *
682 * The VF will need to cross-reference its capabilities with the PF's
683 * capabilities in the response message from the PF to determine the VLAN
684 * support.
685 */
686 struct virtchnl_vlan_caps {
687 struct virtchnl_vlan_filtering_caps filtering;
688 struct virtchnl_vlan_offload_caps offloads;
689 };
690
691 VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_vlan_caps);
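/* Illustrative sketch, not part of this header: given 'caps' returned by
 * VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS, a VF can check whether it may toggle
 * outer VLAN filtering on 0x8100 and 0x88a8 tags at the same time:
 *
 *	u32 outer = caps.filtering.filtering_support.outer;
 *
 *	if ((outer & VIRTCHNL_VLAN_TOGGLE) &&
 *	    (outer & VIRTCHNL_VLAN_ETHERTYPE_AND) &&
 *	    (outer & VIRTCHNL_VLAN_ETHERTYPE_8100) &&
 *	    (outer & VIRTCHNL_VLAN_ETHERTYPE_88A8)) {
 *		// both ethertypes may be filtered concurrently and the VF
 *		// is allowed to toggle the feature
 *	}
 */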
692
693 struct virtchnl_vlan {
694 u16 tci; /* tci[15:13] = PCP and tci[11:0] = VID */
695 u16 tci_mask; /* only valid if VIRTCHNL_VLAN_FILTER_MASK set in
696 * filtering caps
697 */
698 u16 tpid; /* 0x8100, 0x88a8, etc. and only type(s) set in
699 * filtering caps. Note that tpid here does not refer to
700 * VIRTCHNL_VLAN_ETHERTYPE_*, but it refers to the
701 * actual 2-byte VLAN TPID
702 */
703 u8 pad[2];
704 };
705
706 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vlan);
707
708 struct virtchnl_vlan_filter {
709 struct virtchnl_vlan inner;
710 struct virtchnl_vlan outer;
711 u8 pad[16];
712 };
713
714 VIRTCHNL_CHECK_STRUCT_LEN(32, virtchnl_vlan_filter);
715
716 /* VIRTCHNL_OP_ADD_VLAN_V2
717 * VIRTCHNL_OP_DEL_VLAN_V2
718 *
719 * VF sends these messages to add/del one or more VLAN tag filters for Rx
720 * traffic.
721 *
722 * The PF attempts to add the filters and returns status.
723 *
724 * The VF should only ever attempt to add/del virtchnl_vlan_filter(s) using the
725 * supported fields negotiated via VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS.
726 */
727 struct virtchnl_vlan_filter_list_v2 {
728 u16 vport_id;
729 u16 num_elements;
730 u8 pad[4];
731 struct virtchnl_vlan_filter filters[];
732 };
733
734 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vlan_filter_list_v2);
735 #define virtchnl_vlan_filter_list_v2_LEGACY_SIZEOF 40
736
737 /* VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2
738 * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
739 * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
740 * VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
741 *
742 * VF sends this message to enable or disable VLAN stripping or insertion. It
743 * also needs to specify an ethertype. The VF knows which VLAN ethertypes are
744 * allowed and whether or not it's allowed to enable/disable the specific
745 * offload via the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS message. The VF needs to
746 * parse the virtchnl_vlan_caps.offloads fields to determine which offload
747 * messages are allowed.
748 *
749 * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
750 * following manner the VF will be allowed to enable and/or disable 0x8100 inner
751 * VLAN insertion and/or stripping via the opcodes listed above. Inner in this
752 * case means the outer most or single VLAN from the VF's perspective. This is
753 * because no outer offloads are supported. See the comments above the
754 * virtchnl_vlan_supported_caps structure for more details.
755 *
756 * virtchnl_vlan_caps.offloads.stripping_support.inner =
757 * VIRTCHNL_VLAN_TOGGLE |
758 * VIRTCHNL_VLAN_ETHERTYPE_8100;
759 *
760 * virtchnl_vlan_caps.offloads.insertion_support.inner =
761 * VIRTCHNL_VLAN_TOGGLE |
762 * VIRTCHNL_VLAN_ETHERTYPE_8100;
763 *
764 * In order to enable inner (again note that in this case inner is the outer
765 * most or single VLAN from the VF's perspective) VLAN stripping for 0x8100
766 * VLANs, the VF would populate the virtchnl_vlan_setting structure in the
767 * following manner and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
768 *
769 * virtchnl_vlan_setting.inner_ethertype_setting =
770 * VIRTCHNL_VLAN_ETHERTYPE_8100;
771 *
772 * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
773 * initialization.
774 *
775 * The reason that VLAN TPID(s) are not being used for the
776 * outer_ethertype_setting and inner_ethertype_setting fields is because it's
777 * possible a device could support VLAN insertion and/or stripping offload on
778 * multiple ethertypes concurrently, so this method allows a VF to request
779 * multiple ethertypes in one message using the virtchnl_vlan_support
780 * enumeration.
781 *
782 * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
783 * following manner the VF will be allowed to enable 0x8100 and 0x88a8 outer
784 * VLAN insertion and stripping simultaneously. The
785 * virtchnl_vlan_caps.offloads.ethertype_match field will also have to be
786 * populated based on what the PF can support.
787 *
788 * virtchnl_vlan_caps.offloads.stripping_support.outer =
789 * VIRTCHNL_VLAN_TOGGLE |
790 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
791 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
792 * VIRTCHNL_VLAN_ETHERTYPE_AND;
793 *
794 * virtchnl_vlan_caps.offloads.insertion_support.outer =
795 * VIRTCHNL_VLAN_TOGGLE |
796 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
797 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
798 * VIRTCHNL_VLAN_ETHERTYPE_AND;
799 *
800 * In order to enable outer VLAN stripping for 0x8100 and 0x88a8 VLANs, the VF
801 * would populate the virtchnl_vlan_setting structure in the following manner
802 * and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
803 *
804 * virtchnl_vlan_setting.outer_ethertype_setting =
805 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
806 * VIRTCHNL_VLAN_ETHERTYPE_88A8;
807 *
808 * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
809 * initialization.
810 *
811 * There is also the case where a PF and the underlying hardware can support
812 * VLAN offloads on multiple ethertypes, but not concurrently. For example, if
813 * the PF populates the virtchnl_vlan_caps.offloads in the following manner the
814 * VF will be allowed to enable and/or disable 0x8100 XOR 0x88a8 outer VLAN
815 * offloads. The ethertypes must match for stripping and insertion.
816 *
817 * virtchnl_vlan_caps.offloads.stripping_support.outer =
818 * VIRTCHNL_VLAN_TOGGLE |
819 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
820 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
821 * VIRTCHNL_VLAN_ETHERTYPE_XOR;
822 *
823 * virtchnl_vlan_caps.offloads.insertion_support.outer =
824 * VIRTCHNL_VLAN_TOGGLE |
825 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
826 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
827 * VIRTCHNL_VLAN_ETHERTYPE_XOR;
828 *
829 * virtchnl_vlan_caps.offloads.ethertype_match =
830 * VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
831 *
832 * In order to enable outer VLAN stripping for 0x88a8 VLANs, the VF would
833 * populate the virtchnl_vlan_setting structure in the following manner and send
834 * the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2. Also, this will change the
835 * ethertype for VLAN insertion if it's enabled. So, for completeness, a
836 * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 with the same ethertype should be sent.
837 *
838 * virtchnl_vlan_setting.outer_ethertype_setting = VIRTCHNL_VLAN_ETHERTYPE_88A8;
839 *
840 * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
841 * initialization.
842 */
843 struct virtchnl_vlan_setting {
844 u32 outer_ethertype_setting;
845 u32 inner_ethertype_setting;
846 u16 vport_id;
847 u8 pad[6];
848 };
849
850 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_setting);
851
852 /* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
853 * VF sends VSI id and flags.
854 * PF returns status code in retval.
855 * Note: we assume that broadcast accept mode is always enabled.
856 */
857 struct virtchnl_promisc_info {
858 u16 vsi_id;
859 u16 flags;
860 };
861
862 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);
863
864 #define FLAG_VF_UNICAST_PROMISC 0x00000001
865 #define FLAG_VF_MULTICAST_PROMISC 0x00000002
866
867 /* VIRTCHNL_OP_GET_STATS
868 * VF sends this message to request stats for the selected VSI. VF uses
869 * the virtchnl_queue_select struct to specify the VSI. The queue_id
870 * the virtchnl_queue_select struct to specify the VSI. The queue
871 * bitmap fields are ignored by the PF.
872 * PF replies with struct eth_stats in an external buffer.
873 */
874
875 /* VIRTCHNL_OP_CONFIG_RSS_KEY
876 * VIRTCHNL_OP_CONFIG_RSS_LUT
877 * VF sends these messages to configure RSS. Only supported if both PF
878 * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
879 * configuration negotiation. If this is the case, then the RSS fields in
880 * the VF resource struct are valid.
881 * Both the key and LUT are initialized to 0 by the PF, meaning that
882 * RSS is effectively disabled until set up by the VF.
883 */
884 struct virtchnl_rss_key {
885 u16 vsi_id;
886 u16 key_len;
887 u8 key[]; /* RSS hash key, packed bytes */
888 };
889
890 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rss_key);
891 #define virtchnl_rss_key_LEGACY_SIZEOF 6
892
893 struct virtchnl_rss_lut {
894 u16 vsi_id;
895 u16 lut_entries;
896 u8 lut[]; /* RSS lookup table */
897 };
898
899 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rss_lut);
900 #define virtchnl_rss_lut_LEGACY_SIZEOF 6
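/* Illustrative sketch, not part of this header: programming the RSS hash key.
 * rss_key_size comes from the virtchnl_vf_resource reply, 'seed' is a
 * driver-provided key buffer and vf_send_pf_msg() is a hypothetical helper.
 *
 *	struct virtchnl_rss_key *vrk;
 *	size_t len = virtchnl_struct_size(vrk, key, rss_key_size);
 *
 *	vrk = kzalloc(len, GFP_KERNEL);
 *	vrk->vsi_id = vsi_id;
 *	vrk->key_len = rss_key_size;
 *	memcpy(vrk->key, seed, rss_key_size);
 *	vf_send_pf_msg(VIRTCHNL_OP_CONFIG_RSS_KEY, (u8 *)vrk, len);
 *
 * VIRTCHNL_OP_CONFIG_RSS_LUT follows the same pattern using virtchnl_rss_lut,
 * lut_entries and the lut[] table.
 */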
901
902 /* VIRTCHNL_OP_GET_RSS_HENA_CAPS
903 * VIRTCHNL_OP_SET_RSS_HENA
904 * VF sends these messages to get and set the hash filter enable bits for RSS.
905 * By default, the PF sets these to all possible traffic types that the
906 * hardware supports. The VF can query this value if it wants to change the
907 * traffic types that are hashed by the hardware.
908 */
909 struct virtchnl_rss_hena {
910 u64 hena;
911 };
912
913 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
914
915 /* VIRTCHNL_OP_ENABLE_CHANNELS
916 * VIRTCHNL_OP_DISABLE_CHANNELS
917 * VF sends these messages to enable or disable channels based on
918 * the user specified queue count and queue offset for each traffic class.
919 * This struct encompasses all the information that the PF needs from
920 * VF to create a channel.
921 */
922 struct virtchnl_channel_info {
923 u16 count; /* number of queues in a channel */
924 u16 offset; /* queues in a channel start from 'offset' */
925 u32 pad;
926 u64 max_tx_rate;
927 };
928
929 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info);
930
931 struct virtchnl_tc_info {
932 u32 num_tc;
933 u32 pad;
934 struct virtchnl_channel_info list[];
935 };
936
937 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_tc_info);
938 #define virtchnl_tc_info_LEGACY_SIZEOF 24
939
940 /* VIRTCHNL_OP_ADD_CLOUD_FILTER
941 * VIRTCHNL_OP_DEL_CLOUD_FILTER
942 * VF sends these messages to add or delete a cloud filter based on the
943 * user specified match and action filters. These structures encompass
944 * all the information that the PF needs from the VF to add/delete a
945 * cloud filter.
946 */
947
948 struct virtchnl_l4_spec {
949 u8 src_mac[ETH_ALEN];
950 u8 dst_mac[ETH_ALEN];
951 __be16 vlan_id;
952 __be16 pad; /* reserved for future use */
953 __be32 src_ip[4];
954 __be32 dst_ip[4];
955 __be16 src_port;
956 __be16 dst_port;
957 };
958
959 VIRTCHNL_CHECK_STRUCT_LEN(52, virtchnl_l4_spec);
960
961 union virtchnl_flow_spec {
962 struct virtchnl_l4_spec tcp_spec;
963 u8 buffer[128]; /* reserved for future use */
964 };
965
966 VIRTCHNL_CHECK_UNION_LEN(128, virtchnl_flow_spec);
967
968 enum virtchnl_action {
969 /* action types */
970 VIRTCHNL_ACTION_DROP = 0,
971 VIRTCHNL_ACTION_TC_REDIRECT,
972 VIRTCHNL_ACTION_PASSTHRU,
973 VIRTCHNL_ACTION_QUEUE,
974 VIRTCHNL_ACTION_Q_REGION,
975 VIRTCHNL_ACTION_MARK,
976 VIRTCHNL_ACTION_COUNT,
977 };
978
979 enum virtchnl_flow_type {
980 /* flow types */
981 VIRTCHNL_TCP_V4_FLOW = 0,
982 VIRTCHNL_TCP_V6_FLOW,
983 };
984
985 struct virtchnl_filter {
986 union virtchnl_flow_spec data;
987 union virtchnl_flow_spec mask;
988
989 /* see enum virtchnl_flow_type */
990 s32 flow_type;
991
992 /* see enum virtchnl_action */
993 s32 action;
994 u32 action_meta;
995 u8 field_flags;
996 u8 pad[3];
997 };
998
999 VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
1000
1001 struct virtchnl_supported_rxdids {
1002 u64 supported_rxdids;
1003 };
1004
1005 /* VIRTCHNL_OP_EVENT
1006 * PF sends this message to inform the VF driver of events that may affect it.
1007 * No direct response is expected from the VF, though it may generate other
1008 * messages in response to this one.
1009 */
1010 enum virtchnl_event_codes {
1011 VIRTCHNL_EVENT_UNKNOWN = 0,
1012 VIRTCHNL_EVENT_LINK_CHANGE,
1013 VIRTCHNL_EVENT_RESET_IMPENDING,
1014 VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
1015 };
1016
1017 #define PF_EVENT_SEVERITY_INFO 0
1018 #define PF_EVENT_SEVERITY_CERTAIN_DOOM 255
1019
1020 struct virtchnl_pf_event {
1021 /* see enum virtchnl_event_codes */
1022 s32 event;
1023 union {
1024 /* If the PF driver does not support the new speed reporting
1025 * capabilities then use link_event else use link_event_adv to
1026 * get the speed and link information. The ability to understand
1027 * new speeds is indicated by setting the capability flag
1028 * VIRTCHNL_VF_CAP_ADV_LINK_SPEED in the vf_cap_flags field of the
1029 * virtchnl_vf_resource struct and can be used to determine
1030 * which link event struct to use below.
1031 */
1032 struct {
1033 enum virtchnl_link_speed link_speed;
1034 bool link_status;
1035 u8 pad[3];
1036 } link_event;
1037 struct {
1038 /* link_speed provided in Mbps */
1039 u32 link_speed;
1040 u8 link_status;
1041 u8 pad[3];
1042 } link_event_adv;
1043 } event_data;
1044
1045 s32 severity;
1046 };
1047
1048 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
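/* Illustrative sketch, not part of this header: handling a link change event
 * based on whether VIRTCHNL_VF_CAP_ADV_LINK_SPEED was negotiated ('vpe' points
 * to the received virtchnl_pf_event):
 *
 *	if (vpe->event == VIRTCHNL_EVENT_LINK_CHANGE) {
 *		if (vf_cap_flags & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
 *			link_up = vpe->event_data.link_event_adv.link_status;
 *			speed_mbps = vpe->event_data.link_event_adv.link_speed;
 *		} else {
 *			link_up = vpe->event_data.link_event.link_status;
 *			// link_speed is an enum virtchnl_link_speed bit here
 *			speed_bit = vpe->event_data.link_event.link_speed;
 *		}
 *	}
 */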
1049
1050 /* used to specify if a ceq_idx or aeq_idx is invalid */
1051 #define VIRTCHNL_RDMA_INVALID_QUEUE_IDX 0xFFFF
1052 /* VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP
1053 * VF uses this message to request PF to map RDMA vectors to RDMA queues.
1054 * The request for this originates from the VF RDMA driver through
1055 * a client interface between VF LAN and VF RDMA driver.
1056 * A vector could have both an AEQ and a CEQ attached to it, although
1057 * there is a single AEQ per VF RDMA instance, in which case
1058 * most vectors will have a VIRTCHNL_RDMA_INVALID_QUEUE_IDX for the aeq and a
1059 * valid idx for the ceq. There will never be a case where multiple CEQs are
1060 * attached to a single vector.
1061 * PF configures interrupt mapping and returns status.
1062 */
1063
1064 struct virtchnl_rdma_qv_info {
1065 u32 v_idx; /* msix_vector */
1066 u16 ceq_idx; /* set to VIRTCHNL_RDMA_INVALID_QUEUE_IDX if invalid */
1067 u16 aeq_idx; /* set to VIRTCHNL_RDMA_INVALID_QUEUE_IDX if invalid */
1068 u8 itr_idx;
1069 u8 pad[3];
1070 };
1071
1072 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_rdma_qv_info);
1073
1074 struct virtchnl_rdma_qvlist_info {
1075 u32 num_vectors;
1076 struct virtchnl_rdma_qv_info qv_info[];
1077 };
1078
1079 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rdma_qvlist_info);
1080 #define virtchnl_rdma_qvlist_info_LEGACY_SIZEOF 16
1081
1082 /* VF reset states - these are written into the RSTAT register:
1083 * VFGEN_RSTAT on the VF
1084 * When the PF initiates a reset, it writes 0
1085 * When the reset is complete, it writes 1
1086 * When the PF detects that the VF has recovered, it writes 2
1087 * VF checks this register periodically to determine if a reset has occurred,
1088 * then polls it to know when the reset is complete.
1089 * If either the PF or VF reads the register while the hardware
1090 * is in a reset state, it will return DEADBEEF, which, when masked,
1091 * will result in 3.
1092 */
1093 enum virtchnl_vfr_states {
1094 VIRTCHNL_VFR_INPROGRESS = 0,
1095 VIRTCHNL_VFR_COMPLETED,
1096 VIRTCHNL_VFR_VFACTIVE,
1097 };
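/* Illustrative sketch, not part of this header: after sending
 * VIRTCHNL_OP_RESET_VF the VF polls its reset status register until the reset
 * completes. read_vfgen_rstat() is a hypothetical register accessor and the
 * 0x3 mask is an assumption; the real register layout is device specific.
 *
 *	vf_send_pf_msg(VIRTCHNL_OP_RESET_VF, NULL, 0);
 *	do {
 *		usleep_range(1000, 2000);
 *		rstat = read_vfgen_rstat() & 0x3;
 *	} while (rstat != VIRTCHNL_VFR_COMPLETED &&
 *		 rstat != VIRTCHNL_VFR_VFACTIVE);
 */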
1098
1099 /* Type of RSS algorithm */
1100 enum virtchnl_rss_algorithm {
1101 VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC = 0,
1102 VIRTCHNL_RSS_ALG_R_ASYMMETRIC = 1,
1103 VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC = 2,
1104 VIRTCHNL_RSS_ALG_XOR_SYMMETRIC = 3,
1105 };
1106
1107 #define VIRTCHNL_MAX_NUM_PROTO_HDRS 32
1108 #define PROTO_HDR_SHIFT 5
1109 #define PROTO_HDR_FIELD_START(proto_hdr_type) ((proto_hdr_type) << PROTO_HDR_SHIFT)
1110 #define PROTO_HDR_FIELD_MASK ((1UL << PROTO_HDR_SHIFT) - 1)
1111
1112 /* VF drivers use these macros to configure each protocol header.
1113 * Specify which protocol headers and protocol header fields to use, based on
1114 * virtchnl_proto_hdr_type and virtchnl_proto_hdr_field.
1115 * @param hdr: a struct of virtchnl_proto_hdr
1116 * @param hdr_type: ETH/IPV4/TCP, etc
1117 * @param field: SRC/DST/TEID/SPI, etc
1118 */
1119 #define VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, field) \
1120 ((hdr)->field_selector |= BIT((field) & PROTO_HDR_FIELD_MASK))
1121 #define VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, field) \
1122 ((hdr)->field_selector &= ~BIT((field) & PROTO_HDR_FIELD_MASK))
1123 #define VIRTCHNL_TEST_PROTO_HDR_FIELD(hdr, val) \
1124 ((hdr)->field_selector & BIT((val) & PROTO_HDR_FIELD_MASK))
1125 #define VIRTCHNL_GET_PROTO_HDR_FIELD(hdr) ((hdr)->field_selector)
1126
1127 #define VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
1128 (VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, \
1129 VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
1130 #define VIRTCHNL_DEL_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
1131 (VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, \
1132 VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
1133
1134 #define VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, hdr_type) \
1135 ((hdr)->type = VIRTCHNL_PROTO_HDR_ ## hdr_type)
1136 #define VIRTCHNL_GET_PROTO_HDR_TYPE(hdr) \
1137 (((hdr)->type) >> PROTO_HDR_SHIFT)
1138 #define VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) \
1139 ((hdr)->type == ((s32)((val) >> PROTO_HDR_SHIFT)))
1140 #define VIRTCHNL_TEST_PROTO_HDR(hdr, val) \
1141 (VIRTCHNL_TEST_PROTO_HDR_TYPE((hdr), (val)) && \
1142 VIRTCHNL_TEST_PROTO_HDR_FIELD((hdr), (val)))
1143
1144 /* Protocol header type within a packet segment. A segment consists of one or
1145 * more protocol headers that make up a logical group of protocol headers. Each
1146 * logical group of protocol headers encapsulates or is encapsulated using/by
1147 * tunneling or encapsulation protocols for network virtualization.
1148 */
1149 enum virtchnl_proto_hdr_type {
1150 VIRTCHNL_PROTO_HDR_NONE,
1151 VIRTCHNL_PROTO_HDR_ETH,
1152 VIRTCHNL_PROTO_HDR_S_VLAN,
1153 VIRTCHNL_PROTO_HDR_C_VLAN,
1154 VIRTCHNL_PROTO_HDR_IPV4,
1155 VIRTCHNL_PROTO_HDR_IPV6,
1156 VIRTCHNL_PROTO_HDR_TCP,
1157 VIRTCHNL_PROTO_HDR_UDP,
1158 VIRTCHNL_PROTO_HDR_SCTP,
1159 VIRTCHNL_PROTO_HDR_GTPU_IP,
1160 VIRTCHNL_PROTO_HDR_GTPU_EH,
1161 VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
1162 VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
1163 VIRTCHNL_PROTO_HDR_PPPOE,
1164 VIRTCHNL_PROTO_HDR_L2TPV3,
1165 VIRTCHNL_PROTO_HDR_ESP,
1166 VIRTCHNL_PROTO_HDR_AH,
1167 VIRTCHNL_PROTO_HDR_PFCP,
1168 };
1169
1170 /* Protocol header field within a protocol header. */
1171 enum virtchnl_proto_hdr_field {
1172 /* ETHER */
1173 VIRTCHNL_PROTO_HDR_ETH_SRC =
1174 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ETH),
1175 VIRTCHNL_PROTO_HDR_ETH_DST,
1176 VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE,
1177 /* S-VLAN */
1178 VIRTCHNL_PROTO_HDR_S_VLAN_ID =
1179 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_S_VLAN),
1180 /* C-VLAN */
1181 VIRTCHNL_PROTO_HDR_C_VLAN_ID =
1182 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_C_VLAN),
1183 /* IPV4 */
1184 VIRTCHNL_PROTO_HDR_IPV4_SRC =
1185 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4),
1186 VIRTCHNL_PROTO_HDR_IPV4_DST,
1187 VIRTCHNL_PROTO_HDR_IPV4_DSCP,
1188 VIRTCHNL_PROTO_HDR_IPV4_TTL,
1189 VIRTCHNL_PROTO_HDR_IPV4_PROT,
1190 /* IPV6 */
1191 VIRTCHNL_PROTO_HDR_IPV6_SRC =
1192 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6),
1193 VIRTCHNL_PROTO_HDR_IPV6_DST,
1194 VIRTCHNL_PROTO_HDR_IPV6_TC,
1195 VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT,
1196 VIRTCHNL_PROTO_HDR_IPV6_PROT,
1197 /* TCP */
1198 VIRTCHNL_PROTO_HDR_TCP_SRC_PORT =
1199 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_TCP),
1200 VIRTCHNL_PROTO_HDR_TCP_DST_PORT,
1201 /* UDP */
1202 VIRTCHNL_PROTO_HDR_UDP_SRC_PORT =
1203 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_UDP),
1204 VIRTCHNL_PROTO_HDR_UDP_DST_PORT,
1205 /* SCTP */
1206 VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT =
1207 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_SCTP),
1208 VIRTCHNL_PROTO_HDR_SCTP_DST_PORT,
1209 /* GTPU_IP */
1210 VIRTCHNL_PROTO_HDR_GTPU_IP_TEID =
1211 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_IP),
1212 /* GTPU_EH */
1213 VIRTCHNL_PROTO_HDR_GTPU_EH_PDU =
1214 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH),
1215 VIRTCHNL_PROTO_HDR_GTPU_EH_QFI,
1216 /* PPPOE */
1217 VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID =
1218 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PPPOE),
1219 /* L2TPV3 */
1220 VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID =
1221 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV3),
1222 /* ESP */
1223 VIRTCHNL_PROTO_HDR_ESP_SPI =
1224 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ESP),
1225 /* AH */
1226 VIRTCHNL_PROTO_HDR_AH_SPI =
1227 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_AH),
1228 /* PFCP */
1229 VIRTCHNL_PROTO_HDR_PFCP_S_FIELD =
1230 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PFCP),
1231 VIRTCHNL_PROTO_HDR_PFCP_SEID,
1232 };
1233
1234 struct virtchnl_proto_hdr {
1235 /* see enum virtchnl_proto_hdr_type */
1236 s32 type;
1237 u32 field_selector; /* a bit mask to select field for header type */
1238 u8 buffer[64];
1239 /**
1240 * binary buffer in network order for specific header type.
1241 * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, an IPv4
1242 * header is expected to be copied into the buffer.
1243 */
1244 };
1245
1246 VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr);
1247
1248 struct virtchnl_proto_hdrs {
1249 u8 tunnel_level;
1250 u8 pad[3];
1251 /**
1252 * Specify where the protocol headers start from.
1253 * 0 - from the outer layer
1254 * 1 - from the first inner layer
1255 * 2 - from the second inner layer
1256 * ....
1257 **/
1258 int count; /* number of proto layers, must be < VIRTCHNL_MAX_NUM_PROTO_HDRS */
1259 struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
1260 };
1261
1262 VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);
1263
1264 struct virtchnl_rss_cfg {
1265 struct virtchnl_proto_hdrs proto_hdrs; /* protocol headers */
1266
1267 /* see enum virtchnl_rss_algorithm; rss algorithm type */
1268 s32 rss_algorithm;
1269 u8 reserved[128]; /* reserve for future */
1270 };
1271
1272 VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg);
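/* Illustrative sketch, not part of this header: requesting symmetric Toeplitz
 * hashing on the IPv4 source and destination addresses of non-tunnelled
 * packets (vf_send_pf_msg() is a hypothetical send helper):
 *
 *	struct virtchnl_rss_cfg *cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
 *	struct virtchnl_proto_hdr *hdr = &cfg->proto_hdrs.proto_hdr[0];
 *
 *	cfg->proto_hdrs.tunnel_level = 0;	// start from the outer layer
 *	cfg->proto_hdrs.count = 1;
 *	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
 *	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
 *	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
 *	cfg->rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC;
 *	vf_send_pf_msg(VIRTCHNL_OP_ADD_RSS_CFG, (u8 *)cfg, sizeof(*cfg));
 */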
1273
1274 /* action configuration for FDIR */
1275 struct virtchnl_filter_action {
1276 /* see enum virtchnl_action type */
1277 s32 type;
1278 union {
1279 /* used for queue and qgroup action */
1280 struct {
1281 u16 index;
1282 u8 region;
1283 } queue;
1284 /* used for count action */
1285 struct {
1286 /* share counter ID with other flow rules */
1287 u8 shared;
1288 u32 id; /* counter ID */
1289 } count;
1290 /* used for mark action */
1291 u32 mark_id;
1292 u8 reserve[32];
1293 } act_conf;
1294 };
1295
1296 VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_filter_action);
1297
1298 #define VIRTCHNL_MAX_NUM_ACTIONS 8
1299
1300 struct virtchnl_filter_action_set {
1301 /* action number must be less than VIRTCHNL_MAX_NUM_ACTIONS */
1302 int count;
1303 struct virtchnl_filter_action actions[VIRTCHNL_MAX_NUM_ACTIONS];
1304 };
1305
1306 VIRTCHNL_CHECK_STRUCT_LEN(292, virtchnl_filter_action_set);
1307
1308 /* pattern and action for FDIR rule */
1309 struct virtchnl_fdir_rule {
1310 struct virtchnl_proto_hdrs proto_hdrs;
1311 struct virtchnl_filter_action_set action_set;
1312 };
1313
1314 VIRTCHNL_CHECK_STRUCT_LEN(2604, virtchnl_fdir_rule);
1315
1316 /* Status returned to VF after VF requests FDIR commands
1317 * VIRTCHNL_FDIR_SUCCESS
1318 * The VF's FDIR-related request was completed successfully by the PF.
1319 * The request can be OP_ADD/DEL/QUERY_FDIR_FILTER.
1320 *
1321 * VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE
1322 * The OP_ADD_FDIR_FILTER request failed because no hardware resource is available.
1323 *
1324 * VIRTCHNL_FDIR_FAILURE_RULE_EXIST
1325 * The OP_ADD_FDIR_FILTER request failed because the rule already exists.
1326 *
1327 * VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT
1328 * The OP_ADD_FDIR_FILTER request failed because it conflicts with an existing rule.
1329 *
1330 * VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST
1331 * The OP_DEL_FDIR_FILTER request failed because the rule does not exist.
1332 *
1333 * VIRTCHNL_FDIR_FAILURE_RULE_INVALID
1334 * The OP_ADD_FDIR_FILTER request failed because parameter validation failed
1335 * or the hardware does not support the rule.
1336 *
1337 * VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT
1338 * The OP_ADD/DEL_FDIR_FILTER request failed because rule programming
1339 * timed out.
1340 *
1341 * VIRTCHNL_FDIR_FAILURE_QUERY_INVALID
1342 * The OP_QUERY_FDIR_FILTER request failed because parameter validation failed,
1343 * for example, the VF queried the counter of a rule that has no counter action.
1344 */
1345 enum virtchnl_fdir_prgm_status {
1346 VIRTCHNL_FDIR_SUCCESS = 0,
1347 VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE,
1348 VIRTCHNL_FDIR_FAILURE_RULE_EXIST,
1349 VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT,
1350 VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST,
1351 VIRTCHNL_FDIR_FAILURE_RULE_INVALID,
1352 VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT,
1353 VIRTCHNL_FDIR_FAILURE_QUERY_INVALID,
1354 };
1355
1356 /* VIRTCHNL_OP_ADD_FDIR_FILTER
1357 * VF sends this request to PF by filling out vsi_id,
1358 * validate_only and rule_cfg. PF will return flow_id
1359 * if the request is successfully done and return add_status to VF.
1360 */
1361 struct virtchnl_fdir_add {
1362 u16 vsi_id; /* INPUT */
1363 /*
1364 * 1 for validating an FDIR rule, 0 for creating an FDIR rule.
1365 * Validate and create share one opcode: VIRTCHNL_OP_ADD_FDIR_FILTER.
1366 */
1367 u16 validate_only; /* INPUT */
1368 u32 flow_id; /* OUTPUT */
1369 struct virtchnl_fdir_rule rule_cfg; /* INPUT */
1370
1371 /* see enum virtchnl_fdir_prgm_status; OUTPUT */
1372 s32 status;
1373 };
1374
1375 VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_fdir_add);
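/* Illustrative sketch, not part of this header: adding a Flow Director rule
 * that steers TCP/IPv4 traffic with a given destination port to queue 3.
 * How the match value is encoded in each header's buffer[] is an assumption
 * left out here; vf_send_pf_msg() is a hypothetical send helper.
 *
 *	struct virtchnl_fdir_add *req = kzalloc(sizeof(*req), GFP_KERNEL);
 *	struct virtchnl_proto_hdrs *phs = &req->rule_cfg.proto_hdrs;
 *	struct virtchnl_filter_action *act;
 *
 *	req->vsi_id = vsi_id;
 *	req->validate_only = 0;			// really add the rule
 *	phs->count = 3;				// ETH / IPV4 / TCP
 *	VIRTCHNL_SET_PROTO_HDR_TYPE(&phs->proto_hdr[0], ETH);
 *	VIRTCHNL_SET_PROTO_HDR_TYPE(&phs->proto_hdr[1], IPV4);
 *	VIRTCHNL_SET_PROTO_HDR_TYPE(&phs->proto_hdr[2], TCP);
 *	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(&phs->proto_hdr[2], TCP, DST_PORT);
 *	// ...copy the TCP header carrying the wanted dst_port into
 *	// phs->proto_hdr[2].buffer in network order...
 *
 *	req->rule_cfg.action_set.count = 1;
 *	act = &req->rule_cfg.action_set.actions[0];
 *	act->type = VIRTCHNL_ACTION_QUEUE;
 *	act->act_conf.queue.index = 3;
 *	vf_send_pf_msg(VIRTCHNL_OP_ADD_FDIR_FILTER, (u8 *)req, sizeof(*req));
 *
 * The PF's reply carries the structure back with the OUTPUT fields filled in:
 * status (enum virtchnl_fdir_prgm_status) and, on success, flow_id for use
 * with VIRTCHNL_OP_DEL_FDIR_FILTER.
 */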
1376
1377 /* VIRTCHNL_OP_DEL_FDIR_FILTER
1378 * VF sends this request to PF by filling out vsi_id
1379 * and flow_id. PF will return del_status to VF.
1380 */
1381 struct virtchnl_fdir_del {
1382 u16 vsi_id; /* INPUT */
1383 u16 pad;
1384 u32 flow_id; /* INPUT */
1385
1386 /* see enum virtchnl_fdir_prgm_status; OUTPUT */
1387 s32 status;
1388 };
1389
1390 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
1391
1392 #define __vss_byone(p, member, count, old) \
1393 (struct_size(p, member, count) + (old - 1 - struct_size(p, member, 0)))
1394
1395 #define __vss_byelem(p, member, count, old) \
1396 (struct_size(p, member, count - 1) + (old - struct_size(p, member, 0)))
1397
1398 #define __vss_full(p, member, count, old) \
1399 (struct_size(p, member, count) + (old - struct_size(p, member, 0)))
1400
1401 #define __vss(type, func, p, member, count) \
1402 struct type: func(p, member, count, type##_LEGACY_SIZEOF)
1403
1404 #define virtchnl_struct_size(p, m, c) \
1405 _Generic(*p, \
1406 __vss(virtchnl_vf_resource, __vss_full, p, m, c), \
1407 __vss(virtchnl_vsi_queue_config_info, __vss_full, p, m, c), \
1408 __vss(virtchnl_irq_map_info, __vss_full, p, m, c), \
1409 __vss(virtchnl_ether_addr_list, __vss_full, p, m, c), \
1410 __vss(virtchnl_vlan_filter_list, __vss_full, p, m, c), \
1411 __vss(virtchnl_vlan_filter_list_v2, __vss_byelem, p, m, c), \
1412 __vss(virtchnl_tc_info, __vss_byelem, p, m, c), \
1413 __vss(virtchnl_rdma_qvlist_info, __vss_byelem, p, m, c), \
1414 __vss(virtchnl_rss_key, __vss_byone, p, m, c), \
1415 __vss(virtchnl_rss_lut, __vss_byone, p, m, c))
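/* Illustrative sketch, not part of this header, of what the sizing helpers
 * compute, using virtchnl_vlan_filter_list (a __vss_full user with a 6-byte
 * legacy size and u16 trailing elements):
 *
 *	struct virtchnl_vlan_filter_list *vfl;
 *
 *	virtchnl_struct_size(vfl, vlan_id, 0) == 6		// legacy size
 *	virtchnl_struct_size(vfl, vlan_id, 3) == 6 + 3 * sizeof(u16)
 *
 * __vss_byelem users (e.g. virtchnl_tc_info) already count one array element
 * in their legacy size and __vss_byone users (e.g. virtchnl_rss_key) already
 * count one array byte, so those helpers subtract the overlap.
 */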
1416
1417 /**
1418 * virtchnl_vc_validate_vf_msg
1419 * @ver: Virtchnl version info
1420 * @v_opcode: Opcode for the message
1421 * @msg: pointer to the msg buffer
1422 * @msglen: msg length
1423 *
1424 * validate msg format against struct for each opcode
1425 */
1426 static inline int
1427 virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
1428 u8 *msg, u16 msglen)
1429 {
1430 bool err_msg_format = false;
1431 u32 valid_len = 0;
1432
1433 /* Validate message length. */
1434 switch (v_opcode) {
1435 case VIRTCHNL_OP_VERSION:
1436 valid_len = sizeof(struct virtchnl_version_info);
1437 break;
1438 case VIRTCHNL_OP_RESET_VF:
1439 break;
1440 case VIRTCHNL_OP_GET_VF_RESOURCES:
1441 if (VF_IS_V11(ver))
1442 valid_len = sizeof(u32);
1443 break;
1444 case VIRTCHNL_OP_CONFIG_TX_QUEUE:
1445 valid_len = sizeof(struct virtchnl_txq_info);
1446 break;
1447 case VIRTCHNL_OP_CONFIG_RX_QUEUE:
1448 valid_len = sizeof(struct virtchnl_rxq_info);
1449 break;
1450 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
1451 valid_len = virtchnl_vsi_queue_config_info_LEGACY_SIZEOF;
1452 if (msglen >= valid_len) {
1453 struct virtchnl_vsi_queue_config_info *vqc =
1454 (struct virtchnl_vsi_queue_config_info *)msg;
1455 valid_len = virtchnl_struct_size(vqc, qpair,
1456 vqc->num_queue_pairs);
1457 if (vqc->num_queue_pairs == 0)
1458 err_msg_format = true;
1459 }
1460 break;
1461 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
1462 valid_len = virtchnl_irq_map_info_LEGACY_SIZEOF;
1463 if (msglen >= valid_len) {
1464 struct virtchnl_irq_map_info *vimi =
1465 (struct virtchnl_irq_map_info *)msg;
1466 valid_len = virtchnl_struct_size(vimi, vecmap,
1467 vimi->num_vectors);
1468 if (vimi->num_vectors == 0)
1469 err_msg_format = true;
1470 }
1471 break;
1472 case VIRTCHNL_OP_ENABLE_QUEUES:
1473 case VIRTCHNL_OP_DISABLE_QUEUES:
1474 valid_len = sizeof(struct virtchnl_queue_select);
1475 break;
1476 case VIRTCHNL_OP_ADD_ETH_ADDR:
1477 case VIRTCHNL_OP_DEL_ETH_ADDR:
1478 valid_len = virtchnl_ether_addr_list_LEGACY_SIZEOF;
1479 if (msglen >= valid_len) {
1480 struct virtchnl_ether_addr_list *veal =
1481 (struct virtchnl_ether_addr_list *)msg;
1482 valid_len = virtchnl_struct_size(veal, list,
1483 veal->num_elements);
1484 if (veal->num_elements == 0)
1485 err_msg_format = true;
1486 }
1487 break;
1488 case VIRTCHNL_OP_ADD_VLAN:
1489 case VIRTCHNL_OP_DEL_VLAN:
1490 valid_len = virtchnl_vlan_filter_list_LEGACY_SIZEOF;
1491 if (msglen >= valid_len) {
1492 struct virtchnl_vlan_filter_list *vfl =
1493 (struct virtchnl_vlan_filter_list *)msg;
1494 valid_len = virtchnl_struct_size(vfl, vlan_id,
1495 vfl->num_elements);
1496 if (vfl->num_elements == 0)
1497 err_msg_format = true;
1498 }
1499 break;
1500 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
1501 valid_len = sizeof(struct virtchnl_promisc_info);
1502 break;
1503 case VIRTCHNL_OP_GET_STATS:
1504 valid_len = sizeof(struct virtchnl_queue_select);
1505 break;
1506 case VIRTCHNL_OP_RDMA:
1507 /* These messages are opaque to us and will be validated in
1508 * the RDMA client code. We just need to check for nonzero
1509 * length. The firmware will enforce max length restrictions.
1510 */
1511 if (msglen)
1512 valid_len = msglen;
1513 else
1514 err_msg_format = true;
1515 break;
1516 case VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP:
1517 break;
1518 case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP:
1519 valid_len = virtchnl_rdma_qvlist_info_LEGACY_SIZEOF;
1520 if (msglen >= valid_len) {
1521 struct virtchnl_rdma_qvlist_info *qv =
1522 (struct virtchnl_rdma_qvlist_info *)msg;
1523
1524 valid_len = virtchnl_struct_size(qv, qv_info,
1525 qv->num_vectors);
1526 }
1527 break;
1528 case VIRTCHNL_OP_CONFIG_RSS_KEY:
1529 valid_len = virtchnl_rss_key_LEGACY_SIZEOF;
1530 if (msglen >= valid_len) {
1531 struct virtchnl_rss_key *vrk =
1532 (struct virtchnl_rss_key *)msg;
1533 valid_len = virtchnl_struct_size(vrk, key,
1534 vrk->key_len);
1535 }
1536 break;
1537 case VIRTCHNL_OP_CONFIG_RSS_LUT:
1538 valid_len = virtchnl_rss_lut_LEGACY_SIZEOF;
1539 if (msglen >= valid_len) {
1540 struct virtchnl_rss_lut *vrl =
1541 (struct virtchnl_rss_lut *)msg;
1542 valid_len = virtchnl_struct_size(vrl, lut,
1543 vrl->lut_entries);
1544 }
1545 break;
1546 case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
1547 break;
1548 case VIRTCHNL_OP_SET_RSS_HENA:
1549 valid_len = sizeof(struct virtchnl_rss_hena);
1550 break;
1551 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
1552 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
1553 break;
1554 case VIRTCHNL_OP_REQUEST_QUEUES:
1555 valid_len = sizeof(struct virtchnl_vf_res_request);
1556 break;
1557 case VIRTCHNL_OP_ENABLE_CHANNELS:
1558 valid_len = virtchnl_tc_info_LEGACY_SIZEOF;
1559 if (msglen >= valid_len) {
1560 struct virtchnl_tc_info *vti =
1561 (struct virtchnl_tc_info *)msg;
1562 valid_len = virtchnl_struct_size(vti, list,
1563 vti->num_tc);
1564 if (vti->num_tc == 0)
1565 err_msg_format = true;
1566 }
1567 break;
1568 case VIRTCHNL_OP_DISABLE_CHANNELS:
1569 break;
1570 case VIRTCHNL_OP_ADD_CLOUD_FILTER:
1571 case VIRTCHNL_OP_DEL_CLOUD_FILTER:
1572 valid_len = sizeof(struct virtchnl_filter);
1573 break;
1574 case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
1575 break;
1576 case VIRTCHNL_OP_ADD_RSS_CFG:
1577 case VIRTCHNL_OP_DEL_RSS_CFG:
1578 valid_len = sizeof(struct virtchnl_rss_cfg);
1579 break;
1580 case VIRTCHNL_OP_ADD_FDIR_FILTER:
1581 valid_len = sizeof(struct virtchnl_fdir_add);
1582 break;
1583 case VIRTCHNL_OP_DEL_FDIR_FILTER:
1584 valid_len = sizeof(struct virtchnl_fdir_del);
1585 break;
1586 case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
1587 break;
1588 case VIRTCHNL_OP_ADD_VLAN_V2:
1589 case VIRTCHNL_OP_DEL_VLAN_V2:
1590 valid_len = virtchnl_vlan_filter_list_v2_LEGACY_SIZEOF;
1591 if (msglen >= valid_len) {
1592 struct virtchnl_vlan_filter_list_v2 *vfl =
1593 (struct virtchnl_vlan_filter_list_v2 *)msg;
1594
1595 valid_len = virtchnl_struct_size(vfl, filters,
1596 vfl->num_elements);
1597
1598 if (vfl->num_elements == 0) {
1599 err_msg_format = true;
1600 break;
1601 }
1602 }
1603 break;
1604 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
1605 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
1606 case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
1607 case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
1608 valid_len = sizeof(struct virtchnl_vlan_setting);
1609 break;
1610 /* These are always errors coming from the VF. */
1611 case VIRTCHNL_OP_EVENT:
1612 case VIRTCHNL_OP_UNKNOWN:
1613 default:
1614 return VIRTCHNL_STATUS_ERR_PARAM;
1615 }
1616 /* few more checks */
1617 if (err_msg_format || valid_len != msglen)
1618 return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;
1619
1620 return 0;
1621 }
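/* Illustrative sketch, not part of this header: a PF driver would typically
 * run the validator before dispatching a VF message ('vf_ver' being the API
 * version previously negotiated with that VF):
 *
 *	err = virtchnl_vc_validate_vf_msg(&vf_ver, v_opcode, msg, msglen);
 *	if (err)
 *		// reply to the VF with the returned virtchnl_status_code
 *		// instead of processing the message
 */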
1622 #endif /* _VIRTCHNL_H_ */
1623