/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019, Intel Corporation. */

#ifndef _ICE_FLOW_H_
#define _ICE_FLOW_H_

#include "ice_flex_type.h"

#define ICE_FLOW_ENTRY_HANDLE_INVAL	0
#define ICE_FLOW_FLD_OFF_INVAL		0xffff

/* Generate flow hash field from flow field type(s) */
#define ICE_FLOW_HASH_ETH	\
	(BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA) | \
	 BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA))
#define ICE_FLOW_HASH_IPV4	\
	(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | \
	 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA))
#define ICE_FLOW_HASH_IPV6	\
	(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) | \
	 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA))
#define ICE_FLOW_HASH_TCP_PORT	\
	(BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) | \
	 BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT))
#define ICE_FLOW_HASH_UDP_PORT	\
	(BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) | \
	 BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT))
#define ICE_FLOW_HASH_SCTP_PORT	\
	(BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT) | \
	 BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT))

#define ICE_HASH_INVALID	0
#define ICE_HASH_TCP_IPV4	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT)
#define ICE_HASH_TCP_IPV6	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_TCP_PORT)
#define ICE_HASH_UDP_IPV4	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT)
#define ICE_HASH_UDP_IPV6	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT)

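/* Example (an illustrative sketch, not part of the original header): the
 * ICE_FLOW_HASH_* and ICE_HASH_* masks above are plain bitwise ORs of
 * BIT_ULL(ICE_FLOW_FIELD_IDX_*) values, so a caller can compose its own
 * hashed-field mask the same way. The example_* name below is hypothetical,
 * and because the field indices are declared in enum ice_flow_field further
 * down, compiled code would have to appear after that definition.
 */
#if 0
/* Hash on the IPv4 source/destination addresses plus only the TCP destination
 * port.
 */
static const u64 example_hash_ipv4_tcp_dport =
	ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT);
#endif
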
#define ICE_FLOW_HASH_GTP_TEID \
	(BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID))

#define ICE_FLOW_HASH_GTP_IPV4_TEID \
	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_TEID)
#define ICE_FLOW_HASH_GTP_IPV6_TEID \
	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_TEID)

#define ICE_FLOW_HASH_GTP_U_TEID \
	(BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID))

#define ICE_FLOW_HASH_GTP_U_IPV4_TEID \
	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_TEID)
#define ICE_FLOW_HASH_GTP_U_IPV6_TEID \
	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_TEID)

#define ICE_FLOW_HASH_GTP_U_EH_TEID \
	(BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_TEID))

#define ICE_FLOW_HASH_GTP_U_EH_QFI \
	(BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_QFI))

#define ICE_FLOW_HASH_GTP_U_IPV4_EH \
	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_EH_TEID | \
	 ICE_FLOW_HASH_GTP_U_EH_QFI)
#define ICE_FLOW_HASH_GTP_U_IPV6_EH \
	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_EH_TEID | \
	 ICE_FLOW_HASH_GTP_U_EH_QFI)

#define ICE_FLOW_HASH_PPPOE_SESS_ID \
	(BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID))

#define ICE_FLOW_HASH_PPPOE_SESS_ID_ETH \
	(ICE_FLOW_HASH_ETH | ICE_FLOW_HASH_PPPOE_SESS_ID)
#define ICE_FLOW_HASH_PPPOE_TCP_ID \
	(ICE_FLOW_HASH_TCP_PORT | ICE_FLOW_HASH_PPPOE_SESS_ID)
#define ICE_FLOW_HASH_PPPOE_UDP_ID \
	(ICE_FLOW_HASH_UDP_PORT | ICE_FLOW_HASH_PPPOE_SESS_ID)

#define ICE_FLOW_HASH_PFCP_SEID \
	(BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID))
#define ICE_FLOW_HASH_PFCP_IPV4_SEID \
	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_PFCP_SEID)
#define ICE_FLOW_HASH_PFCP_IPV6_SEID \
	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_PFCP_SEID)

#define ICE_FLOW_HASH_L2TPV3_SESS_ID \
	(BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID))
#define ICE_FLOW_HASH_L2TPV3_IPV4_SESS_ID \
	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_L2TPV3_SESS_ID)
#define ICE_FLOW_HASH_L2TPV3_IPV6_SESS_ID \
	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_L2TPV3_SESS_ID)

#define ICE_FLOW_HASH_ESP_SPI \
	(BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI))
#define ICE_FLOW_HASH_ESP_IPV4_SPI \
	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_ESP_SPI)
#define ICE_FLOW_HASH_ESP_IPV6_SPI \
	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_ESP_SPI)

#define ICE_FLOW_HASH_AH_SPI \
	(BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI))
#define ICE_FLOW_HASH_AH_IPV4_SPI \
	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_AH_SPI)
#define ICE_FLOW_HASH_AH_IPV6_SPI \
	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_AH_SPI)

#define ICE_FLOW_HASH_NAT_T_ESP_SPI \
	(BIT_ULL(ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI))
#define ICE_FLOW_HASH_NAT_T_ESP_IPV4_SPI \
	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_NAT_T_ESP_SPI)
#define ICE_FLOW_HASH_NAT_T_ESP_IPV6_SPI \
	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_NAT_T_ESP_SPI)

/* Protocol header fields within a packet segment. A segment is a logical group
 * of one or more protocol headers. Each logical group either encapsulates or
 * is encapsulated by tunneling/encapsulation protocols used for network
 * virtualization, such as GRE and VXLAN.
 */
enum ice_flow_seg_hdr {
	ICE_FLOW_SEG_HDR_NONE		= 0x00000000,
	ICE_FLOW_SEG_HDR_ETH		= 0x00000001,
	ICE_FLOW_SEG_HDR_VLAN		= 0x00000002,
	ICE_FLOW_SEG_HDR_IPV4		= 0x00000004,
	ICE_FLOW_SEG_HDR_IPV6		= 0x00000008,
	ICE_FLOW_SEG_HDR_ARP		= 0x00000010,
	ICE_FLOW_SEG_HDR_ICMP		= 0x00000020,
	ICE_FLOW_SEG_HDR_TCP		= 0x00000040,
	ICE_FLOW_SEG_HDR_UDP		= 0x00000080,
	ICE_FLOW_SEG_HDR_SCTP		= 0x00000100,
	ICE_FLOW_SEG_HDR_GRE		= 0x00000200,
	ICE_FLOW_SEG_HDR_GTPC		= 0x00000400,
	ICE_FLOW_SEG_HDR_GTPC_TEID	= 0x00000800,
	ICE_FLOW_SEG_HDR_GTPU_IP	= 0x00001000,
	ICE_FLOW_SEG_HDR_GTPU_EH	= 0x00002000,
	ICE_FLOW_SEG_HDR_GTPU_DWN	= 0x00004000,
	ICE_FLOW_SEG_HDR_GTPU_UP	= 0x00008000,
	ICE_FLOW_SEG_HDR_PPPOE		= 0x00010000,
	ICE_FLOW_SEG_HDR_PFCP_NODE	= 0x00020000,
	ICE_FLOW_SEG_HDR_PFCP_SESSION	= 0x00040000,
	ICE_FLOW_SEG_HDR_L2TPV3		= 0x00080000,
	ICE_FLOW_SEG_HDR_ESP		= 0x00100000,
	ICE_FLOW_SEG_HDR_AH		= 0x00200000,
	ICE_FLOW_SEG_HDR_NAT_T_ESP	= 0x00400000,
	ICE_FLOW_SEG_HDR_ETH_NON_IP	= 0x00800000,
	/* The following is an additive bit for ICE_FLOW_SEG_HDR_IPV4 and
	 * ICE_FLOW_SEG_HDR_IPV6; it also includes the IPv4/IPv6 "other" PTYPEs
	 */
	ICE_FLOW_SEG_HDR_IPV_OTHER      = 0x20000000,
};

/* These segments all have the same PTYPES, but are otherwise distinguished by
 * the value of the gtp_eh_pdu and gtp_eh_pdu_link flags:
 *
 *                                gtp_eh_pdu     gtp_eh_pdu_link
 * ICE_FLOW_SEG_HDR_GTPU_IP           0              0
 * ICE_FLOW_SEG_HDR_GTPU_EH           1              don't care
 * ICE_FLOW_SEG_HDR_GTPU_DWN          1              0
 * ICE_FLOW_SEG_HDR_GTPU_UP           1              1
 */
#define ICE_FLOW_SEG_HDR_GTPU (ICE_FLOW_SEG_HDR_GTPU_IP | \
			       ICE_FLOW_SEG_HDR_GTPU_EH | \
			       ICE_FLOW_SEG_HDR_GTPU_DWN | \
			       ICE_FLOW_SEG_HDR_GTPU_UP)
#define ICE_FLOW_SEG_HDR_PFCP (ICE_FLOW_SEG_HDR_PFCP_NODE | \
			       ICE_FLOW_SEG_HDR_PFCP_SESSION)

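/* Example (an illustrative sketch, not part of the original header):
 * segment-header flags are ORed together to describe a single packet segment;
 * ICE_FLOW_SEG_HDR_GTPU is the convenience mask for callers that do not care
 * which of the four GTP-U variants is present. The example_* name is
 * hypothetical.
 */
#if 0
/* Outer IPv4 carrying GTP-U with a PDU session extension header */
static const u32 example_gtpu_eh_over_ipv4_hdrs =
	ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_GTPU_EH;
#endif
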
enum ice_flow_field {
	/* L2 */
	ICE_FLOW_FIELD_IDX_ETH_DA,
	ICE_FLOW_FIELD_IDX_ETH_SA,
	ICE_FLOW_FIELD_IDX_S_VLAN,
	ICE_FLOW_FIELD_IDX_C_VLAN,
	ICE_FLOW_FIELD_IDX_ETH_TYPE,
	/* L3 */
	ICE_FLOW_FIELD_IDX_IPV4_DSCP,
	ICE_FLOW_FIELD_IDX_IPV6_DSCP,
	ICE_FLOW_FIELD_IDX_IPV4_TTL,
	ICE_FLOW_FIELD_IDX_IPV4_PROT,
	ICE_FLOW_FIELD_IDX_IPV6_TTL,
	ICE_FLOW_FIELD_IDX_IPV6_PROT,
	ICE_FLOW_FIELD_IDX_IPV4_SA,
	ICE_FLOW_FIELD_IDX_IPV4_DA,
	ICE_FLOW_FIELD_IDX_IPV6_SA,
	ICE_FLOW_FIELD_IDX_IPV6_DA,
	/* L4 */
	ICE_FLOW_FIELD_IDX_TCP_SRC_PORT,
	ICE_FLOW_FIELD_IDX_TCP_DST_PORT,
	ICE_FLOW_FIELD_IDX_UDP_SRC_PORT,
	ICE_FLOW_FIELD_IDX_UDP_DST_PORT,
	ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
	ICE_FLOW_FIELD_IDX_SCTP_DST_PORT,
	ICE_FLOW_FIELD_IDX_TCP_FLAGS,
	/* ARP */
	ICE_FLOW_FIELD_IDX_ARP_SIP,
	ICE_FLOW_FIELD_IDX_ARP_DIP,
	ICE_FLOW_FIELD_IDX_ARP_SHA,
	ICE_FLOW_FIELD_IDX_ARP_DHA,
	ICE_FLOW_FIELD_IDX_ARP_OP,
	/* ICMP */
	ICE_FLOW_FIELD_IDX_ICMP_TYPE,
	ICE_FLOW_FIELD_IDX_ICMP_CODE,
	/* GRE */
	ICE_FLOW_FIELD_IDX_GRE_KEYID,
	/* GTPC_TEID */
	ICE_FLOW_FIELD_IDX_GTPC_TEID,
	/* GTPU_IP */
	ICE_FLOW_FIELD_IDX_GTPU_IP_TEID,
	/* GTPU_EH */
	ICE_FLOW_FIELD_IDX_GTPU_EH_TEID,
	ICE_FLOW_FIELD_IDX_GTPU_EH_QFI,
	/* GTPU_UP */
	ICE_FLOW_FIELD_IDX_GTPU_UP_TEID,
	/* GTPU_DWN */
	ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID,
	/* PPPoE */
	ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID,
	/* PFCP */
	ICE_FLOW_FIELD_IDX_PFCP_SEID,
	/* L2TPv3 */
	ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID,
	/* ESP */
	ICE_FLOW_FIELD_IDX_ESP_SPI,
	/* AH */
	ICE_FLOW_FIELD_IDX_AH_SPI,
	/* NAT_T ESP */
	ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
	/* The total number of enum values must not exceed 64 */
	ICE_FLOW_FIELD_IDX_MAX
};

/* Flow headers and fields for AVF support */
enum ice_flow_avf_hdr_field {
	/* Values 0 - 28 are reserved for future use */
	ICE_AVF_FLOW_FIELD_INVALID		= 0,
	ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP	= 29,
	ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP,
	ICE_AVF_FLOW_FIELD_IPV4_UDP,
	ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK,
	ICE_AVF_FLOW_FIELD_IPV4_TCP,
	ICE_AVF_FLOW_FIELD_IPV4_SCTP,
	ICE_AVF_FLOW_FIELD_IPV4_OTHER,
	ICE_AVF_FLOW_FIELD_FRAG_IPV4,
	/* Values 37-38 are reserved */
	ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP	= 39,
	ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP,
	ICE_AVF_FLOW_FIELD_IPV6_UDP,
	ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK,
	ICE_AVF_FLOW_FIELD_IPV6_TCP,
	ICE_AVF_FLOW_FIELD_IPV6_SCTP,
	ICE_AVF_FLOW_FIELD_IPV6_OTHER,
	ICE_AVF_FLOW_FIELD_FRAG_IPV6,
	ICE_AVF_FLOW_FIELD_RSVD47,
	ICE_AVF_FLOW_FIELD_FCOE_OX,
	ICE_AVF_FLOW_FIELD_FCOE_RX,
	ICE_AVF_FLOW_FIELD_FCOE_OTHER,
	/* Values 51-62 are reserved */
	ICE_AVF_FLOW_FIELD_L2_PAYLOAD		= 63,
	ICE_AVF_FLOW_FIELD_MAX
};

/* Supported RSS offloads. This macro is defined to support the
 * VIRTCHNL_OP_GET_RSS_HENA_CAPS op: the PF driver sends the RSS hardware
 * capabilities to the caller of this op.
 */
#define ICE_DEFAULT_RSS_HENA ( \
	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP))

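/* Example (an illustrative sketch, not part of the original header): a HENA
 * value such as ICE_DEFAULT_RSS_HENA is a bitmap indexed by
 * enum ice_flow_avf_hdr_field, so individual offloads can be tested with
 * BIT_ULL(). The helper name is hypothetical.
 */
#if 0
static bool example_hena_has_ipv4_tcp(u64 hena)
{
	return !!(hena & BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP));
}
#endif
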
enum ice_flow_dir {
	ICE_FLOW_RX		= 0x02,
};

enum ice_flow_priority {
	ICE_FLOW_PRIO_LOW,
	ICE_FLOW_PRIO_NORMAL,
	ICE_FLOW_PRIO_HIGH
};

#define ICE_FLOW_SEG_MAX		2
#define ICE_FLOW_SEG_RAW_FLD_MAX	2
#define ICE_FLOW_FV_EXTRACT_SZ		2

#define ICE_FLOW_SET_HDRS(seg, val)	((seg)->hdrs |= (u32)(val))

struct ice_flow_seg_xtrct {
	u8 prot_id;	/* Protocol ID of extracted header field */
	u16 off;	/* Starting offset of the field in the header, in bytes */
	u8 idx;		/* Index of the FV entry used */
	u8 disp;	/* Displacement of field in bits from FV entry's start */
	u16 mask;	/* Mask for field */
};

enum ice_flow_fld_match_type {
	ICE_FLOW_FLD_TYPE_REG,		/* Value, mask */
	ICE_FLOW_FLD_TYPE_RANGE,	/* Value, mask, last (upper bound) */
	ICE_FLOW_FLD_TYPE_PREFIX,	/* IP address, prefix, size of prefix */
	ICE_FLOW_FLD_TYPE_SIZE,		/* Value, mask, size of match */
};

struct ice_flow_fld_loc {
	/* Describe offsets of field information relative to the beginning of
	 * the input buffer provided when adding flow entries.
	 */
	u16 val;	/* Offset where the value is located */
	u16 mask;	/* Offset where the mask/prefix value is located */
	u16 last;	/* Length or offset where the upper value is located */
};

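/* Example (an illustrative sketch, not part of the original header): the
 * offsets in struct ice_flow_fld_loc index into the caller-provided input
 * buffer. A buffer laid out as below would use val = 0 and mask = 2 for a
 * masked TCP destination port match; the struct name and layout are
 * hypothetical.
 */
#if 0
struct example_tcp_dport_match {
	__be16 dst_port;	/* value, at offset 0 */
	__be16 dst_port_mask;	/* mask, at offset 2 */
};
#endif
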
struct ice_flow_fld_info {
	enum ice_flow_fld_match_type type;
	/* Location where to retrieve data from an input buffer */
	struct ice_flow_fld_loc src;
	/* Location where to put the data into the final entry buffer */
	struct ice_flow_fld_loc entry;
	struct ice_flow_seg_xtrct xtrct;
};

struct ice_flow_seg_fld_raw {
	struct ice_flow_fld_info info;
	u16 off;	/* Offset from the start of the segment */
};

struct ice_flow_seg_info {
	u32 hdrs;	/* Bitmask indicating protocol headers present */
	u64 match;	/* Bitmask indicating header fields to be matched */
	u64 range;	/* Bitmask indicating header fields matched as ranges */

	struct ice_flow_fld_info fields[ICE_FLOW_FIELD_IDX_MAX];

	u8 raws_cnt;	/* Number of raw fields to be matched */
	struct ice_flow_seg_fld_raw raws[ICE_FLOW_SEG_RAW_FLD_MAX];
};

/* This structure describes a flow entry, and is tracked only in this file */
struct ice_flow_entry {
	struct list_head l_entry;

	u64 id;
	struct ice_flow_prof *prof;
	/* Flow entry's content */
	void *entry;
	enum ice_flow_priority priority;
	u16 vsi_handle;
	u16 entry_sz;
};

#define ICE_FLOW_ENTRY_HNDL(e)	((u64)(uintptr_t)e)
#define ICE_FLOW_ENTRY_PTR(h)	((struct ice_flow_entry *)(uintptr_t)(h))

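/* Example (an illustrative sketch, not part of the original header): an entry
 * handle is simply the entry pointer cast to u64, so the two macros above are
 * inverses of each other. The function name is hypothetical.
 */
#if 0
static struct ice_flow_entry *example_handle_roundtrip(struct ice_flow_entry *e)
{
	u64 handle = ICE_FLOW_ENTRY_HNDL(e);

	return ICE_FLOW_ENTRY_PTR(handle);	/* same pointer value as e */
}
#endif
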
struct ice_flow_prof {
	struct list_head l_entry;

	u64 id;
	enum ice_flow_dir dir;
	u8 segs_cnt;

	/* Keep track of flow entries associated with this flow profile */
	struct mutex entries_lock;
	struct list_head entries;

	struct ice_flow_seg_info segs[ICE_FLOW_SEG_MAX];

	/* software VSI handles referenced by this flow profile */
	DECLARE_BITMAP(vsis, ICE_MAX_VSI);
};

struct ice_rss_cfg {
	struct list_head l_entry;
	/* bitmap of VSIs added to the RSS entry */
	DECLARE_BITMAP(vsis, ICE_MAX_VSI);
	u64 hashed_flds;
	u32 packet_hdr;
};

int
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
		  u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
		  struct ice_flow_prof **prof);
int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
int
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
		   u64 entry_id, u16 vsi, enum ice_flow_priority prio,
		   void *data, u64 *entry_h);
int ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h);
void
ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
		 u16 val_loc, u16 mask_loc, u16 last_loc, bool range);
void
ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
		     u16 val_loc, u16 mask_loc);
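/* Example (an illustrative call sequence, not code taken from the driver):
 * build a single-segment IPv4/TCP profile that matches the TCP destination
 * port found at offset 0 of the caller's input buffer, then add one entry for
 * a VSI. The function name, prof_id/entry_id values, buffer layout and the
 * ICE_BLK_FD block choice are all hypothetical.
 */
#if 0
static int example_add_tcp4_dport_rule(struct ice_hw *hw, u16 vsi_handle,
					void *match_data)
{
	struct ice_flow_seg_info seg = {};
	struct ice_flow_prof *prof;
	u64 entry_h;
	int err;

	ICE_FLOW_SET_HDRS(&seg, ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP);
	/* Value at offset 0 of match_data; no mask, no range upper bound */
	ice_flow_set_fld(&seg, ICE_FLOW_FIELD_IDX_TCP_DST_PORT, 0,
			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);

	err = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, 0x1234, &seg, 1,
				&prof);
	if (err)
		return err;

	return ice_flow_add_entry(hw, ICE_BLK_FD, 0x1234, 1, vsi_handle,
				  ICE_FLOW_PRIO_NORMAL, match_data, &entry_h);
}
#endif
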
int ice_flow_rem_vsi_prof(struct ice_hw *hw, u16 vsi_handle, u64 prof_id);
void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle);
int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds);
int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
int
ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
		u32 addl_hdrs);
int
ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
		u32 addl_hdrs);
u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs);
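/* Example (an illustrative sketch, not code taken from the driver): enable RSS
 * hashing on the IPv4 address and TCP port tuple for a VSI by pairing an
 * ICE_HASH_* field mask with the matching segment-header flags. The wrapper
 * name is hypothetical.
 */
#if 0
static int example_enable_tcp4_rss(struct ice_hw *hw, u16 vsi_handle)
{
	return ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4,
			       ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
}
#endif
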
#endif /* _ICE_FLOW_H_ */