1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019, Intel Corporation. */
3 
4 #include "ice_common.h"
5 #include "ice_flow.h"
6 
7 /* Describe properties of a protocol header field */
8 struct ice_flow_field_info {
9 	enum ice_flow_seg_hdr hdr;
10 	s16 off;	/* Offset from start of a protocol header, in bits */
11 	u16 size;	/* Size of fields in bits */
12 };
13 
14 #define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
15 	.hdr = _hdr, \
16 	.off = (_offset_bytes) * BITS_PER_BYTE, \
17 	.size = (_size_bytes) * BITS_PER_BYTE, \
18 }
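
/* For example, the ICE_FLOW_FIELD_IDX_TCP_DST_PORT entry in the table below,
 * ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, sizeof(__be16)), expands to
 * { .hdr = ICE_FLOW_SEG_HDR_TCP, .off = 16, .size = 16 }, i.e. a 16-bit field
 * located two bytes into the TCP header.
 */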
19 
20 /* Table containing properties of supported protocol header fields */
21 static const
22 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
23 	/* IPv4 / IPv6 */
24 	/* ICE_FLOW_FIELD_IDX_IPV4_SA */
25 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, sizeof(struct in_addr)),
26 	/* ICE_FLOW_FIELD_IDX_IPV4_DA */
27 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, sizeof(struct in_addr)),
28 	/* ICE_FLOW_FIELD_IDX_IPV6_SA */
29 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, sizeof(struct in6_addr)),
30 	/* ICE_FLOW_FIELD_IDX_IPV6_DA */
31 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, sizeof(struct in6_addr)),
32 	/* Transport */
33 	/* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
34 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, sizeof(__be16)),
35 	/* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
36 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, sizeof(__be16)),
37 	/* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
38 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, sizeof(__be16)),
39 	/* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
40 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, sizeof(__be16)),
41 	/* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
42 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, sizeof(__be16)),
43 	/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
44 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, sizeof(__be16)),
45 	/* GRE */
46 	/* ICE_FLOW_FIELD_IDX_GRE_KEYID */
47 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12,
48 			  sizeof_field(struct gre_full_hdr, key)),
49 };
50 
51 /* Bitmaps indicating relevant packet types for a particular protocol header
52  *
53  * Packet types for packets with an Outer/First/Single IPv4 header
54  */
55 static const u32 ice_ptypes_ipv4_ofos[] = {
56 	0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
57 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
58 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
59 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
60 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
61 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
62 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
63 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
64 };
65 
66 /* Packet types for packets with an Innermost/Last IPv4 header */
67 static const u32 ice_ptypes_ipv4_il[] = {
68 	0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
69 	0x0000000E, 0x00000000, 0x00000000, 0x00000000,
70 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
71 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
72 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
73 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
74 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
75 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
76 };
77 
78 /* Packet types for packets with an Outer/First/Single IPv6 header */
79 static const u32 ice_ptypes_ipv6_ofos[] = {
80 	0x00000000, 0x00000000, 0x77000000, 0x10002000,
81 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
82 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
83 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
84 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
85 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
86 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
87 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
88 };
89 
90 /* Packet types for packets with an Innermost/Last IPv6 header */
91 static const u32 ice_ptypes_ipv6_il[] = {
92 	0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
93 	0x00000770, 0x00000000, 0x00000000, 0x00000000,
94 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
95 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
96 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
97 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
98 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
99 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
100 };
101 
102 /* UDP Packet types for non-tunneled packets or tunneled
103  * packets with inner UDP.
104  */
105 static const u32 ice_ptypes_udp_il[] = {
106 	0x81000000, 0x20204040, 0x04000010, 0x80810102,
107 	0x00000040, 0x00000000, 0x00000000, 0x00000000,
108 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
109 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
110 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
111 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
112 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
113 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
114 };
115 
116 /* Packet types for packets with an Innermost/Last TCP header */
117 static const u32 ice_ptypes_tcp_il[] = {
118 	0x04000000, 0x80810102, 0x10000040, 0x02040408,
119 	0x00000102, 0x00000000, 0x00000000, 0x00000000,
120 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
121 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
122 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
123 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
124 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
125 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
126 };
127 
128 /* Packet types for packets with an Innermost/Last SCTP header */
129 static const u32 ice_ptypes_sctp_il[] = {
130 	0x08000000, 0x01020204, 0x20000081, 0x04080810,
131 	0x00000204, 0x00000000, 0x00000000, 0x00000000,
132 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
133 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
134 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
135 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
136 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
137 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
138 };
139 
140 /* Packet types for packets with an Outermost/First GRE header */
141 static const u32 ice_ptypes_gre_of[] = {
142 	0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
143 	0x0000017E, 0x00000000, 0x00000000, 0x00000000,
144 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
145 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
146 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
147 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
148 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
149 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
150 };
151 
/* Manage parameters and info used during the creation of a flow profile */
153 struct ice_flow_prof_params {
154 	enum ice_block blk;
155 	u16 entry_length; /* # of bytes formatted entry will require */
156 	u8 es_cnt;
157 	struct ice_flow_prof *prof;
158 
	/* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0.
160 	 * This will give us the direction flags.
161 	 */
162 	struct ice_fv_word es[ICE_MAX_FV_WORDS];
163 	DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX);
164 };
165 
166 #define ICE_FLOW_SEG_HDRS_L3_MASK	\
167 	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
168 #define ICE_FLOW_SEG_HDRS_L4_MASK	\
169 	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
170 
171 /**
172  * ice_flow_val_hdrs - validates packet segments for valid protocol headers
173  * @segs: array of one or more packet segments that describe the flow
174  * @segs_cnt: number of packet segments provided
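 *
 * Each segment may specify at most one L3 and one L4 protocol header, e.g.
 * ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP is accepted, while combining
 * ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 in one segment is rejected
 * with ICE_ERR_PARAM.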
175  */
176 static enum ice_status
177 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
178 {
179 	u8 i;
180 
181 	for (i = 0; i < segs_cnt; i++) {
182 		/* Multiple L3 headers */
183 		if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
184 		    !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
185 			return ICE_ERR_PARAM;
186 
187 		/* Multiple L4 headers */
188 		if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
189 		    !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
190 			return ICE_ERR_PARAM;
191 	}
192 
193 	return 0;
194 }
195 
196 /* Sizes of fixed known protocol headers without header options */
197 #define ICE_FLOW_PROT_HDR_SZ_MAC	14
198 #define ICE_FLOW_PROT_HDR_SZ_IPV4	20
199 #define ICE_FLOW_PROT_HDR_SZ_IPV6	40
200 #define ICE_FLOW_PROT_HDR_SZ_TCP	20
201 #define ICE_FLOW_PROT_HDR_SZ_UDP	8
202 #define ICE_FLOW_PROT_HDR_SZ_SCTP	12
203 
204 /**
205  * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
206  * @params: information about the flow to be processed
207  * @seg: index of packet segment whose header size is to be determined
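 *
 * The size is the sum of the fixed header sizes defined above, e.g. a segment
 * with ICE_FLOW_SEG_HDR_IPV4 and ICE_FLOW_SEG_HDR_TCP is sized as
 * 14 (MAC) + 20 (IPv4) + 20 (TCP) = 54 bytes.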
208  */
209 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
210 {
211 	u16 sz = ICE_FLOW_PROT_HDR_SZ_MAC;
212 
213 	/* L3 headers */
214 	if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
215 		sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
216 	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
217 		sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
218 
219 	/* L4 headers */
220 	if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
221 		sz += ICE_FLOW_PROT_HDR_SZ_TCP;
222 	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
223 		sz += ICE_FLOW_PROT_HDR_SZ_UDP;
224 	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
225 		sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
226 
227 	return sz;
228 }
229 
230 /**
231  * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
232  * @params: information about the flow to be processed
233  *
 * This function identifies the packet types associated with the protocol
 * headers present in the packet segments of the specified flow profile.
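 *
 * The resulting ptypes bitmap is the intersection of the per-header packet
 * type bitmaps, e.g. an outer IPv4 segment with UDP keeps only the packet
 * types set in both ice_ptypes_ipv4_ofos and ice_ptypes_udp_il.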
236  */
237 static enum ice_status
238 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
239 {
240 	struct ice_flow_prof *prof;
241 	u8 i;
242 
243 	memset(params->ptypes, 0xff, sizeof(params->ptypes));
244 
245 	prof = params->prof;
246 
247 	for (i = 0; i < params->prof->segs_cnt; i++) {
248 		const unsigned long *src;
249 		u32 hdrs;
250 
251 		hdrs = prof->segs[i].hdrs;
252 
253 		if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
254 			src = !i ? (const unsigned long *)ice_ptypes_ipv4_ofos :
255 				(const unsigned long *)ice_ptypes_ipv4_il;
256 			bitmap_and(params->ptypes, params->ptypes, src,
257 				   ICE_FLOW_PTYPE_MAX);
258 		} else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
259 			src = !i ? (const unsigned long *)ice_ptypes_ipv6_ofos :
260 				(const unsigned long *)ice_ptypes_ipv6_il;
261 			bitmap_and(params->ptypes, params->ptypes, src,
262 				   ICE_FLOW_PTYPE_MAX);
263 		}
264 
265 		if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
266 			src = (const unsigned long *)ice_ptypes_udp_il;
267 			bitmap_and(params->ptypes, params->ptypes, src,
268 				   ICE_FLOW_PTYPE_MAX);
269 		} else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
270 			bitmap_and(params->ptypes, params->ptypes,
271 				   (const unsigned long *)ice_ptypes_tcp_il,
272 				   ICE_FLOW_PTYPE_MAX);
273 		} else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
274 			src = (const unsigned long *)ice_ptypes_sctp_il;
275 			bitmap_and(params->ptypes, params->ptypes, src,
276 				   ICE_FLOW_PTYPE_MAX);
277 		} else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
278 			if (!i) {
279 				src = (const unsigned long *)ice_ptypes_gre_of;
280 				bitmap_and(params->ptypes, params->ptypes,
281 					   src, ICE_FLOW_PTYPE_MAX);
282 			}
283 		}
284 	}
285 
286 	return 0;
287 }
288 
289 /**
290  * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
291  * @hw: pointer to the HW struct
292  * @params: information about the flow to be processed
293  * @seg: packet segment index of the field to be extracted
294  * @fld: ID of field to be extracted
295  *
296  * This function determines the protocol ID, offset, and size of the given
297  * field. It then allocates one or more extraction sequence entries for the
 * given field, and fills the entries with protocol ID and offset information.
299  */
300 static enum ice_status
301 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
302 		    u8 seg, enum ice_flow_field fld)
303 {
304 	enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
305 	u8 fv_words = hw->blk[params->blk].es.fvw;
306 	struct ice_flow_fld_info *flds;
307 	u16 cnt, ese_bits, i;
308 	u16 off;
309 
310 	flds = params->prof->segs[seg].fields;
311 
312 	switch (fld) {
313 	case ICE_FLOW_FIELD_IDX_IPV4_SA:
314 	case ICE_FLOW_FIELD_IDX_IPV4_DA:
315 		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
316 		break;
317 	case ICE_FLOW_FIELD_IDX_IPV6_SA:
318 	case ICE_FLOW_FIELD_IDX_IPV6_DA:
319 		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
320 		break;
321 	case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
322 	case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
323 		prot_id = ICE_PROT_TCP_IL;
324 		break;
325 	case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
326 	case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
327 		prot_id = ICE_PROT_UDP_IL_OR_S;
328 		break;
329 	case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
330 	case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
331 		prot_id = ICE_PROT_SCTP_IL;
332 		break;
333 	case ICE_FLOW_FIELD_IDX_GRE_KEYID:
334 		prot_id = ICE_PROT_GRE_OF;
335 		break;
336 	default:
337 		return ICE_ERR_NOT_IMPL;
338 	}
339 
	/* Each extraction sequence entry is a word in size, and extracts data
	 * from a word-aligned offset within a protocol header.
342 	 */
343 	ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
344 
345 	flds[fld].xtrct.prot_id = prot_id;
346 	flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
347 		ICE_FLOW_FV_EXTRACT_SZ;
348 	flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
349 	flds[fld].xtrct.idx = params->es_cnt;
350 
351 	/* Adjust the next field-entry index after accommodating the number of
352 	 * entries this field consumes
353 	 */
354 	cnt = DIV_ROUND_UP(flds[fld].xtrct.disp + ice_flds_info[fld].size,
355 			   ese_bits);
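	/* Worked example, assuming ICE_FLOW_FV_EXTRACT_SZ is 2 (one 16-bit
	 * extraction word): ICE_FLOW_FIELD_IDX_IPV4_DA sits at bit offset 128
	 * and is 32 bits wide, so xtrct.off = (128 / 16) * 2 = 16 bytes,
	 * xtrct.disp = 0, and cnt = DIV_ROUND_UP(0 + 32, 16) = 2 entries.
	 */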
356 
357 	/* Fill in the extraction sequence entries needed for this field */
358 	off = flds[fld].xtrct.off;
359 	for (i = 0; i < cnt; i++) {
360 		u8 idx;
361 
		/* Make sure the number of extraction sequence entries required
363 		 * does not exceed the block's capability
364 		 */
365 		if (params->es_cnt >= fv_words)
366 			return ICE_ERR_MAX_LIMIT;
367 
368 		/* some blocks require a reversed field vector layout */
369 		if (hw->blk[params->blk].es.reverse)
370 			idx = fv_words - params->es_cnt - 1;
371 		else
372 			idx = params->es_cnt;
373 
374 		params->es[idx].prot_id = prot_id;
375 		params->es[idx].off = off;
376 		params->es_cnt++;
377 
378 		off += ICE_FLOW_FV_EXTRACT_SZ;
379 	}
380 
381 	return 0;
382 }
383 
384 /**
385  * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
386  * @hw: pointer to the HW struct
387  * @params: information about the flow to be processed
 * @seg: index of packet segment whose raw fields are to be extracted
389  */
390 static enum ice_status
391 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
392 		     u8 seg)
393 {
394 	u16 fv_words;
395 	u16 hdrs_sz;
396 	u8 i;
397 
398 	if (!params->prof->segs[seg].raws_cnt)
399 		return 0;
400 
401 	if (params->prof->segs[seg].raws_cnt >
402 	    ARRAY_SIZE(params->prof->segs[seg].raws))
403 		return ICE_ERR_MAX_LIMIT;
404 
405 	/* Offsets within the segment headers are not supported */
406 	hdrs_sz = ice_flow_calc_seg_sz(params, seg);
407 	if (!hdrs_sz)
408 		return ICE_ERR_PARAM;
409 
410 	fv_words = hw->blk[params->blk].es.fvw;
411 
412 	for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
413 		struct ice_flow_seg_fld_raw *raw;
414 		u16 off, cnt, j;
415 
416 		raw = &params->prof->segs[seg].raws[i];
417 
418 		/* Storing extraction information */
419 		raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
420 		raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
421 			ICE_FLOW_FV_EXTRACT_SZ;
422 		raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
423 			BITS_PER_BYTE;
424 		raw->info.xtrct.idx = params->es_cnt;
425 
426 		/* Determine the number of field vector entries this raw field
427 		 * consumes.
428 		 */
429 		cnt = DIV_ROUND_UP(raw->info.xtrct.disp +
430 				   (raw->info.src.last * BITS_PER_BYTE),
431 				   (ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE));
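		/* For instance, assuming ICE_FLOW_FV_EXTRACT_SZ is 2, a 3-byte
		 * raw field at segment offset 5 yields xtrct.off = 4,
		 * xtrct.disp = 8 bits, and cnt = DIV_ROUND_UP(8 + 24, 16) = 2.
		 */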
432 		off = raw->info.xtrct.off;
433 		for (j = 0; j < cnt; j++) {
434 			u16 idx;
435 
			/* Make sure the number of extraction sequence entries required
437 			 * does not exceed the block's capability
438 			 */
439 			if (params->es_cnt >= hw->blk[params->blk].es.count ||
440 			    params->es_cnt >= ICE_MAX_FV_WORDS)
441 				return ICE_ERR_MAX_LIMIT;
442 
443 			/* some blocks require a reversed field vector layout */
444 			if (hw->blk[params->blk].es.reverse)
445 				idx = fv_words - params->es_cnt - 1;
446 			else
447 				idx = params->es_cnt;
448 
449 			params->es[idx].prot_id = raw->info.xtrct.prot_id;
450 			params->es[idx].off = off;
451 			params->es_cnt++;
452 			off += ICE_FLOW_FV_EXTRACT_SZ;
453 		}
454 	}
455 
456 	return 0;
457 }
458 
459 /**
460  * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
461  * @hw: pointer to the HW struct
462  * @params: information about the flow to be processed
463  *
464  * This function iterates through all matched fields in the given segments, and
465  * creates an extraction sequence for the fields.
466  */
467 static enum ice_status
468 ice_flow_create_xtrct_seq(struct ice_hw *hw,
469 			  struct ice_flow_prof_params *params)
470 {
471 	struct ice_flow_prof *prof = params->prof;
472 	enum ice_status status = 0;
473 	u8 i;
474 
475 	for (i = 0; i < prof->segs_cnt; i++) {
476 		u8 j;
477 
478 		for_each_set_bit(j, (unsigned long *)&prof->segs[i].match,
479 				 ICE_FLOW_FIELD_IDX_MAX) {
480 			status = ice_flow_xtract_fld(hw, params, i,
481 						     (enum ice_flow_field)j);
482 			if (status)
483 				return status;
484 		}
485 
486 		/* Process raw matching bytes */
487 		status = ice_flow_xtract_raws(hw, params, i);
488 		if (status)
489 			return status;
490 	}
491 
492 	return status;
493 }
494 
495 /**
496  * ice_flow_proc_segs - process all packet segments associated with a profile
497  * @hw: pointer to the HW struct
498  * @params: information about the flow to be processed
499  */
500 static enum ice_status
501 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
502 {
503 	enum ice_status status;
504 
505 	status = ice_flow_proc_seg_hdrs(params);
506 	if (status)
507 		return status;
508 
509 	status = ice_flow_create_xtrct_seq(hw, params);
510 	if (status)
511 		return status;
512 
513 	switch (params->blk) {
514 	case ICE_BLK_FD:
515 	case ICE_BLK_RSS:
516 		status = 0;
517 		break;
518 	default:
519 		return ICE_ERR_NOT_IMPL;
520 	}
521 
522 	return status;
523 }
524 
525 #define ICE_FLOW_FIND_PROF_CHK_FLDS	0x00000001
526 #define ICE_FLOW_FIND_PROF_CHK_VSI	0x00000002
527 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR	0x00000004
528 
529 /**
530  * ice_flow_find_prof_conds - Find a profile matching headers and conditions
531  * @hw: pointer to the HW struct
532  * @blk: classification stage
533  * @dir: flow direction
534  * @segs: array of one or more packet segments that describe the flow
535  * @segs_cnt: number of packet segments provided
536  * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
537  * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
538  */
539 static struct ice_flow_prof *
540 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
541 			 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
542 			 u8 segs_cnt, u16 vsi_handle, u32 conds)
543 {
544 	struct ice_flow_prof *p, *prof = NULL;
545 
546 	mutex_lock(&hw->fl_profs_locks[blk]);
547 	list_for_each_entry(p, &hw->fl_profs[blk], l_entry)
548 		if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
549 		    segs_cnt && segs_cnt == p->segs_cnt) {
550 			u8 i;
551 
552 			/* Check for profile-VSI association if specified */
553 			if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
554 			    ice_is_vsi_valid(hw, vsi_handle) &&
555 			    !test_bit(vsi_handle, p->vsis))
556 				continue;
557 
558 			/* Protocol headers must be checked. Matched fields are
559 			 * checked if specified.
560 			 */
561 			for (i = 0; i < segs_cnt; i++)
562 				if (segs[i].hdrs != p->segs[i].hdrs ||
563 				    ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
564 				     segs[i].match != p->segs[i].match))
565 					break;
566 
567 			/* A match is found if all segments are matched */
568 			if (i == segs_cnt) {
569 				prof = p;
570 				break;
571 			}
572 		}
573 	mutex_unlock(&hw->fl_profs_locks[blk]);
574 
575 	return prof;
576 }
577 
578 /**
579  * ice_flow_find_prof_id - Look up a profile with given profile ID
580  * @hw: pointer to the HW struct
581  * @blk: classification stage
582  * @prof_id: unique ID to identify this flow profile
583  */
584 static struct ice_flow_prof *
585 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
586 {
587 	struct ice_flow_prof *p;
588 
589 	list_for_each_entry(p, &hw->fl_profs[blk], l_entry)
590 		if (p->id == prof_id)
591 			return p;
592 
593 	return NULL;
594 }
595 
596 /**
597  * ice_dealloc_flow_entry - Deallocate flow entry memory
598  * @hw: pointer to the HW struct
599  * @entry: flow entry to be removed
600  */
601 static void
602 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
603 {
604 	if (!entry)
605 		return;
606 
607 	if (entry->entry)
608 		devm_kfree(ice_hw_to_dev(hw), entry->entry);
609 
610 	devm_kfree(ice_hw_to_dev(hw), entry);
611 }
612 
613 /**
614  * ice_flow_rem_entry_sync - Remove a flow entry
615  * @hw: pointer to the HW struct
616  * @blk: classification stage
617  * @entry: flow entry to be removed
618  */
619 static enum ice_status
620 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,
621 			struct ice_flow_entry *entry)
622 {
623 	if (!entry)
624 		return ICE_ERR_BAD_PTR;
625 
626 	list_del(&entry->l_entry);
627 
628 	ice_dealloc_flow_entry(hw, entry);
629 
630 	return 0;
631 }
632 
633 /**
634  * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
635  * @hw: pointer to the HW struct
636  * @blk: classification stage
637  * @dir: flow direction
638  * @prof_id: unique ID to identify this flow profile
639  * @segs: array of one or more packet segments that describe the flow
640  * @segs_cnt: number of packet segments provided
641  * @prof: stores the returned flow profile added
642  *
643  * Assumption: the caller has acquired the lock to the profile list
644  */
645 static enum ice_status
646 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
647 		       enum ice_flow_dir dir, u64 prof_id,
648 		       struct ice_flow_seg_info *segs, u8 segs_cnt,
649 		       struct ice_flow_prof **prof)
650 {
651 	struct ice_flow_prof_params params;
652 	enum ice_status status;
653 	u8 i;
654 
655 	if (!prof)
656 		return ICE_ERR_BAD_PTR;
657 
658 	memset(&params, 0, sizeof(params));
659 	params.prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params.prof),
660 				   GFP_KERNEL);
661 	if (!params.prof)
662 		return ICE_ERR_NO_MEMORY;
663 
664 	/* initialize extraction sequence to all invalid (0xff) */
665 	for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
666 		params.es[i].prot_id = ICE_PROT_INVALID;
667 		params.es[i].off = ICE_FV_OFFSET_INVAL;
668 	}
669 
670 	params.blk = blk;
671 	params.prof->id = prof_id;
672 	params.prof->dir = dir;
673 	params.prof->segs_cnt = segs_cnt;
674 
675 	/* Make a copy of the segments that need to be persistent in the flow
676 	 * profile instance
677 	 */
678 	for (i = 0; i < segs_cnt; i++)
679 		memcpy(&params.prof->segs[i], &segs[i], sizeof(*segs));
680 
681 	status = ice_flow_proc_segs(hw, &params);
682 	if (status) {
683 		ice_debug(hw, ICE_DBG_FLOW,
684 			  "Error processing a flow's packet segments\n");
685 		goto out;
686 	}
687 
688 	/* Add a HW profile for this flow profile */
689 	status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes, params.es);
690 	if (status) {
691 		ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
692 		goto out;
693 	}
694 
695 	INIT_LIST_HEAD(&params.prof->entries);
696 	mutex_init(&params.prof->entries_lock);
697 	*prof = params.prof;
698 
699 out:
700 	if (status)
701 		devm_kfree(ice_hw_to_dev(hw), params.prof);
702 
703 	return status;
704 }
705 
706 /**
707  * ice_flow_rem_prof_sync - remove a flow profile
708  * @hw: pointer to the hardware structure
709  * @blk: classification stage
710  * @prof: pointer to flow profile to remove
711  *
712  * Assumption: the caller has acquired the lock to the profile list
713  */
714 static enum ice_status
715 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
716 		       struct ice_flow_prof *prof)
717 {
718 	enum ice_status status;
719 
720 	/* Remove all remaining flow entries before removing the flow profile */
721 	if (!list_empty(&prof->entries)) {
722 		struct ice_flow_entry *e, *t;
723 
724 		mutex_lock(&prof->entries_lock);
725 
726 		list_for_each_entry_safe(e, t, &prof->entries, l_entry) {
727 			status = ice_flow_rem_entry_sync(hw, blk, e);
728 			if (status)
729 				break;
730 		}
731 
732 		mutex_unlock(&prof->entries_lock);
733 	}
734 
735 	/* Remove all hardware profiles associated with this flow profile */
736 	status = ice_rem_prof(hw, blk, prof->id);
737 	if (!status) {
738 		list_del(&prof->l_entry);
739 		mutex_destroy(&prof->entries_lock);
740 		devm_kfree(ice_hw_to_dev(hw), prof);
741 	}
742 
743 	return status;
744 }
745 
746 /**
747  * ice_flow_assoc_prof - associate a VSI with a flow profile
748  * @hw: pointer to the hardware structure
749  * @blk: classification stage
750  * @prof: pointer to flow profile
751  * @vsi_handle: software VSI handle
752  *
753  * Assumption: the caller has acquired the lock to the profile list
754  * and the software VSI handle has been validated
755  */
756 static enum ice_status
757 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
758 		    struct ice_flow_prof *prof, u16 vsi_handle)
759 {
760 	enum ice_status status = 0;
761 
762 	if (!test_bit(vsi_handle, prof->vsis)) {
763 		status = ice_add_prof_id_flow(hw, blk,
764 					      ice_get_hw_vsi_num(hw,
765 								 vsi_handle),
766 					      prof->id);
767 		if (!status)
768 			set_bit(vsi_handle, prof->vsis);
769 		else
770 			ice_debug(hw, ICE_DBG_FLOW,
771 				  "HW profile add failed, %d\n",
772 				  status);
773 	}
774 
775 	return status;
776 }
777 
778 /**
779  * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
780  * @hw: pointer to the hardware structure
781  * @blk: classification stage
782  * @prof: pointer to flow profile
783  * @vsi_handle: software VSI handle
784  *
785  * Assumption: the caller has acquired the lock to the profile list
786  * and the software VSI handle has been validated
787  */
788 static enum ice_status
789 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
790 		       struct ice_flow_prof *prof, u16 vsi_handle)
791 {
792 	enum ice_status status = 0;
793 
794 	if (test_bit(vsi_handle, prof->vsis)) {
795 		status = ice_rem_prof_id_flow(hw, blk,
796 					      ice_get_hw_vsi_num(hw,
797 								 vsi_handle),
798 					      prof->id);
799 		if (!status)
800 			clear_bit(vsi_handle, prof->vsis);
801 		else
802 			ice_debug(hw, ICE_DBG_FLOW,
803 				  "HW profile remove failed, %d\n",
804 				  status);
805 	}
806 
807 	return status;
808 }
809 
810 /**
811  * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
812  * @hw: pointer to the HW struct
813  * @blk: classification stage
814  * @dir: flow direction
815  * @prof_id: unique ID to identify this flow profile
816  * @segs: array of one or more packet segments that describe the flow
817  * @segs_cnt: number of packet segments provided
818  * @prof: stores the returned flow profile added
819  */
820 enum ice_status
821 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
822 		  u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
823 		  struct ice_flow_prof **prof)
824 {
825 	enum ice_status status;
826 
827 	if (segs_cnt > ICE_FLOW_SEG_MAX)
828 		return ICE_ERR_MAX_LIMIT;
829 
830 	if (!segs_cnt)
831 		return ICE_ERR_PARAM;
832 
833 	if (!segs)
834 		return ICE_ERR_BAD_PTR;
835 
836 	status = ice_flow_val_hdrs(segs, segs_cnt);
837 	if (status)
838 		return status;
839 
840 	mutex_lock(&hw->fl_profs_locks[blk]);
841 
842 	status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
843 					prof);
844 	if (!status)
845 		list_add(&(*prof)->l_entry, &hw->fl_profs[blk]);
846 
847 	mutex_unlock(&hw->fl_profs_locks[blk]);
848 
849 	return status;
850 }
851 
852 /**
853  * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
854  * @hw: pointer to the HW struct
855  * @blk: the block for which the flow profile is to be removed
856  * @prof_id: unique ID of the flow profile to be removed
857  */
858 enum ice_status
859 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
860 {
861 	struct ice_flow_prof *prof;
862 	enum ice_status status;
863 
864 	mutex_lock(&hw->fl_profs_locks[blk]);
865 
866 	prof = ice_flow_find_prof_id(hw, blk, prof_id);
867 	if (!prof) {
868 		status = ICE_ERR_DOES_NOT_EXIST;
869 		goto out;
870 	}
871 
872 	/* prof becomes invalid after the call */
873 	status = ice_flow_rem_prof_sync(hw, blk, prof);
874 
875 out:
876 	mutex_unlock(&hw->fl_profs_locks[blk]);
877 
878 	return status;
879 }
880 
881 /**
882  * ice_flow_add_entry - Add a flow entry
883  * @hw: pointer to the HW struct
884  * @blk: classification stage
885  * @prof_id: ID of the profile to add a new flow entry to
886  * @entry_id: unique ID to identify this flow entry
887  * @vsi_handle: software VSI handle for the flow entry
888  * @prio: priority of the flow entry
889  * @data: pointer to a data buffer containing flow entry's match values/masks
890  * @entry_h: pointer to buffer that receives the new flow entry's handle
891  */
892 enum ice_status
893 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
894 		   u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
895 		   void *data, u64 *entry_h)
896 {
897 	struct ice_flow_entry *e = NULL;
898 	struct ice_flow_prof *prof;
899 	enum ice_status status;
900 
901 	/* No flow entry data is expected for RSS */
902 	if (!entry_h || (!data && blk != ICE_BLK_RSS))
903 		return ICE_ERR_BAD_PTR;
904 
905 	if (!ice_is_vsi_valid(hw, vsi_handle))
906 		return ICE_ERR_PARAM;
907 
908 	mutex_lock(&hw->fl_profs_locks[blk]);
909 
910 	prof = ice_flow_find_prof_id(hw, blk, prof_id);
911 	if (!prof) {
912 		status = ICE_ERR_DOES_NOT_EXIST;
913 	} else {
914 		/* Allocate memory for the entry being added and associate
915 		 * the VSI to the found flow profile
916 		 */
917 		e = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*e), GFP_KERNEL);
918 		if (!e)
919 			status = ICE_ERR_NO_MEMORY;
920 		else
921 			status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
922 	}
923 
924 	mutex_unlock(&hw->fl_profs_locks[blk]);
925 	if (status)
926 		goto out;
927 
928 	e->id = entry_id;
929 	e->vsi_handle = vsi_handle;
930 	e->prof = prof;
931 	e->priority = prio;
932 
933 	switch (blk) {
934 	case ICE_BLK_FD:
935 	case ICE_BLK_RSS:
936 		break;
937 	default:
938 		status = ICE_ERR_NOT_IMPL;
939 		goto out;
940 	}
941 
942 	mutex_lock(&prof->entries_lock);
943 	list_add(&e->l_entry, &prof->entries);
944 	mutex_unlock(&prof->entries_lock);
945 
946 	*entry_h = ICE_FLOW_ENTRY_HNDL(e);
947 
948 out:
949 	if (status && e) {
950 		if (e->entry)
951 			devm_kfree(ice_hw_to_dev(hw), e->entry);
952 		devm_kfree(ice_hw_to_dev(hw), e);
953 	}
954 
955 	return status;
956 }
957 
958 /**
959  * ice_flow_rem_entry - Remove a flow entry
960  * @hw: pointer to the HW struct
961  * @blk: classification stage
962  * @entry_h: handle to the flow entry to be removed
963  */
964 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
965 				   u64 entry_h)
966 {
967 	struct ice_flow_entry *entry;
968 	struct ice_flow_prof *prof;
969 	enum ice_status status = 0;
970 
971 	if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
972 		return ICE_ERR_PARAM;
973 
974 	entry = ICE_FLOW_ENTRY_PTR(entry_h);
975 
976 	/* Retain the pointer to the flow profile as the entry will be freed */
977 	prof = entry->prof;
978 
979 	if (prof) {
980 		mutex_lock(&prof->entries_lock);
981 		status = ice_flow_rem_entry_sync(hw, blk, entry);
982 		mutex_unlock(&prof->entries_lock);
983 	}
984 
985 	return status;
986 }
987 
988 /**
989  * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
990  * @seg: packet segment the field being set belongs to
991  * @fld: field to be set
992  * @field_type: type of the field
993  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
994  *           entry's input buffer
995  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
996  *            input buffer
997  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
998  *            entry's input buffer
999  *
 * This helper function stores information about a field being matched,
 * including the type of the field and the locations of the value to match,
 * the mask, and the upper-bound value within the input buffer for a flow
 * entry.
1003  * This function should only be used for fixed-size data structures.
1004  *
1005  * This function also opportunistically determines the protocol headers to be
1006  * present based on the fields being set. Some fields cannot be used alone to
1007  * determine the protocol headers present. Sometimes, fields for particular
1008  * protocol headers are not matched. In those cases, the protocol headers
1009  * must be explicitly set.
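 *
 * For example, setting ICE_FLOW_FIELD_IDX_UDP_SRC_PORT marks the field in
 * seg->match and, via ICE_FLOW_SET_HDRS(), records ICE_FLOW_SEG_HDR_UDP for
 * the segment, since that is the header listed for this field in
 * ice_flds_info[].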
1010  */
1011 static void
1012 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
1013 		     enum ice_flow_fld_match_type field_type, u16 val_loc,
1014 		     u16 mask_loc, u16 last_loc)
1015 {
1016 	u64 bit = BIT_ULL(fld);
1017 
1018 	seg->match |= bit;
1019 	if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
1020 		seg->range |= bit;
1021 
1022 	seg->fields[fld].type = field_type;
1023 	seg->fields[fld].src.val = val_loc;
1024 	seg->fields[fld].src.mask = mask_loc;
1025 	seg->fields[fld].src.last = last_loc;
1026 
1027 	ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
1028 }
1029 
1030 /**
1031  * ice_flow_set_fld - specifies locations of field from entry's input buffer
1032  * @seg: packet segment the field being set belongs to
1033  * @fld: field to be set
1034  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
1035  *           entry's input buffer
1036  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
1037  *            input buffer
1038  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
1039  *            entry's input buffer
1040  * @range: indicate if field being matched is to be in a range
1041  *
1042  * This function specifies the locations, in the form of byte offsets from the
1043  * start of the input buffer for a flow entry, from where the value to match,
1044  * the mask value, and upper value can be extracted. These locations are then
1045  * stored in the flow profile. When adding a flow entry associated with the
1046  * flow profile, these locations will be used to quickly extract the values and
1047  * create the content of a match entry. This function should only be used for
1048  * fixed-size data structures.
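 *
 * Usage sketch, with hypothetical input-buffer offsets: match the TCP source
 * port, reading the match value at byte offset 0 and the mask at byte offset 2
 * of the entry's input buffer, with no upper-bound value:
 *
 *	ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT, 0, 2,
 *			 ICE_FLOW_FLD_OFF_INVAL, false);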
1049  */
1050 void
1051 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
1052 		 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
1053 {
1054 	enum ice_flow_fld_match_type t = range ?
1055 		ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
1056 
1057 	ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
1058 }
1059 
1060 /**
1061  * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
1062  * @seg: packet segment the field being set belongs to
1063  * @off: offset of the raw field from the beginning of the segment in bytes
1064  * @len: length of the raw pattern to be matched
1065  * @val_loc: location of the value to match from entry's input buffer
1066  * @mask_loc: location of mask value from entry's input buffer
1067  *
 * This function specifies the offset of the raw field to be matched from the
1069  * beginning of the specified packet segment, and the locations, in the form of
1070  * byte offsets from the start of the input buffer for a flow entry, from where
 * the value to match and the mask value are to be extracted.
1072  * then stored in the flow profile. When adding flow entries to the associated
1073  * flow profile, these locations can be used to quickly extract the values to
1074  * create the content of a match entry. This function should only be used for
1075  * fixed-size data structures.
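 *
 * Usage sketch, with hypothetical offsets: match a 4-byte raw pattern located
 * 6 bytes into the segment, taking the pattern value from byte offset 0 and
 * its mask from byte offset 4 of the entry's input buffer:
 *
 *	ice_flow_add_fld_raw(seg, 6, 4, 0, 4);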
1076  */
1077 void
1078 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
1079 		     u16 val_loc, u16 mask_loc)
1080 {
1081 	if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
1082 		seg->raws[seg->raws_cnt].off = off;
1083 		seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
1084 		seg->raws[seg->raws_cnt].info.src.val = val_loc;
1085 		seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
1086 		/* The "last" field is used to store the length of the field */
1087 		seg->raws[seg->raws_cnt].info.src.last = len;
1088 	}
1089 
1090 	/* Overflows of "raws" will be handled as an error condition later in
1091 	 * the flow when this information is processed.
1092 	 */
1093 	seg->raws_cnt++;
1094 }
1095 
1096 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
1097 	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
1098 
1099 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
1100 	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
1101 
1102 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
1103 	(ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
1104 	 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
1105 
1106 /**
1107  * ice_flow_set_rss_seg_info - setup packet segments for RSS
1108  * @segs: pointer to the flow field segment(s)
1109  * @hash_fields: fields to be hashed on for the segment(s)
1110  * @flow_hdr: protocol header fields within a packet segment
1111  *
 * Helper function to extract fields from the hash bitmap and use the flow
 * header value to set the flow field segment, for further use when adding or
 * removing a flow profile entry.
1115  */
1116 static enum ice_status
1117 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
1118 			  u32 flow_hdr)
1119 {
1120 	u64 val;
1121 	u8 i;
1122 
1123 	for_each_set_bit(i, (unsigned long *)&hash_fields,
1124 			 ICE_FLOW_FIELD_IDX_MAX)
1125 		ice_flow_set_fld(segs, (enum ice_flow_field)i,
1126 				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1127 				 ICE_FLOW_FLD_OFF_INVAL, false);
1128 
1129 	ICE_FLOW_SET_HDRS(segs, flow_hdr);
1130 
1131 	if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS)
1132 		return ICE_ERR_PARAM;
1133 
1134 	val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
1135 	if (val && !is_power_of_2(val))
1136 		return ICE_ERR_CFG;
1137 
1138 	val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
1139 	if (val && !is_power_of_2(val))
1140 		return ICE_ERR_CFG;
1141 
1142 	return 0;
1143 }
1144 
1145 /**
1146  * ice_rem_vsi_rss_list - remove VSI from RSS list
1147  * @hw: pointer to the hardware structure
1148  * @vsi_handle: software VSI handle
1149  *
1150  * Remove the VSI from all RSS configurations in the list.
1151  */
1152 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
1153 {
1154 	struct ice_rss_cfg *r, *tmp;
1155 
1156 	if (list_empty(&hw->rss_list_head))
1157 		return;
1158 
1159 	mutex_lock(&hw->rss_locks);
1160 	list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
1161 		if (test_and_clear_bit(vsi_handle, r->vsis))
1162 			if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
1163 				list_del(&r->l_entry);
1164 				devm_kfree(ice_hw_to_dev(hw), r);
1165 			}
1166 	mutex_unlock(&hw->rss_locks);
1167 }
1168 
1169 /**
1170  * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
1171  * @hw: pointer to the hardware structure
1172  * @vsi_handle: software VSI handle
1173  *
1174  * This function will iterate through all flow profiles and disassociate
1175  * the VSI from that profile. If the flow profile has no VSIs it will
1176  * be removed.
1177  */
1178 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
1179 {
1180 	const enum ice_block blk = ICE_BLK_RSS;
1181 	struct ice_flow_prof *p, *t;
1182 	enum ice_status status = 0;
1183 
1184 	if (!ice_is_vsi_valid(hw, vsi_handle))
1185 		return ICE_ERR_PARAM;
1186 
1187 	if (list_empty(&hw->fl_profs[blk]))
1188 		return 0;
1189 
1190 	mutex_lock(&hw->fl_profs_locks[blk]);
1191 	list_for_each_entry_safe(p, t, &hw->fl_profs[blk], l_entry)
1192 		if (test_bit(vsi_handle, p->vsis)) {
1193 			status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
1194 			if (status)
1195 				break;
1196 
1197 			if (bitmap_empty(p->vsis, ICE_MAX_VSI)) {
1198 				status = ice_flow_rem_prof_sync(hw, blk, p);
1199 				if (status)
1200 					break;
1201 			}
1202 		}
1203 	mutex_unlock(&hw->fl_profs_locks[blk]);
1204 
1205 	return status;
1206 }
1207 
1208 /**
1209  * ice_rem_rss_list - remove RSS configuration from list
1210  * @hw: pointer to the hardware structure
1211  * @vsi_handle: software VSI handle
1212  * @prof: pointer to flow profile
1213  *
1214  * Assumption: lock has already been acquired for RSS list
1215  */
1216 static void
1217 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
1218 {
1219 	struct ice_rss_cfg *r, *tmp;
1220 
	/* Search for RSS hash fields associated with the VSI that match the
	 * hash configuration of the flow profile. If found, clear the VSI from
	 * the entry and, once no VSIs remain, delete the entry from the RSS
	 * list.
	 */
1225 	list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
1226 		if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
1227 		    r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
1228 			clear_bit(vsi_handle, r->vsis);
1229 			if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
1230 				list_del(&r->l_entry);
1231 				devm_kfree(ice_hw_to_dev(hw), r);
1232 			}
1233 			return;
1234 		}
1235 }
1236 
1237 /**
1238  * ice_add_rss_list - add RSS configuration to list
1239  * @hw: pointer to the hardware structure
1240  * @vsi_handle: software VSI handle
1241  * @prof: pointer to flow profile
1242  *
1243  * Assumption: lock has already been acquired for RSS list
1244  */
1245 static enum ice_status
1246 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
1247 {
1248 	struct ice_rss_cfg *r, *rss_cfg;
1249 
1250 	list_for_each_entry(r, &hw->rss_list_head, l_entry)
1251 		if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
1252 		    r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
1253 			set_bit(vsi_handle, r->vsis);
1254 			return 0;
1255 		}
1256 
1257 	rss_cfg = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rss_cfg),
1258 			       GFP_KERNEL);
1259 	if (!rss_cfg)
1260 		return ICE_ERR_NO_MEMORY;
1261 
1262 	rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
1263 	rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
1264 	set_bit(vsi_handle, rss_cfg->vsis);
1265 
1266 	list_add_tail(&rss_cfg->l_entry, &hw->rss_list_head);
1267 
1268 	return 0;
1269 }
1270 
1271 #define ICE_FLOW_PROF_HASH_S	0
1272 #define ICE_FLOW_PROF_HASH_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
1273 #define ICE_FLOW_PROF_HDR_S	32
1274 #define ICE_FLOW_PROF_HDR_M	(0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
1275 #define ICE_FLOW_PROF_ENCAP_S	63
1276 #define ICE_FLOW_PROF_ENCAP_M	(BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
1277 
1278 #define ICE_RSS_OUTER_HEADERS	1
1279 #define ICE_RSS_INNER_HEADERS	2
1280 
1281 /* Flow profile ID format:
1282  * [0:31] - Packet match fields
1283  * [32:62] - Protocol header
1284  * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
1285  */
1286 #define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
1287 	(u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
1288 	      (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
1289 	      ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
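
/* For example, ICE_FLOW_GEN_PROFID(hashed_flds, segs[segs_cnt - 1].hdrs,
 * ICE_RSS_INNER_HEADERS) packs the hash fields into bits [0:31] and the
 * protocol header bits into [32:62], and sets bit 63 because segs_cnt is
 * greater than one, i.e. the profile describes a tunneled flow.
 */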
1290 
1291 /**
1292  * ice_add_rss_cfg_sync - add an RSS configuration
1293  * @hw: pointer to the hardware structure
1294  * @vsi_handle: software VSI handle
1295  * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
1296  * @addl_hdrs: protocol header fields
1297  * @segs_cnt: packet segment count
1298  *
1299  * Assumption: lock has already been acquired for RSS list
1300  */
1301 static enum ice_status
1302 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
1303 		     u32 addl_hdrs, u8 segs_cnt)
1304 {
1305 	const enum ice_block blk = ICE_BLK_RSS;
1306 	struct ice_flow_prof *prof = NULL;
1307 	struct ice_flow_seg_info *segs;
1308 	enum ice_status status;
1309 
1310 	if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
1311 		return ICE_ERR_PARAM;
1312 
1313 	segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
1314 	if (!segs)
1315 		return ICE_ERR_NO_MEMORY;
1316 
1317 	/* Construct the packet segment info from the hashed fields */
1318 	status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
1319 					   addl_hdrs);
1320 	if (status)
1321 		goto exit;
1322 
	/* Search for a flow profile that has matching headers, hash fields,
	 * and has the input VSI associated with it. If found, no further
	 * operations are required, so exit.
1326 	 */
1327 	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
1328 					vsi_handle,
1329 					ICE_FLOW_FIND_PROF_CHK_FLDS |
1330 					ICE_FLOW_FIND_PROF_CHK_VSI);
1331 	if (prof)
1332 		goto exit;
1333 
1334 	/* Check if a flow profile exists with the same protocol headers and
	 * associated with the input VSI. If so, disassociate the VSI from
1336 	 * this profile. The VSI will be added to a new profile created with
1337 	 * the protocol header and new hash field configuration.
1338 	 */
1339 	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
1340 					vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
1341 	if (prof) {
1342 		status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
1343 		if (!status)
1344 			ice_rem_rss_list(hw, vsi_handle, prof);
1345 		else
1346 			goto exit;
1347 
1348 		/* Remove profile if it has no VSIs associated */
1349 		if (bitmap_empty(prof->vsis, ICE_MAX_VSI)) {
1350 			status = ice_flow_rem_prof(hw, blk, prof->id);
1351 			if (status)
1352 				goto exit;
1353 		}
1354 	}
1355 
	/* Search for a profile that has the same match fields only. If one
	 * exists, then associate the VSI with this profile.
1358 	 */
1359 	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
1360 					vsi_handle,
1361 					ICE_FLOW_FIND_PROF_CHK_FLDS);
1362 	if (prof) {
1363 		status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
1364 		if (!status)
1365 			status = ice_add_rss_list(hw, vsi_handle, prof);
1366 		goto exit;
1367 	}
1368 
1369 	/* Create a new flow profile with generated profile and packet
1370 	 * segment information.
1371 	 */
1372 	status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
1373 				   ICE_FLOW_GEN_PROFID(hashed_flds,
1374 						       segs[segs_cnt - 1].hdrs,
1375 						       segs_cnt),
1376 				   segs, segs_cnt, &prof);
1377 	if (status)
1378 		goto exit;
1379 
1380 	status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
1381 	/* If association to a new flow profile failed then this profile can
1382 	 * be removed.
1383 	 */
1384 	if (status) {
1385 		ice_flow_rem_prof(hw, blk, prof->id);
1386 		goto exit;
1387 	}
1388 
1389 	status = ice_add_rss_list(hw, vsi_handle, prof);
1390 
1391 exit:
1392 	kfree(segs);
1393 	return status;
1394 }
1395 
1396 /**
1397  * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
1398  * @hw: pointer to the hardware structure
1399  * @vsi_handle: software VSI handle
1400  * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
1401  * @addl_hdrs: protocol header fields
1402  *
 * This function will generate a flow profile based on the input fields to
 * hash on and the flow type, and will use the VSI number to add a flow entry
 * to the profile.
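 *
 * Usage sketch (illustrative values): enable 4-tuple hashing for TCP over
 * IPv4 on a VSI:
 *
 *	ice_add_rss_cfg(hw, vsi_handle,
 *			ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT,
 *			ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP);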
1406  */
1407 enum ice_status
1408 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
1409 		u32 addl_hdrs)
1410 {
1411 	enum ice_status status;
1412 
1413 	if (hashed_flds == ICE_HASH_INVALID ||
1414 	    !ice_is_vsi_valid(hw, vsi_handle))
1415 		return ICE_ERR_PARAM;
1416 
1417 	mutex_lock(&hw->rss_locks);
1418 	status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
1419 				      ICE_RSS_OUTER_HEADERS);
1420 	if (!status)
1421 		status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
1422 					      addl_hdrs, ICE_RSS_INNER_HEADERS);
1423 	mutex_unlock(&hw->rss_locks);
1424 
1425 	return status;
1426 }
1427 
1428 /* Mapping of AVF hash bit fields to an L3-L4 hash combination.
 * As the ice_flow_avf_hdr_field values represent individual bit shifts in a
 * hash, convert them to the appropriate L3 and L4 flow hash values.
1431  */
1432 #define ICE_FLOW_AVF_RSS_IPV4_MASKS \
1433 	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
1434 	 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4))
1435 #define ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS \
1436 	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
1437 	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP))
1438 #define ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS \
1439 	(BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
1440 	 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
1441 	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP))
1442 #define ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS \
1443 	(ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS | \
1444 	 ICE_FLOW_AVF_RSS_IPV4_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP))
1445 
1446 #define ICE_FLOW_AVF_RSS_IPV6_MASKS \
1447 	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
1448 	 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6))
1449 #define ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS \
1450 	(BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
1451 	 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP) | \
1452 	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP))
1453 #define ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS \
1454 	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
1455 	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP))
1456 #define ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS \
1457 	(ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS | \
1458 	 ICE_FLOW_AVF_RSS_IPV6_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP))
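
/* For example, a request with BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP) set is
 * serviced below by an ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT RSS
 * configuration, while the implied ICE_FLOW_AVF_RSS_IPV4_MASKS bits also
 * produce a plain ICE_FLOW_HASH_IPV4 configuration.
 */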
1459 
1460 /**
1461  * ice_add_avf_rss_cfg - add an RSS configuration for AVF driver
1462  * @hw: pointer to the hardware structure
1463  * @vsi_handle: software VSI handle
1464  * @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure
1465  *
1466  * This function will take the hash bitmap provided by the AVF driver via a
1467  * message, convert it to ICE-compatible values, and configure RSS flow
1468  * profiles.
1469  */
1470 enum ice_status
1471 ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
1472 {
1473 	enum ice_status status = 0;
1474 	u64 hash_flds;
1475 
1476 	if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
1477 	    !ice_is_vsi_valid(hw, vsi_handle))
1478 		return ICE_ERR_PARAM;
1479 
1480 	/* Make sure no unsupported bits are specified */
1481 	if (avf_hash & ~(ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS |
1482 			 ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS))
1483 		return ICE_ERR_CFG;
1484 
1485 	hash_flds = avf_hash;
1486 
1487 	/* Always create an L3 RSS configuration for any L4 RSS configuration */
1488 	if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS)
1489 		hash_flds |= ICE_FLOW_AVF_RSS_IPV4_MASKS;
1490 
1491 	if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS)
1492 		hash_flds |= ICE_FLOW_AVF_RSS_IPV6_MASKS;
1493 
1494 	/* Create the corresponding RSS configuration for each valid hash bit */
1495 	while (hash_flds) {
1496 		u64 rss_hash = ICE_HASH_INVALID;
1497 
1498 		if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS) {
1499 			if (hash_flds & ICE_FLOW_AVF_RSS_IPV4_MASKS) {
1500 				rss_hash = ICE_FLOW_HASH_IPV4;
1501 				hash_flds &= ~ICE_FLOW_AVF_RSS_IPV4_MASKS;
1502 			} else if (hash_flds &
1503 				   ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS) {
1504 				rss_hash = ICE_FLOW_HASH_IPV4 |
1505 					ICE_FLOW_HASH_TCP_PORT;
1506 				hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS;
1507 			} else if (hash_flds &
1508 				   ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS) {
1509 				rss_hash = ICE_FLOW_HASH_IPV4 |
1510 					ICE_FLOW_HASH_UDP_PORT;
1511 				hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS;
1512 			} else if (hash_flds &
1513 				   BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP)) {
1514 				rss_hash = ICE_FLOW_HASH_IPV4 |
1515 					ICE_FLOW_HASH_SCTP_PORT;
1516 				hash_flds &=
1517 					~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP);
1518 			}
1519 		} else if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS) {
1520 			if (hash_flds & ICE_FLOW_AVF_RSS_IPV6_MASKS) {
1521 				rss_hash = ICE_FLOW_HASH_IPV6;
1522 				hash_flds &= ~ICE_FLOW_AVF_RSS_IPV6_MASKS;
1523 			} else if (hash_flds &
1524 				   ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS) {
1525 				rss_hash = ICE_FLOW_HASH_IPV6 |
1526 					ICE_FLOW_HASH_TCP_PORT;
1527 				hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS;
1528 			} else if (hash_flds &
1529 				   ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS) {
1530 				rss_hash = ICE_FLOW_HASH_IPV6 |
1531 					ICE_FLOW_HASH_UDP_PORT;
1532 				hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS;
1533 			} else if (hash_flds &
1534 				   BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP)) {
1535 				rss_hash = ICE_FLOW_HASH_IPV6 |
1536 					ICE_FLOW_HASH_SCTP_PORT;
1537 				hash_flds &=
1538 					~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP);
1539 			}
1540 		}
1541 
1542 		if (rss_hash == ICE_HASH_INVALID)
1543 			return ICE_ERR_OUT_OF_RANGE;
1544 
1545 		status = ice_add_rss_cfg(hw, vsi_handle, rss_hash,
1546 					 ICE_FLOW_SEG_HDR_NONE);
1547 		if (status)
1548 			break;
1549 	}
1550 
1551 	return status;
1552 }
1553 
1554 /**
1555  * ice_replay_rss_cfg - replay RSS configurations associated with VSI
1556  * @hw: pointer to the hardware structure
1557  * @vsi_handle: software VSI handle
1558  */
1559 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
1560 {
1561 	enum ice_status status = 0;
1562 	struct ice_rss_cfg *r;
1563 
1564 	if (!ice_is_vsi_valid(hw, vsi_handle))
1565 		return ICE_ERR_PARAM;
1566 
1567 	mutex_lock(&hw->rss_locks);
1568 	list_for_each_entry(r, &hw->rss_list_head, l_entry) {
1569 		if (test_bit(vsi_handle, r->vsis)) {
1570 			status = ice_add_rss_cfg_sync(hw, vsi_handle,
1571 						      r->hashed_flds,
1572 						      r->packet_hdr,
1573 						      ICE_RSS_OUTER_HEADERS);
1574 			if (status)
1575 				break;
1576 			status = ice_add_rss_cfg_sync(hw, vsi_handle,
1577 						      r->hashed_flds,
1578 						      r->packet_hdr,
1579 						      ICE_RSS_INNER_HEADERS);
1580 			if (status)
1581 				break;
1582 		}
1583 	}
1584 	mutex_unlock(&hw->rss_locks);
1585 
1586 	return status;
1587 }
1588 
1589 /**
1590  * ice_get_rss_cfg - returns hashed fields for the given header types
1591  * @hw: pointer to the hardware structure
1592  * @vsi_handle: software VSI handle
1593  * @hdrs: protocol header type
1594  *
 * This function will return the match fields of the first instance of a flow
 * profile that has the given header types and contains the input VSI.
1597  */
1598 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
1599 {
1600 	struct ice_rss_cfg *r, *rss_cfg = NULL;
1601 
	/* verify that the protocol header is non-zero and the VSI is valid */
1603 	if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
1604 		return ICE_HASH_INVALID;
1605 
1606 	mutex_lock(&hw->rss_locks);
1607 	list_for_each_entry(r, &hw->rss_list_head, l_entry)
1608 		if (test_bit(vsi_handle, r->vsis) &&
1609 		    r->packet_hdr == hdrs) {
1610 			rss_cfg = r;
1611 			break;
1612 		}
1613 	mutex_unlock(&hw->rss_locks);
1614 
1615 	return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;
1616 }
1617