1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019, Intel Corporation. */
3 
4 #include "ice_common.h"
5 #include "ice_flow.h"
6 
/* Describe properties of a protocol header field */
struct ice_flow_field_info {
	enum ice_flow_seg_hdr hdr;	/* Protocol header this field belongs to */
	s16 off;	/* Offset from start of a protocol header, in bits */
	u16 size;	/* Size of fields in bits */
};
13 
/* Initializer for struct ice_flow_field_info; converts the byte-based
 * offset/size arguments into the bit units the struct stores.
 */
#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
}
19 
/* Table containing properties of supported protocol header fields.
 * Indexed by enum ice_flow_field; entry order must match that enum.
 * Offsets are byte offsets from the start of the owning protocol header
 * (e.g. IPv4 source address at byte 12 of the IPv4 header).
 */
static const
struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
	/* IPv4 / IPv6 */
	/* ICE_FLOW_FIELD_IDX_IPV4_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, sizeof(struct in_addr)),
	/* ICE_FLOW_FIELD_IDX_IPV4_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, sizeof(struct in_addr)),
	/* ICE_FLOW_FIELD_IDX_IPV6_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, sizeof(struct in6_addr)),
	/* ICE_FLOW_FIELD_IDX_IPV6_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, sizeof(struct in6_addr)),
	/* Transport */
	/* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, sizeof(__be16)),
	/* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, sizeof(__be16)),
	/* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, sizeof(__be16)),
	/* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, sizeof(__be16)),
	/* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, sizeof(__be16)),
	/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, sizeof(__be16)),
	/* GRE */
	/* ICE_FLOW_FIELD_IDX_GRE_KEYID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12,
			  sizeof_field(struct gre_full_hdr, key)),
};
50 
/* Bitmaps indicating relevant packet types for a particular protocol header
 *
 * Bit N set means hardware packet type N is relevant; these tables are
 * ANDed into params->ptypes via bitmap_and() in ice_flow_proc_seg_hdrs().
 *
 * Packet types for packets with an Outer/First/Single IPv4 header
 */
static const u32 ice_ptypes_ipv4_ofos[] = {
	0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
65 
/* Packet types for packets with an Innermost/Last IPv4 header
 * (used for non-first segments; see ice_flow_proc_seg_hdrs())
 */
static const u32 ice_ptypes_ipv4_il[] = {
	0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
	0x0000000E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
77 
/* Packet types for packets with an Outer/First/Single IPv6 header
 * (ptype bitmap; one bit per hardware packet type)
 */
static const u32 ice_ptypes_ipv6_ofos[] = {
	0x00000000, 0x00000000, 0x77000000, 0x10002000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
89 
/* Packet types for packets with an Innermost/Last IPv6 header
 * (ptype bitmap; one bit per hardware packet type)
 */
static const u32 ice_ptypes_ipv6_il[] = {
	0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
	0x00000770, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
101 
/* Packet types for packets with an Outer/First/Single IPv4 header - no L4
 * (selected when the segment specifies IPv4 but no TCP/UDP/SCTP)
 */
static const u32 ice_ipv4_ofos_no_l4[] = {
	0x10C00000, 0x04000800, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
113 
/* Packet types for packets with an Innermost/Last IPv4 header - no L4
 * (selected when the segment specifies IPv4 but no TCP/UDP/SCTP)
 */
static const u32 ice_ipv4_il_no_l4[] = {
	0x60000000, 0x18043008, 0x80000002, 0x6010c021,
	0x00000008, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
125 
/* Packet types for packets with an Outer/First/Single IPv6 header - no L4
 * (selected when the segment specifies IPv6 but no TCP/UDP/SCTP)
 */
static const u32 ice_ipv6_ofos_no_l4[] = {
	0x00000000, 0x00000000, 0x43000000, 0x10002000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
137 
/* Packet types for packets with an Innermost/Last IPv6 header - no L4
 * (selected when the segment specifies IPv6 but no TCP/UDP/SCTP)
 */
static const u32 ice_ipv6_il_no_l4[] = {
	0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
	0x00000430, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
149 
/* UDP Packet types for non-tunneled packets or tunneled
 * packets with inner UDP.
 * (ptype bitmap; one bit per hardware packet type)
 */
static const u32 ice_ptypes_udp_il[] = {
	0x81000000, 0x20204040, 0x04000010, 0x80810102,
	0x00000040, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
163 
/* Packet types for packets with an Innermost/Last TCP header
 * (ptype bitmap; one bit per hardware packet type)
 */
static const u32 ice_ptypes_tcp_il[] = {
	0x04000000, 0x80810102, 0x10000040, 0x02040408,
	0x00000102, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
175 
/* Packet types for packets with an Innermost/Last SCTP header
 * (ptype bitmap; one bit per hardware packet type)
 */
static const u32 ice_ptypes_sctp_il[] = {
	0x08000000, 0x01020204, 0x20000081, 0x04080810,
	0x00000204, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
187 
/* Packet types for packets with an Outermost/First GRE header
 * (only applied to the first segment; see ice_flow_proc_seg_hdrs())
 */
static const u32 ice_ptypes_gre_of[] = {
	0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
	0x0000017E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
199 
/* Manage parameters and info. used during the creation of a flow profile */
struct ice_flow_prof_params {
	enum ice_block blk;	/* Classification stage the profile targets */
	u16 entry_length; /* # of bytes formatted entry will require */
	u8 es_cnt;	/* Number of extraction sequence entries filled in es[] */
	struct ice_flow_prof *prof;	/* Profile being built */

	/* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
	 * This will give us the direction flags.
	 */
	struct ice_fv_word es[ICE_MAX_FV_WORDS];	/* Extraction sequence */
	/* Packet types the profile matches; narrowed per segment headers */
	DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX);
};
213 
/* Groups of mutually exclusive header flags: a segment may specify at most
 * one L3 and one L4 protocol (enforced in ice_flow_val_hdrs()).
 */
#define ICE_FLOW_SEG_HDRS_L3_MASK	\
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
#define ICE_FLOW_SEG_HDRS_L4_MASK	\
	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
218 
219 /**
220  * ice_flow_val_hdrs - validates packet segments for valid protocol headers
221  * @segs: array of one or more packet segments that describe the flow
222  * @segs_cnt: number of packet segments provided
223  */
224 static enum ice_status
225 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
226 {
227 	u8 i;
228 
229 	for (i = 0; i < segs_cnt; i++) {
230 		/* Multiple L3 headers */
231 		if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
232 		    !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
233 			return ICE_ERR_PARAM;
234 
235 		/* Multiple L4 headers */
236 		if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
237 		    !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
238 			return ICE_ERR_PARAM;
239 	}
240 
241 	return 0;
242 }
243 
/* Sizes of fixed known protocol headers without header options, in bytes.
 * Used by ice_flow_calc_seg_sz() to compute a segment's header span.
 */
#define ICE_FLOW_PROT_HDR_SZ_MAC	14
#define ICE_FLOW_PROT_HDR_SZ_IPV4	20
#define ICE_FLOW_PROT_HDR_SZ_IPV6	40
#define ICE_FLOW_PROT_HDR_SZ_TCP	20
#define ICE_FLOW_PROT_HDR_SZ_UDP	8
#define ICE_FLOW_PROT_HDR_SZ_SCTP	12
251 
252 /**
253  * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
254  * @params: information about the flow to be processed
255  * @seg: index of packet segment whose header size is to be determined
256  */
257 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
258 {
259 	u16 sz = ICE_FLOW_PROT_HDR_SZ_MAC;
260 
261 	/* L3 headers */
262 	if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
263 		sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
264 	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
265 		sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
266 
267 	/* L4 headers */
268 	if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
269 		sz += ICE_FLOW_PROT_HDR_SZ_TCP;
270 	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
271 		sz += ICE_FLOW_PROT_HDR_SZ_UDP;
272 	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
273 		sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
274 
275 	return sz;
276 }
277 
278 /**
279  * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
280  * @params: information about the flow to be processed
281  *
282  * This function identifies the packet types associated with the protocol
283  * headers being present in packet segments of the specified flow profile.
284  */
285 static enum ice_status
286 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
287 {
288 	struct ice_flow_prof *prof;
289 	u8 i;
290 
291 	memset(params->ptypes, 0xff, sizeof(params->ptypes));
292 
293 	prof = params->prof;
294 
295 	for (i = 0; i < params->prof->segs_cnt; i++) {
296 		const unsigned long *src;
297 		u32 hdrs;
298 
299 		hdrs = prof->segs[i].hdrs;
300 
301 		if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
302 		    !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) {
303 			src = !i ? (const unsigned long *)ice_ipv4_ofos_no_l4 :
304 				(const unsigned long *)ice_ipv4_il_no_l4;
305 			bitmap_and(params->ptypes, params->ptypes, src,
306 				   ICE_FLOW_PTYPE_MAX);
307 		} else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
308 			src = !i ? (const unsigned long *)ice_ptypes_ipv4_ofos :
309 				(const unsigned long *)ice_ptypes_ipv4_il;
310 			bitmap_and(params->ptypes, params->ptypes, src,
311 				   ICE_FLOW_PTYPE_MAX);
312 		} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
313 			   !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) {
314 			src = !i ? (const unsigned long *)ice_ipv6_ofos_no_l4 :
315 				(const unsigned long *)ice_ipv6_il_no_l4;
316 			bitmap_and(params->ptypes, params->ptypes, src,
317 				   ICE_FLOW_PTYPE_MAX);
318 		} else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
319 			src = !i ? (const unsigned long *)ice_ptypes_ipv6_ofos :
320 				(const unsigned long *)ice_ptypes_ipv6_il;
321 			bitmap_and(params->ptypes, params->ptypes, src,
322 				   ICE_FLOW_PTYPE_MAX);
323 		}
324 
325 		if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
326 			src = (const unsigned long *)ice_ptypes_udp_il;
327 			bitmap_and(params->ptypes, params->ptypes, src,
328 				   ICE_FLOW_PTYPE_MAX);
329 		} else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
330 			bitmap_and(params->ptypes, params->ptypes,
331 				   (const unsigned long *)ice_ptypes_tcp_il,
332 				   ICE_FLOW_PTYPE_MAX);
333 		} else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
334 			src = (const unsigned long *)ice_ptypes_sctp_il;
335 			bitmap_and(params->ptypes, params->ptypes, src,
336 				   ICE_FLOW_PTYPE_MAX);
337 		} else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
338 			if (!i) {
339 				src = (const unsigned long *)ice_ptypes_gre_of;
340 				bitmap_and(params->ptypes, params->ptypes,
341 					   src, ICE_FLOW_PTYPE_MAX);
342 			}
343 		}
344 	}
345 
346 	return 0;
347 }
348 
349 /**
350  * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
351  * @hw: pointer to the HW struct
352  * @params: information about the flow to be processed
353  * @seg: packet segment index of the field to be extracted
354  * @fld: ID of field to be extracted
355  *
356  * This function determines the protocol ID, offset, and size of the given
357  * field. It then allocates one or more extraction sequence entries for the
358  * given field, and fill the entries with protocol ID and offset information.
359  */
360 static enum ice_status
361 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
362 		    u8 seg, enum ice_flow_field fld)
363 {
364 	enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
365 	u8 fv_words = hw->blk[params->blk].es.fvw;
366 	struct ice_flow_fld_info *flds;
367 	u16 cnt, ese_bits, i;
368 	u16 off;
369 
370 	flds = params->prof->segs[seg].fields;
371 
372 	switch (fld) {
373 	case ICE_FLOW_FIELD_IDX_IPV4_SA:
374 	case ICE_FLOW_FIELD_IDX_IPV4_DA:
375 		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
376 		break;
377 	case ICE_FLOW_FIELD_IDX_IPV6_SA:
378 	case ICE_FLOW_FIELD_IDX_IPV6_DA:
379 		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
380 		break;
381 	case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
382 	case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
383 		prot_id = ICE_PROT_TCP_IL;
384 		break;
385 	case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
386 	case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
387 		prot_id = ICE_PROT_UDP_IL_OR_S;
388 		break;
389 	case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
390 	case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
391 		prot_id = ICE_PROT_SCTP_IL;
392 		break;
393 	case ICE_FLOW_FIELD_IDX_GRE_KEYID:
394 		prot_id = ICE_PROT_GRE_OF;
395 		break;
396 	default:
397 		return ICE_ERR_NOT_IMPL;
398 	}
399 
400 	/* Each extraction sequence entry is a word in size, and extracts a
401 	 * word-aligned offset from a protocol header.
402 	 */
403 	ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
404 
405 	flds[fld].xtrct.prot_id = prot_id;
406 	flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
407 		ICE_FLOW_FV_EXTRACT_SZ;
408 	flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
409 	flds[fld].xtrct.idx = params->es_cnt;
410 
411 	/* Adjust the next field-entry index after accommodating the number of
412 	 * entries this field consumes
413 	 */
414 	cnt = DIV_ROUND_UP(flds[fld].xtrct.disp + ice_flds_info[fld].size,
415 			   ese_bits);
416 
417 	/* Fill in the extraction sequence entries needed for this field */
418 	off = flds[fld].xtrct.off;
419 	for (i = 0; i < cnt; i++) {
420 		u8 idx;
421 
422 		/* Make sure the number of extraction sequence required
423 		 * does not exceed the block's capability
424 		 */
425 		if (params->es_cnt >= fv_words)
426 			return ICE_ERR_MAX_LIMIT;
427 
428 		/* some blocks require a reversed field vector layout */
429 		if (hw->blk[params->blk].es.reverse)
430 			idx = fv_words - params->es_cnt - 1;
431 		else
432 			idx = params->es_cnt;
433 
434 		params->es[idx].prot_id = prot_id;
435 		params->es[idx].off = off;
436 		params->es_cnt++;
437 
438 		off += ICE_FLOW_FV_EXTRACT_SZ;
439 	}
440 
441 	return 0;
442 }
443 
444 /**
445  * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
446  * @hw: pointer to the HW struct
447  * @params: information about the flow to be processed
448  * @seg: index of packet segment whose raw fields are to be extracted
449  */
450 static enum ice_status
451 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
452 		     u8 seg)
453 {
454 	u16 fv_words;
455 	u16 hdrs_sz;
456 	u8 i;
457 
458 	if (!params->prof->segs[seg].raws_cnt)
459 		return 0;
460 
461 	if (params->prof->segs[seg].raws_cnt >
462 	    ARRAY_SIZE(params->prof->segs[seg].raws))
463 		return ICE_ERR_MAX_LIMIT;
464 
465 	/* Offsets within the segment headers are not supported */
466 	hdrs_sz = ice_flow_calc_seg_sz(params, seg);
467 	if (!hdrs_sz)
468 		return ICE_ERR_PARAM;
469 
470 	fv_words = hw->blk[params->blk].es.fvw;
471 
472 	for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
473 		struct ice_flow_seg_fld_raw *raw;
474 		u16 off, cnt, j;
475 
476 		raw = &params->prof->segs[seg].raws[i];
477 
478 		/* Storing extraction information */
479 		raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
480 		raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
481 			ICE_FLOW_FV_EXTRACT_SZ;
482 		raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
483 			BITS_PER_BYTE;
484 		raw->info.xtrct.idx = params->es_cnt;
485 
486 		/* Determine the number of field vector entries this raw field
487 		 * consumes.
488 		 */
489 		cnt = DIV_ROUND_UP(raw->info.xtrct.disp +
490 				   (raw->info.src.last * BITS_PER_BYTE),
491 				   (ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE));
492 		off = raw->info.xtrct.off;
493 		for (j = 0; j < cnt; j++) {
494 			u16 idx;
495 
496 			/* Make sure the number of extraction sequence required
497 			 * does not exceed the block's capability
498 			 */
499 			if (params->es_cnt >= hw->blk[params->blk].es.count ||
500 			    params->es_cnt >= ICE_MAX_FV_WORDS)
501 				return ICE_ERR_MAX_LIMIT;
502 
503 			/* some blocks require a reversed field vector layout */
504 			if (hw->blk[params->blk].es.reverse)
505 				idx = fv_words - params->es_cnt - 1;
506 			else
507 				idx = params->es_cnt;
508 
509 			params->es[idx].prot_id = raw->info.xtrct.prot_id;
510 			params->es[idx].off = off;
511 			params->es_cnt++;
512 			off += ICE_FLOW_FV_EXTRACT_SZ;
513 		}
514 	}
515 
516 	return 0;
517 }
518 
519 /**
520  * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
521  * @hw: pointer to the HW struct
522  * @params: information about the flow to be processed
523  *
524  * This function iterates through all matched fields in the given segments, and
525  * creates an extraction sequence for the fields.
526  */
527 static enum ice_status
528 ice_flow_create_xtrct_seq(struct ice_hw *hw,
529 			  struct ice_flow_prof_params *params)
530 {
531 	struct ice_flow_prof *prof = params->prof;
532 	enum ice_status status = 0;
533 	u8 i;
534 
535 	for (i = 0; i < prof->segs_cnt; i++) {
536 		u8 j;
537 
538 		for_each_set_bit(j, (unsigned long *)&prof->segs[i].match,
539 				 ICE_FLOW_FIELD_IDX_MAX) {
540 			status = ice_flow_xtract_fld(hw, params, i,
541 						     (enum ice_flow_field)j);
542 			if (status)
543 				return status;
544 		}
545 
546 		/* Process raw matching bytes */
547 		status = ice_flow_xtract_raws(hw, params, i);
548 		if (status)
549 			return status;
550 	}
551 
552 	return status;
553 }
554 
555 /**
556  * ice_flow_proc_segs - process all packet segments associated with a profile
557  * @hw: pointer to the HW struct
558  * @params: information about the flow to be processed
559  */
560 static enum ice_status
561 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
562 {
563 	enum ice_status status;
564 
565 	status = ice_flow_proc_seg_hdrs(params);
566 	if (status)
567 		return status;
568 
569 	status = ice_flow_create_xtrct_seq(hw, params);
570 	if (status)
571 		return status;
572 
573 	switch (params->blk) {
574 	case ICE_BLK_FD:
575 	case ICE_BLK_RSS:
576 		status = 0;
577 		break;
578 	default:
579 		return ICE_ERR_NOT_IMPL;
580 	}
581 
582 	return status;
583 }
584 
/* Condition flags for ice_flow_find_prof_conds() */
#define ICE_FLOW_FIND_PROF_CHK_FLDS	0x00000001	/* also compare matched fields */
#define ICE_FLOW_FIND_PROF_CHK_VSI	0x00000002	/* require VSI association */
#define ICE_FLOW_FIND_PROF_NOT_CHK_DIR	0x00000004	/* ignore flow direction */
588 
589 /**
590  * ice_flow_find_prof_conds - Find a profile matching headers and conditions
591  * @hw: pointer to the HW struct
592  * @blk: classification stage
593  * @dir: flow direction
594  * @segs: array of one or more packet segments that describe the flow
595  * @segs_cnt: number of packet segments provided
596  * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
597  * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
598  */
599 static struct ice_flow_prof *
600 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
601 			 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
602 			 u8 segs_cnt, u16 vsi_handle, u32 conds)
603 {
604 	struct ice_flow_prof *p, *prof = NULL;
605 
606 	mutex_lock(&hw->fl_profs_locks[blk]);
607 	list_for_each_entry(p, &hw->fl_profs[blk], l_entry)
608 		if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
609 		    segs_cnt && segs_cnt == p->segs_cnt) {
610 			u8 i;
611 
612 			/* Check for profile-VSI association if specified */
613 			if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
614 			    ice_is_vsi_valid(hw, vsi_handle) &&
615 			    !test_bit(vsi_handle, p->vsis))
616 				continue;
617 
618 			/* Protocol headers must be checked. Matched fields are
619 			 * checked if specified.
620 			 */
621 			for (i = 0; i < segs_cnt; i++)
622 				if (segs[i].hdrs != p->segs[i].hdrs ||
623 				    ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
624 				     segs[i].match != p->segs[i].match))
625 					break;
626 
627 			/* A match is found if all segments are matched */
628 			if (i == segs_cnt) {
629 				prof = p;
630 				break;
631 			}
632 		}
633 	mutex_unlock(&hw->fl_profs_locks[blk]);
634 
635 	return prof;
636 }
637 
638 /**
639  * ice_flow_find_prof_id - Look up a profile with given profile ID
640  * @hw: pointer to the HW struct
641  * @blk: classification stage
642  * @prof_id: unique ID to identify this flow profile
643  */
644 static struct ice_flow_prof *
645 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
646 {
647 	struct ice_flow_prof *p;
648 
649 	list_for_each_entry(p, &hw->fl_profs[blk], l_entry)
650 		if (p->id == prof_id)
651 			return p;
652 
653 	return NULL;
654 }
655 
656 /**
657  * ice_dealloc_flow_entry - Deallocate flow entry memory
658  * @hw: pointer to the HW struct
659  * @entry: flow entry to be removed
660  */
661 static void
662 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
663 {
664 	if (!entry)
665 		return;
666 
667 	if (entry->entry)
668 		devm_kfree(ice_hw_to_dev(hw), entry->entry);
669 
670 	devm_kfree(ice_hw_to_dev(hw), entry);
671 }
672 
673 /**
674  * ice_flow_rem_entry_sync - Remove a flow entry
675  * @hw: pointer to the HW struct
676  * @blk: classification stage
677  * @entry: flow entry to be removed
678  */
679 static enum ice_status
680 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,
681 			struct ice_flow_entry *entry)
682 {
683 	if (!entry)
684 		return ICE_ERR_BAD_PTR;
685 
686 	list_del(&entry->l_entry);
687 
688 	ice_dealloc_flow_entry(hw, entry);
689 
690 	return 0;
691 }
692 
693 /**
694  * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
695  * @hw: pointer to the HW struct
696  * @blk: classification stage
697  * @dir: flow direction
698  * @prof_id: unique ID to identify this flow profile
699  * @segs: array of one or more packet segments that describe the flow
700  * @segs_cnt: number of packet segments provided
701  * @prof: stores the returned flow profile added
702  *
703  * Assumption: the caller has acquired the lock to the profile list
704  */
705 static enum ice_status
706 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
707 		       enum ice_flow_dir dir, u64 prof_id,
708 		       struct ice_flow_seg_info *segs, u8 segs_cnt,
709 		       struct ice_flow_prof **prof)
710 {
711 	struct ice_flow_prof_params params;
712 	enum ice_status status;
713 	u8 i;
714 
715 	if (!prof)
716 		return ICE_ERR_BAD_PTR;
717 
718 	memset(&params, 0, sizeof(params));
719 	params.prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params.prof),
720 				   GFP_KERNEL);
721 	if (!params.prof)
722 		return ICE_ERR_NO_MEMORY;
723 
724 	/* initialize extraction sequence to all invalid (0xff) */
725 	for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
726 		params.es[i].prot_id = ICE_PROT_INVALID;
727 		params.es[i].off = ICE_FV_OFFSET_INVAL;
728 	}
729 
730 	params.blk = blk;
731 	params.prof->id = prof_id;
732 	params.prof->dir = dir;
733 	params.prof->segs_cnt = segs_cnt;
734 
735 	/* Make a copy of the segments that need to be persistent in the flow
736 	 * profile instance
737 	 */
738 	for (i = 0; i < segs_cnt; i++)
739 		memcpy(&params.prof->segs[i], &segs[i], sizeof(*segs));
740 
741 	status = ice_flow_proc_segs(hw, &params);
742 	if (status) {
743 		ice_debug(hw, ICE_DBG_FLOW,
744 			  "Error processing a flow's packet segments\n");
745 		goto out;
746 	}
747 
748 	/* Add a HW profile for this flow profile */
749 	status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes, params.es);
750 	if (status) {
751 		ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
752 		goto out;
753 	}
754 
755 	INIT_LIST_HEAD(&params.prof->entries);
756 	mutex_init(&params.prof->entries_lock);
757 	*prof = params.prof;
758 
759 out:
760 	if (status)
761 		devm_kfree(ice_hw_to_dev(hw), params.prof);
762 
763 	return status;
764 }
765 
766 /**
767  * ice_flow_rem_prof_sync - remove a flow profile
768  * @hw: pointer to the hardware structure
769  * @blk: classification stage
770  * @prof: pointer to flow profile to remove
771  *
772  * Assumption: the caller has acquired the lock to the profile list
773  */
774 static enum ice_status
775 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
776 		       struct ice_flow_prof *prof)
777 {
778 	enum ice_status status;
779 
780 	/* Remove all remaining flow entries before removing the flow profile */
781 	if (!list_empty(&prof->entries)) {
782 		struct ice_flow_entry *e, *t;
783 
784 		mutex_lock(&prof->entries_lock);
785 
786 		list_for_each_entry_safe(e, t, &prof->entries, l_entry) {
787 			status = ice_flow_rem_entry_sync(hw, blk, e);
788 			if (status)
789 				break;
790 		}
791 
792 		mutex_unlock(&prof->entries_lock);
793 	}
794 
795 	/* Remove all hardware profiles associated with this flow profile */
796 	status = ice_rem_prof(hw, blk, prof->id);
797 	if (!status) {
798 		list_del(&prof->l_entry);
799 		mutex_destroy(&prof->entries_lock);
800 		devm_kfree(ice_hw_to_dev(hw), prof);
801 	}
802 
803 	return status;
804 }
805 
806 /**
807  * ice_flow_assoc_prof - associate a VSI with a flow profile
808  * @hw: pointer to the hardware structure
809  * @blk: classification stage
810  * @prof: pointer to flow profile
811  * @vsi_handle: software VSI handle
812  *
813  * Assumption: the caller has acquired the lock to the profile list
814  * and the software VSI handle has been validated
815  */
816 static enum ice_status
817 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
818 		    struct ice_flow_prof *prof, u16 vsi_handle)
819 {
820 	enum ice_status status = 0;
821 
822 	if (!test_bit(vsi_handle, prof->vsis)) {
823 		status = ice_add_prof_id_flow(hw, blk,
824 					      ice_get_hw_vsi_num(hw,
825 								 vsi_handle),
826 					      prof->id);
827 		if (!status)
828 			set_bit(vsi_handle, prof->vsis);
829 		else
830 			ice_debug(hw, ICE_DBG_FLOW,
831 				  "HW profile add failed, %d\n",
832 				  status);
833 	}
834 
835 	return status;
836 }
837 
838 /**
839  * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
840  * @hw: pointer to the hardware structure
841  * @blk: classification stage
842  * @prof: pointer to flow profile
843  * @vsi_handle: software VSI handle
844  *
845  * Assumption: the caller has acquired the lock to the profile list
846  * and the software VSI handle has been validated
847  */
848 static enum ice_status
849 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
850 		       struct ice_flow_prof *prof, u16 vsi_handle)
851 {
852 	enum ice_status status = 0;
853 
854 	if (test_bit(vsi_handle, prof->vsis)) {
855 		status = ice_rem_prof_id_flow(hw, blk,
856 					      ice_get_hw_vsi_num(hw,
857 								 vsi_handle),
858 					      prof->id);
859 		if (!status)
860 			clear_bit(vsi_handle, prof->vsis);
861 		else
862 			ice_debug(hw, ICE_DBG_FLOW,
863 				  "HW profile remove failed, %d\n",
864 				  status);
865 	}
866 
867 	return status;
868 }
869 
870 /**
871  * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
872  * @hw: pointer to the HW struct
873  * @blk: classification stage
874  * @dir: flow direction
875  * @prof_id: unique ID to identify this flow profile
876  * @segs: array of one or more packet segments that describe the flow
877  * @segs_cnt: number of packet segments provided
878  * @prof: stores the returned flow profile added
879  */
880 enum ice_status
881 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
882 		  u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
883 		  struct ice_flow_prof **prof)
884 {
885 	enum ice_status status;
886 
887 	if (segs_cnt > ICE_FLOW_SEG_MAX)
888 		return ICE_ERR_MAX_LIMIT;
889 
890 	if (!segs_cnt)
891 		return ICE_ERR_PARAM;
892 
893 	if (!segs)
894 		return ICE_ERR_BAD_PTR;
895 
896 	status = ice_flow_val_hdrs(segs, segs_cnt);
897 	if (status)
898 		return status;
899 
900 	mutex_lock(&hw->fl_profs_locks[blk]);
901 
902 	status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
903 					prof);
904 	if (!status)
905 		list_add(&(*prof)->l_entry, &hw->fl_profs[blk]);
906 
907 	mutex_unlock(&hw->fl_profs_locks[blk]);
908 
909 	return status;
910 }
911 
912 /**
913  * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
914  * @hw: pointer to the HW struct
915  * @blk: the block for which the flow profile is to be removed
916  * @prof_id: unique ID of the flow profile to be removed
917  */
918 enum ice_status
919 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
920 {
921 	struct ice_flow_prof *prof;
922 	enum ice_status status;
923 
924 	mutex_lock(&hw->fl_profs_locks[blk]);
925 
926 	prof = ice_flow_find_prof_id(hw, blk, prof_id);
927 	if (!prof) {
928 		status = ICE_ERR_DOES_NOT_EXIST;
929 		goto out;
930 	}
931 
932 	/* prof becomes invalid after the call */
933 	status = ice_flow_rem_prof_sync(hw, blk, prof);
934 
935 out:
936 	mutex_unlock(&hw->fl_profs_locks[blk]);
937 
938 	return status;
939 }
940 
941 /**
942  * ice_flow_add_entry - Add a flow entry
943  * @hw: pointer to the HW struct
944  * @blk: classification stage
945  * @prof_id: ID of the profile to add a new flow entry to
946  * @entry_id: unique ID to identify this flow entry
947  * @vsi_handle: software VSI handle for the flow entry
948  * @prio: priority of the flow entry
949  * @data: pointer to a data buffer containing flow entry's match values/masks
950  * @entry_h: pointer to buffer that receives the new flow entry's handle
951  */
952 enum ice_status
953 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
954 		   u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
955 		   void *data, u64 *entry_h)
956 {
957 	struct ice_flow_entry *e = NULL;
958 	struct ice_flow_prof *prof;
959 	enum ice_status status;
960 
961 	/* No flow entry data is expected for RSS */
962 	if (!entry_h || (!data && blk != ICE_BLK_RSS))
963 		return ICE_ERR_BAD_PTR;
964 
965 	if (!ice_is_vsi_valid(hw, vsi_handle))
966 		return ICE_ERR_PARAM;
967 
968 	mutex_lock(&hw->fl_profs_locks[blk]);
969 
970 	prof = ice_flow_find_prof_id(hw, blk, prof_id);
971 	if (!prof) {
972 		status = ICE_ERR_DOES_NOT_EXIST;
973 	} else {
974 		/* Allocate memory for the entry being added and associate
975 		 * the VSI to the found flow profile
976 		 */
977 		e = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*e), GFP_KERNEL);
978 		if (!e)
979 			status = ICE_ERR_NO_MEMORY;
980 		else
981 			status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
982 	}
983 
984 	mutex_unlock(&hw->fl_profs_locks[blk]);
985 	if (status)
986 		goto out;
987 
988 	e->id = entry_id;
989 	e->vsi_handle = vsi_handle;
990 	e->prof = prof;
991 	e->priority = prio;
992 
993 	switch (blk) {
994 	case ICE_BLK_FD:
995 	case ICE_BLK_RSS:
996 		break;
997 	default:
998 		status = ICE_ERR_NOT_IMPL;
999 		goto out;
1000 	}
1001 
1002 	mutex_lock(&prof->entries_lock);
1003 	list_add(&e->l_entry, &prof->entries);
1004 	mutex_unlock(&prof->entries_lock);
1005 
1006 	*entry_h = ICE_FLOW_ENTRY_HNDL(e);
1007 
1008 out:
1009 	if (status && e) {
1010 		if (e->entry)
1011 			devm_kfree(ice_hw_to_dev(hw), e->entry);
1012 		devm_kfree(ice_hw_to_dev(hw), e);
1013 	}
1014 
1015 	return status;
1016 }
1017 
1018 /**
1019  * ice_flow_rem_entry - Remove a flow entry
1020  * @hw: pointer to the HW struct
1021  * @blk: classification stage
1022  * @entry_h: handle to the flow entry to be removed
1023  */
1024 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
1025 				   u64 entry_h)
1026 {
1027 	struct ice_flow_entry *entry;
1028 	struct ice_flow_prof *prof;
1029 	enum ice_status status = 0;
1030 
1031 	if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
1032 		return ICE_ERR_PARAM;
1033 
1034 	entry = ICE_FLOW_ENTRY_PTR(entry_h);
1035 
1036 	/* Retain the pointer to the flow profile as the entry will be freed */
1037 	prof = entry->prof;
1038 
1039 	if (prof) {
1040 		mutex_lock(&prof->entries_lock);
1041 		status = ice_flow_rem_entry_sync(hw, blk, entry);
1042 		mutex_unlock(&prof->entries_lock);
1043 	}
1044 
1045 	return status;
1046 }
1047 
1048 /**
1049  * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
1050  * @seg: packet segment the field being set belongs to
1051  * @fld: field to be set
1052  * @field_type: type of the field
1053  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
1054  *           entry's input buffer
1055  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
1056  *            input buffer
1057  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
1058  *            entry's input buffer
1059  *
1060  * This helper function stores information of a field being matched, including
1061  * the type of the field and the locations of the value to match, the mask, and
1062  * the upper-bound value in the start of the input buffer for a flow entry.
1063  * This function should only be used for fixed-size data structures.
1064  *
1065  * This function also opportunistically determines the protocol headers to be
1066  * present based on the fields being set. Some fields cannot be used alone to
1067  * determine the protocol headers present. Sometimes, fields for particular
1068  * protocol headers are not matched. In those cases, the protocol headers
1069  * must be explicitly set.
1070  */
1071 static void
1072 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
1073 		     enum ice_flow_fld_match_type field_type, u16 val_loc,
1074 		     u16 mask_loc, u16 last_loc)
1075 {
1076 	u64 bit = BIT_ULL(fld);
1077 
1078 	seg->match |= bit;
1079 	if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
1080 		seg->range |= bit;
1081 
1082 	seg->fields[fld].type = field_type;
1083 	seg->fields[fld].src.val = val_loc;
1084 	seg->fields[fld].src.mask = mask_loc;
1085 	seg->fields[fld].src.last = last_loc;
1086 
1087 	ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
1088 }
1089 
1090 /**
1091  * ice_flow_set_fld - specifies locations of field from entry's input buffer
1092  * @seg: packet segment the field being set belongs to
1093  * @fld: field to be set
1094  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
1095  *           entry's input buffer
1096  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
1097  *            input buffer
1098  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
1099  *            entry's input buffer
1100  * @range: indicate if field being matched is to be in a range
1101  *
1102  * This function specifies the locations, in the form of byte offsets from the
1103  * start of the input buffer for a flow entry, from where the value to match,
1104  * the mask value, and upper value can be extracted. These locations are then
1105  * stored in the flow profile. When adding a flow entry associated with the
1106  * flow profile, these locations will be used to quickly extract the values and
1107  * create the content of a match entry. This function should only be used for
1108  * fixed-size data structures.
1109  */
1110 void
1111 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
1112 		 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
1113 {
1114 	enum ice_flow_fld_match_type t = range ?
1115 		ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
1116 
1117 	ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
1118 }
1119 
1120 /**
1121  * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
1122  * @seg: packet segment the field being set belongs to
1123  * @off: offset of the raw field from the beginning of the segment in bytes
1124  * @len: length of the raw pattern to be matched
1125  * @val_loc: location of the value to match from entry's input buffer
1126  * @mask_loc: location of mask value from entry's input buffer
1127  *
1128  * This function specifies the offset of the raw field to be match from the
1129  * beginning of the specified packet segment, and the locations, in the form of
1130  * byte offsets from the start of the input buffer for a flow entry, from where
1131  * the value to match and the mask value to be extracted. These locations are
1132  * then stored in the flow profile. When adding flow entries to the associated
1133  * flow profile, these locations can be used to quickly extract the values to
1134  * create the content of a match entry. This function should only be used for
1135  * fixed-size data structures.
1136  */
1137 void
1138 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
1139 		     u16 val_loc, u16 mask_loc)
1140 {
1141 	if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
1142 		seg->raws[seg->raws_cnt].off = off;
1143 		seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
1144 		seg->raws[seg->raws_cnt].info.src.val = val_loc;
1145 		seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
1146 		/* The "last" field is used to store the length of the field */
1147 		seg->raws[seg->raws_cnt].info.src.last = len;
1148 	}
1149 
1150 	/* Overflows of "raws" will be handled as an error condition later in
1151 	 * the flow when this information is processed.
1152 	 */
1153 	seg->raws_cnt++;
1154 }
1155 
/* L3 protocol headers eligible for RSS hashing */
#define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)

/* L4 protocol headers eligible for RSS hashing */
#define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)

/* Complete set of protocol headers valid in an RSS flow segment */
#define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
	(ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
	 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
1165 
1166 /**
1167  * ice_flow_set_rss_seg_info - setup packet segments for RSS
1168  * @segs: pointer to the flow field segment(s)
1169  * @hash_fields: fields to be hashed on for the segment(s)
1170  * @flow_hdr: protocol header fields within a packet segment
1171  *
1172  * Helper function to extract fields from hash bitmap and use flow
1173  * header value to set flow field segment for further use in flow
1174  * profile entry or removal.
1175  */
1176 static enum ice_status
1177 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
1178 			  u32 flow_hdr)
1179 {
1180 	u64 val;
1181 	u8 i;
1182 
1183 	for_each_set_bit(i, (unsigned long *)&hash_fields,
1184 			 ICE_FLOW_FIELD_IDX_MAX)
1185 		ice_flow_set_fld(segs, (enum ice_flow_field)i,
1186 				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1187 				 ICE_FLOW_FLD_OFF_INVAL, false);
1188 
1189 	ICE_FLOW_SET_HDRS(segs, flow_hdr);
1190 
1191 	if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS)
1192 		return ICE_ERR_PARAM;
1193 
1194 	val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
1195 	if (val && !is_power_of_2(val))
1196 		return ICE_ERR_CFG;
1197 
1198 	val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
1199 	if (val && !is_power_of_2(val))
1200 		return ICE_ERR_CFG;
1201 
1202 	return 0;
1203 }
1204 
1205 /**
1206  * ice_rem_vsi_rss_list - remove VSI from RSS list
1207  * @hw: pointer to the hardware structure
1208  * @vsi_handle: software VSI handle
1209  *
1210  * Remove the VSI from all RSS configurations in the list.
1211  */
1212 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
1213 {
1214 	struct ice_rss_cfg *r, *tmp;
1215 
1216 	if (list_empty(&hw->rss_list_head))
1217 		return;
1218 
1219 	mutex_lock(&hw->rss_locks);
1220 	list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
1221 		if (test_and_clear_bit(vsi_handle, r->vsis))
1222 			if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
1223 				list_del(&r->l_entry);
1224 				devm_kfree(ice_hw_to_dev(hw), r);
1225 			}
1226 	mutex_unlock(&hw->rss_locks);
1227 }
1228 
1229 /**
1230  * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
1231  * @hw: pointer to the hardware structure
1232  * @vsi_handle: software VSI handle
1233  *
1234  * This function will iterate through all flow profiles and disassociate
1235  * the VSI from that profile. If the flow profile has no VSIs it will
1236  * be removed.
1237  */
1238 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
1239 {
1240 	const enum ice_block blk = ICE_BLK_RSS;
1241 	struct ice_flow_prof *p, *t;
1242 	enum ice_status status = 0;
1243 
1244 	if (!ice_is_vsi_valid(hw, vsi_handle))
1245 		return ICE_ERR_PARAM;
1246 
1247 	if (list_empty(&hw->fl_profs[blk]))
1248 		return 0;
1249 
1250 	mutex_lock(&hw->rss_locks);
1251 	list_for_each_entry_safe(p, t, &hw->fl_profs[blk], l_entry)
1252 		if (test_bit(vsi_handle, p->vsis)) {
1253 			status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
1254 			if (status)
1255 				break;
1256 
1257 			if (bitmap_empty(p->vsis, ICE_MAX_VSI)) {
1258 				status = ice_flow_rem_prof(hw, blk, p->id);
1259 				if (status)
1260 					break;
1261 			}
1262 		}
1263 	mutex_unlock(&hw->rss_locks);
1264 
1265 	return status;
1266 }
1267 
1268 /**
1269  * ice_rem_rss_list - remove RSS configuration from list
1270  * @hw: pointer to the hardware structure
1271  * @vsi_handle: software VSI handle
1272  * @prof: pointer to flow profile
1273  *
1274  * Assumption: lock has already been acquired for RSS list
1275  */
1276 static void
1277 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
1278 {
1279 	struct ice_rss_cfg *r, *tmp;
1280 
1281 	/* Search for RSS hash fields associated to the VSI that match the
1282 	 * hash configurations associated to the flow profile. If found
1283 	 * remove from the RSS entry list of the VSI context and delete entry.
1284 	 */
1285 	list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
1286 		if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
1287 		    r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
1288 			clear_bit(vsi_handle, r->vsis);
1289 			if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
1290 				list_del(&r->l_entry);
1291 				devm_kfree(ice_hw_to_dev(hw), r);
1292 			}
1293 			return;
1294 		}
1295 }
1296 
1297 /**
1298  * ice_add_rss_list - add RSS configuration to list
1299  * @hw: pointer to the hardware structure
1300  * @vsi_handle: software VSI handle
1301  * @prof: pointer to flow profile
1302  *
1303  * Assumption: lock has already been acquired for RSS list
1304  */
1305 static enum ice_status
1306 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
1307 {
1308 	struct ice_rss_cfg *r, *rss_cfg;
1309 
1310 	list_for_each_entry(r, &hw->rss_list_head, l_entry)
1311 		if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
1312 		    r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
1313 			set_bit(vsi_handle, r->vsis);
1314 			return 0;
1315 		}
1316 
1317 	rss_cfg = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rss_cfg),
1318 			       GFP_KERNEL);
1319 	if (!rss_cfg)
1320 		return ICE_ERR_NO_MEMORY;
1321 
1322 	rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
1323 	rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
1324 	set_bit(vsi_handle, rss_cfg->vsis);
1325 
1326 	list_add_tail(&rss_cfg->l_entry, &hw->rss_list_head);
1327 
1328 	return 0;
1329 }
1330 
/* Shifts and masks used to pack the generated flow profile ID */
#define ICE_FLOW_PROF_HASH_S	0
#define ICE_FLOW_PROF_HASH_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
#define ICE_FLOW_PROF_HDR_S	32
#define ICE_FLOW_PROF_HDR_M	(0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
#define ICE_FLOW_PROF_ENCAP_S	63
#define ICE_FLOW_PROF_ENCAP_M	(BIT_ULL(ICE_FLOW_PROF_ENCAP_S))

/* Segment counts for non-tunneled (outer only) and tunneled packets */
#define ICE_RSS_OUTER_HEADERS	1
#define ICE_RSS_INNER_HEADERS	2

/* Flow profile ID format:
 * [0:31] - Packet match fields
 * [32:61] - Protocol header (ICE_FLOW_PROF_HDR_M is 30 bits wide; bit 62
 *	     is currently unused)
 * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
 *
 * A segment count greater than one indicates a tunneled configuration, which
 * sets the encapsulation bit.
 */
#define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
	(u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
	      (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
	      ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
1350 
1351 /**
1352  * ice_add_rss_cfg_sync - add an RSS configuration
1353  * @hw: pointer to the hardware structure
1354  * @vsi_handle: software VSI handle
1355  * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
1356  * @addl_hdrs: protocol header fields
1357  * @segs_cnt: packet segment count
1358  *
1359  * Assumption: lock has already been acquired for RSS list
1360  */
1361 static enum ice_status
1362 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
1363 		     u32 addl_hdrs, u8 segs_cnt)
1364 {
1365 	const enum ice_block blk = ICE_BLK_RSS;
1366 	struct ice_flow_prof *prof = NULL;
1367 	struct ice_flow_seg_info *segs;
1368 	enum ice_status status;
1369 
1370 	if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
1371 		return ICE_ERR_PARAM;
1372 
1373 	segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
1374 	if (!segs)
1375 		return ICE_ERR_NO_MEMORY;
1376 
1377 	/* Construct the packet segment info from the hashed fields */
1378 	status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
1379 					   addl_hdrs);
1380 	if (status)
1381 		goto exit;
1382 
1383 	/* Search for a flow profile that has matching headers, hash fields
1384 	 * and has the input VSI associated to it. If found, no further
1385 	 * operations required and exit.
1386 	 */
1387 	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
1388 					vsi_handle,
1389 					ICE_FLOW_FIND_PROF_CHK_FLDS |
1390 					ICE_FLOW_FIND_PROF_CHK_VSI);
1391 	if (prof)
1392 		goto exit;
1393 
1394 	/* Check if a flow profile exists with the same protocol headers and
1395 	 * associated with the input VSI. If so disassociate the VSI from
1396 	 * this profile. The VSI will be added to a new profile created with
1397 	 * the protocol header and new hash field configuration.
1398 	 */
1399 	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
1400 					vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
1401 	if (prof) {
1402 		status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
1403 		if (!status)
1404 			ice_rem_rss_list(hw, vsi_handle, prof);
1405 		else
1406 			goto exit;
1407 
1408 		/* Remove profile if it has no VSIs associated */
1409 		if (bitmap_empty(prof->vsis, ICE_MAX_VSI)) {
1410 			status = ice_flow_rem_prof(hw, blk, prof->id);
1411 			if (status)
1412 				goto exit;
1413 		}
1414 	}
1415 
1416 	/* Search for a profile that has same match fields only. If this
1417 	 * exists then associate the VSI to this profile.
1418 	 */
1419 	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
1420 					vsi_handle,
1421 					ICE_FLOW_FIND_PROF_CHK_FLDS);
1422 	if (prof) {
1423 		status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
1424 		if (!status)
1425 			status = ice_add_rss_list(hw, vsi_handle, prof);
1426 		goto exit;
1427 	}
1428 
1429 	/* Create a new flow profile with generated profile and packet
1430 	 * segment information.
1431 	 */
1432 	status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
1433 				   ICE_FLOW_GEN_PROFID(hashed_flds,
1434 						       segs[segs_cnt - 1].hdrs,
1435 						       segs_cnt),
1436 				   segs, segs_cnt, &prof);
1437 	if (status)
1438 		goto exit;
1439 
1440 	status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
1441 	/* If association to a new flow profile failed then this profile can
1442 	 * be removed.
1443 	 */
1444 	if (status) {
1445 		ice_flow_rem_prof(hw, blk, prof->id);
1446 		goto exit;
1447 	}
1448 
1449 	status = ice_add_rss_list(hw, vsi_handle, prof);
1450 
1451 exit:
1452 	kfree(segs);
1453 	return status;
1454 }
1455 
1456 /**
1457  * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
1458  * @hw: pointer to the hardware structure
1459  * @vsi_handle: software VSI handle
1460  * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
1461  * @addl_hdrs: protocol header fields
1462  *
1463  * This function will generate a flow profile based on fields associated with
1464  * the input fields to hash on, the flow type and use the VSI number to add
1465  * a flow entry to the profile.
1466  */
1467 enum ice_status
1468 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
1469 		u32 addl_hdrs)
1470 {
1471 	enum ice_status status;
1472 
1473 	if (hashed_flds == ICE_HASH_INVALID ||
1474 	    !ice_is_vsi_valid(hw, vsi_handle))
1475 		return ICE_ERR_PARAM;
1476 
1477 	mutex_lock(&hw->rss_locks);
1478 	status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
1479 				      ICE_RSS_OUTER_HEADERS);
1480 	if (!status)
1481 		status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
1482 					      addl_hdrs, ICE_RSS_INNER_HEADERS);
1483 	mutex_unlock(&hw->rss_locks);
1484 
1485 	return status;
1486 }
1487 
/* Mapping of AVF hash bit fields to an L3-L4 hash combination.
 * As the ice_flow_avf_hdr_field represent individual bit shifts in a hash,
 * convert its values to their appropriate flow L3, L4 values.
 */
/* AVF bits that map to a plain IPv4 (no L4) hash */
#define ICE_FLOW_AVF_RSS_IPV4_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4))
/* AVF bits that map to an IPv4 + TCP port hash */
#define ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP))
/* AVF bits that map to an IPv4 + UDP port hash */
#define ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP))
/* Every supported IPv4-related AVF hash bit */
#define ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS \
	(ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS | \
	 ICE_FLOW_AVF_RSS_IPV4_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP))

/* AVF bits that map to a plain IPv6 (no L4) hash */
#define ICE_FLOW_AVF_RSS_IPV6_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6))
/* AVF bits that map to an IPv6 + UDP port hash */
#define ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP))
/* AVF bits that map to an IPv6 + TCP port hash */
#define ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP))
/* Every supported IPv6-related AVF hash bit */
#define ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS \
	(ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS | \
	 ICE_FLOW_AVF_RSS_IPV6_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP))
1519 
1520 /**
1521  * ice_add_avf_rss_cfg - add an RSS configuration for AVF driver
1522  * @hw: pointer to the hardware structure
1523  * @vsi_handle: software VSI handle
1524  * @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure
1525  *
1526  * This function will take the hash bitmap provided by the AVF driver via a
1527  * message, convert it to ICE-compatible values, and configure RSS flow
1528  * profiles.
1529  */
1530 enum ice_status
1531 ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
1532 {
1533 	enum ice_status status = 0;
1534 	u64 hash_flds;
1535 
1536 	if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
1537 	    !ice_is_vsi_valid(hw, vsi_handle))
1538 		return ICE_ERR_PARAM;
1539 
1540 	/* Make sure no unsupported bits are specified */
1541 	if (avf_hash & ~(ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS |
1542 			 ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS))
1543 		return ICE_ERR_CFG;
1544 
1545 	hash_flds = avf_hash;
1546 
1547 	/* Always create an L3 RSS configuration for any L4 RSS configuration */
1548 	if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS)
1549 		hash_flds |= ICE_FLOW_AVF_RSS_IPV4_MASKS;
1550 
1551 	if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS)
1552 		hash_flds |= ICE_FLOW_AVF_RSS_IPV6_MASKS;
1553 
1554 	/* Create the corresponding RSS configuration for each valid hash bit */
1555 	while (hash_flds) {
1556 		u64 rss_hash = ICE_HASH_INVALID;
1557 
1558 		if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS) {
1559 			if (hash_flds & ICE_FLOW_AVF_RSS_IPV4_MASKS) {
1560 				rss_hash = ICE_FLOW_HASH_IPV4;
1561 				hash_flds &= ~ICE_FLOW_AVF_RSS_IPV4_MASKS;
1562 			} else if (hash_flds &
1563 				   ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS) {
1564 				rss_hash = ICE_FLOW_HASH_IPV4 |
1565 					ICE_FLOW_HASH_TCP_PORT;
1566 				hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS;
1567 			} else if (hash_flds &
1568 				   ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS) {
1569 				rss_hash = ICE_FLOW_HASH_IPV4 |
1570 					ICE_FLOW_HASH_UDP_PORT;
1571 				hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS;
1572 			} else if (hash_flds &
1573 				   BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP)) {
1574 				rss_hash = ICE_FLOW_HASH_IPV4 |
1575 					ICE_FLOW_HASH_SCTP_PORT;
1576 				hash_flds &=
1577 					~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP);
1578 			}
1579 		} else if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS) {
1580 			if (hash_flds & ICE_FLOW_AVF_RSS_IPV6_MASKS) {
1581 				rss_hash = ICE_FLOW_HASH_IPV6;
1582 				hash_flds &= ~ICE_FLOW_AVF_RSS_IPV6_MASKS;
1583 			} else if (hash_flds &
1584 				   ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS) {
1585 				rss_hash = ICE_FLOW_HASH_IPV6 |
1586 					ICE_FLOW_HASH_TCP_PORT;
1587 				hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS;
1588 			} else if (hash_flds &
1589 				   ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS) {
1590 				rss_hash = ICE_FLOW_HASH_IPV6 |
1591 					ICE_FLOW_HASH_UDP_PORT;
1592 				hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS;
1593 			} else if (hash_flds &
1594 				   BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP)) {
1595 				rss_hash = ICE_FLOW_HASH_IPV6 |
1596 					ICE_FLOW_HASH_SCTP_PORT;
1597 				hash_flds &=
1598 					~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP);
1599 			}
1600 		}
1601 
1602 		if (rss_hash == ICE_HASH_INVALID)
1603 			return ICE_ERR_OUT_OF_RANGE;
1604 
1605 		status = ice_add_rss_cfg(hw, vsi_handle, rss_hash,
1606 					 ICE_FLOW_SEG_HDR_NONE);
1607 		if (status)
1608 			break;
1609 	}
1610 
1611 	return status;
1612 }
1613 
1614 /**
1615  * ice_replay_rss_cfg - replay RSS configurations associated with VSI
1616  * @hw: pointer to the hardware structure
1617  * @vsi_handle: software VSI handle
1618  */
1619 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
1620 {
1621 	enum ice_status status = 0;
1622 	struct ice_rss_cfg *r;
1623 
1624 	if (!ice_is_vsi_valid(hw, vsi_handle))
1625 		return ICE_ERR_PARAM;
1626 
1627 	mutex_lock(&hw->rss_locks);
1628 	list_for_each_entry(r, &hw->rss_list_head, l_entry) {
1629 		if (test_bit(vsi_handle, r->vsis)) {
1630 			status = ice_add_rss_cfg_sync(hw, vsi_handle,
1631 						      r->hashed_flds,
1632 						      r->packet_hdr,
1633 						      ICE_RSS_OUTER_HEADERS);
1634 			if (status)
1635 				break;
1636 			status = ice_add_rss_cfg_sync(hw, vsi_handle,
1637 						      r->hashed_flds,
1638 						      r->packet_hdr,
1639 						      ICE_RSS_INNER_HEADERS);
1640 			if (status)
1641 				break;
1642 		}
1643 	}
1644 	mutex_unlock(&hw->rss_locks);
1645 
1646 	return status;
1647 }
1648 
1649 /**
1650  * ice_get_rss_cfg - returns hashed fields for the given header types
1651  * @hw: pointer to the hardware structure
1652  * @vsi_handle: software VSI handle
1653  * @hdrs: protocol header type
1654  *
1655  * This function will return the match fields of the first instance of flow
1656  * profile having the given header types and containing input VSI
1657  */
1658 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
1659 {
1660 	u64 rss_hash = ICE_HASH_INVALID;
1661 	struct ice_rss_cfg *r;
1662 
1663 	/* verify if the protocol header is non zero and VSI is valid */
1664 	if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
1665 		return ICE_HASH_INVALID;
1666 
1667 	mutex_lock(&hw->rss_locks);
1668 	list_for_each_entry(r, &hw->rss_list_head, l_entry)
1669 		if (test_bit(vsi_handle, r->vsis) &&
1670 		    r->packet_hdr == hdrs) {
1671 			rss_hash = r->hashed_flds;
1672 			break;
1673 		}
1674 	mutex_unlock(&hw->rss_locks);
1675 
1676 	return rss_hash;
1677 }
1678