1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019, Intel Corporation. */
3 
4 #include "ice_common.h"
5 #include "ice_flow.h"
6 
7 /* Describe properties of a protocol header field */
8 struct ice_flow_field_info {
9 	enum ice_flow_seg_hdr hdr;
10 	s16 off;	/* Offset from start of a protocol header, in bits */
11 	u16 size;	/* Size of fields in bits */
12 };
13 
14 #define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
15 	.hdr = _hdr, \
16 	.off = (_offset_bytes) * BITS_PER_BYTE, \
17 	.size = (_size_bytes) * BITS_PER_BYTE, \
18 }
19 
20 /* Table containing properties of supported protocol header fields */
21 static const
22 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
23 	/* IPv4 / IPv6 */
24 	/* ICE_FLOW_FIELD_IDX_IPV4_SA */
25 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, sizeof(struct in_addr)),
26 	/* ICE_FLOW_FIELD_IDX_IPV4_DA */
27 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, sizeof(struct in_addr)),
28 	/* ICE_FLOW_FIELD_IDX_IPV6_SA */
29 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, sizeof(struct in6_addr)),
30 	/* ICE_FLOW_FIELD_IDX_IPV6_DA */
31 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, sizeof(struct in6_addr)),
32 	/* Transport */
33 	/* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
34 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, sizeof(__be16)),
35 	/* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
36 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, sizeof(__be16)),
37 	/* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
38 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, sizeof(__be16)),
39 	/* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
40 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, sizeof(__be16)),
41 	/* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
42 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, sizeof(__be16)),
43 	/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
44 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, sizeof(__be16)),
45 	/* GRE */
46 	/* ICE_FLOW_FIELD_IDX_GRE_KEYID */
47 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12,
48 			  sizeof_field(struct gre_full_hdr, key)),
49 };
50 
51 /* Bitmaps indicating relevant packet types for a particular protocol header
52  *
53  * Packet types for packets with an Outer/First/Single IPv4 header
54  */
55 static const u32 ice_ptypes_ipv4_ofos[] = {
56 	0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
57 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
58 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
59 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
60 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
61 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
62 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
63 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
64 };
65 
66 /* Packet types for packets with an Innermost/Last IPv4 header */
67 static const u32 ice_ptypes_ipv4_il[] = {
68 	0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
69 	0x0000000E, 0x00000000, 0x00000000, 0x00000000,
70 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
71 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
72 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
73 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
74 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
75 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
76 };
77 
78 /* Packet types for packets with an Outer/First/Single IPv6 header */
79 static const u32 ice_ptypes_ipv6_ofos[] = {
80 	0x00000000, 0x00000000, 0x77000000, 0x10002000,
81 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
82 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
83 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
84 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
85 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
86 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
87 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
88 };
89 
90 /* Packet types for packets with an Innermost/Last IPv6 header */
91 static const u32 ice_ptypes_ipv6_il[] = {
92 	0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
93 	0x00000770, 0x00000000, 0x00000000, 0x00000000,
94 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
95 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
96 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
97 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
98 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
99 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
100 };
101 
102 /* UDP Packet types for non-tunneled packets or tunneled
103  * packets with inner UDP.
104  */
105 static const u32 ice_ptypes_udp_il[] = {
106 	0x81000000, 0x20204040, 0x04000010, 0x80810102,
107 	0x00000040, 0x00000000, 0x00000000, 0x00000000,
108 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
109 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
110 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
111 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
112 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
113 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
114 };
115 
116 /* Packet types for packets with an Innermost/Last TCP header */
117 static const u32 ice_ptypes_tcp_il[] = {
118 	0x04000000, 0x80810102, 0x10000040, 0x02040408,
119 	0x00000102, 0x00000000, 0x00000000, 0x00000000,
120 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
121 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
122 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
123 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
124 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
125 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
126 };
127 
128 /* Packet types for packets with an Innermost/Last SCTP header */
129 static const u32 ice_ptypes_sctp_il[] = {
130 	0x08000000, 0x01020204, 0x20000081, 0x04080810,
131 	0x00000204, 0x00000000, 0x00000000, 0x00000000,
132 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
133 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
134 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
135 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
136 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
137 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
138 };
139 
140 /* Packet types for packets with an Outermost/First GRE header */
141 static const u32 ice_ptypes_gre_of[] = {
142 	0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
143 	0x0000017E, 0x00000000, 0x00000000, 0x00000000,
144 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
145 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
146 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
147 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
148 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
149 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
150 };
151 
152 /* Manage parameters and info. used during the creation of a flow profile */
153 struct ice_flow_prof_params {
154 	enum ice_block blk;
155 	u16 entry_length; /* # of bytes formatted entry will require */
156 	u8 es_cnt;
157 	struct ice_flow_prof *prof;
158 
159 	/* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
160 	 * This will give us the direction flags.
161 	 */
162 	struct ice_fv_word es[ICE_MAX_FV_WORDS];
163 	DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX);
164 };
165 
166 #define ICE_FLOW_SEG_HDRS_L3_MASK	\
167 	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
168 #define ICE_FLOW_SEG_HDRS_L4_MASK	\
169 	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
170 
171 /**
172  * ice_flow_val_hdrs - validates packet segments for valid protocol headers
173  * @segs: array of one or more packet segments that describe the flow
174  * @segs_cnt: number of packet segments provided
175  */
176 static enum ice_status
177 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
178 {
179 	u8 i;
180 
181 	for (i = 0; i < segs_cnt; i++) {
182 		/* Multiple L3 headers */
183 		if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
184 		    !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
185 			return ICE_ERR_PARAM;
186 
187 		/* Multiple L4 headers */
188 		if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
189 		    !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
190 			return ICE_ERR_PARAM;
191 	}
192 
193 	return 0;
194 }
195 
196 /**
197  * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
198  * @params: information about the flow to be processed
199  *
200  * This function identifies the packet types associated with the protocol
201  * headers being present in packet segments of the specified flow profile.
202  */
203 static enum ice_status
204 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
205 {
206 	struct ice_flow_prof *prof;
207 	u8 i;
208 
209 	memset(params->ptypes, 0xff, sizeof(params->ptypes));
210 
211 	prof = params->prof;
212 
213 	for (i = 0; i < params->prof->segs_cnt; i++) {
214 		const unsigned long *src;
215 		u32 hdrs;
216 
217 		hdrs = prof->segs[i].hdrs;
218 
219 		if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
220 			src = !i ? (const unsigned long *)ice_ptypes_ipv4_ofos :
221 				(const unsigned long *)ice_ptypes_ipv4_il;
222 			bitmap_and(params->ptypes, params->ptypes, src,
223 				   ICE_FLOW_PTYPE_MAX);
224 		} else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
225 			src = !i ? (const unsigned long *)ice_ptypes_ipv6_ofos :
226 				(const unsigned long *)ice_ptypes_ipv6_il;
227 			bitmap_and(params->ptypes, params->ptypes, src,
228 				   ICE_FLOW_PTYPE_MAX);
229 		}
230 
231 		if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
232 			src = (const unsigned long *)ice_ptypes_udp_il;
233 			bitmap_and(params->ptypes, params->ptypes, src,
234 				   ICE_FLOW_PTYPE_MAX);
235 		} else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
236 			bitmap_and(params->ptypes, params->ptypes,
237 				   (const unsigned long *)ice_ptypes_tcp_il,
238 				   ICE_FLOW_PTYPE_MAX);
239 		} else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
240 			src = (const unsigned long *)ice_ptypes_sctp_il;
241 			bitmap_and(params->ptypes, params->ptypes, src,
242 				   ICE_FLOW_PTYPE_MAX);
243 		} else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
244 			if (!i) {
245 				src = (const unsigned long *)ice_ptypes_gre_of;
246 				bitmap_and(params->ptypes, params->ptypes,
247 					   src, ICE_FLOW_PTYPE_MAX);
248 			}
249 		}
250 	}
251 
252 	return 0;
253 }
254 
255 /**
256  * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
257  * @hw: pointer to the HW struct
258  * @params: information about the flow to be processed
259  * @seg: packet segment index of the field to be extracted
260  * @fld: ID of field to be extracted
261  *
262  * This function determines the protocol ID, offset, and size of the given
263  * field. It then allocates one or more extraction sequence entries for the
264  * given field, and fill the entries with protocol ID and offset information.
265  */
266 static enum ice_status
267 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
268 		    u8 seg, enum ice_flow_field fld)
269 {
270 	enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
271 	u8 fv_words = hw->blk[params->blk].es.fvw;
272 	struct ice_flow_fld_info *flds;
273 	u16 cnt, ese_bits, i;
274 	u16 off;
275 
276 	flds = params->prof->segs[seg].fields;
277 
278 	switch (fld) {
279 	case ICE_FLOW_FIELD_IDX_IPV4_SA:
280 	case ICE_FLOW_FIELD_IDX_IPV4_DA:
281 		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
282 		break;
283 	case ICE_FLOW_FIELD_IDX_IPV6_SA:
284 	case ICE_FLOW_FIELD_IDX_IPV6_DA:
285 		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
286 		break;
287 	case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
288 	case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
289 		prot_id = ICE_PROT_TCP_IL;
290 		break;
291 	case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
292 	case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
293 		prot_id = ICE_PROT_UDP_IL_OR_S;
294 		break;
295 	case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
296 	case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
297 		prot_id = ICE_PROT_SCTP_IL;
298 		break;
299 	case ICE_FLOW_FIELD_IDX_GRE_KEYID:
300 		prot_id = ICE_PROT_GRE_OF;
301 		break;
302 	default:
303 		return ICE_ERR_NOT_IMPL;
304 	}
305 
306 	/* Each extraction sequence entry is a word in size, and extracts a
307 	 * word-aligned offset from a protocol header.
308 	 */
309 	ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
310 
311 	flds[fld].xtrct.prot_id = prot_id;
312 	flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
313 		ICE_FLOW_FV_EXTRACT_SZ;
314 	flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
315 	flds[fld].xtrct.idx = params->es_cnt;
316 
317 	/* Adjust the next field-entry index after accommodating the number of
318 	 * entries this field consumes
319 	 */
320 	cnt = DIV_ROUND_UP(flds[fld].xtrct.disp + ice_flds_info[fld].size,
321 			   ese_bits);
322 
323 	/* Fill in the extraction sequence entries needed for this field */
324 	off = flds[fld].xtrct.off;
325 	for (i = 0; i < cnt; i++) {
326 		u8 idx;
327 
328 		/* Make sure the number of extraction sequence required
329 		 * does not exceed the block's capability
330 		 */
331 		if (params->es_cnt >= fv_words)
332 			return ICE_ERR_MAX_LIMIT;
333 
334 		/* some blocks require a reversed field vector layout */
335 		if (hw->blk[params->blk].es.reverse)
336 			idx = fv_words - params->es_cnt - 1;
337 		else
338 			idx = params->es_cnt;
339 
340 		params->es[idx].prot_id = prot_id;
341 		params->es[idx].off = off;
342 		params->es_cnt++;
343 
344 		off += ICE_FLOW_FV_EXTRACT_SZ;
345 	}
346 
347 	return 0;
348 }
349 
350 /**
351  * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
352  * @hw: pointer to the HW struct
353  * @params: information about the flow to be processed
354  *
355  * This function iterates through all matched fields in the given segments, and
356  * creates an extraction sequence for the fields.
357  */
358 static enum ice_status
359 ice_flow_create_xtrct_seq(struct ice_hw *hw,
360 			  struct ice_flow_prof_params *params)
361 {
362 	struct ice_flow_prof *prof = params->prof;
363 	enum ice_status status = 0;
364 	u8 i;
365 
366 	for (i = 0; i < prof->segs_cnt; i++) {
367 		u8 j;
368 
369 		for_each_set_bit(j, (unsigned long *)&prof->segs[i].match,
370 				 ICE_FLOW_FIELD_IDX_MAX) {
371 			status = ice_flow_xtract_fld(hw, params, i,
372 						     (enum ice_flow_field)j);
373 			if (status)
374 				return status;
375 		}
376 	}
377 
378 	return status;
379 }
380 
381 /**
382  * ice_flow_proc_segs - process all packet segments associated with a profile
383  * @hw: pointer to the HW struct
384  * @params: information about the flow to be processed
385  */
386 static enum ice_status
387 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
388 {
389 	enum ice_status status;
390 
391 	status = ice_flow_proc_seg_hdrs(params);
392 	if (status)
393 		return status;
394 
395 	status = ice_flow_create_xtrct_seq(hw, params);
396 	if (status)
397 		return status;
398 
399 	switch (params->blk) {
400 	case ICE_BLK_FD:
401 	case ICE_BLK_RSS:
402 		status = 0;
403 		break;
404 	default:
405 		return ICE_ERR_NOT_IMPL;
406 	}
407 
408 	return status;
409 }
410 
411 #define ICE_FLOW_FIND_PROF_CHK_FLDS	0x00000001
412 #define ICE_FLOW_FIND_PROF_CHK_VSI	0x00000002
413 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR	0x00000004
414 
415 /**
416  * ice_flow_find_prof_conds - Find a profile matching headers and conditions
417  * @hw: pointer to the HW struct
418  * @blk: classification stage
419  * @dir: flow direction
420  * @segs: array of one or more packet segments that describe the flow
421  * @segs_cnt: number of packet segments provided
422  * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
423  * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
424  */
425 static struct ice_flow_prof *
426 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
427 			 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
428 			 u8 segs_cnt, u16 vsi_handle, u32 conds)
429 {
430 	struct ice_flow_prof *p, *prof = NULL;
431 
432 	mutex_lock(&hw->fl_profs_locks[blk]);
433 	list_for_each_entry(p, &hw->fl_profs[blk], l_entry)
434 		if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
435 		    segs_cnt && segs_cnt == p->segs_cnt) {
436 			u8 i;
437 
438 			/* Check for profile-VSI association if specified */
439 			if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
440 			    ice_is_vsi_valid(hw, vsi_handle) &&
441 			    !test_bit(vsi_handle, p->vsis))
442 				continue;
443 
444 			/* Protocol headers must be checked. Matched fields are
445 			 * checked if specified.
446 			 */
447 			for (i = 0; i < segs_cnt; i++)
448 				if (segs[i].hdrs != p->segs[i].hdrs ||
449 				    ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
450 				     segs[i].match != p->segs[i].match))
451 					break;
452 
453 			/* A match is found if all segments are matched */
454 			if (i == segs_cnt) {
455 				prof = p;
456 				break;
457 			}
458 		}
459 	mutex_unlock(&hw->fl_profs_locks[blk]);
460 
461 	return prof;
462 }
463 
464 /**
465  * ice_flow_find_prof_id - Look up a profile with given profile ID
466  * @hw: pointer to the HW struct
467  * @blk: classification stage
468  * @prof_id: unique ID to identify this flow profile
469  */
470 static struct ice_flow_prof *
471 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
472 {
473 	struct ice_flow_prof *p;
474 
475 	list_for_each_entry(p, &hw->fl_profs[blk], l_entry)
476 		if (p->id == prof_id)
477 			return p;
478 
479 	return NULL;
480 }
481 
482 /**
483  * ice_dealloc_flow_entry - Deallocate flow entry memory
484  * @hw: pointer to the HW struct
485  * @entry: flow entry to be removed
486  */
487 static void
488 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
489 {
490 	if (!entry)
491 		return;
492 
493 	if (entry->entry)
494 		devm_kfree(ice_hw_to_dev(hw), entry->entry);
495 
496 	devm_kfree(ice_hw_to_dev(hw), entry);
497 }
498 
499 /**
500  * ice_flow_rem_entry_sync - Remove a flow entry
501  * @hw: pointer to the HW struct
502  * @blk: classification stage
503  * @entry: flow entry to be removed
504  */
505 static enum ice_status
506 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,
507 			struct ice_flow_entry *entry)
508 {
509 	if (!entry)
510 		return ICE_ERR_BAD_PTR;
511 
512 	list_del(&entry->l_entry);
513 
514 	ice_dealloc_flow_entry(hw, entry);
515 
516 	return 0;
517 }
518 
519 /**
520  * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
521  * @hw: pointer to the HW struct
522  * @blk: classification stage
523  * @dir: flow direction
524  * @prof_id: unique ID to identify this flow profile
525  * @segs: array of one or more packet segments that describe the flow
526  * @segs_cnt: number of packet segments provided
527  * @prof: stores the returned flow profile added
528  *
529  * Assumption: the caller has acquired the lock to the profile list
530  */
531 static enum ice_status
532 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
533 		       enum ice_flow_dir dir, u64 prof_id,
534 		       struct ice_flow_seg_info *segs, u8 segs_cnt,
535 		       struct ice_flow_prof **prof)
536 {
537 	struct ice_flow_prof_params params;
538 	enum ice_status status;
539 	u8 i;
540 
541 	if (!prof)
542 		return ICE_ERR_BAD_PTR;
543 
544 	memset(&params, 0, sizeof(params));
545 	params.prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params.prof),
546 				   GFP_KERNEL);
547 	if (!params.prof)
548 		return ICE_ERR_NO_MEMORY;
549 
550 	/* initialize extraction sequence to all invalid (0xff) */
551 	for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
552 		params.es[i].prot_id = ICE_PROT_INVALID;
553 		params.es[i].off = ICE_FV_OFFSET_INVAL;
554 	}
555 
556 	params.blk = blk;
557 	params.prof->id = prof_id;
558 	params.prof->dir = dir;
559 	params.prof->segs_cnt = segs_cnt;
560 
561 	/* Make a copy of the segments that need to be persistent in the flow
562 	 * profile instance
563 	 */
564 	for (i = 0; i < segs_cnt; i++)
565 		memcpy(&params.prof->segs[i], &segs[i], sizeof(*segs));
566 
567 	status = ice_flow_proc_segs(hw, &params);
568 	if (status) {
569 		ice_debug(hw, ICE_DBG_FLOW,
570 			  "Error processing a flow's packet segments\n");
571 		goto out;
572 	}
573 
574 	/* Add a HW profile for this flow profile */
575 	status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes, params.es);
576 	if (status) {
577 		ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
578 		goto out;
579 	}
580 
581 	INIT_LIST_HEAD(&params.prof->entries);
582 	mutex_init(&params.prof->entries_lock);
583 	*prof = params.prof;
584 
585 out:
586 	if (status)
587 		devm_kfree(ice_hw_to_dev(hw), params.prof);
588 
589 	return status;
590 }
591 
592 /**
593  * ice_flow_rem_prof_sync - remove a flow profile
594  * @hw: pointer to the hardware structure
595  * @blk: classification stage
596  * @prof: pointer to flow profile to remove
597  *
598  * Assumption: the caller has acquired the lock to the profile list
599  */
600 static enum ice_status
601 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
602 		       struct ice_flow_prof *prof)
603 {
604 	enum ice_status status;
605 
606 	/* Remove all remaining flow entries before removing the flow profile */
607 	if (!list_empty(&prof->entries)) {
608 		struct ice_flow_entry *e, *t;
609 
610 		mutex_lock(&prof->entries_lock);
611 
612 		list_for_each_entry_safe(e, t, &prof->entries, l_entry) {
613 			status = ice_flow_rem_entry_sync(hw, blk, e);
614 			if (status)
615 				break;
616 		}
617 
618 		mutex_unlock(&prof->entries_lock);
619 	}
620 
621 	/* Remove all hardware profiles associated with this flow profile */
622 	status = ice_rem_prof(hw, blk, prof->id);
623 	if (!status) {
624 		list_del(&prof->l_entry);
625 		mutex_destroy(&prof->entries_lock);
626 		devm_kfree(ice_hw_to_dev(hw), prof);
627 	}
628 
629 	return status;
630 }
631 
632 /**
633  * ice_flow_assoc_prof - associate a VSI with a flow profile
634  * @hw: pointer to the hardware structure
635  * @blk: classification stage
636  * @prof: pointer to flow profile
637  * @vsi_handle: software VSI handle
638  *
639  * Assumption: the caller has acquired the lock to the profile list
640  * and the software VSI handle has been validated
641  */
642 static enum ice_status
643 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
644 		    struct ice_flow_prof *prof, u16 vsi_handle)
645 {
646 	enum ice_status status = 0;
647 
648 	if (!test_bit(vsi_handle, prof->vsis)) {
649 		status = ice_add_prof_id_flow(hw, blk,
650 					      ice_get_hw_vsi_num(hw,
651 								 vsi_handle),
652 					      prof->id);
653 		if (!status)
654 			set_bit(vsi_handle, prof->vsis);
655 		else
656 			ice_debug(hw, ICE_DBG_FLOW,
657 				  "HW profile add failed, %d\n",
658 				  status);
659 	}
660 
661 	return status;
662 }
663 
664 /**
665  * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
666  * @hw: pointer to the hardware structure
667  * @blk: classification stage
668  * @prof: pointer to flow profile
669  * @vsi_handle: software VSI handle
670  *
671  * Assumption: the caller has acquired the lock to the profile list
672  * and the software VSI handle has been validated
673  */
674 static enum ice_status
675 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
676 		       struct ice_flow_prof *prof, u16 vsi_handle)
677 {
678 	enum ice_status status = 0;
679 
680 	if (test_bit(vsi_handle, prof->vsis)) {
681 		status = ice_rem_prof_id_flow(hw, blk,
682 					      ice_get_hw_vsi_num(hw,
683 								 vsi_handle),
684 					      prof->id);
685 		if (!status)
686 			clear_bit(vsi_handle, prof->vsis);
687 		else
688 			ice_debug(hw, ICE_DBG_FLOW,
689 				  "HW profile remove failed, %d\n",
690 				  status);
691 	}
692 
693 	return status;
694 }
695 
696 /**
697  * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
698  * @hw: pointer to the HW struct
699  * @blk: classification stage
700  * @dir: flow direction
701  * @prof_id: unique ID to identify this flow profile
702  * @segs: array of one or more packet segments that describe the flow
703  * @segs_cnt: number of packet segments provided
704  * @prof: stores the returned flow profile added
705  */
706 enum ice_status
707 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
708 		  u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
709 		  struct ice_flow_prof **prof)
710 {
711 	enum ice_status status;
712 
713 	if (segs_cnt > ICE_FLOW_SEG_MAX)
714 		return ICE_ERR_MAX_LIMIT;
715 
716 	if (!segs_cnt)
717 		return ICE_ERR_PARAM;
718 
719 	if (!segs)
720 		return ICE_ERR_BAD_PTR;
721 
722 	status = ice_flow_val_hdrs(segs, segs_cnt);
723 	if (status)
724 		return status;
725 
726 	mutex_lock(&hw->fl_profs_locks[blk]);
727 
728 	status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
729 					prof);
730 	if (!status)
731 		list_add(&(*prof)->l_entry, &hw->fl_profs[blk]);
732 
733 	mutex_unlock(&hw->fl_profs_locks[blk]);
734 
735 	return status;
736 }
737 
738 /**
739  * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
740  * @hw: pointer to the HW struct
741  * @blk: the block for which the flow profile is to be removed
742  * @prof_id: unique ID of the flow profile to be removed
743  */
744 enum ice_status
745 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
746 {
747 	struct ice_flow_prof *prof;
748 	enum ice_status status;
749 
750 	mutex_lock(&hw->fl_profs_locks[blk]);
751 
752 	prof = ice_flow_find_prof_id(hw, blk, prof_id);
753 	if (!prof) {
754 		status = ICE_ERR_DOES_NOT_EXIST;
755 		goto out;
756 	}
757 
758 	/* prof becomes invalid after the call */
759 	status = ice_flow_rem_prof_sync(hw, blk, prof);
760 
761 out:
762 	mutex_unlock(&hw->fl_profs_locks[blk]);
763 
764 	return status;
765 }
766 
767 /**
768  * ice_flow_add_entry - Add a flow entry
769  * @hw: pointer to the HW struct
770  * @blk: classification stage
771  * @prof_id: ID of the profile to add a new flow entry to
772  * @entry_id: unique ID to identify this flow entry
773  * @vsi_handle: software VSI handle for the flow entry
774  * @prio: priority of the flow entry
775  * @data: pointer to a data buffer containing flow entry's match values/masks
776  * @entry_h: pointer to buffer that receives the new flow entry's handle
777  */
778 enum ice_status
779 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
780 		   u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
781 		   void *data, u64 *entry_h)
782 {
783 	struct ice_flow_entry *e = NULL;
784 	struct ice_flow_prof *prof;
785 	enum ice_status status;
786 
787 	/* No flow entry data is expected for RSS */
788 	if (!entry_h || (!data && blk != ICE_BLK_RSS))
789 		return ICE_ERR_BAD_PTR;
790 
791 	if (!ice_is_vsi_valid(hw, vsi_handle))
792 		return ICE_ERR_PARAM;
793 
794 	mutex_lock(&hw->fl_profs_locks[blk]);
795 
796 	prof = ice_flow_find_prof_id(hw, blk, prof_id);
797 	if (!prof) {
798 		status = ICE_ERR_DOES_NOT_EXIST;
799 	} else {
800 		/* Allocate memory for the entry being added and associate
801 		 * the VSI to the found flow profile
802 		 */
803 		e = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*e), GFP_KERNEL);
804 		if (!e)
805 			status = ICE_ERR_NO_MEMORY;
806 		else
807 			status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
808 	}
809 
810 	mutex_unlock(&hw->fl_profs_locks[blk]);
811 	if (status)
812 		goto out;
813 
814 	e->id = entry_id;
815 	e->vsi_handle = vsi_handle;
816 	e->prof = prof;
817 	e->priority = prio;
818 
819 	switch (blk) {
820 	case ICE_BLK_FD:
821 	case ICE_BLK_RSS:
822 		break;
823 	default:
824 		status = ICE_ERR_NOT_IMPL;
825 		goto out;
826 	}
827 
828 	mutex_lock(&prof->entries_lock);
829 	list_add(&e->l_entry, &prof->entries);
830 	mutex_unlock(&prof->entries_lock);
831 
832 	*entry_h = ICE_FLOW_ENTRY_HNDL(e);
833 
834 out:
835 	if (status && e) {
836 		if (e->entry)
837 			devm_kfree(ice_hw_to_dev(hw), e->entry);
838 		devm_kfree(ice_hw_to_dev(hw), e);
839 	}
840 
841 	return status;
842 }
843 
844 /**
845  * ice_flow_rem_entry - Remove a flow entry
846  * @hw: pointer to the HW struct
847  * @blk: classification stage
848  * @entry_h: handle to the flow entry to be removed
849  */
850 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
851 				   u64 entry_h)
852 {
853 	struct ice_flow_entry *entry;
854 	struct ice_flow_prof *prof;
855 	enum ice_status status = 0;
856 
857 	if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
858 		return ICE_ERR_PARAM;
859 
860 	entry = ICE_FLOW_ENTRY_PTR(entry_h);
861 
862 	/* Retain the pointer to the flow profile as the entry will be freed */
863 	prof = entry->prof;
864 
865 	if (prof) {
866 		mutex_lock(&prof->entries_lock);
867 		status = ice_flow_rem_entry_sync(hw, blk, entry);
868 		mutex_unlock(&prof->entries_lock);
869 	}
870 
871 	return status;
872 }
873 
874 /**
875  * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
876  * @seg: packet segment the field being set belongs to
877  * @fld: field to be set
878  * @field_type: type of the field
879  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
880  *           entry's input buffer
881  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
882  *            input buffer
883  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
884  *            entry's input buffer
885  *
886  * This helper function stores information of a field being matched, including
887  * the type of the field and the locations of the value to match, the mask, and
888  * and the upper-bound value in the start of the input buffer for a flow entry.
889  * This function should only be used for fixed-size data structures.
890  *
891  * This function also opportunistically determines the protocol headers to be
892  * present based on the fields being set. Some fields cannot be used alone to
893  * determine the protocol headers present. Sometimes, fields for particular
894  * protocol headers are not matched. In those cases, the protocol headers
895  * must be explicitly set.
896  */
897 static void
898 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
899 		     enum ice_flow_fld_match_type field_type, u16 val_loc,
900 		     u16 mask_loc, u16 last_loc)
901 {
902 	u64 bit = BIT_ULL(fld);
903 
904 	seg->match |= bit;
905 	if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
906 		seg->range |= bit;
907 
908 	seg->fields[fld].type = field_type;
909 	seg->fields[fld].src.val = val_loc;
910 	seg->fields[fld].src.mask = mask_loc;
911 	seg->fields[fld].src.last = last_loc;
912 
913 	ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
914 }
915 
916 /**
917  * ice_flow_set_fld - specifies locations of field from entry's input buffer
918  * @seg: packet segment the field being set belongs to
919  * @fld: field to be set
920  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
921  *           entry's input buffer
922  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
923  *            input buffer
924  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
925  *            entry's input buffer
926  * @range: indicate if field being matched is to be in a range
927  *
928  * This function specifies the locations, in the form of byte offsets from the
929  * start of the input buffer for a flow entry, from where the value to match,
930  * the mask value, and upper value can be extracted. These locations are then
931  * stored in the flow profile. When adding a flow entry associated with the
932  * flow profile, these locations will be used to quickly extract the values and
933  * create the content of a match entry. This function should only be used for
934  * fixed-size data structures.
935  */
936 void
937 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
938 		 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
939 {
940 	enum ice_flow_fld_match_type t = range ?
941 		ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
942 
943 	ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
944 }
945 
946 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
947 	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
948 
949 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
950 	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
951 
952 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
953 	(ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
954 	 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
955 
956 /**
957  * ice_flow_set_rss_seg_info - setup packet segments for RSS
958  * @segs: pointer to the flow field segment(s)
959  * @hash_fields: fields to be hashed on for the segment(s)
960  * @flow_hdr: protocol header fields within a packet segment
961  *
962  * Helper function to extract fields from hash bitmap and use flow
963  * header value to set flow field segment for further use in flow
964  * profile entry or removal.
965  */
966 static enum ice_status
967 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
968 			  u32 flow_hdr)
969 {
970 	u64 val;
971 	u8 i;
972 
973 	for_each_set_bit(i, (unsigned long *)&hash_fields,
974 			 ICE_FLOW_FIELD_IDX_MAX)
975 		ice_flow_set_fld(segs, (enum ice_flow_field)i,
976 				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
977 				 ICE_FLOW_FLD_OFF_INVAL, false);
978 
979 	ICE_FLOW_SET_HDRS(segs, flow_hdr);
980 
981 	if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS)
982 		return ICE_ERR_PARAM;
983 
984 	val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
985 	if (val && !is_power_of_2(val))
986 		return ICE_ERR_CFG;
987 
988 	val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
989 	if (val && !is_power_of_2(val))
990 		return ICE_ERR_CFG;
991 
992 	return 0;
993 }
994 
995 /**
996  * ice_rem_vsi_rss_list - remove VSI from RSS list
997  * @hw: pointer to the hardware structure
998  * @vsi_handle: software VSI handle
999  *
1000  * Remove the VSI from all RSS configurations in the list.
1001  */
1002 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
1003 {
1004 	struct ice_rss_cfg *r, *tmp;
1005 
1006 	if (list_empty(&hw->rss_list_head))
1007 		return;
1008 
1009 	mutex_lock(&hw->rss_locks);
1010 	list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
1011 		if (test_and_clear_bit(vsi_handle, r->vsis))
1012 			if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
1013 				list_del(&r->l_entry);
1014 				devm_kfree(ice_hw_to_dev(hw), r);
1015 			}
1016 	mutex_unlock(&hw->rss_locks);
1017 }
1018 
1019 /**
1020  * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
1021  * @hw: pointer to the hardware structure
1022  * @vsi_handle: software VSI handle
1023  *
1024  * This function will iterate through all flow profiles and disassociate
1025  * the VSI from that profile. If the flow profile has no VSIs it will
1026  * be removed.
1027  */
1028 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
1029 {
1030 	const enum ice_block blk = ICE_BLK_RSS;
1031 	struct ice_flow_prof *p, *t;
1032 	enum ice_status status = 0;
1033 
1034 	if (!ice_is_vsi_valid(hw, vsi_handle))
1035 		return ICE_ERR_PARAM;
1036 
1037 	if (list_empty(&hw->fl_profs[blk]))
1038 		return 0;
1039 
1040 	mutex_lock(&hw->fl_profs_locks[blk]);
1041 	list_for_each_entry_safe(p, t, &hw->fl_profs[blk], l_entry)
1042 		if (test_bit(vsi_handle, p->vsis)) {
1043 			status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
1044 			if (status)
1045 				break;
1046 
1047 			if (bitmap_empty(p->vsis, ICE_MAX_VSI)) {
1048 				status = ice_flow_rem_prof_sync(hw, blk, p);
1049 				if (status)
1050 					break;
1051 			}
1052 		}
1053 	mutex_unlock(&hw->fl_profs_locks[blk]);
1054 
1055 	return status;
1056 }
1057 
1058 /**
1059  * ice_rem_rss_list - remove RSS configuration from list
1060  * @hw: pointer to the hardware structure
1061  * @vsi_handle: software VSI handle
1062  * @prof: pointer to flow profile
1063  *
1064  * Assumption: lock has already been acquired for RSS list
1065  */
1066 static void
1067 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
1068 {
1069 	struct ice_rss_cfg *r, *tmp;
1070 
1071 	/* Search for RSS hash fields associated to the VSI that match the
1072 	 * hash configurations associated to the flow profile. If found
1073 	 * remove from the RSS entry list of the VSI context and delete entry.
1074 	 */
1075 	list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
1076 		if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
1077 		    r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
1078 			clear_bit(vsi_handle, r->vsis);
1079 			if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
1080 				list_del(&r->l_entry);
1081 				devm_kfree(ice_hw_to_dev(hw), r);
1082 			}
1083 			return;
1084 		}
1085 }
1086 
1087 /**
1088  * ice_add_rss_list - add RSS configuration to list
1089  * @hw: pointer to the hardware structure
1090  * @vsi_handle: software VSI handle
1091  * @prof: pointer to flow profile
1092  *
1093  * Assumption: lock has already been acquired for RSS list
1094  */
1095 static enum ice_status
1096 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
1097 {
1098 	struct ice_rss_cfg *r, *rss_cfg;
1099 
1100 	list_for_each_entry(r, &hw->rss_list_head, l_entry)
1101 		if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
1102 		    r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
1103 			set_bit(vsi_handle, r->vsis);
1104 			return 0;
1105 		}
1106 
1107 	rss_cfg = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rss_cfg),
1108 			       GFP_KERNEL);
1109 	if (!rss_cfg)
1110 		return ICE_ERR_NO_MEMORY;
1111 
1112 	rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
1113 	rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
1114 	set_bit(vsi_handle, rss_cfg->vsis);
1115 
1116 	list_add_tail(&rss_cfg->l_entry, &hw->rss_list_head);
1117 
1118 	return 0;
1119 }
1120 
1121 #define ICE_FLOW_PROF_HASH_S	0
1122 #define ICE_FLOW_PROF_HASH_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
1123 #define ICE_FLOW_PROF_HDR_S	32
1124 #define ICE_FLOW_PROF_HDR_M	(0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
1125 #define ICE_FLOW_PROF_ENCAP_S	63
1126 #define ICE_FLOW_PROF_ENCAP_M	(BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
1127 
1128 #define ICE_RSS_OUTER_HEADERS	1
1129 #define ICE_RSS_INNER_HEADERS	2
1130 
1131 /* Flow profile ID format:
1132  * [0:31] - Packet match fields
1133  * [32:62] - Protocol header
1134  * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
1135  */
1136 #define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
1137 	(u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
1138 	      (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
1139 	      ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
1140 
1141 /**
1142  * ice_add_rss_cfg_sync - add an RSS configuration
1143  * @hw: pointer to the hardware structure
1144  * @vsi_handle: software VSI handle
1145  * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
1146  * @addl_hdrs: protocol header fields
1147  * @segs_cnt: packet segment count
1148  *
1149  * Assumption: lock has already been acquired for RSS list
1150  */
1151 static enum ice_status
1152 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
1153 		     u32 addl_hdrs, u8 segs_cnt)
1154 {
1155 	const enum ice_block blk = ICE_BLK_RSS;
1156 	struct ice_flow_prof *prof = NULL;
1157 	struct ice_flow_seg_info *segs;
1158 	enum ice_status status;
1159 
1160 	if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
1161 		return ICE_ERR_PARAM;
1162 
1163 	segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
1164 	if (!segs)
1165 		return ICE_ERR_NO_MEMORY;
1166 
1167 	/* Construct the packet segment info from the hashed fields */
1168 	status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
1169 					   addl_hdrs);
1170 	if (status)
1171 		goto exit;
1172 
1173 	/* Search for a flow profile that has matching headers, hash fields
1174 	 * and has the input VSI associated to it. If found, no further
1175 	 * operations required and exit.
1176 	 */
1177 	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
1178 					vsi_handle,
1179 					ICE_FLOW_FIND_PROF_CHK_FLDS |
1180 					ICE_FLOW_FIND_PROF_CHK_VSI);
1181 	if (prof)
1182 		goto exit;
1183 
1184 	/* Check if a flow profile exists with the same protocol headers and
1185 	 * associated with the input VSI. If so disassociate the VSI from
1186 	 * this profile. The VSI will be added to a new profile created with
1187 	 * the protocol header and new hash field configuration.
1188 	 */
1189 	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
1190 					vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
1191 	if (prof) {
1192 		status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
1193 		if (!status)
1194 			ice_rem_rss_list(hw, vsi_handle, prof);
1195 		else
1196 			goto exit;
1197 
1198 		/* Remove profile if it has no VSIs associated */
1199 		if (bitmap_empty(prof->vsis, ICE_MAX_VSI)) {
1200 			status = ice_flow_rem_prof(hw, blk, prof->id);
1201 			if (status)
1202 				goto exit;
1203 		}
1204 	}
1205 
1206 	/* Search for a profile that has same match fields only. If this
1207 	 * exists then associate the VSI to this profile.
1208 	 */
1209 	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
1210 					vsi_handle,
1211 					ICE_FLOW_FIND_PROF_CHK_FLDS);
1212 	if (prof) {
1213 		status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
1214 		if (!status)
1215 			status = ice_add_rss_list(hw, vsi_handle, prof);
1216 		goto exit;
1217 	}
1218 
1219 	/* Create a new flow profile with generated profile and packet
1220 	 * segment information.
1221 	 */
1222 	status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
1223 				   ICE_FLOW_GEN_PROFID(hashed_flds,
1224 						       segs[segs_cnt - 1].hdrs,
1225 						       segs_cnt),
1226 				   segs, segs_cnt, &prof);
1227 	if (status)
1228 		goto exit;
1229 
1230 	status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
1231 	/* If association to a new flow profile failed then this profile can
1232 	 * be removed.
1233 	 */
1234 	if (status) {
1235 		ice_flow_rem_prof(hw, blk, prof->id);
1236 		goto exit;
1237 	}
1238 
1239 	status = ice_add_rss_list(hw, vsi_handle, prof);
1240 
1241 exit:
1242 	kfree(segs);
1243 	return status;
1244 }
1245 
1246 /**
1247  * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
1248  * @hw: pointer to the hardware structure
1249  * @vsi_handle: software VSI handle
1250  * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
1251  * @addl_hdrs: protocol header fields
1252  *
1253  * This function will generate a flow profile based on fields associated with
1254  * the input fields to hash on, the flow type and use the VSI number to add
1255  * a flow entry to the profile.
1256  */
1257 enum ice_status
1258 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
1259 		u32 addl_hdrs)
1260 {
1261 	enum ice_status status;
1262 
1263 	if (hashed_flds == ICE_HASH_INVALID ||
1264 	    !ice_is_vsi_valid(hw, vsi_handle))
1265 		return ICE_ERR_PARAM;
1266 
1267 	mutex_lock(&hw->rss_locks);
1268 	status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
1269 				      ICE_RSS_OUTER_HEADERS);
1270 	if (!status)
1271 		status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
1272 					      addl_hdrs, ICE_RSS_INNER_HEADERS);
1273 	mutex_unlock(&hw->rss_locks);
1274 
1275 	return status;
1276 }
1277 
1278 /* Mapping of AVF hash bit fields to an L3-L4 hash combination.
1279  * As the ice_flow_avf_hdr_field represent individual bit shifts in a hash,
1280  * convert its values to their appropriate flow L3, L4 values.
1281  */
1282 #define ICE_FLOW_AVF_RSS_IPV4_MASKS \
1283 	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
1284 	 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4))
1285 #define ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS \
1286 	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
1287 	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP))
1288 #define ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS \
1289 	(BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
1290 	 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
1291 	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP))
1292 #define ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS \
1293 	(ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS | \
1294 	 ICE_FLOW_AVF_RSS_IPV4_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP))
1295 
1296 #define ICE_FLOW_AVF_RSS_IPV6_MASKS \
1297 	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
1298 	 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6))
1299 #define ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS \
1300 	(BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
1301 	 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP) | \
1302 	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP))
1303 #define ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS \
1304 	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
1305 	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP))
1306 #define ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS \
1307 	(ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS | \
1308 	 ICE_FLOW_AVF_RSS_IPV6_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP))
1309 
1310 /**
1311  * ice_add_avf_rss_cfg - add an RSS configuration for AVF driver
1312  * @hw: pointer to the hardware structure
1313  * @vsi_handle: software VSI handle
1314  * @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure
1315  *
1316  * This function will take the hash bitmap provided by the AVF driver via a
1317  * message, convert it to ICE-compatible values, and configure RSS flow
1318  * profiles.
1319  */
1320 enum ice_status
1321 ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
1322 {
1323 	enum ice_status status = 0;
1324 	u64 hash_flds;
1325 
1326 	if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
1327 	    !ice_is_vsi_valid(hw, vsi_handle))
1328 		return ICE_ERR_PARAM;
1329 
1330 	/* Make sure no unsupported bits are specified */
1331 	if (avf_hash & ~(ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS |
1332 			 ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS))
1333 		return ICE_ERR_CFG;
1334 
1335 	hash_flds = avf_hash;
1336 
1337 	/* Always create an L3 RSS configuration for any L4 RSS configuration */
1338 	if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS)
1339 		hash_flds |= ICE_FLOW_AVF_RSS_IPV4_MASKS;
1340 
1341 	if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS)
1342 		hash_flds |= ICE_FLOW_AVF_RSS_IPV6_MASKS;
1343 
1344 	/* Create the corresponding RSS configuration for each valid hash bit */
1345 	while (hash_flds) {
1346 		u64 rss_hash = ICE_HASH_INVALID;
1347 
1348 		if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS) {
1349 			if (hash_flds & ICE_FLOW_AVF_RSS_IPV4_MASKS) {
1350 				rss_hash = ICE_FLOW_HASH_IPV4;
1351 				hash_flds &= ~ICE_FLOW_AVF_RSS_IPV4_MASKS;
1352 			} else if (hash_flds &
1353 				   ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS) {
1354 				rss_hash = ICE_FLOW_HASH_IPV4 |
1355 					ICE_FLOW_HASH_TCP_PORT;
1356 				hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS;
1357 			} else if (hash_flds &
1358 				   ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS) {
1359 				rss_hash = ICE_FLOW_HASH_IPV4 |
1360 					ICE_FLOW_HASH_UDP_PORT;
1361 				hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS;
1362 			} else if (hash_flds &
1363 				   BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP)) {
1364 				rss_hash = ICE_FLOW_HASH_IPV4 |
1365 					ICE_FLOW_HASH_SCTP_PORT;
1366 				hash_flds &=
1367 					~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP);
1368 			}
1369 		} else if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS) {
1370 			if (hash_flds & ICE_FLOW_AVF_RSS_IPV6_MASKS) {
1371 				rss_hash = ICE_FLOW_HASH_IPV6;
1372 				hash_flds &= ~ICE_FLOW_AVF_RSS_IPV6_MASKS;
1373 			} else if (hash_flds &
1374 				   ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS) {
1375 				rss_hash = ICE_FLOW_HASH_IPV6 |
1376 					ICE_FLOW_HASH_TCP_PORT;
1377 				hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS;
1378 			} else if (hash_flds &
1379 				   ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS) {
1380 				rss_hash = ICE_FLOW_HASH_IPV6 |
1381 					ICE_FLOW_HASH_UDP_PORT;
1382 				hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS;
1383 			} else if (hash_flds &
1384 				   BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP)) {
1385 				rss_hash = ICE_FLOW_HASH_IPV6 |
1386 					ICE_FLOW_HASH_SCTP_PORT;
1387 				hash_flds &=
1388 					~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP);
1389 			}
1390 		}
1391 
1392 		if (rss_hash == ICE_HASH_INVALID)
1393 			return ICE_ERR_OUT_OF_RANGE;
1394 
1395 		status = ice_add_rss_cfg(hw, vsi_handle, rss_hash,
1396 					 ICE_FLOW_SEG_HDR_NONE);
1397 		if (status)
1398 			break;
1399 	}
1400 
1401 	return status;
1402 }
1403 
1404 /**
1405  * ice_replay_rss_cfg - replay RSS configurations associated with VSI
1406  * @hw: pointer to the hardware structure
1407  * @vsi_handle: software VSI handle
1408  */
1409 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
1410 {
1411 	enum ice_status status = 0;
1412 	struct ice_rss_cfg *r;
1413 
1414 	if (!ice_is_vsi_valid(hw, vsi_handle))
1415 		return ICE_ERR_PARAM;
1416 
1417 	mutex_lock(&hw->rss_locks);
1418 	list_for_each_entry(r, &hw->rss_list_head, l_entry) {
1419 		if (test_bit(vsi_handle, r->vsis)) {
1420 			status = ice_add_rss_cfg_sync(hw, vsi_handle,
1421 						      r->hashed_flds,
1422 						      r->packet_hdr,
1423 						      ICE_RSS_OUTER_HEADERS);
1424 			if (status)
1425 				break;
1426 			status = ice_add_rss_cfg_sync(hw, vsi_handle,
1427 						      r->hashed_flds,
1428 						      r->packet_hdr,
1429 						      ICE_RSS_INNER_HEADERS);
1430 			if (status)
1431 				break;
1432 		}
1433 	}
1434 	mutex_unlock(&hw->rss_locks);
1435 
1436 	return status;
1437 }
1438 
1439 /**
1440  * ice_get_rss_cfg - returns hashed fields for the given header types
1441  * @hw: pointer to the hardware structure
1442  * @vsi_handle: software VSI handle
1443  * @hdrs: protocol header type
1444  *
1445  * This function will return the match fields of the first instance of flow
1446  * profile having the given header types and containing input VSI
1447  */
1448 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
1449 {
1450 	struct ice_rss_cfg *r, *rss_cfg = NULL;
1451 
1452 	/* verify if the protocol header is non zero and VSI is valid */
1453 	if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
1454 		return ICE_HASH_INVALID;
1455 
1456 	mutex_lock(&hw->rss_locks);
1457 	list_for_each_entry(r, &hw->rss_list_head, l_entry)
1458 		if (test_bit(vsi_handle, r->vsis) &&
1459 		    r->packet_hdr == hdrs) {
1460 			rss_cfg = r;
1461 			break;
1462 		}
1463 	mutex_unlock(&hw->rss_locks);
1464 
1465 	return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;
1466 }
1467