1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019, Intel Corporation. */
3 
4 #include "ice_common.h"
5 #include "ice_flow.h"
6 
/* Describe properties of a protocol header field */
struct ice_flow_field_info {
	enum ice_flow_seg_hdr hdr;	/* protocol header the field belongs to */
	s16 off;	/* Offset from start of a protocol header, in bits */
	u16 size;	/* Size of fields in bits */
	u16 mask;	/* 16-bit mask for field */
};
14 
/* Initialize an ice_flow_field_info entry from byte-based offset and size.
 * Values are stored in bits; no extraction mask is applied.
 */
#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
	.mask = 0, \
}

/* Same as ICE_FLOW_FLD_INFO() but with an explicit 16-bit extraction mask */
#define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
	.mask = _mask, \
}
28 
/* Table containing properties of supported protocol header fields,
 * indexed by the ICE_FLOW_FIELD_IDX_* values of enum ice_flow_field
 */
static const
struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
	/* Ether */
	/* ICE_FLOW_FIELD_IDX_ETH_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ETH_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_S_VLAN */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, sizeof(__be16)),
	/* ICE_FLOW_FIELD_IDX_C_VLAN */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, sizeof(__be16)),
	/* ICE_FLOW_FIELD_IDX_ETH_TYPE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, sizeof(__be16)),
	/* IPv4 / IPv6 */
	/* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, 1, 0x00fc),
	/* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, 1, 0x0ff0),
	/* TTL/PROT pairs share one byte offset; the mask selects the byte */
	/* ICE_FLOW_FIELD_IDX_IPV4_TTL */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8, 1, 0xff00),
	/* ICE_FLOW_FIELD_IDX_IPV4_PROT */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8, 1, 0x00ff),
	/* ICE_FLOW_FIELD_IDX_IPV6_TTL */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6, 1, 0x00ff),
	/* ICE_FLOW_FIELD_IDX_IPV6_PROT */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6, 1, 0xff00),
	/* ICE_FLOW_FIELD_IDX_IPV4_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, sizeof(struct in_addr)),
	/* ICE_FLOW_FIELD_IDX_IPV4_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, sizeof(struct in_addr)),
	/* ICE_FLOW_FIELD_IDX_IPV6_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, sizeof(struct in6_addr)),
	/* ICE_FLOW_FIELD_IDX_IPV6_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, sizeof(struct in6_addr)),
	/* Transport */
	/* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, sizeof(__be16)),
	/* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, sizeof(__be16)),
	/* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, sizeof(__be16)),
	/* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, sizeof(__be16)),
	/* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, sizeof(__be16)),
	/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, sizeof(__be16)),
	/* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, 1),
	/* ARP */
	/* ICE_FLOW_FIELD_IDX_ARP_SIP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, sizeof(struct in_addr)),
	/* ICE_FLOW_FIELD_IDX_ARP_DIP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, sizeof(struct in_addr)),
	/* ICE_FLOW_FIELD_IDX_ARP_SHA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ARP_DHA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ARP_OP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, sizeof(__be16)),
	/* ICMP */
	/* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, 1),
	/* ICE_FLOW_FIELD_IDX_ICMP_CODE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, 1),
	/* GRE */
	/* ICE_FLOW_FIELD_IDX_GRE_KEYID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12,
			  sizeof_field(struct gre_full_hdr, key)),
};
100 
/* Bitmaps indicating relevant packet types for a particular protocol header
 *
 * Each table below is consumed as a dense bitmap of ICE_FLOW_PTYPE_MAX bits
 * via bitmap_and(): bit N set means packet type (ptype) N is relevant for
 * that header placement, so every table must contain exactly
 * ICE_FLOW_PTYPE_MAX / 32 entries.
 *
 * Packet types for packets with an Outer/First/Single MAC header
 */
static const u32 ice_ptypes_mac_ofos[] = {
	0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
	0x0000077E, 0x00000000, 0x00000000, 0x00000000,
	0x00400000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last MAC VLAN header */
static const u32 ice_ptypes_macvlan_il[] = {
	0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
	0x0000077E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv4 header */
static const u32 ice_ptypes_ipv4_ofos[] = {
	0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv4 header */
static const u32 ice_ptypes_ipv4_il[] = {
	0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
	0x0000000E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv6 header */
static const u32 ice_ptypes_ipv6_ofos[] = {
	0x00000000, 0x00000000, 0x77000000, 0x10002000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv6 header */
static const u32 ice_ptypes_ipv6_il[] = {
	0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
	0x00000770, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */
static const u32 ice_ipv4_ofos_no_l4[] = {
	0x10C00000, 0x04000800, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outermost/First ARP header */
static const u32 ice_ptypes_arp_of[] = {
	0x00000800, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
static const u32 ice_ipv4_il_no_l4[] = {
	0x60000000, 0x18043008, 0x80000002, 0x6010c021,
	0x00000008, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */
static const u32 ice_ipv6_ofos_no_l4[] = {
	0x00000000, 0x00000000, 0x43000000, 0x10002000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
static const u32 ice_ipv6_il_no_l4[] = {
	0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
	0x00000430, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* UDP Packet types for non-tunneled packets or tunneled
 * packets with inner UDP.
 */
static const u32 ice_ptypes_udp_il[] = {
	0x81000000, 0x20204040, 0x04000010, 0x80810102,
	0x00000040, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last TCP header */
static const u32 ice_ptypes_tcp_il[] = {
	0x04000000, 0x80810102, 0x10000040, 0x02040408,
	0x00000102, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
262 /* Packet types for packets with an Innermost/Last SCTP header */
263 static const u32 ice_ptypes_sctp_il[] = {
264 	0x08000000, 0x01020204, 0x20000081, 0x04080810,
265 	0x00000204, 0x00000000, 0x00000000, 0x00000000,
266 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
267 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
268 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
269 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
270 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
271 };
272 
/* Packet types for packets with an Outermost/First ICMP header */
static const u32 ice_ptypes_icmp_of[] = {
	0x10000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last ICMP header */
static const u32 ice_ptypes_icmp_il[] = {
	0x00000000, 0x02040408, 0x40000102, 0x08101020,
	0x00000408, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x42108000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outermost/First GRE header */
static const u32 ice_ptypes_gre_of[] = {
	0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
	0x0000017E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last MAC header
 *
 * Currently all-zero: requesting an inner MAC header clears every ptype,
 * i.e. no packet types with an inner MAC header are supported here.
 */
static const u32 ice_ptypes_mac_il[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
320 
/* Manage parameters and info. used during the creation of a flow profile */
struct ice_flow_prof_params {
	enum ice_block blk;	/* classification stage being programmed */
	u16 entry_length; /* # of bytes formatted entry will require */
	u8 es_cnt;	/* number of extraction sequence entries populated */
	struct ice_flow_prof *prof;	/* profile being built */

	/* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
	 * This will give us the direction flags.
	 */
	struct ice_fv_word es[ICE_MAX_FV_WORDS];	/* extraction sequence */

	u16 mask[ICE_MAX_FV_WORDS];	/* per-entry extraction masks */
	DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX);	/* matching ptypes */
};
336 
/* All supported L3 headers; a segment may carry at most one of these */
#define ICE_FLOW_SEG_HDRS_L3_MASK	\
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_ARP)
/* All supported L4 headers; a segment may carry at most one of these */
#define ICE_FLOW_SEG_HDRS_L4_MASK	\
	(ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
	 ICE_FLOW_SEG_HDR_SCTP)
342 
343 /**
344  * ice_flow_val_hdrs - validates packet segments for valid protocol headers
345  * @segs: array of one or more packet segments that describe the flow
346  * @segs_cnt: number of packet segments provided
347  */
348 static enum ice_status
349 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
350 {
351 	u8 i;
352 
353 	for (i = 0; i < segs_cnt; i++) {
354 		/* Multiple L3 headers */
355 		if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
356 		    !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
357 			return ICE_ERR_PARAM;
358 
359 		/* Multiple L4 headers */
360 		if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
361 		    !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
362 			return ICE_ERR_PARAM;
363 	}
364 
365 	return 0;
366 }
367 
/* Sizes of fixed known protocol headers without header options, in bytes */
#define ICE_FLOW_PROT_HDR_SZ_MAC	14
#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN	(ICE_FLOW_PROT_HDR_SZ_MAC + 2)
#define ICE_FLOW_PROT_HDR_SZ_IPV4	20
#define ICE_FLOW_PROT_HDR_SZ_IPV6	40
#define ICE_FLOW_PROT_HDR_SZ_ARP	28
#define ICE_FLOW_PROT_HDR_SZ_ICMP	8
#define ICE_FLOW_PROT_HDR_SZ_TCP	20
#define ICE_FLOW_PROT_HDR_SZ_UDP	8
#define ICE_FLOW_PROT_HDR_SZ_SCTP	12
378 
379 /**
380  * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
381  * @params: information about the flow to be processed
382  * @seg: index of packet segment whose header size is to be determined
383  */
384 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
385 {
386 	u16 sz;
387 
388 	/* L2 headers */
389 	sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
390 		ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
391 
392 	/* L3 headers */
393 	if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
394 		sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
395 	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
396 		sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
397 	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
398 		sz += ICE_FLOW_PROT_HDR_SZ_ARP;
399 	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
400 		/* An L3 header is required if L4 is specified */
401 		return 0;
402 
403 	/* L4 headers */
404 	if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
405 		sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
406 	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
407 		sz += ICE_FLOW_PROT_HDR_SZ_TCP;
408 	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
409 		sz += ICE_FLOW_PROT_HDR_SZ_UDP;
410 	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
411 		sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
412 
413 	return sz;
414 }
415 
/**
 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
 * @params: information about the flow to be processed
 *
 * This function identifies the packet types associated with the protocol
 * headers being present in packet segments of the specified flow profile.
 *
 * Return: always 0.
 */
static enum ice_status
ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
{
	struct ice_flow_prof *prof;
	u8 i;

	/* Start with every ptype bit set; each header present in a segment
	 * narrows the set by intersecting with that header's ptype bitmap.
	 */
	memset(params->ptypes, 0xff, sizeof(params->ptypes));

	prof = params->prof;

	for (i = 0; i < params->prof->segs_cnt; i++) {
		/* NOTE(review): the u32 ptype tables are reinterpreted as
		 * unsigned long bitmaps for bitmap_and(); this relies on a
		 * layout compatible with the bitmap_* helpers — confirm for
		 * all supported targets.
		 */
		const unsigned long *src;
		u32 hdrs;

		hdrs = prof->segs[i].hdrs;

		/* Segment 0 selects the outer/first tables; any later
		 * segment selects the innermost/last tables.
		 */
		if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
			src = !i ? (const unsigned long *)ice_ptypes_mac_ofos :
				(const unsigned long *)ice_ptypes_mac_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		}

		/* VLAN ptypes are only applied for inner segments */
		if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
			src = (const unsigned long *)ice_ptypes_macvlan_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		}

		/* ARP is only matched in the outermost segment */
		if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
			bitmap_and(params->ptypes, params->ptypes,
				   (const unsigned long *)ice_ptypes_arp_of,
				   ICE_FLOW_PTYPE_MAX);
		}
		/* IPv4/IPv6 use the dedicated "no L4" tables when no L4
		 * header accompanies them in this segment.
		 */
		if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
		    !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) {
			src = !i ? (const unsigned long *)ice_ipv4_ofos_no_l4 :
				(const unsigned long *)ice_ipv4_il_no_l4;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
			src = !i ? (const unsigned long *)ice_ptypes_ipv4_ofos :
				(const unsigned long *)ice_ptypes_ipv4_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
			   !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) {
			src = !i ? (const unsigned long *)ice_ipv6_ofos_no_l4 :
				(const unsigned long *)ice_ipv6_il_no_l4;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
			src = !i ? (const unsigned long *)ice_ptypes_ipv6_ofos :
				(const unsigned long *)ice_ptypes_ipv6_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		}

		if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
			src = (const unsigned long *)ice_ptypes_udp_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
			bitmap_and(params->ptypes, params->ptypes,
				   (const unsigned long *)ice_ptypes_tcp_il,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
			src = (const unsigned long *)ice_ptypes_sctp_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		}

		if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
			src = !i ? (const unsigned long *)ice_ptypes_icmp_of :
				(const unsigned long *)ice_ptypes_icmp_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
			/* GRE ptypes are only tracked for the outer segment */
			if (!i) {
				src = (const unsigned long *)ice_ptypes_gre_of;
				bitmap_and(params->ptypes, params->ptypes,
					   src, ICE_FLOW_PTYPE_MAX);
			}
		}
	}

	return 0;
}
511 
/**
 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 * @seg: packet segment index of the field to be extracted
 * @fld: ID of field to be extracted
 * @match: bit field of all fields
 *
 * This function determines the protocol ID, offset, and size of the given
 * field. It then allocates one or more extraction sequence entries for the
 * given field, and fill the entries with protocol ID and offset information.
 *
 * Return: 0 on success, ICE_ERR_NOT_IMPL for an unsupported field, or
 * ICE_ERR_MAX_LIMIT if the block's extraction sequence is exhausted.
 */
static enum ice_status
ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
		    u8 seg, enum ice_flow_field fld, u64 match)
{
	enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
	enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
	u8 fv_words = hw->blk[params->blk].es.fvw;
	struct ice_flow_fld_info *flds;
	u16 cnt, ese_bits, i;
	u16 sib_mask = 0;
	u16 mask;
	u16 off;

	flds = params->prof->segs[seg].fields;

	/* Map the field ID to a hardware protocol ID; seg == 0 selects the
	 * outer/first protocol IDs, later segments the innermost/last ones.
	 */
	switch (fld) {
	case ICE_FLOW_FIELD_IDX_ETH_DA:
	case ICE_FLOW_FIELD_IDX_ETH_SA:
	case ICE_FLOW_FIELD_IDX_S_VLAN:
	case ICE_FLOW_FIELD_IDX_C_VLAN:
		prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
		break;
	case ICE_FLOW_FIELD_IDX_ETH_TYPE:
		prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
		break;
	case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
		break;
	case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
		break;
	case ICE_FLOW_FIELD_IDX_IPV4_TTL:
	case ICE_FLOW_FIELD_IDX_IPV4_PROT:
		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;

		/* TTL and PROT share the same extraction seq. entry.
		 * Each is considered a sibling to the other in terms of sharing
		 * the same extraction sequence entry.
		 */
		if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
			sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
		else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
			sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;

		/* If the sibling field is also included, that field's
		 * mask needs to be included.
		 */
		if (match & BIT(sib))
			sib_mask = ice_flds_info[sib].mask;
		break;
	case ICE_FLOW_FIELD_IDX_IPV6_TTL:
	case ICE_FLOW_FIELD_IDX_IPV6_PROT:
		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;

		/* TTL and PROT share the same extraction seq. entry.
		 * Each is considered a sibling to the other in terms of sharing
		 * the same extraction sequence entry.
		 */
		if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
			sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
		else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
			sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;

		/* If the sibling field is also included, that field's
		 * mask needs to be included.
		 */
		if (match & BIT(sib))
			sib_mask = ice_flds_info[sib].mask;
		break;
	case ICE_FLOW_FIELD_IDX_IPV4_SA:
	case ICE_FLOW_FIELD_IDX_IPV4_DA:
		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
		break;
	case ICE_FLOW_FIELD_IDX_IPV6_SA:
	case ICE_FLOW_FIELD_IDX_IPV6_DA:
		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
		break;
	case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
	case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
		prot_id = ICE_PROT_TCP_IL;
		break;
	case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
		prot_id = ICE_PROT_UDP_IL_OR_S;
		break;
	case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
		prot_id = ICE_PROT_SCTP_IL;
		break;
	case ICE_FLOW_FIELD_IDX_ARP_SIP:
	case ICE_FLOW_FIELD_IDX_ARP_DIP:
	case ICE_FLOW_FIELD_IDX_ARP_SHA:
	case ICE_FLOW_FIELD_IDX_ARP_DHA:
	case ICE_FLOW_FIELD_IDX_ARP_OP:
		prot_id = ICE_PROT_ARP_OF;
		break;
	case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
	case ICE_FLOW_FIELD_IDX_ICMP_CODE:
		/* ICMP type and code share the same extraction seq. entry */
		prot_id = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4) ?
				ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
		sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
			ICE_FLOW_FIELD_IDX_ICMP_CODE :
			ICE_FLOW_FIELD_IDX_ICMP_TYPE;
		break;
	case ICE_FLOW_FIELD_IDX_GRE_KEYID:
		prot_id = ICE_PROT_GRE_OF;
		break;
	default:
		return ICE_ERR_NOT_IMPL;
	}

	/* Each extraction sequence entry is a word in size, and extracts a
	 * word-aligned offset from a protocol header.
	 */
	ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;

	/* Record where the field sits relative to the word-aligned extraction:
	 * off is the aligned byte offset, disp the residual bit displacement.
	 */
	flds[fld].xtrct.prot_id = prot_id;
	flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
		ICE_FLOW_FV_EXTRACT_SZ;
	flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
	flds[fld].xtrct.idx = params->es_cnt;
	flds[fld].xtrct.mask = ice_flds_info[fld].mask;

	/* Adjust the next field-entry index after accommodating the number of
	 * entries this field consumes
	 */
	cnt = DIV_ROUND_UP(flds[fld].xtrct.disp + ice_flds_info[fld].size,
			   ese_bits);

	/* Fill in the extraction sequence entries needed for this field */
	off = flds[fld].xtrct.off;
	mask = flds[fld].xtrct.mask;
	for (i = 0; i < cnt; i++) {
		/* Only consume an extraction sequence entry if there is no
		 * sibling field associated with this field or the sibling entry
		 * already extracts the word shared with this field.
		 */
		if (sib == ICE_FLOW_FIELD_IDX_MAX ||
		    flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
		    flds[sib].xtrct.off != off) {
			u8 idx;

			/* Make sure the number of extraction sequence required
			 * does not exceed the block's capability
			 */
			if (params->es_cnt >= fv_words)
				return ICE_ERR_MAX_LIMIT;

			/* some blocks require a reversed field vector layout */
			if (hw->blk[params->blk].es.reverse)
				idx = fv_words - params->es_cnt - 1;
			else
				idx = params->es_cnt;

			params->es[idx].prot_id = prot_id;
			params->es[idx].off = off;
			params->mask[idx] = mask | sib_mask;
			params->es_cnt++;
		}

		off += ICE_FLOW_FV_EXTRACT_SZ;
	}

	return 0;
}
691 
/**
 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 * @seg: index of packet segment whose raw fields are to be extracted
 *
 * Allocates extraction sequence entries for each raw byte match defined in
 * the segment. Raw extractions are anchored to the outer MAC protocol ID.
 *
 * Return: 0 on success, ICE_ERR_MAX_LIMIT if the raw count or extraction
 * sequence capacity is exceeded, or ICE_ERR_PARAM if the segment's header
 * size cannot be determined.
 */
static enum ice_status
ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
		     u8 seg)
{
	u16 fv_words;
	u16 hdrs_sz;
	u8 i;

	/* Nothing to do when the segment defines no raw matches */
	if (!params->prof->segs[seg].raws_cnt)
		return 0;

	if (params->prof->segs[seg].raws_cnt >
	    ARRAY_SIZE(params->prof->segs[seg].raws))
		return ICE_ERR_MAX_LIMIT;

	/* Offsets within the segment headers are not supported */
	hdrs_sz = ice_flow_calc_seg_sz(params, seg);
	if (!hdrs_sz)
		return ICE_ERR_PARAM;

	fv_words = hw->blk[params->blk].es.fvw;

	for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
		struct ice_flow_seg_fld_raw *raw;
		u16 off, cnt, j;

		raw = &params->prof->segs[seg].raws[i];

		/* Storing extraction information: word-aligned byte offset
		 * plus the residual bit displacement within the word.
		 */
		raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
		raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
			ICE_FLOW_FV_EXTRACT_SZ;
		raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
			BITS_PER_BYTE;
		raw->info.xtrct.idx = params->es_cnt;

		/* Determine the number of field vector entries this raw field
		 * consumes.
		 */
		cnt = DIV_ROUND_UP(raw->info.xtrct.disp +
				   (raw->info.src.last * BITS_PER_BYTE),
				   (ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE));
		off = raw->info.xtrct.off;
		for (j = 0; j < cnt; j++) {
			u16 idx;

			/* Make sure the number of extraction sequence required
			 * does not exceed the block's capability
			 */
			if (params->es_cnt >= hw->blk[params->blk].es.count ||
			    params->es_cnt >= ICE_MAX_FV_WORDS)
				return ICE_ERR_MAX_LIMIT;

			/* some blocks require a reversed field vector layout */
			if (hw->blk[params->blk].es.reverse)
				idx = fv_words - params->es_cnt - 1;
			else
				idx = params->es_cnt;

			params->es[idx].prot_id = raw->info.xtrct.prot_id;
			params->es[idx].off = off;
			params->es_cnt++;
			off += ICE_FLOW_FV_EXTRACT_SZ;
		}
	}

	return 0;
}
766 
/**
 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 *
 * This function iterates through all matched fields in the given segments, and
 * creates an extraction sequence for the fields.
 *
 * Return: 0 on success, or the first error returned by field or raw
 * extraction.
 */
static enum ice_status
ice_flow_create_xtrct_seq(struct ice_hw *hw,
			  struct ice_flow_prof_params *params)
{
	struct ice_flow_prof *prof = params->prof;
	enum ice_status status = 0;
	u8 i;

	for (i = 0; i < prof->segs_cnt; i++) {
		/* Work on a local copy of the match bitmap so the segment's
		 * own match field stays intact while bits are cleared below.
		 */
		u64 match = params->prof->segs[i].match;
		enum ice_flow_field j;

		/* NOTE(review): casting &match (u64) to unsigned long * for
		 * the bitmap helpers assumes a compatible word layout —
		 * confirm behavior on 32-bit targets.
		 */
		for_each_set_bit(j, (unsigned long *)&match,
				 ICE_FLOW_FIELD_IDX_MAX) {
			status = ice_flow_xtract_fld(hw, params, i, j, match);
			if (status)
				return status;
			clear_bit(j, (unsigned long *)&match);
		}

		/* Process raw matching bytes */
		status = ice_flow_xtract_raws(hw, params, i);
		if (status)
			return status;
	}

	return status;
}
803 
804 /**
805  * ice_flow_proc_segs - process all packet segments associated with a profile
806  * @hw: pointer to the HW struct
807  * @params: information about the flow to be processed
808  */
809 static enum ice_status
810 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
811 {
812 	enum ice_status status;
813 
814 	status = ice_flow_proc_seg_hdrs(params);
815 	if (status)
816 		return status;
817 
818 	status = ice_flow_create_xtrct_seq(hw, params);
819 	if (status)
820 		return status;
821 
822 	switch (params->blk) {
823 	case ICE_BLK_FD:
824 	case ICE_BLK_RSS:
825 		status = 0;
826 		break;
827 	default:
828 		return ICE_ERR_NOT_IMPL;
829 	}
830 
831 	return status;
832 }
833 
834 #define ICE_FLOW_FIND_PROF_CHK_FLDS	0x00000001
835 #define ICE_FLOW_FIND_PROF_CHK_VSI	0x00000002
836 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR	0x00000004
837 
838 /**
839  * ice_flow_find_prof_conds - Find a profile matching headers and conditions
840  * @hw: pointer to the HW struct
841  * @blk: classification stage
842  * @dir: flow direction
843  * @segs: array of one or more packet segments that describe the flow
844  * @segs_cnt: number of packet segments provided
845  * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
846  * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
847  */
848 static struct ice_flow_prof *
849 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
850 			 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
851 			 u8 segs_cnt, u16 vsi_handle, u32 conds)
852 {
853 	struct ice_flow_prof *p, *prof = NULL;
854 
855 	mutex_lock(&hw->fl_profs_locks[blk]);
856 	list_for_each_entry(p, &hw->fl_profs[blk], l_entry)
857 		if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
858 		    segs_cnt && segs_cnt == p->segs_cnt) {
859 			u8 i;
860 
861 			/* Check for profile-VSI association if specified */
862 			if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
863 			    ice_is_vsi_valid(hw, vsi_handle) &&
864 			    !test_bit(vsi_handle, p->vsis))
865 				continue;
866 
867 			/* Protocol headers must be checked. Matched fields are
868 			 * checked if specified.
869 			 */
870 			for (i = 0; i < segs_cnt; i++)
871 				if (segs[i].hdrs != p->segs[i].hdrs ||
872 				    ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
873 				     segs[i].match != p->segs[i].match))
874 					break;
875 
876 			/* A match is found if all segments are matched */
877 			if (i == segs_cnt) {
878 				prof = p;
879 				break;
880 			}
881 		}
882 	mutex_unlock(&hw->fl_profs_locks[blk]);
883 
884 	return prof;
885 }
886 
887 /**
888  * ice_flow_find_prof_id - Look up a profile with given profile ID
889  * @hw: pointer to the HW struct
890  * @blk: classification stage
891  * @prof_id: unique ID to identify this flow profile
892  */
893 static struct ice_flow_prof *
894 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
895 {
896 	struct ice_flow_prof *p;
897 
898 	list_for_each_entry(p, &hw->fl_profs[blk], l_entry)
899 		if (p->id == prof_id)
900 			return p;
901 
902 	return NULL;
903 }
904 
905 /**
906  * ice_dealloc_flow_entry - Deallocate flow entry memory
907  * @hw: pointer to the HW struct
908  * @entry: flow entry to be removed
909  */
910 static void
911 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
912 {
913 	if (!entry)
914 		return;
915 
916 	if (entry->entry)
917 		devm_kfree(ice_hw_to_dev(hw), entry->entry);
918 
919 	devm_kfree(ice_hw_to_dev(hw), entry);
920 }
921 
922 /**
923  * ice_flow_rem_entry_sync - Remove a flow entry
924  * @hw: pointer to the HW struct
925  * @blk: classification stage
926  * @entry: flow entry to be removed
927  */
928 static enum ice_status
929 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,
930 			struct ice_flow_entry *entry)
931 {
932 	if (!entry)
933 		return ICE_ERR_BAD_PTR;
934 
935 	list_del(&entry->l_entry);
936 
937 	ice_dealloc_flow_entry(hw, entry);
938 
939 	return 0;
940 }
941 
942 /**
943  * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
944  * @hw: pointer to the HW struct
945  * @blk: classification stage
946  * @dir: flow direction
947  * @prof_id: unique ID to identify this flow profile
948  * @segs: array of one or more packet segments that describe the flow
949  * @segs_cnt: number of packet segments provided
950  * @prof: stores the returned flow profile added
951  *
952  * Assumption: the caller has acquired the lock to the profile list
953  */
954 static enum ice_status
955 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
956 		       enum ice_flow_dir dir, u64 prof_id,
957 		       struct ice_flow_seg_info *segs, u8 segs_cnt,
958 		       struct ice_flow_prof **prof)
959 {
960 	struct ice_flow_prof_params *params;
961 	enum ice_status status;
962 	u8 i;
963 
964 	if (!prof)
965 		return ICE_ERR_BAD_PTR;
966 
967 	params = kzalloc(sizeof(*params), GFP_KERNEL);
968 	if (!params)
969 		return ICE_ERR_NO_MEMORY;
970 
971 	params->prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params->prof),
972 				    GFP_KERNEL);
973 	if (!params->prof) {
974 		status = ICE_ERR_NO_MEMORY;
975 		goto free_params;
976 	}
977 
978 	/* initialize extraction sequence to all invalid (0xff) */
979 	for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
980 		params->es[i].prot_id = ICE_PROT_INVALID;
981 		params->es[i].off = ICE_FV_OFFSET_INVAL;
982 	}
983 
984 	params->blk = blk;
985 	params->prof->id = prof_id;
986 	params->prof->dir = dir;
987 	params->prof->segs_cnt = segs_cnt;
988 
989 	/* Make a copy of the segments that need to be persistent in the flow
990 	 * profile instance
991 	 */
992 	for (i = 0; i < segs_cnt; i++)
993 		memcpy(&params->prof->segs[i], &segs[i], sizeof(*segs));
994 
995 	status = ice_flow_proc_segs(hw, params);
996 	if (status) {
997 		ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
998 		goto out;
999 	}
1000 
1001 	/* Add a HW profile for this flow profile */
1002 	status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
1003 			      params->es, params->mask);
1004 	if (status) {
1005 		ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
1006 		goto out;
1007 	}
1008 
1009 	INIT_LIST_HEAD(&params->prof->entries);
1010 	mutex_init(&params->prof->entries_lock);
1011 	*prof = params->prof;
1012 
1013 out:
1014 	if (status)
1015 		devm_kfree(ice_hw_to_dev(hw), params->prof);
1016 free_params:
1017 	kfree(params);
1018 
1019 	return status;
1020 }
1021 
1022 /**
1023  * ice_flow_rem_prof_sync - remove a flow profile
1024  * @hw: pointer to the hardware structure
1025  * @blk: classification stage
1026  * @prof: pointer to flow profile to remove
1027  *
1028  * Assumption: the caller has acquired the lock to the profile list
1029  */
1030 static enum ice_status
1031 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
1032 		       struct ice_flow_prof *prof)
1033 {
1034 	enum ice_status status;
1035 
1036 	/* Remove all remaining flow entries before removing the flow profile */
1037 	if (!list_empty(&prof->entries)) {
1038 		struct ice_flow_entry *e, *t;
1039 
1040 		mutex_lock(&prof->entries_lock);
1041 
1042 		list_for_each_entry_safe(e, t, &prof->entries, l_entry) {
1043 			status = ice_flow_rem_entry_sync(hw, blk, e);
1044 			if (status)
1045 				break;
1046 		}
1047 
1048 		mutex_unlock(&prof->entries_lock);
1049 	}
1050 
1051 	/* Remove all hardware profiles associated with this flow profile */
1052 	status = ice_rem_prof(hw, blk, prof->id);
1053 	if (!status) {
1054 		list_del(&prof->l_entry);
1055 		mutex_destroy(&prof->entries_lock);
1056 		devm_kfree(ice_hw_to_dev(hw), prof);
1057 	}
1058 
1059 	return status;
1060 }
1061 
1062 /**
1063  * ice_flow_assoc_prof - associate a VSI with a flow profile
1064  * @hw: pointer to the hardware structure
1065  * @blk: classification stage
1066  * @prof: pointer to flow profile
1067  * @vsi_handle: software VSI handle
1068  *
1069  * Assumption: the caller has acquired the lock to the profile list
1070  * and the software VSI handle has been validated
1071  */
1072 static enum ice_status
1073 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
1074 		    struct ice_flow_prof *prof, u16 vsi_handle)
1075 {
1076 	enum ice_status status = 0;
1077 
1078 	if (!test_bit(vsi_handle, prof->vsis)) {
1079 		status = ice_add_prof_id_flow(hw, blk,
1080 					      ice_get_hw_vsi_num(hw,
1081 								 vsi_handle),
1082 					      prof->id);
1083 		if (!status)
1084 			set_bit(vsi_handle, prof->vsis);
1085 		else
1086 			ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
1087 				  status);
1088 	}
1089 
1090 	return status;
1091 }
1092 
1093 /**
1094  * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
1095  * @hw: pointer to the hardware structure
1096  * @blk: classification stage
1097  * @prof: pointer to flow profile
1098  * @vsi_handle: software VSI handle
1099  *
1100  * Assumption: the caller has acquired the lock to the profile list
1101  * and the software VSI handle has been validated
1102  */
1103 static enum ice_status
1104 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
1105 		       struct ice_flow_prof *prof, u16 vsi_handle)
1106 {
1107 	enum ice_status status = 0;
1108 
1109 	if (test_bit(vsi_handle, prof->vsis)) {
1110 		status = ice_rem_prof_id_flow(hw, blk,
1111 					      ice_get_hw_vsi_num(hw,
1112 								 vsi_handle),
1113 					      prof->id);
1114 		if (!status)
1115 			clear_bit(vsi_handle, prof->vsis);
1116 		else
1117 			ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
1118 				  status);
1119 	}
1120 
1121 	return status;
1122 }
1123 
1124 /**
1125  * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
1126  * @hw: pointer to the HW struct
1127  * @blk: classification stage
1128  * @dir: flow direction
1129  * @prof_id: unique ID to identify this flow profile
1130  * @segs: array of one or more packet segments that describe the flow
1131  * @segs_cnt: number of packet segments provided
1132  * @prof: stores the returned flow profile added
1133  */
1134 enum ice_status
1135 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1136 		  u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
1137 		  struct ice_flow_prof **prof)
1138 {
1139 	enum ice_status status;
1140 
1141 	if (segs_cnt > ICE_FLOW_SEG_MAX)
1142 		return ICE_ERR_MAX_LIMIT;
1143 
1144 	if (!segs_cnt)
1145 		return ICE_ERR_PARAM;
1146 
1147 	if (!segs)
1148 		return ICE_ERR_BAD_PTR;
1149 
1150 	status = ice_flow_val_hdrs(segs, segs_cnt);
1151 	if (status)
1152 		return status;
1153 
1154 	mutex_lock(&hw->fl_profs_locks[blk]);
1155 
1156 	status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
1157 					prof);
1158 	if (!status)
1159 		list_add(&(*prof)->l_entry, &hw->fl_profs[blk]);
1160 
1161 	mutex_unlock(&hw->fl_profs_locks[blk]);
1162 
1163 	return status;
1164 }
1165 
1166 /**
1167  * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
1168  * @hw: pointer to the HW struct
1169  * @blk: the block for which the flow profile is to be removed
1170  * @prof_id: unique ID of the flow profile to be removed
1171  */
1172 enum ice_status
1173 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1174 {
1175 	struct ice_flow_prof *prof;
1176 	enum ice_status status;
1177 
1178 	mutex_lock(&hw->fl_profs_locks[blk]);
1179 
1180 	prof = ice_flow_find_prof_id(hw, blk, prof_id);
1181 	if (!prof) {
1182 		status = ICE_ERR_DOES_NOT_EXIST;
1183 		goto out;
1184 	}
1185 
1186 	/* prof becomes invalid after the call */
1187 	status = ice_flow_rem_prof_sync(hw, blk, prof);
1188 
1189 out:
1190 	mutex_unlock(&hw->fl_profs_locks[blk]);
1191 
1192 	return status;
1193 }
1194 
1195 /**
1196  * ice_flow_add_entry - Add a flow entry
1197  * @hw: pointer to the HW struct
1198  * @blk: classification stage
1199  * @prof_id: ID of the profile to add a new flow entry to
1200  * @entry_id: unique ID to identify this flow entry
1201  * @vsi_handle: software VSI handle for the flow entry
1202  * @prio: priority of the flow entry
1203  * @data: pointer to a data buffer containing flow entry's match values/masks
1204  * @entry_h: pointer to buffer that receives the new flow entry's handle
1205  */
1206 enum ice_status
1207 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
1208 		   u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
1209 		   void *data, u64 *entry_h)
1210 {
1211 	struct ice_flow_entry *e = NULL;
1212 	struct ice_flow_prof *prof;
1213 	enum ice_status status;
1214 
1215 	/* No flow entry data is expected for RSS */
1216 	if (!entry_h || (!data && blk != ICE_BLK_RSS))
1217 		return ICE_ERR_BAD_PTR;
1218 
1219 	if (!ice_is_vsi_valid(hw, vsi_handle))
1220 		return ICE_ERR_PARAM;
1221 
1222 	mutex_lock(&hw->fl_profs_locks[blk]);
1223 
1224 	prof = ice_flow_find_prof_id(hw, blk, prof_id);
1225 	if (!prof) {
1226 		status = ICE_ERR_DOES_NOT_EXIST;
1227 	} else {
1228 		/* Allocate memory for the entry being added and associate
1229 		 * the VSI to the found flow profile
1230 		 */
1231 		e = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*e), GFP_KERNEL);
1232 		if (!e)
1233 			status = ICE_ERR_NO_MEMORY;
1234 		else
1235 			status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
1236 	}
1237 
1238 	mutex_unlock(&hw->fl_profs_locks[blk]);
1239 	if (status)
1240 		goto out;
1241 
1242 	e->id = entry_id;
1243 	e->vsi_handle = vsi_handle;
1244 	e->prof = prof;
1245 	e->priority = prio;
1246 
1247 	switch (blk) {
1248 	case ICE_BLK_FD:
1249 	case ICE_BLK_RSS:
1250 		break;
1251 	default:
1252 		status = ICE_ERR_NOT_IMPL;
1253 		goto out;
1254 	}
1255 
1256 	mutex_lock(&prof->entries_lock);
1257 	list_add(&e->l_entry, &prof->entries);
1258 	mutex_unlock(&prof->entries_lock);
1259 
1260 	*entry_h = ICE_FLOW_ENTRY_HNDL(e);
1261 
1262 out:
1263 	if (status && e) {
1264 		if (e->entry)
1265 			devm_kfree(ice_hw_to_dev(hw), e->entry);
1266 		devm_kfree(ice_hw_to_dev(hw), e);
1267 	}
1268 
1269 	return status;
1270 }
1271 
1272 /**
1273  * ice_flow_rem_entry - Remove a flow entry
1274  * @hw: pointer to the HW struct
1275  * @blk: classification stage
1276  * @entry_h: handle to the flow entry to be removed
1277  */
1278 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
1279 				   u64 entry_h)
1280 {
1281 	struct ice_flow_entry *entry;
1282 	struct ice_flow_prof *prof;
1283 	enum ice_status status = 0;
1284 
1285 	if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
1286 		return ICE_ERR_PARAM;
1287 
1288 	entry = ICE_FLOW_ENTRY_PTR(entry_h);
1289 
1290 	/* Retain the pointer to the flow profile as the entry will be freed */
1291 	prof = entry->prof;
1292 
1293 	if (prof) {
1294 		mutex_lock(&prof->entries_lock);
1295 		status = ice_flow_rem_entry_sync(hw, blk, entry);
1296 		mutex_unlock(&prof->entries_lock);
1297 	}
1298 
1299 	return status;
1300 }
1301 
1302 /**
1303  * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
1304  * @seg: packet segment the field being set belongs to
1305  * @fld: field to be set
1306  * @field_type: type of the field
1307  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
1308  *           entry's input buffer
1309  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
1310  *            input buffer
1311  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
1312  *            entry's input buffer
1313  *
1314  * This helper function stores information of a field being matched, including
1315  * the type of the field and the locations of the value to match, the mask, and
1316  * the upper-bound value in the start of the input buffer for a flow entry.
1317  * This function should only be used for fixed-size data structures.
1318  *
1319  * This function also opportunistically determines the protocol headers to be
1320  * present based on the fields being set. Some fields cannot be used alone to
1321  * determine the protocol headers present. Sometimes, fields for particular
1322  * protocol headers are not matched. In those cases, the protocol headers
1323  * must be explicitly set.
1324  */
1325 static void
1326 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
1327 		     enum ice_flow_fld_match_type field_type, u16 val_loc,
1328 		     u16 mask_loc, u16 last_loc)
1329 {
1330 	u64 bit = BIT_ULL(fld);
1331 
1332 	seg->match |= bit;
1333 	if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
1334 		seg->range |= bit;
1335 
1336 	seg->fields[fld].type = field_type;
1337 	seg->fields[fld].src.val = val_loc;
1338 	seg->fields[fld].src.mask = mask_loc;
1339 	seg->fields[fld].src.last = last_loc;
1340 
1341 	ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
1342 }
1343 
1344 /**
1345  * ice_flow_set_fld - specifies locations of field from entry's input buffer
1346  * @seg: packet segment the field being set belongs to
1347  * @fld: field to be set
1348  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
1349  *           entry's input buffer
1350  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
1351  *            input buffer
1352  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
1353  *            entry's input buffer
1354  * @range: indicate if field being matched is to be in a range
1355  *
1356  * This function specifies the locations, in the form of byte offsets from the
1357  * start of the input buffer for a flow entry, from where the value to match,
1358  * the mask value, and upper value can be extracted. These locations are then
1359  * stored in the flow profile. When adding a flow entry associated with the
1360  * flow profile, these locations will be used to quickly extract the values and
1361  * create the content of a match entry. This function should only be used for
1362  * fixed-size data structures.
1363  */
1364 void
1365 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
1366 		 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
1367 {
1368 	enum ice_flow_fld_match_type t = range ?
1369 		ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
1370 
1371 	ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
1372 }
1373 
1374 /**
1375  * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
1376  * @seg: packet segment the field being set belongs to
1377  * @off: offset of the raw field from the beginning of the segment in bytes
1378  * @len: length of the raw pattern to be matched
1379  * @val_loc: location of the value to match from entry's input buffer
1380  * @mask_loc: location of mask value from entry's input buffer
1381  *
1382  * This function specifies the offset of the raw field to be match from the
1383  * beginning of the specified packet segment, and the locations, in the form of
1384  * byte offsets from the start of the input buffer for a flow entry, from where
1385  * the value to match and the mask value to be extracted. These locations are
1386  * then stored in the flow profile. When adding flow entries to the associated
1387  * flow profile, these locations can be used to quickly extract the values to
1388  * create the content of a match entry. This function should only be used for
1389  * fixed-size data structures.
1390  */
1391 void
1392 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
1393 		     u16 val_loc, u16 mask_loc)
1394 {
1395 	if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
1396 		seg->raws[seg->raws_cnt].off = off;
1397 		seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
1398 		seg->raws[seg->raws_cnt].info.src.val = val_loc;
1399 		seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
1400 		/* The "last" field is used to store the length of the field */
1401 		seg->raws[seg->raws_cnt].info.src.last = len;
1402 	}
1403 
1404 	/* Overflows of "raws" will be handled as an error condition later in
1405 	 * the flow when this information is processed.
1406 	 */
1407 	seg->raws_cnt++;
1408 }
1409 
1410 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
1411 	(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
1412 
1413 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
1414 	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
1415 
1416 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
1417 	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
1418 
1419 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
1420 	(ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
1421 	 ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
1422 	 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
1423 
1424 /**
1425  * ice_flow_set_rss_seg_info - setup packet segments for RSS
1426  * @segs: pointer to the flow field segment(s)
1427  * @hash_fields: fields to be hashed on for the segment(s)
1428  * @flow_hdr: protocol header fields within a packet segment
1429  *
1430  * Helper function to extract fields from hash bitmap and use flow
1431  * header value to set flow field segment for further use in flow
1432  * profile entry or removal.
1433  */
1434 static enum ice_status
1435 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
1436 			  u32 flow_hdr)
1437 {
1438 	u64 val;
1439 	u8 i;
1440 
1441 	for_each_set_bit(i, (unsigned long *)&hash_fields,
1442 			 ICE_FLOW_FIELD_IDX_MAX)
1443 		ice_flow_set_fld(segs, (enum ice_flow_field)i,
1444 				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1445 				 ICE_FLOW_FLD_OFF_INVAL, false);
1446 
1447 	ICE_FLOW_SET_HDRS(segs, flow_hdr);
1448 
1449 	if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS)
1450 		return ICE_ERR_PARAM;
1451 
1452 	val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
1453 	if (val && !is_power_of_2(val))
1454 		return ICE_ERR_CFG;
1455 
1456 	val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
1457 	if (val && !is_power_of_2(val))
1458 		return ICE_ERR_CFG;
1459 
1460 	return 0;
1461 }
1462 
1463 /**
1464  * ice_rem_vsi_rss_list - remove VSI from RSS list
1465  * @hw: pointer to the hardware structure
1466  * @vsi_handle: software VSI handle
1467  *
1468  * Remove the VSI from all RSS configurations in the list.
1469  */
1470 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
1471 {
1472 	struct ice_rss_cfg *r, *tmp;
1473 
1474 	if (list_empty(&hw->rss_list_head))
1475 		return;
1476 
1477 	mutex_lock(&hw->rss_locks);
1478 	list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
1479 		if (test_and_clear_bit(vsi_handle, r->vsis))
1480 			if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
1481 				list_del(&r->l_entry);
1482 				devm_kfree(ice_hw_to_dev(hw), r);
1483 			}
1484 	mutex_unlock(&hw->rss_locks);
1485 }
1486 
1487 /**
1488  * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
1489  * @hw: pointer to the hardware structure
1490  * @vsi_handle: software VSI handle
1491  *
1492  * This function will iterate through all flow profiles and disassociate
1493  * the VSI from that profile. If the flow profile has no VSIs it will
1494  * be removed.
1495  */
1496 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
1497 {
1498 	const enum ice_block blk = ICE_BLK_RSS;
1499 	struct ice_flow_prof *p, *t;
1500 	enum ice_status status = 0;
1501 
1502 	if (!ice_is_vsi_valid(hw, vsi_handle))
1503 		return ICE_ERR_PARAM;
1504 
1505 	if (list_empty(&hw->fl_profs[blk]))
1506 		return 0;
1507 
1508 	mutex_lock(&hw->rss_locks);
1509 	list_for_each_entry_safe(p, t, &hw->fl_profs[blk], l_entry)
1510 		if (test_bit(vsi_handle, p->vsis)) {
1511 			status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
1512 			if (status)
1513 				break;
1514 
1515 			if (bitmap_empty(p->vsis, ICE_MAX_VSI)) {
1516 				status = ice_flow_rem_prof(hw, blk, p->id);
1517 				if (status)
1518 					break;
1519 			}
1520 		}
1521 	mutex_unlock(&hw->rss_locks);
1522 
1523 	return status;
1524 }
1525 
1526 /**
1527  * ice_rem_rss_list - remove RSS configuration from list
1528  * @hw: pointer to the hardware structure
1529  * @vsi_handle: software VSI handle
1530  * @prof: pointer to flow profile
1531  *
1532  * Assumption: lock has already been acquired for RSS list
1533  */
1534 static void
1535 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
1536 {
1537 	struct ice_rss_cfg *r, *tmp;
1538 
1539 	/* Search for RSS hash fields associated to the VSI that match the
1540 	 * hash configurations associated to the flow profile. If found
1541 	 * remove from the RSS entry list of the VSI context and delete entry.
1542 	 */
1543 	list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
1544 		if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
1545 		    r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
1546 			clear_bit(vsi_handle, r->vsis);
1547 			if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
1548 				list_del(&r->l_entry);
1549 				devm_kfree(ice_hw_to_dev(hw), r);
1550 			}
1551 			return;
1552 		}
1553 }
1554 
1555 /**
1556  * ice_add_rss_list - add RSS configuration to list
1557  * @hw: pointer to the hardware structure
1558  * @vsi_handle: software VSI handle
1559  * @prof: pointer to flow profile
1560  *
1561  * Assumption: lock has already been acquired for RSS list
1562  */
1563 static enum ice_status
1564 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
1565 {
1566 	struct ice_rss_cfg *r, *rss_cfg;
1567 
1568 	list_for_each_entry(r, &hw->rss_list_head, l_entry)
1569 		if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
1570 		    r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
1571 			set_bit(vsi_handle, r->vsis);
1572 			return 0;
1573 		}
1574 
1575 	rss_cfg = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rss_cfg),
1576 			       GFP_KERNEL);
1577 	if (!rss_cfg)
1578 		return ICE_ERR_NO_MEMORY;
1579 
1580 	rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
1581 	rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
1582 	set_bit(vsi_handle, rss_cfg->vsis);
1583 
1584 	list_add_tail(&rss_cfg->l_entry, &hw->rss_list_head);
1585 
1586 	return 0;
1587 }
1588 
1589 #define ICE_FLOW_PROF_HASH_S	0
1590 #define ICE_FLOW_PROF_HASH_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
1591 #define ICE_FLOW_PROF_HDR_S	32
1592 #define ICE_FLOW_PROF_HDR_M	(0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
1593 #define ICE_FLOW_PROF_ENCAP_S	63
1594 #define ICE_FLOW_PROF_ENCAP_M	(BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
1595 
1596 #define ICE_RSS_OUTER_HEADERS	1
1597 #define ICE_RSS_INNER_HEADERS	2
1598 
1599 /* Flow profile ID format:
1600  * [0:31] - Packet match fields
1601  * [32:62] - Protocol header
1602  * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
1603  */
1604 #define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
1605 	(u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
1606 	      (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
1607 	      ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
1608 
1609 /**
1610  * ice_add_rss_cfg_sync - add an RSS configuration
1611  * @hw: pointer to the hardware structure
1612  * @vsi_handle: software VSI handle
1613  * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
1614  * @addl_hdrs: protocol header fields
1615  * @segs_cnt: packet segment count
1616  *
1617  * Assumption: lock has already been acquired for RSS list
1618  */
1619 static enum ice_status
1620 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
1621 		     u32 addl_hdrs, u8 segs_cnt)
1622 {
1623 	const enum ice_block blk = ICE_BLK_RSS;
1624 	struct ice_flow_prof *prof = NULL;
1625 	struct ice_flow_seg_info *segs;
1626 	enum ice_status status;
1627 
1628 	if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
1629 		return ICE_ERR_PARAM;
1630 
1631 	segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
1632 	if (!segs)
1633 		return ICE_ERR_NO_MEMORY;
1634 
1635 	/* Construct the packet segment info from the hashed fields */
1636 	status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
1637 					   addl_hdrs);
1638 	if (status)
1639 		goto exit;
1640 
1641 	/* Search for a flow profile that has matching headers, hash fields
1642 	 * and has the input VSI associated to it. If found, no further
1643 	 * operations required and exit.
1644 	 */
1645 	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
1646 					vsi_handle,
1647 					ICE_FLOW_FIND_PROF_CHK_FLDS |
1648 					ICE_FLOW_FIND_PROF_CHK_VSI);
1649 	if (prof)
1650 		goto exit;
1651 
1652 	/* Check if a flow profile exists with the same protocol headers and
1653 	 * associated with the input VSI. If so disassociate the VSI from
1654 	 * this profile. The VSI will be added to a new profile created with
1655 	 * the protocol header and new hash field configuration.
1656 	 */
1657 	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
1658 					vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
1659 	if (prof) {
1660 		status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
1661 		if (!status)
1662 			ice_rem_rss_list(hw, vsi_handle, prof);
1663 		else
1664 			goto exit;
1665 
1666 		/* Remove profile if it has no VSIs associated */
1667 		if (bitmap_empty(prof->vsis, ICE_MAX_VSI)) {
1668 			status = ice_flow_rem_prof(hw, blk, prof->id);
1669 			if (status)
1670 				goto exit;
1671 		}
1672 	}
1673 
1674 	/* Search for a profile that has same match fields only. If this
1675 	 * exists then associate the VSI to this profile.
1676 	 */
1677 	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
1678 					vsi_handle,
1679 					ICE_FLOW_FIND_PROF_CHK_FLDS);
1680 	if (prof) {
1681 		status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
1682 		if (!status)
1683 			status = ice_add_rss_list(hw, vsi_handle, prof);
1684 		goto exit;
1685 	}
1686 
1687 	/* Create a new flow profile with generated profile and packet
1688 	 * segment information.
1689 	 */
1690 	status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
1691 				   ICE_FLOW_GEN_PROFID(hashed_flds,
1692 						       segs[segs_cnt - 1].hdrs,
1693 						       segs_cnt),
1694 				   segs, segs_cnt, &prof);
1695 	if (status)
1696 		goto exit;
1697 
1698 	status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
1699 	/* If association to a new flow profile failed then this profile can
1700 	 * be removed.
1701 	 */
1702 	if (status) {
1703 		ice_flow_rem_prof(hw, blk, prof->id);
1704 		goto exit;
1705 	}
1706 
1707 	status = ice_add_rss_list(hw, vsi_handle, prof);
1708 
1709 exit:
1710 	kfree(segs);
1711 	return status;
1712 }
1713 
1714 /**
1715  * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
1716  * @hw: pointer to the hardware structure
1717  * @vsi_handle: software VSI handle
1718  * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
1719  * @addl_hdrs: protocol header fields
1720  *
1721  * This function will generate a flow profile based on fields associated with
1722  * the input fields to hash on, the flow type and use the VSI number to add
1723  * a flow entry to the profile.
1724  */
1725 enum ice_status
1726 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
1727 		u32 addl_hdrs)
1728 {
1729 	enum ice_status status;
1730 
1731 	if (hashed_flds == ICE_HASH_INVALID ||
1732 	    !ice_is_vsi_valid(hw, vsi_handle))
1733 		return ICE_ERR_PARAM;
1734 
1735 	mutex_lock(&hw->rss_locks);
1736 	status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
1737 				      ICE_RSS_OUTER_HEADERS);
1738 	if (!status)
1739 		status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
1740 					      addl_hdrs, ICE_RSS_INNER_HEADERS);
1741 	mutex_unlock(&hw->rss_locks);
1742 
1743 	return status;
1744 }
1745 
1746 /* Mapping of AVF hash bit fields to an L3-L4 hash combination.
1747  * As the ice_flow_avf_hdr_field represent individual bit shifts in a hash,
1748  * convert its values to their appropriate flow L3, L4 values.
1749  */
/* L3-only IPv4 hash bits (plain and fragmented IPv4) */
#define ICE_FLOW_AVF_RSS_IPV4_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4))
/* TCP-over-IPv4 hash bits */
#define ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP))
/* UDP-over-IPv4 hash bits (unicast, multicast, and generic UDP) */
#define ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP))
/* Every IPv4-related hash bit the AVF interface may request */
#define ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS \
	(ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS | \
	 ICE_FLOW_AVF_RSS_IPV4_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP))

/* L3-only IPv6 hash bits (plain and fragmented IPv6) */
#define ICE_FLOW_AVF_RSS_IPV6_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6))
/* UDP-over-IPv6 hash bits (unicast, multicast, and generic UDP) */
#define ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP))
/* TCP-over-IPv6 hash bits */
#define ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP))
/* Every IPv6-related hash bit the AVF interface may request */
#define ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS \
	(ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS | \
	 ICE_FLOW_AVF_RSS_IPV6_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP))
1777 
1778 /**
1779  * ice_add_avf_rss_cfg - add an RSS configuration for AVF driver
1780  * @hw: pointer to the hardware structure
1781  * @vsi_handle: software VSI handle
1782  * @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure
1783  *
1784  * This function will take the hash bitmap provided by the AVF driver via a
1785  * message, convert it to ICE-compatible values, and configure RSS flow
1786  * profiles.
1787  */
1788 enum ice_status
1789 ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
1790 {
1791 	enum ice_status status = 0;
1792 	u64 hash_flds;
1793 
1794 	if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
1795 	    !ice_is_vsi_valid(hw, vsi_handle))
1796 		return ICE_ERR_PARAM;
1797 
1798 	/* Make sure no unsupported bits are specified */
1799 	if (avf_hash & ~(ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS |
1800 			 ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS))
1801 		return ICE_ERR_CFG;
1802 
1803 	hash_flds = avf_hash;
1804 
1805 	/* Always create an L3 RSS configuration for any L4 RSS configuration */
1806 	if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS)
1807 		hash_flds |= ICE_FLOW_AVF_RSS_IPV4_MASKS;
1808 
1809 	if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS)
1810 		hash_flds |= ICE_FLOW_AVF_RSS_IPV6_MASKS;
1811 
1812 	/* Create the corresponding RSS configuration for each valid hash bit */
1813 	while (hash_flds) {
1814 		u64 rss_hash = ICE_HASH_INVALID;
1815 
1816 		if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS) {
1817 			if (hash_flds & ICE_FLOW_AVF_RSS_IPV4_MASKS) {
1818 				rss_hash = ICE_FLOW_HASH_IPV4;
1819 				hash_flds &= ~ICE_FLOW_AVF_RSS_IPV4_MASKS;
1820 			} else if (hash_flds &
1821 				   ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS) {
1822 				rss_hash = ICE_FLOW_HASH_IPV4 |
1823 					ICE_FLOW_HASH_TCP_PORT;
1824 				hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS;
1825 			} else if (hash_flds &
1826 				   ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS) {
1827 				rss_hash = ICE_FLOW_HASH_IPV4 |
1828 					ICE_FLOW_HASH_UDP_PORT;
1829 				hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS;
1830 			} else if (hash_flds &
1831 				   BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP)) {
1832 				rss_hash = ICE_FLOW_HASH_IPV4 |
1833 					ICE_FLOW_HASH_SCTP_PORT;
1834 				hash_flds &=
1835 					~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP);
1836 			}
1837 		} else if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS) {
1838 			if (hash_flds & ICE_FLOW_AVF_RSS_IPV6_MASKS) {
1839 				rss_hash = ICE_FLOW_HASH_IPV6;
1840 				hash_flds &= ~ICE_FLOW_AVF_RSS_IPV6_MASKS;
1841 			} else if (hash_flds &
1842 				   ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS) {
1843 				rss_hash = ICE_FLOW_HASH_IPV6 |
1844 					ICE_FLOW_HASH_TCP_PORT;
1845 				hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS;
1846 			} else if (hash_flds &
1847 				   ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS) {
1848 				rss_hash = ICE_FLOW_HASH_IPV6 |
1849 					ICE_FLOW_HASH_UDP_PORT;
1850 				hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS;
1851 			} else if (hash_flds &
1852 				   BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP)) {
1853 				rss_hash = ICE_FLOW_HASH_IPV6 |
1854 					ICE_FLOW_HASH_SCTP_PORT;
1855 				hash_flds &=
1856 					~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP);
1857 			}
1858 		}
1859 
1860 		if (rss_hash == ICE_HASH_INVALID)
1861 			return ICE_ERR_OUT_OF_RANGE;
1862 
1863 		status = ice_add_rss_cfg(hw, vsi_handle, rss_hash,
1864 					 ICE_FLOW_SEG_HDR_NONE);
1865 		if (status)
1866 			break;
1867 	}
1868 
1869 	return status;
1870 }
1871 
1872 /**
1873  * ice_replay_rss_cfg - replay RSS configurations associated with VSI
1874  * @hw: pointer to the hardware structure
1875  * @vsi_handle: software VSI handle
1876  */
1877 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
1878 {
1879 	enum ice_status status = 0;
1880 	struct ice_rss_cfg *r;
1881 
1882 	if (!ice_is_vsi_valid(hw, vsi_handle))
1883 		return ICE_ERR_PARAM;
1884 
1885 	mutex_lock(&hw->rss_locks);
1886 	list_for_each_entry(r, &hw->rss_list_head, l_entry) {
1887 		if (test_bit(vsi_handle, r->vsis)) {
1888 			status = ice_add_rss_cfg_sync(hw, vsi_handle,
1889 						      r->hashed_flds,
1890 						      r->packet_hdr,
1891 						      ICE_RSS_OUTER_HEADERS);
1892 			if (status)
1893 				break;
1894 			status = ice_add_rss_cfg_sync(hw, vsi_handle,
1895 						      r->hashed_flds,
1896 						      r->packet_hdr,
1897 						      ICE_RSS_INNER_HEADERS);
1898 			if (status)
1899 				break;
1900 		}
1901 	}
1902 	mutex_unlock(&hw->rss_locks);
1903 
1904 	return status;
1905 }
1906 
1907 /**
1908  * ice_get_rss_cfg - returns hashed fields for the given header types
1909  * @hw: pointer to the hardware structure
1910  * @vsi_handle: software VSI handle
1911  * @hdrs: protocol header type
1912  *
1913  * This function will return the match fields of the first instance of flow
1914  * profile having the given header types and containing input VSI
1915  */
1916 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
1917 {
1918 	u64 rss_hash = ICE_HASH_INVALID;
1919 	struct ice_rss_cfg *r;
1920 
1921 	/* verify if the protocol header is non zero and VSI is valid */
1922 	if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
1923 		return ICE_HASH_INVALID;
1924 
1925 	mutex_lock(&hw->rss_locks);
1926 	list_for_each_entry(r, &hw->rss_list_head, l_entry)
1927 		if (test_bit(vsi_handle, r->vsis) &&
1928 		    r->packet_hdr == hdrs) {
1929 			rss_hash = r->hashed_flds;
1930 			break;
1931 		}
1932 	mutex_unlock(&hw->rss_locks);
1933 
1934 	return rss_hash;
1935 }
1936