1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 #include "ice_lib.h"
5 #include "ice_switch.h"
6 
/* Byte offsets of fields inside the dummy Ethernet header used when
 * building MAC/VLAN/Ethertype switch filter rules, plus related limits.
 */
#define ICE_ETH_DA_OFFSET		0	/* destination MAC address */
#define ICE_ETH_ETHTYPE_OFFSET		12	/* Ethertype field */
#define ICE_ETH_VLAN_TCI_OFFSET		14	/* VLAN TCI (when an 802.1Q tag is present) */
#define ICE_MAX_VLAN_ID			0xFFF	/* VLAN ID is a 12-bit field */
#define ICE_IPV6_ETHER_ID		0x86DD	/* Ethertype for IPv6 */
12 
13 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
14  * struct to configure any switch filter rules.
15  * {DA (6 bytes), SA(6 bytes),
16  * Ether type (2 bytes for header without VLAN tag) OR
17  * VLAN tag (4 bytes for header with VLAN tag) }
18  *
19  * Word on Hardcoded values
20  * byte 0 = 0x2: to identify it as locally administered DA MAC
21  * byte 6 = 0x2: to identify it as locally administered SA MAC
22  * byte 12 = 0x81 & byte 13 = 0x00:
23  *	In case of VLAN filter first two bytes defines ether type (0x8100)
24  *	and remaining two bytes are placeholder for programming a given VLAN ID
25  *	In case of Ether type filter it is treated as header without VLAN tag
26  *	and byte 12 and 13 is used to program a given Ether type instead
27  */
#define DUMMY_ETH_HDR_LEN		16
/* Template bytes matching the layout described in the comment above;
 * bytes 12-15 are placeholders that get programmed with either a VLAN ID
 * (after the 0x8100 TPID) or a specific Ethertype when a rule is created.
 */
static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
							0x2, 0, 0, 0, 0, 0,
							0x81, 0, 0, 0};
32 
/* Attribute flags describing a dummy packet's layout (outer/inner L3
 * protocol, tunnel type, inner L4, etc.). Combinations of these bits are
 * used as the 'match' value in ICE_PKT_PROFILE() initializers so the
 * correct template can be selected for a given filter rule.
 */
enum {
	ICE_PKT_OUTER_IPV6	= BIT(0),
	ICE_PKT_TUN_GTPC	= BIT(1),
	ICE_PKT_TUN_GTPU	= BIT(2),
	ICE_PKT_TUN_NVGRE	= BIT(3),
	ICE_PKT_TUN_UDP		= BIT(4),
	ICE_PKT_INNER_IPV6	= BIT(5),
	ICE_PKT_INNER_TCP	= BIT(6),
	ICE_PKT_INNER_UDP	= BIT(7),
	ICE_PKT_GTP_NOPAY	= BIT(8),
	ICE_PKT_KMALLOC		= BIT(9),
	ICE_PKT_PPPOE		= BIT(10),
	ICE_PKT_L2TPV3		= BIT(11),
};
47 
/* Maps one protocol header type to its byte offset within a dummy packet
 * template; templates carry an array of these, one entry per header.
 */
struct ice_dummy_pkt_offsets {
	enum ice_protocol_type type;
	u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
};
52 
/* Ties together one dummy packet template: its raw bytes (pkt/pkt_len),
 * the per-protocol offsets table (offsets/offsets_len, length in bytes),
 * and the ICE_PKT_* flag combination ('match') the template serves.
 */
struct ice_dummy_pkt_profile {
	const struct ice_dummy_pkt_offsets *offsets;
	const u8 *pkt;
	u32 match;
	u16 pkt_len;
	u16 offsets_len;
};
60 
/* Declare the offsets table for dummy packet <type> */
#define ICE_DECLARE_PKT_OFFSETS(type)					\
	static const struct ice_dummy_pkt_offsets			\
	ice_dummy_##type##_packet_offsets[]

/* Declare the raw byte template for dummy packet <type> */
#define ICE_DECLARE_PKT_TEMPLATE(type)					\
	static const u8 ice_dummy_##type##_packet[]

/* Initializer for struct ice_dummy_pkt_profile binding template <type>
 * to match mask m. Lengths are taken with sizeof so the profile can
 * never get out of sync with the arrays it points at.
 */
#define ICE_PKT_PROFILE(type, m) {					\
	.match		= (m),						\
	.pkt		= ice_dummy_##type##_packet,			\
	.pkt_len	= sizeof(ice_dummy_##type##_packet),		\
	.offsets	= ice_dummy_##type##_packet_offsets,		\
	.offsets_len	= sizeof(ice_dummy_##type##_packet_offsets),	\
}
75 
/* Single 802.1Q VLAN tag (TPID 0x8100) inserted at offset 12.
 * NOTE(review): unlike the full packet templates there is no
 * ICE_PROTOCOL_LAST terminator here -- presumably these partial arrays
 * are spliced into a complete profile by the rule-building code; confirm
 * with the callers before adding one.
 */
ICE_DECLARE_PKT_OFFSETS(vlan) = {
	{ ICE_VLAN_OFOS,        12 },
};

ICE_DECLARE_PKT_TEMPLATE(vlan) = {
	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
};
83 
/* Double (QinQ) VLAN tagging: outer tag with TPID 0x9100 at offset 12,
 * inner 0x8100 tag at offset 16. Like the vlan arrays, these are partial
 * fragments without an ICE_PROTOCOL_LAST terminator.
 */
ICE_DECLARE_PKT_OFFSETS(qinq) = {
	{ ICE_VLAN_EX,          12 },
	{ ICE_VLAN_IN,          16 },
};

ICE_DECLARE_PKT_TEMPLATE(qinq) = {
	0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
};
93 
/* offset info for MAC + IPv4 + NVGRE + inner MAC + inner IPv4 + TCP */
ICE_DECLARE_PKT_OFFSETS(gre_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_ETYPE_IL,		54 },
	{ ICE_IPV4_IL,		56 },
	{ ICE_TCP_IL,		76 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for NVGRE tunnel with inner IPv4 + TCP (outer IP proto
 * 0x2F = GRE, inner IP proto 0x06 = TCP)
 */
ICE_DECLARE_PKT_TEMPLATE(gre_tcp) = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_IL 54 */

	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 76 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};
140 
/* offset info for MAC + IPv4 + NVGRE + inner MAC + inner IPv4 + UDP */
ICE_DECLARE_PKT_OFFSETS(gre_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_ETYPE_IL,		54 },
	{ ICE_IPV4_IL,		56 },
	{ ICE_UDP_ILOS,		76 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for NVGRE tunnel with inner IPv4 + UDP (outer IP proto
 * 0x2F = GRE, inner IP proto 0x11 = UDP)
 */
ICE_DECLARE_PKT_TEMPLATE(gre_udp) = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_IL 54 */

	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 76 */
	0x00, 0x08, 0x00, 0x00,
};
184 
/* offset info for UDP tunnel (VXLAN/GENEVE/VXLAN-GPE all share offset 42)
 * with inner IPv4 + TCP
 */
ICE_DECLARE_PKT_OFFSETS(udp_tun_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_ETYPE_IL,		62 },
	{ ICE_IPV4_IL,		64 },
	{ ICE_TCP_IL,		84 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for UDP tunnel with inner IPv4 + TCP */
ICE_DECLARE_PKT_TEMPLATE(udp_tun_tcp) = {
	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
	0x00, 0x46, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_IL 62 */

	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};
237 
/* offset info for UDP tunnel (VXLAN/GENEVE/VXLAN-GPE all share offset 42)
 * with inner IPv4 + UDP
 */
ICE_DECLARE_PKT_OFFSETS(udp_tun_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_ETYPE_IL,		62 },
	{ ICE_IPV4_IL,		64 },
	{ ICE_UDP_ILOS,		84 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for UDP tunnel with inner IPv4 + UDP */
ICE_DECLARE_PKT_TEMPLATE(udp_tun_udp) = {
	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
	0x00, 0x3a, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_IL 62 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
	0x00, 0x08, 0x00, 0x00,
};
287 
/* offset info for MAC + IPv4 + NVGRE + inner MAC + inner IPv6 + TCP */
ICE_DECLARE_PKT_OFFSETS(gre_ipv6_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_ETYPE_IL,		54 },
	{ ICE_IPV6_IL,		56 },
	{ ICE_TCP_IL,		96 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for NVGRE tunnel with inner IPv6 + TCP (inner Next Header
 * 0x06 = TCP)
 */
ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x66, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xdd,		/* ICE_ETYPE_IL 54 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */
	0x00, 0x08, 0x06, 0x40,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 96 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};
339 
/* offset info for MAC + IPv4 + NVGRE + inner MAC + inner IPv6 + UDP */
ICE_DECLARE_PKT_OFFSETS(gre_ipv6_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_ETYPE_IL,		54 },
	{ ICE_IPV6_IL,		56 },
	{ ICE_UDP_ILOS,		96 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for NVGRE tunnel with inner IPv6 + UDP (inner Next Header
 * 0x11 = UDP)
 */
ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_udp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xdd,		/* ICE_ETYPE_IL 54 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */
	0x00, 0x08, 0x11, 0x40,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 96 */
	0x00, 0x08, 0x00, 0x00,
};
388 
/* offset info for UDP tunnel (VXLAN/GENEVE/VXLAN-GPE) with inner IPv6 +
 * TCP
 */
ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_ETYPE_IL,		62 },
	{ ICE_IPV6_IL,		64 },
	{ ICE_TCP_IL,		104 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for UDP tunnel with inner IPv6 + TCP */
ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_tcp) = {
	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x6e, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
	0x00, 0x5a, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xdd,		/* ICE_ETYPE_IL 62 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */
	0x00, 0x08, 0x06, 0x40,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 104 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};
446 
/* offset info for UDP tunnel (VXLAN/GENEVE/VXLAN-GPE) with inner IPv6 +
 * UDP
 */
ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_ETYPE_IL,		62 },
	{ ICE_IPV6_IL,		64 },
	{ ICE_UDP_ILOS,		104 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for UDP tunnel with inner IPv6 + UDP */
ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_udp) = {
	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x62, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
	0x00, 0x4e, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xdd,		/* ICE_ETYPE_IL 62 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */
	0x00, 0x08, 0x11, 0x40,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 104 */
	0x00, 0x08, 0x00, 0x00,
};
501 
/* offset info for MAC + IPv4 + UDP dummy packet */
ICE_DECLARE_PKT_OFFSETS(udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_ILOS,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + UDP (IP proto 0x11 = UDP) */
ICE_DECLARE_PKT_TEMPLATE(udp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
530 
/* offset info for MAC + IPv4 + TCP dummy packet */
ICE_DECLARE_PKT_OFFSETS(tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_TCP_IL,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + TCP (IP proto 0x06 = TCP) */
ICE_DECLARE_PKT_TEMPLATE(tcp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
562 
/* offset info for MAC + IPv6 + TCP dummy packet */
ICE_DECLARE_PKT_OFFSETS(tcp_ipv6) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_TCP_IL,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv6 + TCP */
ICE_DECLARE_PKT_TEMPLATE(tcp_ipv6) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 (comment previously said 40, stale) */
	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
597 
/* IPv6 + UDP */
ICE_DECLARE_PKT_OFFSETS(udp_ipv6) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_ILOS,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* IPv6 + UDP dummy packet */
ICE_DECLARE_PKT_TEMPLATE(udp_ipv6) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 (comment previously said 40, stale) */
	0x00, 0x10, 0x11, 0x00, /* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
	0x00, 0x10, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
634 
/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV4_IL,		62 },
	{ ICE_TCP_IL,		82 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy GTP-U packet (UDP dst port 2152 = 0x0868) with the PDU Session
 * container extension header and inner IPv4 + TCP
 */
ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x58, /* IP 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 34 */
	0x00, 0x44, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x34, /* ICE_GTP Header 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x28, /* IP 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* TCP 82 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
682 
/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV4_IL,		62 },
	{ ICE_UDP_ILOS,		82 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy GTP-U packet with PDU Session extension header and inner
 * IPv4 + UDP
 */
ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_udp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x4c, /* IP 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 34 */
	0x00, 0x38, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28, /* ICE_GTP Header 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x1c, /* IP 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* UDP 82 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
727 
/* Outer IPv4 + Outer UDP + GTP + Inner IPv6 + Inner TCP
 * NOTE(review): the original top comment said "Outer IPv6" but the
 * offsets/template clearly use an outer IPv4 header (ICE_IPV4_OFOS 14,
 * version nibble 0x45).
 */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV6_IL,		62 },
	{ ICE_TCP_IL,		102 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy GTP-U packet with PDU Session extension header and inner
 * IPv6 + TCP
 */
ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x6c, /* IP 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 34 */
	0x00, 0x58, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x48, /* ICE_GTP Header 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
	0x00, 0x14, 0x06, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* TCP 102 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
780 
/* Outer IPv4 + Outer UDP + GTP + Inner IPv6 + Inner UDP */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV6_IL,		62 },
	{ ICE_UDP_ILOS,		102 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy GTP-U packet with PDU Session extension header and inner
 * IPv6 + UDP
 */
ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_udp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x60, /* IP 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 34 */
	0x00, 0x4c, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x3c, /* ICE_GTP Header 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
	0x00, 0x08, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* UDP 102 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
829 
/* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV4_IL,		82 },
	{ ICE_TCP_IL,		102 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy GTP-U over outer IPv6 with PDU Session extension header and
 * inner IPv4 + TCP
 */
ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
	0x00, 0x44, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 54 */
	0x00, 0x44, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x34, /* ICE_GTP Header 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x28, /* IP 82 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* TCP 102 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
881 
/* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV4_IL,		82 },
	{ ICE_UDP_ILOS,		102 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy GTP-U over outer IPv6 with PDU Session extension header and
 * inner IPv4 + UDP
 */
ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_udp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
	0x00, 0x38, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 54 */
	0x00, 0x38, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28, /* ICE_GTP Header 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x1c, /* IP 82 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* UDP 102 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
930 
/* Outer IPv6 + Outer UDP + GTP + Inner IPv6 + Inner TCP */
ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV6_IL,		82 },
	{ ICE_TCP_IL,		122 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy GTP-U over outer IPv6 with PDU Session extension header and
 * inner IPv6 + TCP
 */
ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
	0x00, 0x58, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 54 */
	0x00, 0x58, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x48, /* ICE_GTP Header 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
	0x00, 0x14, 0x06, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* TCP 122 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
987 
/* Outer IPv6 + Outer UDP + GTP + Inner IPv6 + Inner UDP */
ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV6_IL,		82 },
	{ ICE_UDP_ILOS,		122 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy GTP-U over outer IPv6 with PDU Session extension header and
 * inner IPv6 + UDP
 */
ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_udp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
	0x00, 0x4c, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 54 */
	0x00, 0x4c, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x3c, /* ICE_GTP Header 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
	0x00, 0x08, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* UDP 122 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
1041 
/* Outer IPv4 + Outer UDP + GTP-U with no payload matching: the offsets
 * stop at ICE_GTP_NO_PAY even though the template carries an inner IPv4
 * header, so rules built from this profile do not match on the inner
 * headers.
 */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP_NO_PAY,	42 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
	0x00, 0x00, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 (ICE_GTP_NO_PAY in offsets) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,
};
1079 
/* Outer IPv6 + Outer UDP + GTP header with no payload matching
 * (ICE_GTP_NO_PAY)
 */
ICE_DECLARE_PKT_OFFSETS(ipv6_gtp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP_NO_PAY,	62 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(ipv6_gtp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x6c, 0x11, 0x00, /* Next header UDP */
	0x00, 0x00, 0x00, 0x00, /* NOTE(review): payload len 0x6c exceeds the
				 * 16 template bytes that follow this header;
				 * confirm whether the length is rewritten
				 * when the rule is programmed.
				 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
	0x00, 0x00, 0x00, 0x00,

	0x30, 0x00, 0x00, 0x28, /* ICE_GTP 62 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,
};
1113 
/* PPPoE session (Ethertype 0x8864) carrying IPv4 + TCP; the 2-byte PPP
 * protocol field (0x0021 = IPv4) sits at offset 20, between the PPPoE
 * header and the IP header.
 */
ICE_DECLARE_PKT_OFFSETS(pppoe_ipv4_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_PPPOE,		14 },
	{ ICE_IPV4_OFOS,	22 },
	{ ICE_TCP_IL,		42 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv4_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x88, 0x64,		/* ICE_ETYPE_OL 12 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
	0x00, 0x16,

	0x00, 0x21,		/* PPP Link Layer 20 */

	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 22 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1149 
/* PPPoE session (Ethertype 0x8864) carrying IPv4 + UDP; PPP protocol
 * field 0x0021 (IPv4) at offset 20.
 */
ICE_DECLARE_PKT_OFFSETS(pppoe_ipv4_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_PPPOE,		14 },
	{ ICE_IPV4_OFOS,	22 },
	{ ICE_UDP_ILOS,		42 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv4_udp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x88, 0x64,		/* ICE_ETYPE_OL 12 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
	0x00, 0x16,

	0x00, 0x21,		/* PPP Link Layer 20 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1182 
/* Header offsets within the PPPoE IPv6 TCP dummy packet */
ICE_DECLARE_PKT_OFFSETS(pppoe_ipv6_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_PPPOE,		14 },
	{ ICE_IPV6_OFOS,	22 },
	{ ICE_TCP_IL,		62 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ethernet / PPPoE session / PPP (IPv6) / IPv6 / TCP */
ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv6_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x88, 0x64,		/* ICE_ETYPE_OL 12 (PPPoE session ethertype) */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
	0x00, 0x2a,

	0x00, 0x57,		/* PPP Link Layer 20 (protocol IPv6) */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1223 
/* Header offsets within the PPPoE IPv6 UDP dummy packet */
ICE_DECLARE_PKT_OFFSETS(pppoe_ipv6_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_PPPOE,		14 },
	{ ICE_IPV6_OFOS,	22 },
	{ ICE_UDP_ILOS,		62 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ethernet / PPPoE session / PPP (IPv6) / IPv6 / UDP */
ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv6_udp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x88, 0x64,		/* ICE_ETYPE_OL 12 (PPPoE session ethertype) */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
	0x00, 0x2a,

	0x00, 0x57,		/* PPP Link Layer 20 (protocol IPv6) */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
	0x00, 0x08, 0x11, 0x00, /* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1261 
/* Header offsets within the IPv4 L2TPv3 dummy packet */
ICE_DECLARE_PKT_OFFSETS(ipv4_l2tpv3) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_L2TPV3,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ethernet / IPv4 (protocol 115 = L2TP) / L2TPv3 */
ICE_DECLARE_PKT_TEMPLATE(ipv4_l2tpv3) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_OFOS 14 (matches offsets above) */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x73, 0x00, 0x00, /* protocol 0x73 (115) = L2TP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1288 
/* Header offsets within the IPv6 L2TPv3 dummy packet */
ICE_DECLARE_PKT_OFFSETS(ipv6_l2tpv3) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_L2TPV3,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ethernet / IPv6 (next header 115 = L2TP) / L2TPv3 */
ICE_DECLARE_PKT_TEMPLATE(ipv6_l2tpv3) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 (matches offsets above) */
	0x00, 0x0c, 0x73, 0x40, /* next header 0x73 (115) = L2TP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1320 
/* Table pairing each dummy packet with the set of match flags it satisfies.
 * NOTE(review): entries with more specific flag combinations precede their
 * subsets (plain TCP with flags == 0 is last), which suggests a first-match
 * scan by the lookup code — confirm against the profile-selection logic,
 * which is outside this view.
 */
static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
	ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPU | ICE_PKT_OUTER_IPV6 |
				  ICE_PKT_GTP_NOPAY),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6 |
					    ICE_PKT_INNER_IPV6 |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6 |
					    ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6 |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPU | ICE_PKT_GTP_NOPAY),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_INNER_IPV6 |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU),
	ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPC | ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPC),
	ICE_PKT_PROFILE(pppoe_ipv6_udp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6 |
					ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(pppoe_ipv6_tcp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(pppoe_ipv4_udp, ICE_PKT_PPPOE | ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(pppoe_ipv4_tcp, ICE_PKT_PPPOE),
	ICE_PKT_PROFILE(gre_ipv6_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6 |
				      ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(gre_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(gre_ipv6_udp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(gre_udp, ICE_PKT_TUN_NVGRE),
	ICE_PKT_PROFILE(udp_tun_ipv6_tcp, ICE_PKT_TUN_UDP |
					  ICE_PKT_INNER_IPV6 |
					  ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(ipv6_l2tpv3, ICE_PKT_L2TPV3 | ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(ipv4_l2tpv3, ICE_PKT_L2TPV3),
	ICE_PKT_PROFILE(udp_tun_tcp, ICE_PKT_TUN_UDP | ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(udp_tun_ipv6_udp, ICE_PKT_TUN_UDP |
					  ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(udp_tun_udp, ICE_PKT_TUN_UDP),
	ICE_PKT_PROFILE(udp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(udp, ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(tcp_ipv6, ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(tcp, 0),
};
1371 
/* Size in bytes of a switch rule element whose hdr_data flexible array
 * carries l bytes of dummy packet header.
 */
#define ICE_SW_RULE_RX_TX_HDR_SIZE(s, l)	struct_size((s), hdr_data, (l))
/* Rule size when the header is the 16-byte dummy Ethernet header */
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s)	\
	ICE_SW_RULE_RX_TX_HDR_SIZE((s), DUMMY_ETH_HDR_LEN)
/* Rule size with no trailing header data */
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s)	\
	ICE_SW_RULE_RX_TX_HDR_SIZE((s), 0)
/* Size of a large-action element with n trailing act entries */
#define ICE_SW_RULE_LG_ACT_SIZE(s, n)		struct_size((s), act, (n))
/* Size of a VSI-list element with n trailing vsi entries */
#define ICE_SW_RULE_VSI_LIST_SIZE(s, n)		struct_size((s), vsi, (n))
1379 
/* recipe_to_profile[rid] = bitmap of profile IDs the recipe is mapped to */
static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES],
			  ICE_MAX_NUM_PROFILES);

/* profile_to_recipe[prof] = bitmap of recipe IDs mapped to the profile */
static DECLARE_BITMAP(profile_to_recipe[ICE_MAX_NUM_PROFILES],
			  ICE_MAX_NUM_RECIPES);
1387 
1388 /**
1389  * ice_init_def_sw_recp - initialize the recipe book keeping tables
1390  * @hw: pointer to the HW struct
1391  *
1392  * Allocate memory for the entire recipe table and initialize the structures/
1393  * entries corresponding to basic recipes.
1394  */
1395 int ice_init_def_sw_recp(struct ice_hw *hw)
1396 {
1397 	struct ice_sw_recipe *recps;
1398 	u8 i;
1399 
1400 	recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
1401 			     sizeof(*recps), GFP_KERNEL);
1402 	if (!recps)
1403 		return -ENOMEM;
1404 
1405 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
1406 		recps[i].root_rid = i;
1407 		INIT_LIST_HEAD(&recps[i].filt_rules);
1408 		INIT_LIST_HEAD(&recps[i].filt_replay_rules);
1409 		INIT_LIST_HEAD(&recps[i].rg_list);
1410 		mutex_init(&recps[i].filt_rule_lock);
1411 	}
1412 
1413 	hw->switch_info->recp_list = recps;
1414 
1415 	return 0;
1416 }
1417 
1418 /**
1419  * ice_aq_get_sw_cfg - get switch configuration
1420  * @hw: pointer to the hardware structure
1421  * @buf: pointer to the result buffer
1422  * @buf_size: length of the buffer available for response
1423  * @req_desc: pointer to requested descriptor
1424  * @num_elems: pointer to number of elements
1425  * @cd: pointer to command details structure or NULL
1426  *
1427  * Get switch configuration (0x0200) to be placed in buf.
1428  * This admin command returns information such as initial VSI/port number
1429  * and switch ID it belongs to.
1430  *
1431  * NOTE: *req_desc is both an input/output parameter.
1432  * The caller of this function first calls this function with *request_desc set
1433  * to 0. If the response from f/w has *req_desc set to 0, all the switch
1434  * configuration information has been returned; if non-zero (meaning not all
1435  * the information was returned), the caller should call this function again
1436  * with *req_desc set to the previous value returned by f/w to get the
1437  * next block of switch configuration information.
1438  *
1439  * *num_elems is output only parameter. This reflects the number of elements
1440  * in response buffer. The caller of this function to use *num_elems while
1441  * parsing the response buffer.
1442  */
1443 static int
1444 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
1445 		  u16 buf_size, u16 *req_desc, u16 *num_elems,
1446 		  struct ice_sq_cd *cd)
1447 {
1448 	struct ice_aqc_get_sw_cfg *cmd;
1449 	struct ice_aq_desc desc;
1450 	int status;
1451 
1452 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
1453 	cmd = &desc.params.get_sw_conf;
1454 	cmd->element = cpu_to_le16(*req_desc);
1455 
1456 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1457 	if (!status) {
1458 		*req_desc = le16_to_cpu(cmd->element);
1459 		*num_elems = le16_to_cpu(cmd->num_elems);
1460 	}
1461 
1462 	return status;
1463 }
1464 
1465 /**
1466  * ice_aq_add_vsi
1467  * @hw: pointer to the HW struct
1468  * @vsi_ctx: pointer to a VSI context struct
1469  * @cd: pointer to command details structure or NULL
1470  *
1471  * Add a VSI context to the hardware (0x0210)
1472  */
1473 static int
1474 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1475 	       struct ice_sq_cd *cd)
1476 {
1477 	struct ice_aqc_add_update_free_vsi_resp *res;
1478 	struct ice_aqc_add_get_update_free_vsi *cmd;
1479 	struct ice_aq_desc desc;
1480 	int status;
1481 
1482 	cmd = &desc.params.vsi_cmd;
1483 	res = &desc.params.add_update_free_vsi_res;
1484 
1485 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
1486 
1487 	if (!vsi_ctx->alloc_from_pool)
1488 		cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
1489 					   ICE_AQ_VSI_IS_VALID);
1490 	cmd->vf_id = vsi_ctx->vf_num;
1491 
1492 	cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
1493 
1494 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1495 
1496 	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1497 				 sizeof(vsi_ctx->info), cd);
1498 
1499 	if (!status) {
1500 		vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M;
1501 		vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used);
1502 		vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free);
1503 	}
1504 
1505 	return status;
1506 }
1507 
1508 /**
1509  * ice_aq_free_vsi
1510  * @hw: pointer to the HW struct
1511  * @vsi_ctx: pointer to a VSI context struct
1512  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1513  * @cd: pointer to command details structure or NULL
1514  *
1515  * Free VSI context info from hardware (0x0213)
1516  */
1517 static int
1518 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1519 		bool keep_vsi_alloc, struct ice_sq_cd *cd)
1520 {
1521 	struct ice_aqc_add_update_free_vsi_resp *resp;
1522 	struct ice_aqc_add_get_update_free_vsi *cmd;
1523 	struct ice_aq_desc desc;
1524 	int status;
1525 
1526 	cmd = &desc.params.vsi_cmd;
1527 	resp = &desc.params.add_update_free_vsi_res;
1528 
1529 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
1530 
1531 	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1532 	if (keep_vsi_alloc)
1533 		cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);
1534 
1535 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1536 	if (!status) {
1537 		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
1538 		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
1539 	}
1540 
1541 	return status;
1542 }
1543 
1544 /**
1545  * ice_aq_update_vsi
1546  * @hw: pointer to the HW struct
1547  * @vsi_ctx: pointer to a VSI context struct
1548  * @cd: pointer to command details structure or NULL
1549  *
1550  * Update VSI context in the hardware (0x0211)
1551  */
1552 static int
1553 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1554 		  struct ice_sq_cd *cd)
1555 {
1556 	struct ice_aqc_add_update_free_vsi_resp *resp;
1557 	struct ice_aqc_add_get_update_free_vsi *cmd;
1558 	struct ice_aq_desc desc;
1559 	int status;
1560 
1561 	cmd = &desc.params.vsi_cmd;
1562 	resp = &desc.params.add_update_free_vsi_res;
1563 
1564 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
1565 
1566 	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1567 
1568 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1569 
1570 	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1571 				 sizeof(vsi_ctx->info), cd);
1572 
1573 	if (!status) {
1574 		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
1575 		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
1576 	}
1577 
1578 	return status;
1579 }
1580 
1581 /**
1582  * ice_is_vsi_valid - check whether the VSI is valid or not
1583  * @hw: pointer to the HW struct
1584  * @vsi_handle: VSI handle
1585  *
1586  * check whether the VSI is valid or not
1587  */
1588 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1589 {
1590 	return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1591 }
1592 
1593 /**
1594  * ice_get_hw_vsi_num - return the HW VSI number
1595  * @hw: pointer to the HW struct
1596  * @vsi_handle: VSI handle
1597  *
1598  * return the HW VSI number
1599  * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1600  */
1601 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1602 {
1603 	return hw->vsi_ctx[vsi_handle]->vsi_num;
1604 }
1605 
1606 /**
1607  * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1608  * @hw: pointer to the HW struct
1609  * @vsi_handle: VSI handle
1610  *
1611  * return the VSI context entry for a given VSI handle
1612  */
1613 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1614 {
1615 	return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1616 }
1617 
1618 /**
1619  * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1620  * @hw: pointer to the HW struct
1621  * @vsi_handle: VSI handle
1622  * @vsi: VSI context pointer
1623  *
1624  * save the VSI context entry for a given VSI handle
1625  */
1626 static void
1627 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1628 {
1629 	hw->vsi_ctx[vsi_handle] = vsi;
1630 }
1631 
1632 /**
1633  * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1634  * @hw: pointer to the HW struct
1635  * @vsi_handle: VSI handle
1636  */
1637 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1638 {
1639 	struct ice_vsi_ctx *vsi;
1640 	u8 i;
1641 
1642 	vsi = ice_get_vsi_ctx(hw, vsi_handle);
1643 	if (!vsi)
1644 		return;
1645 	ice_for_each_traffic_class(i) {
1646 		if (vsi->lan_q_ctx[i]) {
1647 			devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
1648 			vsi->lan_q_ctx[i] = NULL;
1649 		}
1650 		if (vsi->rdma_q_ctx[i]) {
1651 			devm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]);
1652 			vsi->rdma_q_ctx[i] = NULL;
1653 		}
1654 	}
1655 }
1656 
1657 /**
1658  * ice_clear_vsi_ctx - clear the VSI context entry
1659  * @hw: pointer to the HW struct
1660  * @vsi_handle: VSI handle
1661  *
1662  * clear the VSI context entry
1663  */
1664 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1665 {
1666 	struct ice_vsi_ctx *vsi;
1667 
1668 	vsi = ice_get_vsi_ctx(hw, vsi_handle);
1669 	if (vsi) {
1670 		ice_clear_vsi_q_ctx(hw, vsi_handle);
1671 		devm_kfree(ice_hw_to_dev(hw), vsi);
1672 		hw->vsi_ctx[vsi_handle] = NULL;
1673 	}
1674 }
1675 
1676 /**
1677  * ice_clear_all_vsi_ctx - clear all the VSI context entries
1678  * @hw: pointer to the HW struct
1679  */
1680 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1681 {
1682 	u16 i;
1683 
1684 	for (i = 0; i < ICE_MAX_VSI; i++)
1685 		ice_clear_vsi_ctx(hw, i);
1686 }
1687 
1688 /**
1689  * ice_add_vsi - add VSI context to the hardware and VSI handle list
1690  * @hw: pointer to the HW struct
1691  * @vsi_handle: unique VSI handle provided by drivers
1692  * @vsi_ctx: pointer to a VSI context struct
1693  * @cd: pointer to command details structure or NULL
1694  *
1695  * Add a VSI context to the hardware also add it into the VSI handle list.
1696  * If this function gets called after reset for existing VSIs then update
1697  * with the new HW VSI number in the corresponding VSI handle list entry.
1698  */
1699 int
1700 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1701 	    struct ice_sq_cd *cd)
1702 {
1703 	struct ice_vsi_ctx *tmp_vsi_ctx;
1704 	int status;
1705 
1706 	if (vsi_handle >= ICE_MAX_VSI)
1707 		return -EINVAL;
1708 	status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1709 	if (status)
1710 		return status;
1711 	tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1712 	if (!tmp_vsi_ctx) {
1713 		/* Create a new VSI context */
1714 		tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
1715 					   sizeof(*tmp_vsi_ctx), GFP_KERNEL);
1716 		if (!tmp_vsi_ctx) {
1717 			ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1718 			return -ENOMEM;
1719 		}
1720 		*tmp_vsi_ctx = *vsi_ctx;
1721 		ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1722 	} else {
1723 		/* update with new HW VSI num */
1724 		tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1725 	}
1726 
1727 	return 0;
1728 }
1729 
1730 /**
1731  * ice_free_vsi- free VSI context from hardware and VSI handle list
1732  * @hw: pointer to the HW struct
1733  * @vsi_handle: unique VSI handle
1734  * @vsi_ctx: pointer to a VSI context struct
1735  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1736  * @cd: pointer to command details structure or NULL
1737  *
1738  * Free VSI context info from hardware as well as from VSI handle list
1739  */
1740 int
1741 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1742 	     bool keep_vsi_alloc, struct ice_sq_cd *cd)
1743 {
1744 	int status;
1745 
1746 	if (!ice_is_vsi_valid(hw, vsi_handle))
1747 		return -EINVAL;
1748 	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1749 	status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1750 	if (!status)
1751 		ice_clear_vsi_ctx(hw, vsi_handle);
1752 	return status;
1753 }
1754 
1755 /**
1756  * ice_update_vsi
1757  * @hw: pointer to the HW struct
1758  * @vsi_handle: unique VSI handle
1759  * @vsi_ctx: pointer to a VSI context struct
1760  * @cd: pointer to command details structure or NULL
1761  *
1762  * Update VSI context in the hardware
1763  */
1764 int
1765 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1766 	       struct ice_sq_cd *cd)
1767 {
1768 	if (!ice_is_vsi_valid(hw, vsi_handle))
1769 		return -EINVAL;
1770 	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1771 	return ice_aq_update_vsi(hw, vsi_ctx, cd);
1772 }
1773 
1774 /**
1775  * ice_cfg_rdma_fltr - enable/disable RDMA filtering on VSI
1776  * @hw: pointer to HW struct
1777  * @vsi_handle: VSI SW index
1778  * @enable: boolean for enable/disable
1779  */
1780 int
1781 ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
1782 {
1783 	struct ice_vsi_ctx *ctx, *cached_ctx;
1784 	int status;
1785 
1786 	cached_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1787 	if (!cached_ctx)
1788 		return -ENOENT;
1789 
1790 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1791 	if (!ctx)
1792 		return -ENOMEM;
1793 
1794 	ctx->info.q_opt_rss = cached_ctx->info.q_opt_rss;
1795 	ctx->info.q_opt_tc = cached_ctx->info.q_opt_tc;
1796 	ctx->info.q_opt_flags = cached_ctx->info.q_opt_flags;
1797 
1798 	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
1799 
1800 	if (enable)
1801 		ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
1802 	else
1803 		ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
1804 
1805 	status = ice_update_vsi(hw, vsi_handle, ctx, NULL);
1806 	if (!status) {
1807 		cached_ctx->info.q_opt_flags = ctx->info.q_opt_flags;
1808 		cached_ctx->info.valid_sections |= ctx->info.valid_sections;
1809 	}
1810 
1811 	kfree(ctx);
1812 	return status;
1813 }
1814 
1815 /**
1816  * ice_aq_alloc_free_vsi_list
1817  * @hw: pointer to the HW struct
1818  * @vsi_list_id: VSI list ID returned or used for lookup
1819  * @lkup_type: switch rule filter lookup type
1820  * @opc: switch rules population command type - pass in the command opcode
1821  *
1822  * allocates or free a VSI list resource
1823  */
1824 static int
1825 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1826 			   enum ice_sw_lkup_type lkup_type,
1827 			   enum ice_adminq_opc opc)
1828 {
1829 	struct ice_aqc_alloc_free_res_elem *sw_buf;
1830 	struct ice_aqc_res_elem *vsi_ele;
1831 	u16 buf_len;
1832 	int status;
1833 
1834 	buf_len = struct_size(sw_buf, elem, 1);
1835 	sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
1836 	if (!sw_buf)
1837 		return -ENOMEM;
1838 	sw_buf->num_elems = cpu_to_le16(1);
1839 
1840 	if (lkup_type == ICE_SW_LKUP_MAC ||
1841 	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1842 	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1843 	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1844 	    lkup_type == ICE_SW_LKUP_PROMISC ||
1845 	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1846 	    lkup_type == ICE_SW_LKUP_DFLT) {
1847 		sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1848 	} else if (lkup_type == ICE_SW_LKUP_VLAN) {
1849 		sw_buf->res_type =
1850 			cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1851 	} else {
1852 		status = -EINVAL;
1853 		goto ice_aq_alloc_free_vsi_list_exit;
1854 	}
1855 
1856 	if (opc == ice_aqc_opc_free_res)
1857 		sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);
1858 
1859 	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1860 	if (status)
1861 		goto ice_aq_alloc_free_vsi_list_exit;
1862 
1863 	if (opc == ice_aqc_opc_alloc_res) {
1864 		vsi_ele = &sw_buf->elem[0];
1865 		*vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
1866 	}
1867 
1868 ice_aq_alloc_free_vsi_list_exit:
1869 	devm_kfree(ice_hw_to_dev(hw), sw_buf);
1870 	return status;
1871 }
1872 
1873 /**
1874  * ice_aq_sw_rules - add/update/remove switch rules
1875  * @hw: pointer to the HW struct
1876  * @rule_list: pointer to switch rule population list
1877  * @rule_list_sz: total size of the rule list in bytes
1878  * @num_rules: number of switch rules in the rule_list
1879  * @opc: switch rules population command type - pass in the command opcode
1880  * @cd: pointer to command details structure or NULL
1881  *
1882  * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1883  */
1884 int
1885 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1886 		u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1887 {
1888 	struct ice_aq_desc desc;
1889 	int status;
1890 
1891 	if (opc != ice_aqc_opc_add_sw_rules &&
1892 	    opc != ice_aqc_opc_update_sw_rules &&
1893 	    opc != ice_aqc_opc_remove_sw_rules)
1894 		return -EINVAL;
1895 
1896 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
1897 
1898 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1899 	desc.params.sw_rules.num_rules_fltr_entry_index =
1900 		cpu_to_le16(num_rules);
1901 	status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1902 	if (opc != ice_aqc_opc_add_sw_rules &&
1903 	    hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
1904 		status = -ENOENT;
1905 
1906 	return status;
1907 }
1908 
1909 /**
1910  * ice_aq_add_recipe - add switch recipe
1911  * @hw: pointer to the HW struct
1912  * @s_recipe_list: pointer to switch rule population list
1913  * @num_recipes: number of switch recipes in the list
1914  * @cd: pointer to command details structure or NULL
1915  *
1916  * Add(0x0290)
1917  */
1918 static int
1919 ice_aq_add_recipe(struct ice_hw *hw,
1920 		  struct ice_aqc_recipe_data_elem *s_recipe_list,
1921 		  u16 num_recipes, struct ice_sq_cd *cd)
1922 {
1923 	struct ice_aqc_add_get_recipe *cmd;
1924 	struct ice_aq_desc desc;
1925 	u16 buf_size;
1926 
1927 	cmd = &desc.params.add_get_recipe;
1928 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1929 
1930 	cmd->num_sub_recipes = cpu_to_le16(num_recipes);
1931 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1932 
1933 	buf_size = num_recipes * sizeof(*s_recipe_list);
1934 
1935 	return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1936 }
1937 
1938 /**
1939  * ice_aq_get_recipe - get switch recipe
1940  * @hw: pointer to the HW struct
1941  * @s_recipe_list: pointer to switch rule population list
1942  * @num_recipes: pointer to the number of recipes (input and output)
1943  * @recipe_root: root recipe number of recipe(s) to retrieve
1944  * @cd: pointer to command details structure or NULL
1945  *
1946  * Get(0x0292)
1947  *
1948  * On input, *num_recipes should equal the number of entries in s_recipe_list.
1949  * On output, *num_recipes will equal the number of entries returned in
1950  * s_recipe_list.
1951  *
1952  * The caller must supply enough space in s_recipe_list to hold all possible
1953  * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1954  */
1955 static int
1956 ice_aq_get_recipe(struct ice_hw *hw,
1957 		  struct ice_aqc_recipe_data_elem *s_recipe_list,
1958 		  u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1959 {
1960 	struct ice_aqc_add_get_recipe *cmd;
1961 	struct ice_aq_desc desc;
1962 	u16 buf_size;
1963 	int status;
1964 
1965 	if (*num_recipes != ICE_MAX_NUM_RECIPES)
1966 		return -EINVAL;
1967 
1968 	cmd = &desc.params.add_get_recipe;
1969 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1970 
1971 	cmd->return_index = cpu_to_le16(recipe_root);
1972 	cmd->num_sub_recipes = 0;
1973 
1974 	buf_size = *num_recipes * sizeof(*s_recipe_list);
1975 
1976 	status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1977 	*num_recipes = le16_to_cpu(cmd->num_sub_recipes);
1978 
1979 	return status;
1980 }
1981 
1982 /**
1983  * ice_update_recipe_lkup_idx - update a default recipe based on the lkup_idx
1984  * @hw: pointer to the HW struct
1985  * @params: parameters used to update the default recipe
1986  *
1987  * This function only supports updating default recipes and it only supports
1988  * updating a single recipe based on the lkup_idx at a time.
1989  *
1990  * This is done as a read-modify-write operation. First, get the current recipe
1991  * contents based on the recipe's ID. Then modify the field vector index and
1992  * mask if it's valid at the lkup_idx. Finally, use the add recipe AQ to update
1993  * the pre-existing recipe with the modifications.
1994  */
1995 int
1996 ice_update_recipe_lkup_idx(struct ice_hw *hw,
1997 			   struct ice_update_recipe_lkup_idx_params *params)
1998 {
1999 	struct ice_aqc_recipe_data_elem *rcp_list;
2000 	u16 num_recps = ICE_MAX_NUM_RECIPES;
2001 	int status;
2002 
2003 	rcp_list = kcalloc(num_recps, sizeof(*rcp_list), GFP_KERNEL);
2004 	if (!rcp_list)
2005 		return -ENOMEM;
2006 
2007 	/* read current recipe list from firmware */
2008 	rcp_list->recipe_indx = params->rid;
2009 	status = ice_aq_get_recipe(hw, rcp_list, &num_recps, params->rid, NULL);
2010 	if (status) {
2011 		ice_debug(hw, ICE_DBG_SW, "Failed to get recipe %d, status %d\n",
2012 			  params->rid, status);
2013 		goto error_out;
2014 	}
2015 
2016 	/* only modify existing recipe's lkup_idx and mask if valid, while
2017 	 * leaving all other fields the same, then update the recipe firmware
2018 	 */
2019 	rcp_list->content.lkup_indx[params->lkup_idx] = params->fv_idx;
2020 	if (params->mask_valid)
2021 		rcp_list->content.mask[params->lkup_idx] =
2022 			cpu_to_le16(params->mask);
2023 
2024 	if (params->ignore_valid)
2025 		rcp_list->content.lkup_indx[params->lkup_idx] |=
2026 			ICE_AQ_RECIPE_LKUP_IGNORE;
2027 
2028 	status = ice_aq_add_recipe(hw, &rcp_list[0], 1, NULL);
2029 	if (status)
2030 		ice_debug(hw, ICE_DBG_SW, "Failed to update recipe %d lkup_idx %d fv_idx %d mask %d mask_valid %s, status %d\n",
2031 			  params->rid, params->lkup_idx, params->fv_idx,
2032 			  params->mask, params->mask_valid ? "true" : "false",
2033 			  status);
2034 
2035 error_out:
2036 	kfree(rcp_list);
2037 	return status;
2038 }
2039 
2040 /**
2041  * ice_aq_map_recipe_to_profile - Map recipe to packet profile
2042  * @hw: pointer to the HW struct
2043  * @profile_id: package profile ID to associate the recipe with
2044  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2045  * @cd: pointer to command details structure or NULL
2046  * Recipe to profile association (0x0291)
2047  */
2048 static int
2049 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2050 			     struct ice_sq_cd *cd)
2051 {
2052 	struct ice_aqc_recipe_to_profile *cmd;
2053 	struct ice_aq_desc desc;
2054 
2055 	cmd = &desc.params.recipe_to_profile;
2056 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
2057 	cmd->profile_id = cpu_to_le16(profile_id);
2058 	/* Set the recipe ID bit in the bitmask to let the device know which
2059 	 * profile we are associating the recipe to
2060 	 */
2061 	memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc));
2062 
2063 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2064 }
2065 
2066 /**
2067  * ice_aq_get_recipe_to_profile - Map recipe to packet profile
2068  * @hw: pointer to the HW struct
2069  * @profile_id: package profile ID to associate the recipe with
2070  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2071  * @cd: pointer to command details structure or NULL
2072  * Associate profile ID with given recipe (0x0293)
2073  */
2074 static int
2075 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2076 			     struct ice_sq_cd *cd)
2077 {
2078 	struct ice_aqc_recipe_to_profile *cmd;
2079 	struct ice_aq_desc desc;
2080 	int status;
2081 
2082 	cmd = &desc.params.recipe_to_profile;
2083 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
2084 	cmd->profile_id = cpu_to_le16(profile_id);
2085 
2086 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2087 	if (!status)
2088 		memcpy(r_bitmap, cmd->recipe_assoc, sizeof(cmd->recipe_assoc));
2089 
2090 	return status;
2091 }
2092 
2093 /**
2094  * ice_alloc_recipe - add recipe resource
2095  * @hw: pointer to the hardware structure
2096  * @rid: recipe ID returned as response to AQ call
2097  */
2098 static int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
2099 {
2100 	struct ice_aqc_alloc_free_res_elem *sw_buf;
2101 	u16 buf_len;
2102 	int status;
2103 
2104 	buf_len = struct_size(sw_buf, elem, 1);
2105 	sw_buf = kzalloc(buf_len, GFP_KERNEL);
2106 	if (!sw_buf)
2107 		return -ENOMEM;
2108 
2109 	sw_buf->num_elems = cpu_to_le16(1);
2110 	sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
2111 					ICE_AQC_RES_TYPE_S) |
2112 					ICE_AQC_RES_TYPE_FLAG_SHARED);
2113 	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2114 				       ice_aqc_opc_alloc_res, NULL);
2115 	if (!status)
2116 		*rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp);
2117 	kfree(sw_buf);
2118 
2119 	return status;
2120 }
2121 
2122 /**
2123  * ice_get_recp_to_prof_map - updates recipe to profile mapping
2124  * @hw: pointer to hardware structure
2125  *
2126  * This function is used to populate recipe_to_profile matrix where index to
2127  * this array is the recipe ID and the element is the mapping of which profiles
2128  * is this recipe mapped to.
2129  */
2130 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
2131 {
2132 	DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
2133 	u16 i;
2134 
2135 	for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
2136 		u16 j;
2137 
2138 		bitmap_zero(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
2139 		bitmap_zero(r_bitmap, ICE_MAX_NUM_RECIPES);
2140 		if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
2141 			continue;
2142 		bitmap_copy(profile_to_recipe[i], r_bitmap,
2143 			    ICE_MAX_NUM_RECIPES);
2144 		for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
2145 			set_bit(i, recipe_to_profile[j]);
2146 	}
2147 }
2148 
2149 /**
2150  * ice_collect_result_idx - copy result index values
2151  * @buf: buffer that contains the result index
2152  * @recp: the recipe struct to copy data into
2153  */
2154 static void
2155 ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
2156 		       struct ice_sw_recipe *recp)
2157 {
2158 	if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
2159 		set_bit(buf->content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
2160 			recp->res_idxs);
2161 }
2162 
2163 /**
2164  * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
2165  * @hw: pointer to hardware structure
2166  * @recps: struct that we need to populate
2167  * @rid: recipe ID that we are populating
2168  * @refresh_required: true if we should get recipe to profile mapping from FW
2169  *
2170  * This function is used to populate all the necessary entries into our
2171  * bookkeeping so that we have a current list of all the recipes that are
2172  * programmed in the firmware.
2173  */
2174 static int
2175 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
2176 		    bool *refresh_required)
2177 {
2178 	DECLARE_BITMAP(result_bm, ICE_MAX_FV_WORDS);
2179 	struct ice_aqc_recipe_data_elem *tmp;
2180 	u16 num_recps = ICE_MAX_NUM_RECIPES;
2181 	struct ice_prot_lkup_ext *lkup_exts;
2182 	u8 fv_word_idx = 0;
2183 	u16 sub_recps;
2184 	int status;
2185 
2186 	bitmap_zero(result_bm, ICE_MAX_FV_WORDS);
2187 
2188 	/* we need a buffer big enough to accommodate all the recipes */
2189 	tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
2190 	if (!tmp)
2191 		return -ENOMEM;
2192 
2193 	tmp[0].recipe_indx = rid;
2194 	status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
2195 	/* non-zero status meaning recipe doesn't exist */
2196 	if (status)
2197 		goto err_unroll;
2198 
2199 	/* Get recipe to profile map so that we can get the fv from lkups that
2200 	 * we read for a recipe from FW. Since we want to minimize the number of
2201 	 * times we make this FW call, just make one call and cache the copy
2202 	 * until a new recipe is added. This operation is only required the
2203 	 * first time to get the changes from FW. Then to search existing
2204 	 * entries we don't need to update the cache again until another recipe
2205 	 * gets added.
2206 	 */
2207 	if (*refresh_required) {
2208 		ice_get_recp_to_prof_map(hw);
2209 		*refresh_required = false;
2210 	}
2211 
2212 	/* Start populating all the entries for recps[rid] based on lkups from
2213 	 * firmware. Note that we are only creating the root recipe in our
2214 	 * database.
2215 	 */
2216 	lkup_exts = &recps[rid].lkup_exts;
2217 
2218 	for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
2219 		struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
2220 		struct ice_recp_grp_entry *rg_entry;
2221 		u8 i, prof, idx, prot = 0;
2222 		bool is_root;
2223 		u16 off = 0;
2224 
2225 		rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry),
2226 					GFP_KERNEL);
2227 		if (!rg_entry) {
2228 			status = -ENOMEM;
2229 			goto err_unroll;
2230 		}
2231 
2232 		idx = root_bufs.recipe_indx;
2233 		is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
2234 
2235 		/* Mark all result indices in this chain */
2236 		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
2237 			set_bit(root_bufs.content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
2238 				result_bm);
2239 
2240 		/* get the first profile that is associated with rid */
2241 		prof = find_first_bit(recipe_to_profile[idx],
2242 				      ICE_MAX_NUM_PROFILES);
2243 		for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
2244 			u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
2245 
2246 			rg_entry->fv_idx[i] = lkup_indx;
2247 			rg_entry->fv_mask[i] =
2248 				le16_to_cpu(root_bufs.content.mask[i + 1]);
2249 
2250 			/* If the recipe is a chained recipe then all its
2251 			 * child recipe's result will have a result index.
2252 			 * To fill fv_words we should not use those result
2253 			 * index, we only need the protocol ids and offsets.
2254 			 * We will skip all the fv_idx which stores result
2255 			 * index in them. We also need to skip any fv_idx which
2256 			 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
2257 			 * valid offset value.
2258 			 */
2259 			if (test_bit(rg_entry->fv_idx[i], hw->switch_info->prof_res_bm[prof]) ||
2260 			    rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
2261 			    rg_entry->fv_idx[i] == 0)
2262 				continue;
2263 
2264 			ice_find_prot_off(hw, ICE_BLK_SW, prof,
2265 					  rg_entry->fv_idx[i], &prot, &off);
2266 			lkup_exts->fv_words[fv_word_idx].prot_id = prot;
2267 			lkup_exts->fv_words[fv_word_idx].off = off;
2268 			lkup_exts->field_mask[fv_word_idx] =
2269 				rg_entry->fv_mask[i];
2270 			fv_word_idx++;
2271 		}
2272 		/* populate rg_list with the data from the child entry of this
2273 		 * recipe
2274 		 */
2275 		list_add(&rg_entry->l_entry, &recps[rid].rg_list);
2276 
2277 		/* Propagate some data to the recipe database */
2278 		recps[idx].is_root = !!is_root;
2279 		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2280 		bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
2281 		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
2282 			recps[idx].chain_idx = root_bufs.content.result_indx &
2283 				~ICE_AQ_RECIPE_RESULT_EN;
2284 			set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
2285 		} else {
2286 			recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
2287 		}
2288 
2289 		if (!is_root)
2290 			continue;
2291 
2292 		/* Only do the following for root recipes entries */
2293 		memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
2294 		       sizeof(recps[idx].r_bitmap));
2295 		recps[idx].root_rid = root_bufs.content.rid &
2296 			~ICE_AQ_RECIPE_ID_IS_ROOT;
2297 		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2298 	}
2299 
2300 	/* Complete initialization of the root recipe entry */
2301 	lkup_exts->n_val_words = fv_word_idx;
2302 	recps[rid].big_recp = (num_recps > 1);
2303 	recps[rid].n_grp_count = (u8)num_recps;
2304 	recps[rid].root_buf = devm_kmemdup(ice_hw_to_dev(hw), tmp,
2305 					   recps[rid].n_grp_count * sizeof(*recps[rid].root_buf),
2306 					   GFP_KERNEL);
2307 	if (!recps[rid].root_buf) {
2308 		status = -ENOMEM;
2309 		goto err_unroll;
2310 	}
2311 
2312 	/* Copy result indexes */
2313 	bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
2314 	recps[rid].recp_created = true;
2315 
2316 err_unroll:
2317 	kfree(tmp);
2318 	return status;
2319 }
2320 
2321 /* ice_init_port_info - Initialize port_info with switch configuration data
2322  * @pi: pointer to port_info
2323  * @vsi_port_num: VSI number or port number
2324  * @type: Type of switch element (port or VSI)
2325  * @swid: switch ID of the switch the element is attached to
2326  * @pf_vf_num: PF or VF number
2327  * @is_vf: true if the element is a VF, false otherwise
2328  */
2329 static void
2330 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
2331 		   u16 swid, u16 pf_vf_num, bool is_vf)
2332 {
2333 	switch (type) {
2334 	case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2335 		pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
2336 		pi->sw_id = swid;
2337 		pi->pf_vf_num = pf_vf_num;
2338 		pi->is_vf = is_vf;
2339 		break;
2340 	default:
2341 		ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
2342 		break;
2343 	}
2344 }
2345 
2346 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
2347  * @hw: pointer to the hardware structure
2348  */
2349 int ice_get_initial_sw_cfg(struct ice_hw *hw)
2350 {
2351 	struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
2352 	u16 req_desc = 0;
2353 	u16 num_elems;
2354 	int status;
2355 	u16 i;
2356 
2357 	rbuf = kzalloc(ICE_SW_CFG_MAX_BUF_LEN, GFP_KERNEL);
2358 	if (!rbuf)
2359 		return -ENOMEM;
2360 
2361 	/* Multiple calls to ice_aq_get_sw_cfg may be required
2362 	 * to get all the switch configuration information. The need
2363 	 * for additional calls is indicated by ice_aq_get_sw_cfg
2364 	 * writing a non-zero value in req_desc
2365 	 */
2366 	do {
2367 		struct ice_aqc_get_sw_cfg_resp_elem *ele;
2368 
2369 		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
2370 					   &req_desc, &num_elems, NULL);
2371 
2372 		if (status)
2373 			break;
2374 
2375 		for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
2376 			u16 pf_vf_num, swid, vsi_port_num;
2377 			bool is_vf = false;
2378 			u8 res_type;
2379 
2380 			vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
2381 				ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
2382 
2383 			pf_vf_num = le16_to_cpu(ele->pf_vf_num) &
2384 				ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
2385 
2386 			swid = le16_to_cpu(ele->swid);
2387 
2388 			if (le16_to_cpu(ele->pf_vf_num) &
2389 			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
2390 				is_vf = true;
2391 
2392 			res_type = (u8)(le16_to_cpu(ele->vsi_port_num) >>
2393 					ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
2394 
2395 			if (res_type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
2396 				/* FW VSI is not needed. Just continue. */
2397 				continue;
2398 			}
2399 
2400 			ice_init_port_info(hw->port_info, vsi_port_num,
2401 					   res_type, swid, pf_vf_num, is_vf);
2402 		}
2403 	} while (req_desc && !status);
2404 
2405 	kfree(rbuf);
2406 	return status;
2407 }
2408 
2409 /**
2410  * ice_fill_sw_info - Helper function to populate lb_en and lan_en
2411  * @hw: pointer to the hardware structure
2412  * @fi: filter info structure to fill/update
2413  *
2414  * This helper function populates the lb_en and lan_en elements of the provided
2415  * ice_fltr_info struct using the switch's type and characteristics of the
2416  * switch rule being configured.
2417  */
2418 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
2419 {
2420 	fi->lb_en = false;
2421 	fi->lan_en = false;
2422 	if ((fi->flag & ICE_FLTR_TX) &&
2423 	    (fi->fltr_act == ICE_FWD_TO_VSI ||
2424 	     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2425 	     fi->fltr_act == ICE_FWD_TO_Q ||
2426 	     fi->fltr_act == ICE_FWD_TO_QGRP)) {
2427 		/* Setting LB for prune actions will result in replicated
2428 		 * packets to the internal switch that will be dropped.
2429 		 */
2430 		if (fi->lkup_type != ICE_SW_LKUP_VLAN)
2431 			fi->lb_en = true;
2432 
2433 		/* Set lan_en to TRUE if
2434 		 * 1. The switch is a VEB AND
2435 		 * 2
2436 		 * 2.1 The lookup is a directional lookup like ethertype,
2437 		 * promiscuous, ethertype-MAC, promiscuous-VLAN
2438 		 * and default-port OR
2439 		 * 2.2 The lookup is VLAN, OR
2440 		 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
2441 		 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
2442 		 *
2443 		 * OR
2444 		 *
2445 		 * The switch is a VEPA.
2446 		 *
2447 		 * In all other cases, the LAN enable has to be set to false.
2448 		 */
2449 		if (hw->evb_veb) {
2450 			if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2451 			    fi->lkup_type == ICE_SW_LKUP_PROMISC ||
2452 			    fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2453 			    fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2454 			    fi->lkup_type == ICE_SW_LKUP_DFLT ||
2455 			    fi->lkup_type == ICE_SW_LKUP_VLAN ||
2456 			    (fi->lkup_type == ICE_SW_LKUP_MAC &&
2457 			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
2458 			    (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
2459 			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)))
2460 				fi->lan_en = true;
2461 		} else {
2462 			fi->lan_en = true;
2463 		}
2464 	}
2465 }
2466 
2467 /**
2468  * ice_fill_sw_rule - Helper function to fill switch rule structure
2469  * @hw: pointer to the hardware structure
2470  * @f_info: entry containing packet forwarding information
2471  * @s_rule: switch rule structure to be filled in based on mac_entry
2472  * @opc: switch rules population command type - pass in the command opcode
2473  */
2474 static void
2475 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
2476 		 struct ice_sw_rule_lkup_rx_tx *s_rule,
2477 		 enum ice_adminq_opc opc)
2478 {
2479 	u16 vlan_id = ICE_MAX_VLAN_ID + 1;
2480 	u16 vlan_tpid = ETH_P_8021Q;
2481 	void *daddr = NULL;
2482 	u16 eth_hdr_sz;
2483 	u8 *eth_hdr;
2484 	u32 act = 0;
2485 	__be16 *off;
2486 	u8 q_rgn;
2487 
2488 	if (opc == ice_aqc_opc_remove_sw_rules) {
2489 		s_rule->act = 0;
2490 		s_rule->index = cpu_to_le16(f_info->fltr_rule_id);
2491 		s_rule->hdr_len = 0;
2492 		return;
2493 	}
2494 
2495 	eth_hdr_sz = sizeof(dummy_eth_header);
2496 	eth_hdr = s_rule->hdr_data;
2497 
2498 	/* initialize the ether header with a dummy header */
2499 	memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);
2500 	ice_fill_sw_info(hw, f_info);
2501 
2502 	switch (f_info->fltr_act) {
2503 	case ICE_FWD_TO_VSI:
2504 		act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
2505 			ICE_SINGLE_ACT_VSI_ID_M;
2506 		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2507 			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2508 				ICE_SINGLE_ACT_VALID_BIT;
2509 		break;
2510 	case ICE_FWD_TO_VSI_LIST:
2511 		act |= ICE_SINGLE_ACT_VSI_LIST;
2512 		act |= (f_info->fwd_id.vsi_list_id <<
2513 			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
2514 			ICE_SINGLE_ACT_VSI_LIST_ID_M;
2515 		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2516 			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2517 				ICE_SINGLE_ACT_VALID_BIT;
2518 		break;
2519 	case ICE_FWD_TO_Q:
2520 		act |= ICE_SINGLE_ACT_TO_Q;
2521 		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2522 			ICE_SINGLE_ACT_Q_INDEX_M;
2523 		break;
2524 	case ICE_DROP_PACKET:
2525 		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
2526 			ICE_SINGLE_ACT_VALID_BIT;
2527 		break;
2528 	case ICE_FWD_TO_QGRP:
2529 		q_rgn = f_info->qgrp_size > 0 ?
2530 			(u8)ilog2(f_info->qgrp_size) : 0;
2531 		act |= ICE_SINGLE_ACT_TO_Q;
2532 		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2533 			ICE_SINGLE_ACT_Q_INDEX_M;
2534 		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
2535 			ICE_SINGLE_ACT_Q_REGION_M;
2536 		break;
2537 	default:
2538 		return;
2539 	}
2540 
2541 	if (f_info->lb_en)
2542 		act |= ICE_SINGLE_ACT_LB_ENABLE;
2543 	if (f_info->lan_en)
2544 		act |= ICE_SINGLE_ACT_LAN_ENABLE;
2545 
2546 	switch (f_info->lkup_type) {
2547 	case ICE_SW_LKUP_MAC:
2548 		daddr = f_info->l_data.mac.mac_addr;
2549 		break;
2550 	case ICE_SW_LKUP_VLAN:
2551 		vlan_id = f_info->l_data.vlan.vlan_id;
2552 		if (f_info->l_data.vlan.tpid_valid)
2553 			vlan_tpid = f_info->l_data.vlan.tpid;
2554 		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
2555 		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
2556 			act |= ICE_SINGLE_ACT_PRUNE;
2557 			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
2558 		}
2559 		break;
2560 	case ICE_SW_LKUP_ETHERTYPE_MAC:
2561 		daddr = f_info->l_data.ethertype_mac.mac_addr;
2562 		fallthrough;
2563 	case ICE_SW_LKUP_ETHERTYPE:
2564 		off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
2565 		*off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
2566 		break;
2567 	case ICE_SW_LKUP_MAC_VLAN:
2568 		daddr = f_info->l_data.mac_vlan.mac_addr;
2569 		vlan_id = f_info->l_data.mac_vlan.vlan_id;
2570 		break;
2571 	case ICE_SW_LKUP_PROMISC_VLAN:
2572 		vlan_id = f_info->l_data.mac_vlan.vlan_id;
2573 		fallthrough;
2574 	case ICE_SW_LKUP_PROMISC:
2575 		daddr = f_info->l_data.mac_vlan.mac_addr;
2576 		break;
2577 	default:
2578 		break;
2579 	}
2580 
2581 	s_rule->hdr.type = (f_info->flag & ICE_FLTR_RX) ?
2582 		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
2583 		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
2584 
2585 	/* Recipe set depending on lookup type */
2586 	s_rule->recipe_id = cpu_to_le16(f_info->lkup_type);
2587 	s_rule->src = cpu_to_le16(f_info->src);
2588 	s_rule->act = cpu_to_le32(act);
2589 
2590 	if (daddr)
2591 		ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);
2592 
2593 	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
2594 		off = (__force __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
2595 		*off = cpu_to_be16(vlan_id);
2596 		off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
2597 		*off = cpu_to_be16(vlan_tpid);
2598 	}
2599 
2600 	/* Create the switch rule with the final dummy Ethernet header */
2601 	if (opc != ice_aqc_opc_update_sw_rules)
2602 		s_rule->hdr_len = cpu_to_le16(eth_hdr_sz);
2603 }
2604 
2605 /**
2606  * ice_add_marker_act
2607  * @hw: pointer to the hardware structure
2608  * @m_ent: the management entry for which sw marker needs to be added
2609  * @sw_marker: sw marker to tag the Rx descriptor with
2610  * @l_id: large action resource ID
2611  *
2612  * Create a large action to hold software marker and update the switch rule
2613  * entry pointed by m_ent with newly created large action
2614  */
2615 static int
2616 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2617 		   u16 sw_marker, u16 l_id)
2618 {
2619 	struct ice_sw_rule_lkup_rx_tx *rx_tx;
2620 	struct ice_sw_rule_lg_act *lg_act;
2621 	/* For software marker we need 3 large actions
2622 	 * 1. FWD action: FWD TO VSI or VSI LIST
2623 	 * 2. GENERIC VALUE action to hold the profile ID
2624 	 * 3. GENERIC VALUE action to hold the software marker ID
2625 	 */
2626 	const u16 num_lg_acts = 3;
2627 	u16 lg_act_size;
2628 	u16 rules_size;
2629 	int status;
2630 	u32 act;
2631 	u16 id;
2632 
2633 	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2634 		return -EINVAL;
2635 
2636 	/* Create two back-to-back switch rules and submit them to the HW using
2637 	 * one memory buffer:
2638 	 *    1. Large Action
2639 	 *    2. Look up Tx Rx
2640 	 */
2641 	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(lg_act, num_lg_acts);
2642 	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(rx_tx);
2643 	lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
2644 	if (!lg_act)
2645 		return -ENOMEM;
2646 
2647 	rx_tx = (typeof(rx_tx))((u8 *)lg_act + lg_act_size);
2648 
2649 	/* Fill in the first switch rule i.e. large action */
2650 	lg_act->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
2651 	lg_act->index = cpu_to_le16(l_id);
2652 	lg_act->size = cpu_to_le16(num_lg_acts);
2653 
2654 	/* First action VSI forwarding or VSI list forwarding depending on how
2655 	 * many VSIs
2656 	 */
2657 	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2658 		m_ent->fltr_info.fwd_id.hw_vsi_id;
2659 
2660 	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2661 	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
2662 	if (m_ent->vsi_count > 1)
2663 		act |= ICE_LG_ACT_VSI_LIST;
2664 	lg_act->act[0] = cpu_to_le32(act);
2665 
2666 	/* Second action descriptor type */
2667 	act = ICE_LG_ACT_GENERIC;
2668 
2669 	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2670 	lg_act->act[1] = cpu_to_le32(act);
2671 
2672 	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2673 	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2674 
2675 	/* Third action Marker value */
2676 	act |= ICE_LG_ACT_GENERIC;
2677 	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2678 		ICE_LG_ACT_GENERIC_VALUE_M;
2679 
2680 	lg_act->act[2] = cpu_to_le32(act);
2681 
2682 	/* call the fill switch rule to fill the lookup Tx Rx structure */
2683 	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2684 			 ice_aqc_opc_update_sw_rules);
2685 
2686 	/* Update the action to point to the large action ID */
2687 	rx_tx->act = cpu_to_le32(ICE_SINGLE_ACT_PTR |
2688 				 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2689 				  ICE_SINGLE_ACT_PTR_VAL_M));
2690 
2691 	/* Use the filter rule ID of the previously created rule with single
2692 	 * act. Once the update happens, hardware will treat this as large
2693 	 * action
2694 	 */
2695 	rx_tx->index = cpu_to_le16(m_ent->fltr_info.fltr_rule_id);
2696 
2697 	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2698 				 ice_aqc_opc_update_sw_rules, NULL);
2699 	if (!status) {
2700 		m_ent->lg_act_idx = l_id;
2701 		m_ent->sw_marker_id = sw_marker;
2702 	}
2703 
2704 	devm_kfree(ice_hw_to_dev(hw), lg_act);
2705 	return status;
2706 }
2707 
2708 /**
2709  * ice_create_vsi_list_map
2710  * @hw: pointer to the hardware structure
2711  * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2712  * @num_vsi: number of VSI handles in the array
2713  * @vsi_list_id: VSI list ID generated as part of allocate resource
2714  *
2715  * Helper function to create a new entry of VSI list ID to VSI mapping
2716  * using the given VSI list ID
2717  */
2718 static struct ice_vsi_list_map_info *
2719 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2720 			u16 vsi_list_id)
2721 {
2722 	struct ice_switch_info *sw = hw->switch_info;
2723 	struct ice_vsi_list_map_info *v_map;
2724 	int i;
2725 
2726 	v_map = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*v_map), GFP_KERNEL);
2727 	if (!v_map)
2728 		return NULL;
2729 
2730 	v_map->vsi_list_id = vsi_list_id;
2731 	v_map->ref_cnt = 1;
2732 	for (i = 0; i < num_vsi; i++)
2733 		set_bit(vsi_handle_arr[i], v_map->vsi_map);
2734 
2735 	list_add(&v_map->list_entry, &sw->vsi_list_map_head);
2736 	return v_map;
2737 }
2738 
2739 /**
2740  * ice_update_vsi_list_rule
2741  * @hw: pointer to the hardware structure
2742  * @vsi_handle_arr: array of VSI handles to form a VSI list
2743  * @num_vsi: number of VSI handles in the array
2744  * @vsi_list_id: VSI list ID generated as part of allocate resource
2745  * @remove: Boolean value to indicate if this is a remove action
2746  * @opc: switch rules population command type - pass in the command opcode
2747  * @lkup_type: lookup type of the filter
2748  *
2749  * Call AQ command to add a new switch rule or update existing switch rule
2750  * using the given VSI list ID
2751  */
2752 static int
2753 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2754 			 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2755 			 enum ice_sw_lkup_type lkup_type)
2756 {
2757 	struct ice_sw_rule_vsi_list *s_rule;
2758 	u16 s_rule_size;
2759 	u16 rule_type;
2760 	int status;
2761 	int i;
2762 
2763 	if (!num_vsi)
2764 		return -EINVAL;
2765 
2766 	if (lkup_type == ICE_SW_LKUP_MAC ||
2767 	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2768 	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2769 	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2770 	    lkup_type == ICE_SW_LKUP_PROMISC ||
2771 	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2772 	    lkup_type == ICE_SW_LKUP_DFLT)
2773 		rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2774 			ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2775 	else if (lkup_type == ICE_SW_LKUP_VLAN)
2776 		rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2777 			ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2778 	else
2779 		return -EINVAL;
2780 
2781 	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, num_vsi);
2782 	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
2783 	if (!s_rule)
2784 		return -ENOMEM;
2785 	for (i = 0; i < num_vsi; i++) {
2786 		if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2787 			status = -EINVAL;
2788 			goto exit;
2789 		}
2790 		/* AQ call requires hw_vsi_id(s) */
2791 		s_rule->vsi[i] =
2792 			cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2793 	}
2794 
2795 	s_rule->hdr.type = cpu_to_le16(rule_type);
2796 	s_rule->number_vsi = cpu_to_le16(num_vsi);
2797 	s_rule->index = cpu_to_le16(vsi_list_id);
2798 
2799 	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2800 
2801 exit:
2802 	devm_kfree(ice_hw_to_dev(hw), s_rule);
2803 	return status;
2804 }
2805 
2806 /**
2807  * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2808  * @hw: pointer to the HW struct
2809  * @vsi_handle_arr: array of VSI handles to form a VSI list
2810  * @num_vsi: number of VSI handles in the array
2811  * @vsi_list_id: stores the ID of the VSI list to be created
2812  * @lkup_type: switch rule filter's lookup type
2813  */
2814 static int
2815 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2816 			 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2817 {
2818 	int status;
2819 
2820 	status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2821 					    ice_aqc_opc_alloc_res);
2822 	if (status)
2823 		return status;
2824 
2825 	/* Update the newly created VSI list to include the specified VSIs */
2826 	return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2827 					*vsi_list_id, false,
2828 					ice_aqc_opc_add_sw_rules, lkup_type);
2829 }
2830 
2831 /**
2832  * ice_create_pkt_fwd_rule
2833  * @hw: pointer to the hardware structure
2834  * @f_entry: entry containing packet forwarding information
2835  *
2836  * Create switch rule with given filter information and add an entry
2837  * to the corresponding filter management list to track this switch rule
2838  * and VSI mapping
2839  */
2840 static int
2841 ice_create_pkt_fwd_rule(struct ice_hw *hw,
2842 			struct ice_fltr_list_entry *f_entry)
2843 {
2844 	struct ice_fltr_mgmt_list_entry *fm_entry;
2845 	struct ice_sw_rule_lkup_rx_tx *s_rule;
2846 	enum ice_sw_lkup_type l_type;
2847 	struct ice_sw_recipe *recp;
2848 	int status;
2849 
2850 	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
2851 			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule),
2852 			      GFP_KERNEL);
2853 	if (!s_rule)
2854 		return -ENOMEM;
2855 	fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
2856 				GFP_KERNEL);
2857 	if (!fm_entry) {
2858 		status = -ENOMEM;
2859 		goto ice_create_pkt_fwd_rule_exit;
2860 	}
2861 
2862 	fm_entry->fltr_info = f_entry->fltr_info;
2863 
2864 	/* Initialize all the fields for the management entry */
2865 	fm_entry->vsi_count = 1;
2866 	fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2867 	fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2868 	fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2869 
2870 	ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2871 			 ice_aqc_opc_add_sw_rules);
2872 
2873 	status = ice_aq_sw_rules(hw, s_rule,
2874 				 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), 1,
2875 				 ice_aqc_opc_add_sw_rules, NULL);
2876 	if (status) {
2877 		devm_kfree(ice_hw_to_dev(hw), fm_entry);
2878 		goto ice_create_pkt_fwd_rule_exit;
2879 	}
2880 
2881 	f_entry->fltr_info.fltr_rule_id = le16_to_cpu(s_rule->index);
2882 	fm_entry->fltr_info.fltr_rule_id = le16_to_cpu(s_rule->index);
2883 
2884 	/* The book keeping entries will get removed when base driver
2885 	 * calls remove filter AQ command
2886 	 */
2887 	l_type = fm_entry->fltr_info.lkup_type;
2888 	recp = &hw->switch_info->recp_list[l_type];
2889 	list_add(&fm_entry->list_entry, &recp->filt_rules);
2890 
2891 ice_create_pkt_fwd_rule_exit:
2892 	devm_kfree(ice_hw_to_dev(hw), s_rule);
2893 	return status;
2894 }
2895 
2896 /**
2897  * ice_update_pkt_fwd_rule
2898  * @hw: pointer to the hardware structure
2899  * @f_info: filter information for switch rule
2900  *
2901  * Call AQ command to update a previously created switch rule with a
2902  * VSI list ID
2903  */
2904 static int
2905 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2906 {
2907 	struct ice_sw_rule_lkup_rx_tx *s_rule;
2908 	int status;
2909 
2910 	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
2911 			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule),
2912 			      GFP_KERNEL);
2913 	if (!s_rule)
2914 		return -ENOMEM;
2915 
2916 	ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
2917 
2918 	s_rule->index = cpu_to_le16(f_info->fltr_rule_id);
2919 
2920 	/* Update switch rule with new rule set to forward VSI list */
2921 	status = ice_aq_sw_rules(hw, s_rule,
2922 				 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), 1,
2923 				 ice_aqc_opc_update_sw_rules, NULL);
2924 
2925 	devm_kfree(ice_hw_to_dev(hw), s_rule);
2926 	return status;
2927 }
2928 
2929 /**
2930  * ice_update_sw_rule_bridge_mode
2931  * @hw: pointer to the HW struct
2932  *
2933  * Updates unicast switch filter rules based on VEB/VEPA mode
2934  */
2935 int ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2936 {
2937 	struct ice_switch_info *sw = hw->switch_info;
2938 	struct ice_fltr_mgmt_list_entry *fm_entry;
2939 	struct list_head *rule_head;
2940 	struct mutex *rule_lock; /* Lock to protect filter rule list */
2941 	int status = 0;
2942 
2943 	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2944 	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2945 
2946 	mutex_lock(rule_lock);
2947 	list_for_each_entry(fm_entry, rule_head, list_entry) {
2948 		struct ice_fltr_info *fi = &fm_entry->fltr_info;
2949 		u8 *addr = fi->l_data.mac.mac_addr;
2950 
2951 		/* Update unicast Tx rules to reflect the selected
2952 		 * VEB/VEPA mode
2953 		 */
2954 		if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) &&
2955 		    (fi->fltr_act == ICE_FWD_TO_VSI ||
2956 		     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2957 		     fi->fltr_act == ICE_FWD_TO_Q ||
2958 		     fi->fltr_act == ICE_FWD_TO_QGRP)) {
2959 			status = ice_update_pkt_fwd_rule(hw, fi);
2960 			if (status)
2961 				break;
2962 		}
2963 	}
2964 
2965 	mutex_unlock(rule_lock);
2966 
2967 	return status;
2968 }
2969 
2970 /**
2971  * ice_add_update_vsi_list
2972  * @hw: pointer to the hardware structure
2973  * @m_entry: pointer to current filter management list entry
2974  * @cur_fltr: filter information from the book keeping entry
2975  * @new_fltr: filter information with the new VSI to be added
2976  *
2977  * Call AQ command to add or update previously created VSI list with new VSI.
2978  *
2979  * Helper function to do book keeping associated with adding filter information
2980  * The algorithm to do the book keeping is described below :
2981  * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2982  *	if only one VSI has been added till now
2983  *		Allocate a new VSI list and add two VSIs
2984  *		to this list using switch rule command
2985  *		Update the previously created switch rule with the
2986  *		newly created VSI list ID
2987  *	if a VSI list was previously created
2988  *		Add the new VSI to the previously created VSI list set
2989  *		using the update switch rule command
2990  */
static int
ice_add_update_vsi_list(struct ice_hw *hw,
			struct ice_fltr_mgmt_list_entry *m_entry,
			struct ice_fltr_info *cur_fltr,
			struct ice_fltr_info *new_fltr)
{
	u16 vsi_list_id = 0;
	int status = 0;

	/* Rules that forward to a queue/queue-group cannot be converted
	 * into a VSI list
	 */
	if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
	     cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
		return -EOPNOTSUPP;

	/* Likewise, a queue-forwarding request cannot be merged into an
	 * existing VSI/VSI-list rule
	 */
	if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
		return -EOPNOTSUPP;

	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		/* Only one entry existed in the mapping and it was not already
		 * a part of a VSI list. So, create a VSI list with the old and
		 * new VSIs.
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];

		/* A rule already exists with the new VSI being added */
		if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
			return -EEXIST;

		vsi_handle_arr[0] = cur_fltr->vsi_handle;
		vsi_handle_arr[1] = new_fltr->vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id,
						  new_fltr->lkup_type);
		if (status)
			return status;

		tmp_fltr = *new_fltr;
		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		/* Update the previous switch rule of "MAC forward to VSI" to
		 * "MAC fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			return status;

		/* keep SW book keeping in sync with the rule just updated */
		cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);

		/* NOTE(review): on allocation failure the HW rule has already
		 * been converted to a VSI list but no SW map exists for it —
		 * confirm callers tolerate this partial state
		 */
		if (!m_entry->vsi_list_info)
			return -ENOMEM;

		/* If this entry was large action then the large action needs
		 * to be updated to point to FWD to VSI list
		 */
		if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
			status =
			    ice_add_marker_act(hw, m_entry,
					       m_entry->sw_marker_id,
					       m_entry->lg_act_idx);
	} else {
		u16 vsi_handle = new_fltr->vsi_handle;
		enum ice_adminq_opc opcode;

		/* a VSI list map must already exist in this path */
		if (!m_entry->vsi_list_info)
			return -EIO;

		/* A rule already exists with the new VSI being added */
		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
			return 0;

		/* Update the previously created VSI list set with
		 * the new VSI ID passed in
		 */
		vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
		opcode = ice_aqc_opc_update_sw_rules;

		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
						  vsi_list_id, false, opcode,
						  new_fltr->lkup_type);
		/* update VSI list mapping info with new VSI ID */
		if (!status)
			set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
	}
	/* bump the subscriber count only when the update succeeded */
	if (!status)
		m_entry->vsi_count++;
	return status;
}
3086 
3087 /**
3088  * ice_find_rule_entry - Search a rule entry
3089  * @hw: pointer to the hardware structure
3090  * @recp_id: lookup type for which the specified rule needs to be searched
3091  * @f_info: rule information
3092  *
3093  * Helper function to search for a given rule entry
3094  * Returns pointer to entry storing the rule if found
3095  */
3096 static struct ice_fltr_mgmt_list_entry *
3097 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
3098 {
3099 	struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
3100 	struct ice_switch_info *sw = hw->switch_info;
3101 	struct list_head *list_head;
3102 
3103 	list_head = &sw->recp_list[recp_id].filt_rules;
3104 	list_for_each_entry(list_itr, list_head, list_entry) {
3105 		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3106 			    sizeof(f_info->l_data)) &&
3107 		    f_info->flag == list_itr->fltr_info.flag) {
3108 			ret = list_itr;
3109 			break;
3110 		}
3111 	}
3112 	return ret;
3113 }
3114 
3115 /**
3116  * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
3117  * @hw: pointer to the hardware structure
3118  * @recp_id: lookup type for which VSI lists needs to be searched
3119  * @vsi_handle: VSI handle to be found in VSI list
3120  * @vsi_list_id: VSI list ID found containing vsi_handle
3121  *
3122  * Helper function to search a VSI list with single entry containing given VSI
3123  * handle element. This can be extended further to search VSI list with more
3124  * than 1 vsi_count. Returns pointer to VSI list entry if found.
3125  */
3126 static struct ice_vsi_list_map_info *
3127 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
3128 			u16 *vsi_list_id)
3129 {
3130 	struct ice_vsi_list_map_info *map_info = NULL;
3131 	struct ice_switch_info *sw = hw->switch_info;
3132 	struct ice_fltr_mgmt_list_entry *list_itr;
3133 	struct list_head *list_head;
3134 
3135 	list_head = &sw->recp_list[recp_id].filt_rules;
3136 	list_for_each_entry(list_itr, list_head, list_entry) {
3137 		if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
3138 			map_info = list_itr->vsi_list_info;
3139 			if (test_bit(vsi_handle, map_info->vsi_map)) {
3140 				*vsi_list_id = map_info->vsi_list_id;
3141 				return map_info;
3142 			}
3143 		}
3144 	}
3145 	return NULL;
3146 }
3147 
3148 /**
3149  * ice_add_rule_internal - add rule for a given lookup type
3150  * @hw: pointer to the hardware structure
3151  * @recp_id: lookup type (recipe ID) for which rule has to be added
3152  * @f_entry: structure containing MAC forwarding information
3153  *
3154  * Adds or updates the rule lists for a given recipe
3155  */
3156 static int
3157 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
3158 		      struct ice_fltr_list_entry *f_entry)
3159 {
3160 	struct ice_switch_info *sw = hw->switch_info;
3161 	struct ice_fltr_info *new_fltr, *cur_fltr;
3162 	struct ice_fltr_mgmt_list_entry *m_entry;
3163 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3164 	int status = 0;
3165 
3166 	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3167 		return -EINVAL;
3168 	f_entry->fltr_info.fwd_id.hw_vsi_id =
3169 		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3170 
3171 	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
3172 
3173 	mutex_lock(rule_lock);
3174 	new_fltr = &f_entry->fltr_info;
3175 	if (new_fltr->flag & ICE_FLTR_RX)
3176 		new_fltr->src = hw->port_info->lport;
3177 	else if (new_fltr->flag & ICE_FLTR_TX)
3178 		new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id;
3179 
3180 	m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
3181 	if (!m_entry) {
3182 		mutex_unlock(rule_lock);
3183 		return ice_create_pkt_fwd_rule(hw, f_entry);
3184 	}
3185 
3186 	cur_fltr = &m_entry->fltr_info;
3187 	status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
3188 	mutex_unlock(rule_lock);
3189 
3190 	return status;
3191 }
3192 
3193 /**
3194  * ice_remove_vsi_list_rule
3195  * @hw: pointer to the hardware structure
3196  * @vsi_list_id: VSI list ID generated as part of allocate resource
3197  * @lkup_type: switch rule filter lookup type
3198  *
3199  * The VSI list should be emptied before this function is called to remove the
3200  * VSI list.
3201  */
3202 static int
3203 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
3204 			 enum ice_sw_lkup_type lkup_type)
3205 {
3206 	struct ice_sw_rule_vsi_list *s_rule;
3207 	u16 s_rule_size;
3208 	int status;
3209 
3210 	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, 0);
3211 	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
3212 	if (!s_rule)
3213 		return -ENOMEM;
3214 
3215 	s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
3216 	s_rule->index = cpu_to_le16(vsi_list_id);
3217 
3218 	/* Free the vsi_list resource that we allocated. It is assumed that the
3219 	 * list is empty at this point.
3220 	 */
3221 	status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
3222 					    ice_aqc_opc_free_res);
3223 
3224 	devm_kfree(ice_hw_to_dev(hw), s_rule);
3225 	return status;
3226 }
3227 
3228 /**
3229  * ice_rem_update_vsi_list
3230  * @hw: pointer to the hardware structure
3231  * @vsi_handle: VSI handle of the VSI to remove
3232  * @fm_list: filter management entry for which the VSI list management needs to
3233  *           be done
3234  */
static int
ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			struct ice_fltr_mgmt_list_entry *fm_list)
{
	enum ice_sw_lkup_type lkup_type;
	u16 vsi_list_id;
	int status = 0;

	/* only rules that forward to a non-empty VSI list can be trimmed */
	if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return -EINVAL;

	/* A rule with the VSI being removed does not exist.
	 * NOTE(review): vsi_list_info is dereferenced without a NULL check;
	 * presumably a FWD_TO_VSI_LIST entry always carries a list map —
	 * confirm against the add paths
	 */
	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
		return -ENOENT;

	lkup_type = fm_list->fltr_info.lkup_type;
	vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
	/* drop the VSI from the HW list first, then mirror it in SW state */
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	fm_list->vsi_count--;
	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);

	/* For non-VLAN rules, a list with a single remaining subscriber is
	 * converted back to a plain FWD_TO_VSI rule
	 */
	if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
		struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;
		u16 rem_vsi_handle;

		/* the last subscriber is whichever bit is still set */
		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
						ICE_MAX_VSI);
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return -EIO;

		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		/* repoint the rule directly at the remaining VSI */
		tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr_info.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		tmp_fltr_info.vsi_handle = rem_vsi_handle;
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr_info.fwd_id.hw_vsi_id, status);
			return status;
		}

		fm_list->fltr_info = tmp_fltr_info;
	}

	/* The list itself is freed once nothing references it: after the
	 * FWD_TO_VSI conversion above (non-VLAN), or once a VLAN list
	 * drains to zero subscribers
	 */
	if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
	    (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
			return status;
		}

		list_del(&vsi_list_info->list_entry);
		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}
3315 
3316 /**
3317  * ice_remove_rule_internal - Remove a filter rule of a given type
3318  * @hw: pointer to the hardware structure
3319  * @recp_id: recipe ID for which the rule needs to removed
3320  * @f_entry: rule entry containing filter information
3321  */
static int
ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
			 struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *list_elem;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	bool remove_rule = false;
	u16 vsi_handle;
	int status = 0;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return -EINVAL;
	/* translate the SW VSI handle to the HW VSI number used in rules */
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
	mutex_lock(rule_lock);
	list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
	if (!list_elem) {
		status = -ENOENT;
		goto exit;
	}

	if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
		/* single-destination rule: always remove it from HW */
		remove_rule = true;
	} else if (!list_elem->vsi_list_info) {
		/* FWD_TO_VSI_LIST without a list map is inconsistent state */
		status = -ENOENT;
		goto exit;
	} else if (list_elem->vsi_list_info->ref_cnt > 1) {
		/* a ref_cnt > 1 indicates that the vsi_list is being
		 * shared by multiple rules. Decrement the ref_cnt and
		 * remove this rule, but do not modify the list, as it
		 * is in-use by other rules.
		 */
		list_elem->vsi_list_info->ref_cnt--;
		remove_rule = true;
	} else {
		/* a ref_cnt of 1 indicates the vsi_list is only used
		 * by one rule. However, the original removal request is only
		 * for a single VSI. Update the vsi_list first, and only
		 * remove the rule if there are no further VSIs in this list.
		 */
		vsi_handle = f_entry->fltr_info.vsi_handle;
		status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
		if (status)
			goto exit;
		/* if VSI count goes to zero after updating the VSI list */
		if (list_elem->vsi_count == 0)
			remove_rule = true;
	}

	if (remove_rule) {
		/* Remove the lookup rule */
		struct ice_sw_rule_lkup_rx_tx *s_rule;

		s_rule = devm_kzalloc(ice_hw_to_dev(hw),
				      ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule),
				      GFP_KERNEL);
		if (!s_rule) {
			status = -ENOMEM;
			goto exit;
		}

		ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
				 ice_aqc_opc_remove_sw_rules);

		status = ice_aq_sw_rules(hw, s_rule,
					 ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule),
					 1, ice_aqc_opc_remove_sw_rules, NULL);

		/* Remove a book keeping from the list */
		devm_kfree(ice_hw_to_dev(hw), s_rule);

		if (status)
			goto exit;

		/* HW remove succeeded; drop the SW book keeping entry too */
		list_del(&list_elem->list_entry);
		devm_kfree(ice_hw_to_dev(hw), list_elem);
	}
exit:
	mutex_unlock(rule_lock);
	return status;
}
3406 
3407 /**
3408  * ice_mac_fltr_exist - does this MAC filter exist for given VSI
3409  * @hw: pointer to the hardware structure
3410  * @mac: MAC address to be checked (for MAC filter)
3411  * @vsi_handle: check MAC filter for this VSI
3412  */
3413 bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle)
3414 {
3415 	struct ice_fltr_mgmt_list_entry *entry;
3416 	struct list_head *rule_head;
3417 	struct ice_switch_info *sw;
3418 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3419 	u16 hw_vsi_id;
3420 
3421 	if (!ice_is_vsi_valid(hw, vsi_handle))
3422 		return false;
3423 
3424 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3425 	sw = hw->switch_info;
3426 	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3427 	if (!rule_head)
3428 		return false;
3429 
3430 	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3431 	mutex_lock(rule_lock);
3432 	list_for_each_entry(entry, rule_head, list_entry) {
3433 		struct ice_fltr_info *f_info = &entry->fltr_info;
3434 		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3435 
3436 		if (is_zero_ether_addr(mac_addr))
3437 			continue;
3438 
3439 		if (f_info->flag != ICE_FLTR_TX ||
3440 		    f_info->src_id != ICE_SRC_ID_VSI ||
3441 		    f_info->lkup_type != ICE_SW_LKUP_MAC ||
3442 		    f_info->fltr_act != ICE_FWD_TO_VSI ||
3443 		    hw_vsi_id != f_info->fwd_id.hw_vsi_id)
3444 			continue;
3445 
3446 		if (ether_addr_equal(mac, mac_addr)) {
3447 			mutex_unlock(rule_lock);
3448 			return true;
3449 		}
3450 	}
3451 	mutex_unlock(rule_lock);
3452 	return false;
3453 }
3454 
3455 /**
3456  * ice_vlan_fltr_exist - does this VLAN filter exist for given VSI
3457  * @hw: pointer to the hardware structure
3458  * @vlan_id: VLAN ID
3459  * @vsi_handle: check MAC filter for this VSI
3460  */
bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle)
{
	struct ice_fltr_mgmt_list_entry *entry;
	struct list_head *rule_head;
	struct ice_switch_info *sw;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	u16 hw_vsi_id;

	/* VLAN IDs are only 12 bits wide */
	if (vlan_id > ICE_MAX_VLAN_ID)
		return false;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return false;

	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
	sw = hw->switch_info;
	rule_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
	if (!rule_head)
		return false;

	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	mutex_lock(rule_lock);
	list_for_each_entry(entry, rule_head, list_entry) {
		struct ice_fltr_info *f_info = &entry->fltr_info;
		u16 entry_vlan_id = f_info->l_data.vlan.vlan_id;
		struct ice_vsi_list_map_info *map_info;

		/* skip entries whose stored VLAN ID is out of range */
		if (entry_vlan_id > ICE_MAX_VLAN_ID)
			continue;

		/* only Tx VLAN rules sourced from a VSI are candidates */
		if (f_info->flag != ICE_FLTR_TX ||
		    f_info->src_id != ICE_SRC_ID_VSI ||
		    f_info->lkup_type != ICE_SW_LKUP_VLAN)
			continue;

		/* Only allowed filter action are FWD_TO_VSI/_VSI_LIST */
		if (f_info->fltr_act != ICE_FWD_TO_VSI &&
		    f_info->fltr_act != ICE_FWD_TO_VSI_LIST)
			continue;

		if (f_info->fltr_act == ICE_FWD_TO_VSI) {
			if (hw_vsi_id != f_info->fwd_id.hw_vsi_id)
				continue;
		} else if (f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			/* If filter_action is FWD_TO_VSI_LIST, make sure
			 * that VSI being checked is part of VSI list
			 *
			 * NOTE(review): membership is only verified for
			 * single-VSI lists; entries with vsi_count != 1 or a
			 * missing map fall through and match on VLAN ID
			 * alone — confirm this is intentional
			 */
			if (entry->vsi_count == 1 &&
			    entry->vsi_list_info) {
				map_info = entry->vsi_list_info;
				if (!test_bit(vsi_handle, map_info->vsi_map))
					continue;
			}
		}

		if (vlan_id == entry_vlan_id) {
			mutex_unlock(rule_lock);
			return true;
		}
	}
	mutex_unlock(rule_lock);

	return false;
}
3525 
3526 /**
3527  * ice_add_mac - Add a MAC address based filter rule
3528  * @hw: pointer to the hardware structure
3529  * @m_list: list of MAC addresses and forwarding information
3530  */
3531 int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
3532 {
3533 	struct ice_fltr_list_entry *m_list_itr;
3534 	int status = 0;
3535 
3536 	if (!m_list || !hw)
3537 		return -EINVAL;
3538 
3539 	list_for_each_entry(m_list_itr, m_list, list_entry) {
3540 		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3541 		u16 vsi_handle;
3542 		u16 hw_vsi_id;
3543 
3544 		m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3545 		vsi_handle = m_list_itr->fltr_info.vsi_handle;
3546 		if (!ice_is_vsi_valid(hw, vsi_handle))
3547 			return -EINVAL;
3548 		hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3549 		m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3550 		/* update the src in case it is VSI num */
3551 		if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3552 			return -EINVAL;
3553 		m_list_itr->fltr_info.src = hw_vsi_id;
3554 		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3555 		    is_zero_ether_addr(add))
3556 			return -EINVAL;
3557 
3558 		m_list_itr->status = ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
3559 							   m_list_itr);
3560 		if (m_list_itr->status)
3561 			return m_list_itr->status;
3562 	}
3563 
3564 	return status;
3565 }
3566 
3567 /**
3568  * ice_add_vlan_internal - Add one VLAN based filter rule
3569  * @hw: pointer to the hardware structure
3570  * @f_entry: filter entry containing one VLAN information
3571  */
3572 static int
3573 ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
3574 {
3575 	struct ice_switch_info *sw = hw->switch_info;
3576 	struct ice_fltr_mgmt_list_entry *v_list_itr;
3577 	struct ice_fltr_info *new_fltr, *cur_fltr;
3578 	enum ice_sw_lkup_type lkup_type;
3579 	u16 vsi_list_id = 0, vsi_handle;
3580 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3581 	int status = 0;
3582 
3583 	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3584 		return -EINVAL;
3585 
3586 	f_entry->fltr_info.fwd_id.hw_vsi_id =
3587 		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3588 	new_fltr = &f_entry->fltr_info;
3589 
3590 	/* VLAN ID should only be 12 bits */
3591 	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3592 		return -EINVAL;
3593 
3594 	if (new_fltr->src_id != ICE_SRC_ID_VSI)
3595 		return -EINVAL;
3596 
3597 	new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3598 	lkup_type = new_fltr->lkup_type;
3599 	vsi_handle = new_fltr->vsi_handle;
3600 	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3601 	mutex_lock(rule_lock);
3602 	v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
3603 	if (!v_list_itr) {
3604 		struct ice_vsi_list_map_info *map_info = NULL;
3605 
3606 		if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3607 			/* All VLAN pruning rules use a VSI list. Check if
3608 			 * there is already a VSI list containing VSI that we
3609 			 * want to add. If found, use the same vsi_list_id for
3610 			 * this new VLAN rule or else create a new list.
3611 			 */
3612 			map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
3613 							   vsi_handle,
3614 							   &vsi_list_id);
3615 			if (!map_info) {
3616 				status = ice_create_vsi_list_rule(hw,
3617 								  &vsi_handle,
3618 								  1,
3619 								  &vsi_list_id,
3620 								  lkup_type);
3621 				if (status)
3622 					goto exit;
3623 			}
3624 			/* Convert the action to forwarding to a VSI list. */
3625 			new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3626 			new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3627 		}
3628 
3629 		status = ice_create_pkt_fwd_rule(hw, f_entry);
3630 		if (!status) {
3631 			v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
3632 							 new_fltr);
3633 			if (!v_list_itr) {
3634 				status = -ENOENT;
3635 				goto exit;
3636 			}
3637 			/* reuse VSI list for new rule and increment ref_cnt */
3638 			if (map_info) {
3639 				v_list_itr->vsi_list_info = map_info;
3640 				map_info->ref_cnt++;
3641 			} else {
3642 				v_list_itr->vsi_list_info =
3643 					ice_create_vsi_list_map(hw, &vsi_handle,
3644 								1, vsi_list_id);
3645 			}
3646 		}
3647 	} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3648 		/* Update existing VSI list to add new VSI ID only if it used
3649 		 * by one VLAN rule.
3650 		 */
3651 		cur_fltr = &v_list_itr->fltr_info;
3652 		status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3653 						 new_fltr);
3654 	} else {
3655 		/* If VLAN rule exists and VSI list being used by this rule is
3656 		 * referenced by more than 1 VLAN rule. Then create a new VSI
3657 		 * list appending previous VSI with new VSI and update existing
3658 		 * VLAN rule to point to new VSI list ID
3659 		 */
3660 		struct ice_fltr_info tmp_fltr;
3661 		u16 vsi_handle_arr[2];
3662 		u16 cur_handle;
3663 
3664 		/* Current implementation only supports reusing VSI list with
3665 		 * one VSI count. We should never hit below condition
3666 		 */
3667 		if (v_list_itr->vsi_count > 1 &&
3668 		    v_list_itr->vsi_list_info->ref_cnt > 1) {
3669 			ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3670 			status = -EIO;
3671 			goto exit;
3672 		}
3673 
3674 		cur_handle =
3675 			find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3676 				       ICE_MAX_VSI);
3677 
3678 		/* A rule already exists with the new VSI being added */
3679 		if (cur_handle == vsi_handle) {
3680 			status = -EEXIST;
3681 			goto exit;
3682 		}
3683 
3684 		vsi_handle_arr[0] = cur_handle;
3685 		vsi_handle_arr[1] = vsi_handle;
3686 		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3687 						  &vsi_list_id, lkup_type);
3688 		if (status)
3689 			goto exit;
3690 
3691 		tmp_fltr = v_list_itr->fltr_info;
3692 		tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3693 		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3694 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3695 		/* Update the previous switch rule to a new VSI list which
3696 		 * includes current VSI that is requested
3697 		 */
3698 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3699 		if (status)
3700 			goto exit;
3701 
3702 		/* before overriding VSI list map info. decrement ref_cnt of
3703 		 * previous VSI list
3704 		 */
3705 		v_list_itr->vsi_list_info->ref_cnt--;
3706 
3707 		/* now update to newly created list */
3708 		v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3709 		v_list_itr->vsi_list_info =
3710 			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3711 						vsi_list_id);
3712 		v_list_itr->vsi_count++;
3713 	}
3714 
3715 exit:
3716 	mutex_unlock(rule_lock);
3717 	return status;
3718 }
3719 
3720 /**
3721  * ice_add_vlan - Add VLAN based filter rule
3722  * @hw: pointer to the hardware structure
3723  * @v_list: list of VLAN entries and forwarding information
3724  */
3725 int ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
3726 {
3727 	struct ice_fltr_list_entry *v_list_itr;
3728 
3729 	if (!v_list || !hw)
3730 		return -EINVAL;
3731 
3732 	list_for_each_entry(v_list_itr, v_list, list_entry) {
3733 		if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3734 			return -EINVAL;
3735 		v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3736 		v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
3737 		if (v_list_itr->status)
3738 			return v_list_itr->status;
3739 	}
3740 	return 0;
3741 }
3742 
3743 /**
3744  * ice_add_eth_mac - Add ethertype and MAC based filter rule
3745  * @hw: pointer to the hardware structure
3746  * @em_list: list of ether type MAC filter, MAC is optional
3747  *
3748  * This function requires the caller to populate the entries in
3749  * the filter list with the necessary fields (including flags to
3750  * indicate Tx or Rx rules).
3751  */
3752 int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
3753 {
3754 	struct ice_fltr_list_entry *em_list_itr;
3755 
3756 	if (!em_list || !hw)
3757 		return -EINVAL;
3758 
3759 	list_for_each_entry(em_list_itr, em_list, list_entry) {
3760 		enum ice_sw_lkup_type l_type =
3761 			em_list_itr->fltr_info.lkup_type;
3762 
3763 		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3764 		    l_type != ICE_SW_LKUP_ETHERTYPE)
3765 			return -EINVAL;
3766 
3767 		em_list_itr->status = ice_add_rule_internal(hw, l_type,
3768 							    em_list_itr);
3769 		if (em_list_itr->status)
3770 			return em_list_itr->status;
3771 	}
3772 	return 0;
3773 }
3774 
3775 /**
3776  * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
3777  * @hw: pointer to the hardware structure
3778  * @em_list: list of ethertype or ethertype MAC entries
3779  */
3780 int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
3781 {
3782 	struct ice_fltr_list_entry *em_list_itr, *tmp;
3783 
3784 	if (!em_list || !hw)
3785 		return -EINVAL;
3786 
3787 	list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) {
3788 		enum ice_sw_lkup_type l_type =
3789 			em_list_itr->fltr_info.lkup_type;
3790 
3791 		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3792 		    l_type != ICE_SW_LKUP_ETHERTYPE)
3793 			return -EINVAL;
3794 
3795 		em_list_itr->status = ice_remove_rule_internal(hw, l_type,
3796 							       em_list_itr);
3797 		if (em_list_itr->status)
3798 			return em_list_itr->status;
3799 	}
3800 	return 0;
3801 }
3802 
3803 /**
3804  * ice_rem_sw_rule_info
3805  * @hw: pointer to the hardware structure
3806  * @rule_head: pointer to the switch list structure that we want to delete
3807  */
3808 static void
3809 ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
3810 {
3811 	if (!list_empty(rule_head)) {
3812 		struct ice_fltr_mgmt_list_entry *entry;
3813 		struct ice_fltr_mgmt_list_entry *tmp;
3814 
3815 		list_for_each_entry_safe(entry, tmp, rule_head, list_entry) {
3816 			list_del(&entry->list_entry);
3817 			devm_kfree(ice_hw_to_dev(hw), entry);
3818 		}
3819 	}
3820 }
3821 
3822 /**
3823  * ice_rem_adv_rule_info
3824  * @hw: pointer to the hardware structure
3825  * @rule_head: pointer to the switch list structure that we want to delete
3826  */
3827 static void
3828 ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
3829 {
3830 	struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3831 	struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3832 
3833 	if (list_empty(rule_head))
3834 		return;
3835 
3836 	list_for_each_entry_safe(lst_itr, tmp_entry, rule_head, list_entry) {
3837 		list_del(&lst_itr->list_entry);
3838 		devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
3839 		devm_kfree(ice_hw_to_dev(hw), lst_itr);
3840 	}
3841 }
3842 
3843 /**
3844  * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3845  * @pi: pointer to the port_info structure
3846  * @vsi_handle: VSI handle to set as default
3847  * @set: true to add the above mentioned switch rule, false to remove it
3848  * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3849  *
3850  * add filter rule to set/unset given VSI as default VSI for the switch
3851  * (represented by swid)
3852  */
3853 int
3854 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3855 		 u8 direction)
3856 {
3857 	struct ice_fltr_list_entry f_list_entry;
3858 	struct ice_fltr_info f_info;
3859 	struct ice_hw *hw = pi->hw;
3860 	u16 hw_vsi_id;
3861 	int status;
3862 
3863 	if (!ice_is_vsi_valid(hw, vsi_handle))
3864 		return -EINVAL;
3865 
3866 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3867 
3868 	memset(&f_info, 0, sizeof(f_info));
3869 
3870 	f_info.lkup_type = ICE_SW_LKUP_DFLT;
3871 	f_info.flag = direction;
3872 	f_info.fltr_act = ICE_FWD_TO_VSI;
3873 	f_info.fwd_id.hw_vsi_id = hw_vsi_id;
3874 	f_info.vsi_handle = vsi_handle;
3875 
3876 	if (f_info.flag & ICE_FLTR_RX) {
3877 		f_info.src = hw->port_info->lport;
3878 		f_info.src_id = ICE_SRC_ID_LPORT;
3879 	} else if (f_info.flag & ICE_FLTR_TX) {
3880 		f_info.src_id = ICE_SRC_ID_VSI;
3881 		f_info.src = hw_vsi_id;
3882 	}
3883 	f_list_entry.fltr_info = f_info;
3884 
3885 	if (set)
3886 		status = ice_add_rule_internal(hw, ICE_SW_LKUP_DFLT,
3887 					       &f_list_entry);
3888 	else
3889 		status = ice_remove_rule_internal(hw, ICE_SW_LKUP_DFLT,
3890 						  &f_list_entry);
3891 
3892 	return status;
3893 }
3894 
3895 /**
3896  * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3897  * @fm_entry: filter entry to inspect
3898  * @vsi_handle: VSI handle to compare with filter info
3899  */
3900 static bool
3901 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3902 {
3903 	return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3904 		 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3905 		(fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3906 		 fm_entry->vsi_list_info &&
3907 		 (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
3908 }
3909 
3910 /**
3911  * ice_check_if_dflt_vsi - check if VSI is default VSI
3912  * @pi: pointer to the port_info structure
3913  * @vsi_handle: vsi handle to check for in filter list
3914  * @rule_exists: indicates if there are any VSI's in the rule list
3915  *
3916  * checks if the VSI is in a default VSI list, and also indicates
3917  * if the default VSI list is empty
3918  */
3919 bool
3920 ice_check_if_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle,
3921 		      bool *rule_exists)
3922 {
3923 	struct ice_fltr_mgmt_list_entry *fm_entry;
3924 	struct ice_sw_recipe *recp_list;
3925 	struct list_head *rule_head;
3926 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3927 	bool ret = false;
3928 
3929 	recp_list = &pi->hw->switch_info->recp_list[ICE_SW_LKUP_DFLT];
3930 	rule_lock = &recp_list->filt_rule_lock;
3931 	rule_head = &recp_list->filt_rules;
3932 
3933 	mutex_lock(rule_lock);
3934 
3935 	if (rule_exists && !list_empty(rule_head))
3936 		*rule_exists = true;
3937 
3938 	list_for_each_entry(fm_entry, rule_head, list_entry) {
3939 		if (ice_vsi_uses_fltr(fm_entry, vsi_handle)) {
3940 			ret = true;
3941 			break;
3942 		}
3943 	}
3944 
3945 	mutex_unlock(rule_lock);
3946 
3947 	return ret;
3948 }
3949 
3950 /**
3951  * ice_remove_mac - remove a MAC address based filter rule
3952  * @hw: pointer to the hardware structure
3953  * @m_list: list of MAC addresses and forwarding information
3954  *
3955  * This function removes either a MAC filter rule or a specific VSI from a
3956  * VSI list for a multicast MAC address.
3957  *
3958  * Returns -ENOENT if a given entry was not added by ice_add_mac. Caller should
3959  * be aware that this call will only work if all the entries passed into m_list
3960  * were added previously. It will not attempt to do a partial remove of entries
3961  * that were found.
3962  */
3963 int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
3964 {
3965 	struct ice_fltr_list_entry *list_itr, *tmp;
3966 
3967 	if (!m_list)
3968 		return -EINVAL;
3969 
3970 	list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
3971 		enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3972 		u16 vsi_handle;
3973 
3974 		if (l_type != ICE_SW_LKUP_MAC)
3975 			return -EINVAL;
3976 
3977 		vsi_handle = list_itr->fltr_info.vsi_handle;
3978 		if (!ice_is_vsi_valid(hw, vsi_handle))
3979 			return -EINVAL;
3980 
3981 		list_itr->fltr_info.fwd_id.hw_vsi_id =
3982 					ice_get_hw_vsi_num(hw, vsi_handle);
3983 
3984 		list_itr->status = ice_remove_rule_internal(hw,
3985 							    ICE_SW_LKUP_MAC,
3986 							    list_itr);
3987 		if (list_itr->status)
3988 			return list_itr->status;
3989 	}
3990 	return 0;
3991 }
3992 
3993 /**
3994  * ice_remove_vlan - Remove VLAN based filter rule
3995  * @hw: pointer to the hardware structure
3996  * @v_list: list of VLAN entries and forwarding information
3997  */
3998 int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
3999 {
4000 	struct ice_fltr_list_entry *v_list_itr, *tmp;
4001 
4002 	if (!v_list || !hw)
4003 		return -EINVAL;
4004 
4005 	list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
4006 		enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4007 
4008 		if (l_type != ICE_SW_LKUP_VLAN)
4009 			return -EINVAL;
4010 		v_list_itr->status = ice_remove_rule_internal(hw,
4011 							      ICE_SW_LKUP_VLAN,
4012 							      v_list_itr);
4013 		if (v_list_itr->status)
4014 			return v_list_itr->status;
4015 	}
4016 	return 0;
4017 }
4018 
4019 /**
4020  * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
4021  * @hw: pointer to the hardware structure
4022  * @vsi_handle: VSI handle to remove filters from
4023  * @vsi_list_head: pointer to the list to add entry to
4024  * @fi: pointer to fltr_info of filter entry to copy & add
4025  *
4026  * Helper function, used when creating a list of filters to remove from
4027  * a specific VSI. The entry added to vsi_list_head is a COPY of the
4028  * original filter entry, with the exception of fltr_info.fltr_act and
4029  * fltr_info.fwd_id fields. These are set such that later logic can
4030  * extract which VSI to remove the fltr from, and pass on that information.
4031  */
4032 static int
4033 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4034 			       struct list_head *vsi_list_head,
4035 			       struct ice_fltr_info *fi)
4036 {
4037 	struct ice_fltr_list_entry *tmp;
4038 
4039 	/* this memory is freed up in the caller function
4040 	 * once filters for this VSI are removed
4041 	 */
4042 	tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL);
4043 	if (!tmp)
4044 		return -ENOMEM;
4045 
4046 	tmp->fltr_info = *fi;
4047 
4048 	/* Overwrite these fields to indicate which VSI to remove filter from,
4049 	 * so find and remove logic can extract the information from the
4050 	 * list entries. Note that original entries will still have proper
4051 	 * values.
4052 	 */
4053 	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
4054 	tmp->fltr_info.vsi_handle = vsi_handle;
4055 	tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4056 
4057 	list_add(&tmp->list_entry, vsi_list_head);
4058 
4059 	return 0;
4060 }
4061 
4062 /**
4063  * ice_add_to_vsi_fltr_list - Add VSI filters to the list
4064  * @hw: pointer to the hardware structure
4065  * @vsi_handle: VSI handle to remove filters from
4066  * @lkup_list_head: pointer to the list that has certain lookup type filters
4067  * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
4068  *
4069  * Locates all filters in lkup_list_head that are used by the given VSI,
4070  * and adds COPIES of those entries to vsi_list_head (intended to be used
4071  * to remove the listed filters).
4072  * Note that this means all entries in vsi_list_head must be explicitly
4073  * deallocated by the caller when done with list.
4074  */
4075 static int
4076 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4077 			 struct list_head *lkup_list_head,
4078 			 struct list_head *vsi_list_head)
4079 {
4080 	struct ice_fltr_mgmt_list_entry *fm_entry;
4081 	int status = 0;
4082 
4083 	/* check to make sure VSI ID is valid and within boundary */
4084 	if (!ice_is_vsi_valid(hw, vsi_handle))
4085 		return -EINVAL;
4086 
4087 	list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
4088 		if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
4089 			continue;
4090 
4091 		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4092 							vsi_list_head,
4093 							&fm_entry->fltr_info);
4094 		if (status)
4095 			return status;
4096 	}
4097 	return status;
4098 }
4099 
4100 /**
4101  * ice_determine_promisc_mask
4102  * @fi: filter info to parse
4103  *
4104  * Helper function to determine which ICE_PROMISC_ mask corresponds
4105  * to given filter into.
4106  */
4107 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
4108 {
4109 	u16 vid = fi->l_data.mac_vlan.vlan_id;
4110 	u8 *macaddr = fi->l_data.mac.mac_addr;
4111 	bool is_tx_fltr = false;
4112 	u8 promisc_mask = 0;
4113 
4114 	if (fi->flag == ICE_FLTR_TX)
4115 		is_tx_fltr = true;
4116 
4117 	if (is_broadcast_ether_addr(macaddr))
4118 		promisc_mask |= is_tx_fltr ?
4119 			ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
4120 	else if (is_multicast_ether_addr(macaddr))
4121 		promisc_mask |= is_tx_fltr ?
4122 			ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
4123 	else if (is_unicast_ether_addr(macaddr))
4124 		promisc_mask |= is_tx_fltr ?
4125 			ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
4126 	if (vid)
4127 		promisc_mask |= is_tx_fltr ?
4128 			ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
4129 
4130 	return promisc_mask;
4131 }
4132 
4133 /**
4134  * ice_remove_promisc - Remove promisc based filter rules
4135  * @hw: pointer to the hardware structure
4136  * @recp_id: recipe ID for which the rule needs to removed
4137  * @v_list: list of promisc entries
4138  */
4139 static int
4140 ice_remove_promisc(struct ice_hw *hw, u8 recp_id, struct list_head *v_list)
4141 {
4142 	struct ice_fltr_list_entry *v_list_itr, *tmp;
4143 
4144 	list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
4145 		v_list_itr->status =
4146 			ice_remove_rule_internal(hw, recp_id, v_list_itr);
4147 		if (v_list_itr->status)
4148 			return v_list_itr->status;
4149 	}
4150 	return 0;
4151 }
4152 
4153 /**
4154  * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
4155  * @hw: pointer to the hardware structure
4156  * @vsi_handle: VSI handle to clear mode
4157  * @promisc_mask: mask of promiscuous config bits to clear
4158  * @vid: VLAN ID to clear VLAN promiscuous
4159  */
4160 int
4161 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4162 		      u16 vid)
4163 {
4164 	struct ice_switch_info *sw = hw->switch_info;
4165 	struct ice_fltr_list_entry *fm_entry, *tmp;
4166 	struct list_head remove_list_head;
4167 	struct ice_fltr_mgmt_list_entry *itr;
4168 	struct list_head *rule_head;
4169 	struct mutex *rule_lock;	/* Lock to protect filter rule list */
4170 	int status = 0;
4171 	u8 recipe_id;
4172 
4173 	if (!ice_is_vsi_valid(hw, vsi_handle))
4174 		return -EINVAL;
4175 
4176 	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
4177 		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4178 	else
4179 		recipe_id = ICE_SW_LKUP_PROMISC;
4180 
4181 	rule_head = &sw->recp_list[recipe_id].filt_rules;
4182 	rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
4183 
4184 	INIT_LIST_HEAD(&remove_list_head);
4185 
4186 	mutex_lock(rule_lock);
4187 	list_for_each_entry(itr, rule_head, list_entry) {
4188 		struct ice_fltr_info *fltr_info;
4189 		u8 fltr_promisc_mask = 0;
4190 
4191 		if (!ice_vsi_uses_fltr(itr, vsi_handle))
4192 			continue;
4193 		fltr_info = &itr->fltr_info;
4194 
4195 		if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4196 		    vid != fltr_info->l_data.mac_vlan.vlan_id)
4197 			continue;
4198 
4199 		fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4200 
4201 		/* Skip if filter is not completely specified by given mask */
4202 		if (fltr_promisc_mask & ~promisc_mask)
4203 			continue;
4204 
4205 		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4206 							&remove_list_head,
4207 							fltr_info);
4208 		if (status) {
4209 			mutex_unlock(rule_lock);
4210 			goto free_fltr_list;
4211 		}
4212 	}
4213 	mutex_unlock(rule_lock);
4214 
4215 	status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
4216 
4217 free_fltr_list:
4218 	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
4219 		list_del(&fm_entry->list_entry);
4220 		devm_kfree(ice_hw_to_dev(hw), fm_entry);
4221 	}
4222 
4223 	return status;
4224 }
4225 
4226 /**
4227  * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4228  * @hw: pointer to the hardware structure
4229  * @vsi_handle: VSI handle to configure
4230  * @promisc_mask: mask of promiscuous config bits
4231  * @vid: VLAN ID to set VLAN promiscuous
4232  */
4233 int
4234 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
4235 {
4236 	enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4237 	struct ice_fltr_list_entry f_list_entry;
4238 	struct ice_fltr_info new_fltr;
4239 	bool is_tx_fltr;
4240 	int status = 0;
4241 	u16 hw_vsi_id;
4242 	int pkt_type;
4243 	u8 recipe_id;
4244 
4245 	if (!ice_is_vsi_valid(hw, vsi_handle))
4246 		return -EINVAL;
4247 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4248 
4249 	memset(&new_fltr, 0, sizeof(new_fltr));
4250 
4251 	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4252 		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4253 		new_fltr.l_data.mac_vlan.vlan_id = vid;
4254 		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4255 	} else {
4256 		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4257 		recipe_id = ICE_SW_LKUP_PROMISC;
4258 	}
4259 
4260 	/* Separate filters must be set for each direction/packet type
4261 	 * combination, so we will loop over the mask value, store the
4262 	 * individual type, and clear it out in the input mask as it
4263 	 * is found.
4264 	 */
4265 	while (promisc_mask) {
4266 		u8 *mac_addr;
4267 
4268 		pkt_type = 0;
4269 		is_tx_fltr = false;
4270 
4271 		if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4272 			promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4273 			pkt_type = UCAST_FLTR;
4274 		} else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4275 			promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4276 			pkt_type = UCAST_FLTR;
4277 			is_tx_fltr = true;
4278 		} else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4279 			promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4280 			pkt_type = MCAST_FLTR;
4281 		} else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4282 			promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4283 			pkt_type = MCAST_FLTR;
4284 			is_tx_fltr = true;
4285 		} else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4286 			promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4287 			pkt_type = BCAST_FLTR;
4288 		} else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4289 			promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4290 			pkt_type = BCAST_FLTR;
4291 			is_tx_fltr = true;
4292 		}
4293 
4294 		/* Check for VLAN promiscuous flag */
4295 		if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4296 			promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4297 		} else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4298 			promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4299 			is_tx_fltr = true;
4300 		}
4301 
4302 		/* Set filter DA based on packet type */
4303 		mac_addr = new_fltr.l_data.mac.mac_addr;
4304 		if (pkt_type == BCAST_FLTR) {
4305 			eth_broadcast_addr(mac_addr);
4306 		} else if (pkt_type == MCAST_FLTR ||
4307 			   pkt_type == UCAST_FLTR) {
4308 			/* Use the dummy ether header DA */
4309 			ether_addr_copy(mac_addr, dummy_eth_header);
4310 			if (pkt_type == MCAST_FLTR)
4311 				mac_addr[0] |= 0x1;	/* Set multicast bit */
4312 		}
4313 
4314 		/* Need to reset this to zero for all iterations */
4315 		new_fltr.flag = 0;
4316 		if (is_tx_fltr) {
4317 			new_fltr.flag |= ICE_FLTR_TX;
4318 			new_fltr.src = hw_vsi_id;
4319 		} else {
4320 			new_fltr.flag |= ICE_FLTR_RX;
4321 			new_fltr.src = hw->port_info->lport;
4322 		}
4323 
4324 		new_fltr.fltr_act = ICE_FWD_TO_VSI;
4325 		new_fltr.vsi_handle = vsi_handle;
4326 		new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4327 		f_list_entry.fltr_info = new_fltr;
4328 
4329 		status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
4330 		if (status)
4331 			goto set_promisc_exit;
4332 	}
4333 
4334 set_promisc_exit:
4335 	return status;
4336 }
4337 
4338 /**
4339  * ice_set_vlan_vsi_promisc
4340  * @hw: pointer to the hardware structure
4341  * @vsi_handle: VSI handle to configure
4342  * @promisc_mask: mask of promiscuous config bits
4343  * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4344  *
4345  * Configure VSI with all associated VLANs to given promiscuous mode(s)
4346  */
4347 int
4348 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4349 			 bool rm_vlan_promisc)
4350 {
4351 	struct ice_switch_info *sw = hw->switch_info;
4352 	struct ice_fltr_list_entry *list_itr, *tmp;
4353 	struct list_head vsi_list_head;
4354 	struct list_head *vlan_head;
4355 	struct mutex *vlan_lock; /* Lock to protect filter rule list */
4356 	u16 vlan_id;
4357 	int status;
4358 
4359 	INIT_LIST_HEAD(&vsi_list_head);
4360 	vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4361 	vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
4362 	mutex_lock(vlan_lock);
4363 	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4364 					  &vsi_list_head);
4365 	mutex_unlock(vlan_lock);
4366 	if (status)
4367 		goto free_fltr_list;
4368 
4369 	list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
4370 		/* Avoid enabling or disabling VLAN zero twice when in double
4371 		 * VLAN mode
4372 		 */
4373 		if (ice_is_dvm_ena(hw) &&
4374 		    list_itr->fltr_info.l_data.vlan.tpid == 0)
4375 			continue;
4376 
4377 		vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4378 		if (rm_vlan_promisc)
4379 			status = ice_clear_vsi_promisc(hw, vsi_handle,
4380 						       promisc_mask, vlan_id);
4381 		else
4382 			status = ice_set_vsi_promisc(hw, vsi_handle,
4383 						     promisc_mask, vlan_id);
4384 		if (status && status != -EEXIST)
4385 			break;
4386 	}
4387 
4388 free_fltr_list:
4389 	list_for_each_entry_safe(list_itr, tmp, &vsi_list_head, list_entry) {
4390 		list_del(&list_itr->list_entry);
4391 		devm_kfree(ice_hw_to_dev(hw), list_itr);
4392 	}
4393 	return status;
4394 }
4395 
4396 /**
4397  * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4398  * @hw: pointer to the hardware structure
4399  * @vsi_handle: VSI handle to remove filters from
4400  * @lkup: switch rule filter lookup type
4401  */
4402 static void
4403 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4404 			 enum ice_sw_lkup_type lkup)
4405 {
4406 	struct ice_switch_info *sw = hw->switch_info;
4407 	struct ice_fltr_list_entry *fm_entry;
4408 	struct list_head remove_list_head;
4409 	struct list_head *rule_head;
4410 	struct ice_fltr_list_entry *tmp;
4411 	struct mutex *rule_lock;	/* Lock to protect filter rule list */
4412 	int status;
4413 
4414 	INIT_LIST_HEAD(&remove_list_head);
4415 	rule_lock = &sw->recp_list[lkup].filt_rule_lock;
4416 	rule_head = &sw->recp_list[lkup].filt_rules;
4417 	mutex_lock(rule_lock);
4418 	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4419 					  &remove_list_head);
4420 	mutex_unlock(rule_lock);
4421 	if (status)
4422 		goto free_fltr_list;
4423 
4424 	switch (lkup) {
4425 	case ICE_SW_LKUP_MAC:
4426 		ice_remove_mac(hw, &remove_list_head);
4427 		break;
4428 	case ICE_SW_LKUP_VLAN:
4429 		ice_remove_vlan(hw, &remove_list_head);
4430 		break;
4431 	case ICE_SW_LKUP_PROMISC:
4432 	case ICE_SW_LKUP_PROMISC_VLAN:
4433 		ice_remove_promisc(hw, lkup, &remove_list_head);
4434 		break;
4435 	case ICE_SW_LKUP_MAC_VLAN:
4436 	case ICE_SW_LKUP_ETHERTYPE:
4437 	case ICE_SW_LKUP_ETHERTYPE_MAC:
4438 	case ICE_SW_LKUP_DFLT:
4439 	case ICE_SW_LKUP_LAST:
4440 	default:
4441 		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
4442 		break;
4443 	}
4444 
4445 free_fltr_list:
4446 	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
4447 		list_del(&fm_entry->list_entry);
4448 		devm_kfree(ice_hw_to_dev(hw), fm_entry);
4449 	}
4450 }
4451 
4452 /**
4453  * ice_remove_vsi_fltr - Remove all filters for a VSI
4454  * @hw: pointer to the hardware structure
4455  * @vsi_handle: VSI handle to remove filters from
4456  */
4457 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4458 {
4459 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
4460 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
4461 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
4462 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
4463 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
4464 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
4465 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
4466 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
4467 }
4468 
4469 /**
4470  * ice_alloc_res_cntr - allocating resource counter
4471  * @hw: pointer to the hardware structure
4472  * @type: type of resource
4473  * @alloc_shared: if set it is shared else dedicated
4474  * @num_items: number of entries requested for FD resource type
4475  * @counter_id: counter index returned by AQ call
4476  */
4477 int
4478 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4479 		   u16 *counter_id)
4480 {
4481 	struct ice_aqc_alloc_free_res_elem *buf;
4482 	u16 buf_len;
4483 	int status;
4484 
4485 	/* Allocate resource */
4486 	buf_len = struct_size(buf, elem, 1);
4487 	buf = kzalloc(buf_len, GFP_KERNEL);
4488 	if (!buf)
4489 		return -ENOMEM;
4490 
4491 	buf->num_elems = cpu_to_le16(num_items);
4492 	buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
4493 				      ICE_AQC_RES_TYPE_M) | alloc_shared);
4494 
4495 	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4496 				       ice_aqc_opc_alloc_res, NULL);
4497 	if (status)
4498 		goto exit;
4499 
4500 	*counter_id = le16_to_cpu(buf->elem[0].e.sw_resp);
4501 
4502 exit:
4503 	kfree(buf);
4504 	return status;
4505 }
4506 
4507 /**
4508  * ice_free_res_cntr - free resource counter
4509  * @hw: pointer to the hardware structure
4510  * @type: type of resource
4511  * @alloc_shared: if set it is shared else dedicated
4512  * @num_items: number of entries to be freed for FD resource type
4513  * @counter_id: counter ID resource which needs to be freed
4514  */
4515 int
4516 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4517 		  u16 counter_id)
4518 {
4519 	struct ice_aqc_alloc_free_res_elem *buf;
4520 	u16 buf_len;
4521 	int status;
4522 
4523 	/* Free resource */
4524 	buf_len = struct_size(buf, elem, 1);
4525 	buf = kzalloc(buf_len, GFP_KERNEL);
4526 	if (!buf)
4527 		return -ENOMEM;
4528 
4529 	buf->num_elems = cpu_to_le16(num_items);
4530 	buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
4531 				      ICE_AQC_RES_TYPE_M) | alloc_shared);
4532 	buf->elem[0].e.sw_resp = cpu_to_le16(counter_id);
4533 
4534 	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4535 				       ice_aqc_opc_free_res, NULL);
4536 	if (status)
4537 		ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
4538 
4539 	kfree(buf);
4540 	return status;
4541 }
4542 
/* Expands to one ice_prot_ext_tbl_entry initializer: the protocol type plus
 * the byte offsets (one per extractable 16-bit word) within that protocol's
 * header.
 */
#define ICE_PROTOCOL_ENTRY(id, ...) {		\
	.prot_type	= id,			\
	.offs		= {__VA_ARGS__},	\
}

/* This is mapping table entry that maps every word within a given protocol
 * structure to the real byte offset as per the specification of that
 * protocol header.
 * for example dst address is 3 words in ethertype header and corresponding
 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
 * matching entry describing its field. This needs to be updated if new
 * structure is added to that union.
 */
static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
	ICE_PROTOCOL_ENTRY(ICE_MAC_OFOS, 0, 2, 4, 6, 8, 10, 12),
	ICE_PROTOCOL_ENTRY(ICE_MAC_IL, 0, 2, 4, 6, 8, 10, 12),
	ICE_PROTOCOL_ENTRY(ICE_ETYPE_OL, 0),
	ICE_PROTOCOL_ENTRY(ICE_ETYPE_IL, 0),
	ICE_PROTOCOL_ENTRY(ICE_VLAN_OFOS, 2, 0),
	ICE_PROTOCOL_ENTRY(ICE_IPV4_OFOS, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18),
	ICE_PROTOCOL_ENTRY(ICE_IPV4_IL,	0, 2, 4, 6, 8, 10, 12, 14, 16, 18),
	ICE_PROTOCOL_ENTRY(ICE_IPV6_OFOS, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18,
			   20, 22, 24, 26, 28, 30, 32, 34, 36, 38),
	ICE_PROTOCOL_ENTRY(ICE_IPV6_IL, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20,
			   22, 24, 26, 28, 30, 32, 34, 36, 38),
	ICE_PROTOCOL_ENTRY(ICE_TCP_IL, 0, 2),
	ICE_PROTOCOL_ENTRY(ICE_UDP_OF, 0, 2),
	ICE_PROTOCOL_ENTRY(ICE_UDP_ILOS, 0, 2),
	ICE_PROTOCOL_ENTRY(ICE_VXLAN, 8, 10, 12, 14),
	ICE_PROTOCOL_ENTRY(ICE_GENEVE, 8, 10, 12, 14),
	ICE_PROTOCOL_ENTRY(ICE_NVGRE, 0, 2, 4, 6),
	ICE_PROTOCOL_ENTRY(ICE_GTP, 8, 10, 12, 14, 16, 18, 20, 22),
	ICE_PROTOCOL_ENTRY(ICE_GTP_NO_PAY, 8, 10, 12, 14),
	ICE_PROTOCOL_ENTRY(ICE_PPPOE, 0, 2, 4, 6),
	ICE_PROTOCOL_ENTRY(ICE_L2TPV3, 0, 2, 4, 6, 8, 10),
	ICE_PROTOCOL_ENTRY(ICE_VLAN_EX, 2, 0),
	ICE_PROTOCOL_ENTRY(ICE_VLAN_IN, 2, 0),
	ICE_PROTOCOL_ENTRY(ICE_HW_METADATA,
			   ICE_SOURCE_PORT_MDID_OFFSET,
			   ICE_PTYPE_MDID_OFFSET,
			   ICE_PACKET_LENGTH_MDID_OFFSET,
			   ICE_SOURCE_VSI_MDID_OFFSET,
			   ICE_PKT_VLAN_MDID_OFFSET,
			   ICE_PKT_TUNNEL_MDID_OFFSET,
			   ICE_PKT_TCP_MDID_OFFSET,
			   ICE_PKT_ERROR_MDID_OFFSET),
};
4591 
/* Mapping from software protocol type to the hardware protocol ID used when
 * programming recipes.  Several UDP-based tunnel types (VXLAN, GENEVE, GTP)
 * deliberately share the outer-UDP hardware ID.  Non-const because the
 * ICE_VLAN_OFOS entry is rewritten by ice_change_proto_id_to_dvm() when
 * double VLAN mode is enabled.
 */
static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		ICE_MAC_OFOS_HW },
	{ ICE_MAC_IL,		ICE_MAC_IL_HW },
	{ ICE_ETYPE_OL,		ICE_ETYPE_OL_HW },
	{ ICE_ETYPE_IL,		ICE_ETYPE_IL_HW },
	{ ICE_VLAN_OFOS,	ICE_VLAN_OL_HW },
	{ ICE_IPV4_OFOS,	ICE_IPV4_OFOS_HW },
	{ ICE_IPV4_IL,		ICE_IPV4_IL_HW },
	{ ICE_IPV6_OFOS,	ICE_IPV6_OFOS_HW },
	{ ICE_IPV6_IL,		ICE_IPV6_IL_HW },
	{ ICE_TCP_IL,		ICE_TCP_IL_HW },
	{ ICE_UDP_OF,		ICE_UDP_OF_HW },
	{ ICE_UDP_ILOS,		ICE_UDP_ILOS_HW },
	{ ICE_VXLAN,		ICE_UDP_OF_HW },
	{ ICE_GENEVE,		ICE_UDP_OF_HW },
	{ ICE_NVGRE,		ICE_GRE_OF_HW },
	{ ICE_GTP,		ICE_UDP_OF_HW },
	{ ICE_GTP_NO_PAY,	ICE_UDP_ILOS_HW },
	{ ICE_PPPOE,		ICE_PPPOE_HW },
	{ ICE_L2TPV3,		ICE_L2TPV3_HW },
	{ ICE_VLAN_EX,          ICE_VLAN_OF_HW },
	{ ICE_VLAN_IN,          ICE_VLAN_OL_HW },
	{ ICE_HW_METADATA,      ICE_META_DATA_ID_HW },
};
4616 
4617 /**
4618  * ice_find_recp - find a recipe
4619  * @hw: pointer to the hardware structure
4620  * @lkup_exts: extension sequence to match
4621  * @tun_type: type of recipe tunnel
4622  *
4623  * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
4624  */
4625 static u16
4626 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
4627 	      enum ice_sw_tunnel_type tun_type)
4628 {
4629 	bool refresh_required = true;
4630 	struct ice_sw_recipe *recp;
4631 	u8 i;
4632 
4633 	/* Walk through existing recipes to find a match */
4634 	recp = hw->switch_info->recp_list;
4635 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4636 		/* If recipe was not created for this ID, in SW bookkeeping,
4637 		 * check if FW has an entry for this recipe. If the FW has an
4638 		 * entry update it in our SW bookkeeping and continue with the
4639 		 * matching.
4640 		 */
4641 		if (!recp[i].recp_created)
4642 			if (ice_get_recp_frm_fw(hw,
4643 						hw->switch_info->recp_list, i,
4644 						&refresh_required))
4645 				continue;
4646 
4647 		/* Skip inverse action recipes */
4648 		if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
4649 		    ICE_AQ_RECIPE_ACT_INV_ACT)
4650 			continue;
4651 
4652 		/* if number of words we are looking for match */
4653 		if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
4654 			struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
4655 			struct ice_fv_word *be = lkup_exts->fv_words;
4656 			u16 *cr = recp[i].lkup_exts.field_mask;
4657 			u16 *de = lkup_exts->field_mask;
4658 			bool found = true;
4659 			u8 pe, qr;
4660 
4661 			/* ar, cr, and qr are related to the recipe words, while
4662 			 * be, de, and pe are related to the lookup words
4663 			 */
4664 			for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
4665 				for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
4666 				     qr++) {
4667 					if (ar[qr].off == be[pe].off &&
4668 					    ar[qr].prot_id == be[pe].prot_id &&
4669 					    cr[qr] == de[pe])
4670 						/* Found the "pe"th word in the
4671 						 * given recipe
4672 						 */
4673 						break;
4674 				}
4675 				/* After walking through all the words in the
4676 				 * "i"th recipe if "p"th word was not found then
4677 				 * this recipe is not what we are looking for.
4678 				 * So break out from this loop and try the next
4679 				 * recipe
4680 				 */
4681 				if (qr >= recp[i].lkup_exts.n_val_words) {
4682 					found = false;
4683 					break;
4684 				}
4685 			}
4686 			/* If for "i"th recipe the found was never set to false
4687 			 * then it means we found our match
4688 			 * Also tun type of recipe needs to be checked
4689 			 */
4690 			if (found && recp[i].tun_type == tun_type)
4691 				return i; /* Return the recipe ID */
4692 		}
4693 	}
4694 	return ICE_MAX_NUM_RECIPES;
4695 }
4696 
4697 /**
4698  * ice_change_proto_id_to_dvm - change proto id in prot_id_tbl
4699  *
4700  * As protocol id for outer vlan is different in dvm and svm, if dvm is
4701  * supported protocol array record for outer vlan has to be modified to
4702  * reflect the value proper for DVM.
4703  */
4704 void ice_change_proto_id_to_dvm(void)
4705 {
4706 	u8 i;
4707 
4708 	for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
4709 		if (ice_prot_id_tbl[i].type == ICE_VLAN_OFOS &&
4710 		    ice_prot_id_tbl[i].protocol_id != ICE_VLAN_OF_HW)
4711 			ice_prot_id_tbl[i].protocol_id = ICE_VLAN_OF_HW;
4712 }
4713 
4714 /**
4715  * ice_prot_type_to_id - get protocol ID from protocol type
4716  * @type: protocol type
4717  * @id: pointer to variable that will receive the ID
4718  *
4719  * Returns true if found, false otherwise
4720  */
4721 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
4722 {
4723 	u8 i;
4724 
4725 	for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
4726 		if (ice_prot_id_tbl[i].type == type) {
4727 			*id = ice_prot_id_tbl[i].protocol_id;
4728 			return true;
4729 		}
4730 	return false;
4731 }
4732 
4733 /**
4734  * ice_fill_valid_words - count valid words
4735  * @rule: advanced rule with lookup information
4736  * @lkup_exts: byte offset extractions of the words that are valid
4737  *
4738  * calculate valid words in a lookup rule using mask value
4739  */
4740 static u8
4741 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
4742 		     struct ice_prot_lkup_ext *lkup_exts)
4743 {
4744 	u8 j, word, prot_id, ret_val;
4745 
4746 	if (!ice_prot_type_to_id(rule->type, &prot_id))
4747 		return 0;
4748 
4749 	word = lkup_exts->n_val_words;
4750 
4751 	for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
4752 		if (((u16 *)&rule->m_u)[j] &&
4753 		    rule->type < ARRAY_SIZE(ice_prot_ext)) {
4754 			/* No more space to accommodate */
4755 			if (word >= ICE_MAX_CHAIN_WORDS)
4756 				return 0;
4757 			lkup_exts->fv_words[word].off =
4758 				ice_prot_ext[rule->type].offs[j];
4759 			lkup_exts->fv_words[word].prot_id =
4760 				ice_prot_id_tbl[rule->type].protocol_id;
4761 			lkup_exts->field_mask[word] =
4762 				be16_to_cpu(((__force __be16 *)&rule->m_u)[j]);
4763 			word++;
4764 		}
4765 
4766 	ret_val = word - lkup_exts->n_val_words;
4767 	lkup_exts->n_val_words = word;
4768 
4769 	return ret_val;
4770 }
4771 
4772 /**
4773  * ice_create_first_fit_recp_def - Create a recipe grouping
4774  * @hw: pointer to the hardware structure
4775  * @lkup_exts: an array of protocol header extractions
4776  * @rg_list: pointer to a list that stores new recipe groups
4777  * @recp_cnt: pointer to a variable that stores returned number of recipe groups
4778  *
4779  * Using first fit algorithm, take all the words that are still not done
4780  * and start grouping them in 4-word groups. Each group makes up one
4781  * recipe.
4782  */
4783 static int
4784 ice_create_first_fit_recp_def(struct ice_hw *hw,
4785 			      struct ice_prot_lkup_ext *lkup_exts,
4786 			      struct list_head *rg_list,
4787 			      u8 *recp_cnt)
4788 {
4789 	struct ice_pref_recipe_group *grp = NULL;
4790 	u8 j;
4791 
4792 	*recp_cnt = 0;
4793 
4794 	/* Walk through every word in the rule to check if it is not done. If so
4795 	 * then this word needs to be part of a new recipe.
4796 	 */
4797 	for (j = 0; j < lkup_exts->n_val_words; j++)
4798 		if (!test_bit(j, lkup_exts->done)) {
4799 			if (!grp ||
4800 			    grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
4801 				struct ice_recp_grp_entry *entry;
4802 
4803 				entry = devm_kzalloc(ice_hw_to_dev(hw),
4804 						     sizeof(*entry),
4805 						     GFP_KERNEL);
4806 				if (!entry)
4807 					return -ENOMEM;
4808 				list_add(&entry->l_entry, rg_list);
4809 				grp = &entry->r_group;
4810 				(*recp_cnt)++;
4811 			}
4812 
4813 			grp->pairs[grp->n_val_pairs].prot_id =
4814 				lkup_exts->fv_words[j].prot_id;
4815 			grp->pairs[grp->n_val_pairs].off =
4816 				lkup_exts->fv_words[j].off;
4817 			grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
4818 			grp->n_val_pairs++;
4819 		}
4820 
4821 	return 0;
4822 }
4823 
4824 /**
4825  * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
4826  * @hw: pointer to the hardware structure
4827  * @fv_list: field vector with the extraction sequence information
4828  * @rg_list: recipe groupings with protocol-offset pairs
4829  *
4830  * Helper function to fill in the field vector indices for protocol-offset
4831  * pairs. These indexes are then ultimately programmed into a recipe.
4832  */
4833 static int
4834 ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list,
4835 		       struct list_head *rg_list)
4836 {
4837 	struct ice_sw_fv_list_entry *fv;
4838 	struct ice_recp_grp_entry *rg;
4839 	struct ice_fv_word *fv_ext;
4840 
4841 	if (list_empty(fv_list))
4842 		return 0;
4843 
4844 	fv = list_first_entry(fv_list, struct ice_sw_fv_list_entry,
4845 			      list_entry);
4846 	fv_ext = fv->fv_ptr->ew;
4847 
4848 	list_for_each_entry(rg, rg_list, l_entry) {
4849 		u8 i;
4850 
4851 		for (i = 0; i < rg->r_group.n_val_pairs; i++) {
4852 			struct ice_fv_word *pr;
4853 			bool found = false;
4854 			u16 mask;
4855 			u8 j;
4856 
4857 			pr = &rg->r_group.pairs[i];
4858 			mask = rg->r_group.mask[i];
4859 
4860 			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
4861 				if (fv_ext[j].prot_id == pr->prot_id &&
4862 				    fv_ext[j].off == pr->off) {
4863 					found = true;
4864 
4865 					/* Store index of field vector */
4866 					rg->fv_idx[i] = j;
4867 					rg->fv_mask[i] = mask;
4868 					break;
4869 				}
4870 
4871 			/* Protocol/offset could not be found, caller gave an
4872 			 * invalid pair
4873 			 */
4874 			if (!found)
4875 				return -EINVAL;
4876 		}
4877 	}
4878 
4879 	return 0;
4880 }
4881 
4882 /**
4883  * ice_find_free_recp_res_idx - find free result indexes for recipe
4884  * @hw: pointer to hardware structure
4885  * @profiles: bitmap of profiles that will be associated with the new recipe
4886  * @free_idx: pointer to variable to receive the free index bitmap
4887  *
4888  * The algorithm used here is:
4889  *	1. When creating a new recipe, create a set P which contains all
4890  *	   Profiles that will be associated with our new recipe
4891  *
4892  *	2. For each Profile p in set P:
4893  *	    a. Add all recipes associated with Profile p into set R
4894  *	    b. Optional : PossibleIndexes &= profile[p].possibleIndexes
4895  *		[initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
4896  *		i. Or just assume they all have the same possible indexes:
4897  *			44, 45, 46, 47
4898  *			i.e., PossibleIndexes = 0x0000F00000000000
4899  *
4900  *	3. For each Recipe r in set R:
4901  *	    a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
4902  *	    b. FreeIndexes = UsedIndexes ^ PossibleIndexes
4903  *
4904  *	FreeIndexes will contain the bits indicating the indexes free for use,
4905  *      then the code needs to update the recipe[r].used_result_idx_bits to
4906  *      indicate which indexes were selected for use by this recipe.
4907  */
4908 static u16
4909 ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
4910 			   unsigned long *free_idx)
4911 {
4912 	DECLARE_BITMAP(possible_idx, ICE_MAX_FV_WORDS);
4913 	DECLARE_BITMAP(recipes, ICE_MAX_NUM_RECIPES);
4914 	DECLARE_BITMAP(used_idx, ICE_MAX_FV_WORDS);
4915 	u16 bit;
4916 
4917 	bitmap_zero(recipes, ICE_MAX_NUM_RECIPES);
4918 	bitmap_zero(used_idx, ICE_MAX_FV_WORDS);
4919 
4920 	bitmap_fill(possible_idx, ICE_MAX_FV_WORDS);
4921 
4922 	/* For each profile we are going to associate the recipe with, add the
4923 	 * recipes that are associated with that profile. This will give us
4924 	 * the set of recipes that our recipe may collide with. Also, determine
4925 	 * what possible result indexes are usable given this set of profiles.
4926 	 */
4927 	for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
4928 		bitmap_or(recipes, recipes, profile_to_recipe[bit],
4929 			  ICE_MAX_NUM_RECIPES);
4930 		bitmap_and(possible_idx, possible_idx,
4931 			   hw->switch_info->prof_res_bm[bit],
4932 			   ICE_MAX_FV_WORDS);
4933 	}
4934 
4935 	/* For each recipe that our new recipe may collide with, determine
4936 	 * which indexes have been used.
4937 	 */
4938 	for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
4939 		bitmap_or(used_idx, used_idx,
4940 			  hw->switch_info->recp_list[bit].res_idxs,
4941 			  ICE_MAX_FV_WORDS);
4942 
4943 	bitmap_xor(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
4944 
4945 	/* return number of free indexes */
4946 	return (u16)bitmap_weight(free_idx, ICE_MAX_FV_WORDS);
4947 }
4948 
4949 /**
4950  * ice_add_sw_recipe - function to call AQ calls to create switch recipe
4951  * @hw: pointer to hardware structure
4952  * @rm: recipe management list entry
4953  * @profiles: bitmap of profiles that will be associated.
4954  */
4955 static int
4956 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
4957 		  unsigned long *profiles)
4958 {
4959 	DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS);
4960 	struct ice_aqc_recipe_data_elem *tmp;
4961 	struct ice_aqc_recipe_data_elem *buf;
4962 	struct ice_recp_grp_entry *entry;
4963 	u16 free_res_idx;
4964 	u16 recipe_count;
4965 	u8 chain_idx;
4966 	u8 recps = 0;
4967 	int status;
4968 
4969 	/* When more than one recipe are required, another recipe is needed to
4970 	 * chain them together. Matching a tunnel metadata ID takes up one of
4971 	 * the match fields in the chaining recipe reducing the number of
4972 	 * chained recipes by one.
4973 	 */
4974 	 /* check number of free result indices */
4975 	bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS);
4976 	free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
4977 
4978 	ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
4979 		  free_res_idx, rm->n_grp_count);
4980 
4981 	if (rm->n_grp_count > 1) {
4982 		if (rm->n_grp_count > free_res_idx)
4983 			return -ENOSPC;
4984 
4985 		rm->n_grp_count++;
4986 	}
4987 
4988 	if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
4989 		return -ENOSPC;
4990 
4991 	tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
4992 	if (!tmp)
4993 		return -ENOMEM;
4994 
4995 	buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf),
4996 			   GFP_KERNEL);
4997 	if (!buf) {
4998 		status = -ENOMEM;
4999 		goto err_mem;
5000 	}
5001 
5002 	bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
5003 	recipe_count = ICE_MAX_NUM_RECIPES;
5004 	status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
5005 				   NULL);
5006 	if (status || recipe_count == 0)
5007 		goto err_unroll;
5008 
5009 	/* Allocate the recipe resources, and configure them according to the
5010 	 * match fields from protocol headers and extracted field vectors.
5011 	 */
5012 	chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
5013 	list_for_each_entry(entry, &rm->rg_list, l_entry) {
5014 		u8 i;
5015 
5016 		status = ice_alloc_recipe(hw, &entry->rid);
5017 		if (status)
5018 			goto err_unroll;
5019 
5020 		/* Clear the result index of the located recipe, as this will be
5021 		 * updated, if needed, later in the recipe creation process.
5022 		 */
5023 		tmp[0].content.result_indx = 0;
5024 
5025 		buf[recps] = tmp[0];
5026 		buf[recps].recipe_indx = (u8)entry->rid;
5027 		/* if the recipe is a non-root recipe RID should be programmed
5028 		 * as 0 for the rules to be applied correctly.
5029 		 */
5030 		buf[recps].content.rid = 0;
5031 		memset(&buf[recps].content.lkup_indx, 0,
5032 		       sizeof(buf[recps].content.lkup_indx));
5033 
5034 		/* All recipes use look-up index 0 to match switch ID. */
5035 		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5036 		buf[recps].content.mask[0] =
5037 			cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
5038 		/* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
5039 		 * to be 0
5040 		 */
5041 		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5042 			buf[recps].content.lkup_indx[i] = 0x80;
5043 			buf[recps].content.mask[i] = 0;
5044 		}
5045 
5046 		for (i = 0; i < entry->r_group.n_val_pairs; i++) {
5047 			buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
5048 			buf[recps].content.mask[i + 1] =
5049 				cpu_to_le16(entry->fv_mask[i]);
5050 		}
5051 
5052 		if (rm->n_grp_count > 1) {
5053 			/* Checks to see if there really is a valid result index
5054 			 * that can be used.
5055 			 */
5056 			if (chain_idx >= ICE_MAX_FV_WORDS) {
5057 				ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
5058 				status = -ENOSPC;
5059 				goto err_unroll;
5060 			}
5061 
5062 			entry->chain_idx = chain_idx;
5063 			buf[recps].content.result_indx =
5064 				ICE_AQ_RECIPE_RESULT_EN |
5065 				((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
5066 				 ICE_AQ_RECIPE_RESULT_DATA_M);
5067 			clear_bit(chain_idx, result_idx_bm);
5068 			chain_idx = find_first_bit(result_idx_bm,
5069 						   ICE_MAX_FV_WORDS);
5070 		}
5071 
5072 		/* fill recipe dependencies */
5073 		bitmap_zero((unsigned long *)buf[recps].recipe_bitmap,
5074 			    ICE_MAX_NUM_RECIPES);
5075 		set_bit(buf[recps].recipe_indx,
5076 			(unsigned long *)buf[recps].recipe_bitmap);
5077 		buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5078 		recps++;
5079 	}
5080 
5081 	if (rm->n_grp_count == 1) {
5082 		rm->root_rid = buf[0].recipe_indx;
5083 		set_bit(buf[0].recipe_indx, rm->r_bitmap);
5084 		buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
5085 		if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
5086 			memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
5087 			       sizeof(buf[0].recipe_bitmap));
5088 		} else {
5089 			status = -EINVAL;
5090 			goto err_unroll;
5091 		}
5092 		/* Applicable only for ROOT_RECIPE, set the fwd_priority for
5093 		 * the recipe which is getting created if specified
5094 		 * by user. Usually any advanced switch filter, which results
5095 		 * into new extraction sequence, ended up creating a new recipe
5096 		 * of type ROOT and usually recipes are associated with profiles
5097 		 * Switch rule referreing newly created recipe, needs to have
5098 		 * either/or 'fwd' or 'join' priority, otherwise switch rule
5099 		 * evaluation will not happen correctly. In other words, if
5100 		 * switch rule to be evaluated on priority basis, then recipe
5101 		 * needs to have priority, otherwise it will be evaluated last.
5102 		 */
5103 		buf[0].content.act_ctrl_fwd_priority = rm->priority;
5104 	} else {
5105 		struct ice_recp_grp_entry *last_chain_entry;
5106 		u16 rid, i;
5107 
5108 		/* Allocate the last recipe that will chain the outcomes of the
5109 		 * other recipes together
5110 		 */
5111 		status = ice_alloc_recipe(hw, &rid);
5112 		if (status)
5113 			goto err_unroll;
5114 
5115 		buf[recps].recipe_indx = (u8)rid;
5116 		buf[recps].content.rid = (u8)rid;
5117 		buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
5118 		/* the new entry created should also be part of rg_list to
5119 		 * make sure we have complete recipe
5120 		 */
5121 		last_chain_entry = devm_kzalloc(ice_hw_to_dev(hw),
5122 						sizeof(*last_chain_entry),
5123 						GFP_KERNEL);
5124 		if (!last_chain_entry) {
5125 			status = -ENOMEM;
5126 			goto err_unroll;
5127 		}
5128 		last_chain_entry->rid = rid;
5129 		memset(&buf[recps].content.lkup_indx, 0,
5130 		       sizeof(buf[recps].content.lkup_indx));
5131 		/* All recipes use look-up index 0 to match switch ID. */
5132 		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5133 		buf[recps].content.mask[0] =
5134 			cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
5135 		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5136 			buf[recps].content.lkup_indx[i] =
5137 				ICE_AQ_RECIPE_LKUP_IGNORE;
5138 			buf[recps].content.mask[i] = 0;
5139 		}
5140 
5141 		i = 1;
5142 		/* update r_bitmap with the recp that is used for chaining */
5143 		set_bit(rid, rm->r_bitmap);
5144 		/* this is the recipe that chains all the other recipes so it
5145 		 * should not have a chaining ID to indicate the same
5146 		 */
5147 		last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
5148 		list_for_each_entry(entry, &rm->rg_list, l_entry) {
5149 			last_chain_entry->fv_idx[i] = entry->chain_idx;
5150 			buf[recps].content.lkup_indx[i] = entry->chain_idx;
5151 			buf[recps].content.mask[i++] = cpu_to_le16(0xFFFF);
5152 			set_bit(entry->rid, rm->r_bitmap);
5153 		}
5154 		list_add(&last_chain_entry->l_entry, &rm->rg_list);
5155 		if (sizeof(buf[recps].recipe_bitmap) >=
5156 		    sizeof(rm->r_bitmap)) {
5157 			memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
5158 			       sizeof(buf[recps].recipe_bitmap));
5159 		} else {
5160 			status = -EINVAL;
5161 			goto err_unroll;
5162 		}
5163 		buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5164 
5165 		recps++;
5166 		rm->root_rid = (u8)rid;
5167 	}
5168 	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5169 	if (status)
5170 		goto err_unroll;
5171 
5172 	status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
5173 	ice_release_change_lock(hw);
5174 	if (status)
5175 		goto err_unroll;
5176 
5177 	/* Every recipe that just got created add it to the recipe
5178 	 * book keeping list
5179 	 */
5180 	list_for_each_entry(entry, &rm->rg_list, l_entry) {
5181 		struct ice_switch_info *sw = hw->switch_info;
5182 		bool is_root, idx_found = false;
5183 		struct ice_sw_recipe *recp;
5184 		u16 idx, buf_idx = 0;
5185 
5186 		/* find buffer index for copying some data */
5187 		for (idx = 0; idx < rm->n_grp_count; idx++)
5188 			if (buf[idx].recipe_indx == entry->rid) {
5189 				buf_idx = idx;
5190 				idx_found = true;
5191 			}
5192 
5193 		if (!idx_found) {
5194 			status = -EIO;
5195 			goto err_unroll;
5196 		}
5197 
5198 		recp = &sw->recp_list[entry->rid];
5199 		is_root = (rm->root_rid == entry->rid);
5200 		recp->is_root = is_root;
5201 
5202 		recp->root_rid = entry->rid;
5203 		recp->big_recp = (is_root && rm->n_grp_count > 1);
5204 
5205 		memcpy(&recp->ext_words, entry->r_group.pairs,
5206 		       entry->r_group.n_val_pairs * sizeof(struct ice_fv_word));
5207 
5208 		memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
5209 		       sizeof(recp->r_bitmap));
5210 
5211 		/* Copy non-result fv index values and masks to recipe. This
5212 		 * call will also update the result recipe bitmask.
5213 		 */
5214 		ice_collect_result_idx(&buf[buf_idx], recp);
5215 
5216 		/* for non-root recipes, also copy to the root, this allows
5217 		 * easier matching of a complete chained recipe
5218 		 */
5219 		if (!is_root)
5220 			ice_collect_result_idx(&buf[buf_idx],
5221 					       &sw->recp_list[rm->root_rid]);
5222 
5223 		recp->n_ext_words = entry->r_group.n_val_pairs;
5224 		recp->chain_idx = entry->chain_idx;
5225 		recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
5226 		recp->n_grp_count = rm->n_grp_count;
5227 		recp->tun_type = rm->tun_type;
5228 		recp->recp_created = true;
5229 	}
5230 	rm->root_buf = buf;
5231 	kfree(tmp);
5232 	return status;
5233 
5234 err_unroll:
5235 err_mem:
5236 	kfree(tmp);
5237 	devm_kfree(ice_hw_to_dev(hw), buf);
5238 	return status;
5239 }
5240 
5241 /**
5242  * ice_create_recipe_group - creates recipe group
5243  * @hw: pointer to hardware structure
5244  * @rm: recipe management list entry
5245  * @lkup_exts: lookup elements
5246  */
5247 static int
5248 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5249 			struct ice_prot_lkup_ext *lkup_exts)
5250 {
5251 	u8 recp_count = 0;
5252 	int status;
5253 
5254 	rm->n_grp_count = 0;
5255 
5256 	/* Create recipes for words that are marked not done by packing them
5257 	 * as best fit.
5258 	 */
5259 	status = ice_create_first_fit_recp_def(hw, lkup_exts,
5260 					       &rm->rg_list, &recp_count);
5261 	if (!status) {
5262 		rm->n_grp_count += recp_count;
5263 		rm->n_ext_words = lkup_exts->n_val_words;
5264 		memcpy(&rm->ext_words, lkup_exts->fv_words,
5265 		       sizeof(rm->ext_words));
5266 		memcpy(rm->word_masks, lkup_exts->field_mask,
5267 		       sizeof(rm->word_masks));
5268 	}
5269 
5270 	return status;
5271 }
5272 
5273 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
5274  * @hw: pointer to hardware structure
5275  * @rinfo: other information regarding the rule e.g. priority and action info
5276  * @bm: pointer to memory for returning the bitmap of field vectors
5277  */
5278 static void
5279 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
5280 			 unsigned long *bm)
5281 {
5282 	enum ice_prof_type prof_type;
5283 
5284 	bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
5285 
5286 	switch (rinfo->tun_type) {
5287 	case ICE_NON_TUN:
5288 		prof_type = ICE_PROF_NON_TUN;
5289 		break;
5290 	case ICE_ALL_TUNNELS:
5291 		prof_type = ICE_PROF_TUN_ALL;
5292 		break;
5293 	case ICE_SW_TUN_GENEVE:
5294 	case ICE_SW_TUN_VXLAN:
5295 		prof_type = ICE_PROF_TUN_UDP;
5296 		break;
5297 	case ICE_SW_TUN_NVGRE:
5298 		prof_type = ICE_PROF_TUN_GRE;
5299 		break;
5300 	case ICE_SW_TUN_GTPU:
5301 		prof_type = ICE_PROF_TUN_GTPU;
5302 		break;
5303 	case ICE_SW_TUN_GTPC:
5304 		prof_type = ICE_PROF_TUN_GTPC;
5305 		break;
5306 	case ICE_SW_TUN_AND_NON_TUN:
5307 	default:
5308 		prof_type = ICE_PROF_ALL;
5309 		break;
5310 	}
5311 
5312 	ice_get_sw_fv_bitmap(hw, prof_type, bm);
5313 }
5314 
5315 /**
5316  * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
5317  * @hw: pointer to hardware structure
5318  * @lkups: lookup elements or match criteria for the advanced recipe, one
5319  *  structure per protocol header
5320  * @lkups_cnt: number of protocols
5321  * @rinfo: other information regarding the rule e.g. priority and action info
5322  * @rid: return the recipe ID of the recipe created
5323  */
5324 static int
5325 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5326 		   u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
5327 {
5328 	DECLARE_BITMAP(fv_bitmap, ICE_MAX_NUM_PROFILES);
5329 	DECLARE_BITMAP(profiles, ICE_MAX_NUM_PROFILES);
5330 	struct ice_prot_lkup_ext *lkup_exts;
5331 	struct ice_recp_grp_entry *r_entry;
5332 	struct ice_sw_fv_list_entry *fvit;
5333 	struct ice_recp_grp_entry *r_tmp;
5334 	struct ice_sw_fv_list_entry *tmp;
5335 	struct ice_sw_recipe *rm;
5336 	int status = 0;
5337 	u8 i;
5338 
5339 	if (!lkups_cnt)
5340 		return -EINVAL;
5341 
5342 	lkup_exts = kzalloc(sizeof(*lkup_exts), GFP_KERNEL);
5343 	if (!lkup_exts)
5344 		return -ENOMEM;
5345 
5346 	/* Determine the number of words to be matched and if it exceeds a
5347 	 * recipe's restrictions
5348 	 */
5349 	for (i = 0; i < lkups_cnt; i++) {
5350 		u16 count;
5351 
5352 		if (lkups[i].type >= ICE_PROTOCOL_LAST) {
5353 			status = -EIO;
5354 			goto err_free_lkup_exts;
5355 		}
5356 
5357 		count = ice_fill_valid_words(&lkups[i], lkup_exts);
5358 		if (!count) {
5359 			status = -EIO;
5360 			goto err_free_lkup_exts;
5361 		}
5362 	}
5363 
5364 	rm = kzalloc(sizeof(*rm), GFP_KERNEL);
5365 	if (!rm) {
5366 		status = -ENOMEM;
5367 		goto err_free_lkup_exts;
5368 	}
5369 
5370 	/* Get field vectors that contain fields extracted from all the protocol
5371 	 * headers being programmed.
5372 	 */
5373 	INIT_LIST_HEAD(&rm->fv_list);
5374 	INIT_LIST_HEAD(&rm->rg_list);
5375 
5376 	/* Get bitmap of field vectors (profiles) that are compatible with the
5377 	 * rule request; only these will be searched in the subsequent call to
5378 	 * ice_get_sw_fv_list.
5379 	 */
5380 	ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
5381 
5382 	status = ice_get_sw_fv_list(hw, lkup_exts, fv_bitmap, &rm->fv_list);
5383 	if (status)
5384 		goto err_unroll;
5385 
5386 	/* Group match words into recipes using preferred recipe grouping
5387 	 * criteria.
5388 	 */
5389 	status = ice_create_recipe_group(hw, rm, lkup_exts);
5390 	if (status)
5391 		goto err_unroll;
5392 
5393 	/* set the recipe priority if specified */
5394 	rm->priority = (u8)rinfo->priority;
5395 
5396 	/* Find offsets from the field vector. Pick the first one for all the
5397 	 * recipes.
5398 	 */
5399 	status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
5400 	if (status)
5401 		goto err_unroll;
5402 
5403 	/* get bitmap of all profiles the recipe will be associated with */
5404 	bitmap_zero(profiles, ICE_MAX_NUM_PROFILES);
5405 	list_for_each_entry(fvit, &rm->fv_list, list_entry) {
5406 		ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
5407 		set_bit((u16)fvit->profile_id, profiles);
5408 	}
5409 
5410 	/* Look for a recipe which matches our requested fv / mask list */
5411 	*rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
5412 	if (*rid < ICE_MAX_NUM_RECIPES)
5413 		/* Success if found a recipe that match the existing criteria */
5414 		goto err_unroll;
5415 
5416 	rm->tun_type = rinfo->tun_type;
5417 	/* Recipe we need does not exist, add a recipe */
5418 	status = ice_add_sw_recipe(hw, rm, profiles);
5419 	if (status)
5420 		goto err_unroll;
5421 
5422 	/* Associate all the recipes created with all the profiles in the
5423 	 * common field vector.
5424 	 */
5425 	list_for_each_entry(fvit, &rm->fv_list, list_entry) {
5426 		DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
5427 		u16 j;
5428 
5429 		status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
5430 						      (u8 *)r_bitmap, NULL);
5431 		if (status)
5432 			goto err_unroll;
5433 
5434 		bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap,
5435 			  ICE_MAX_NUM_RECIPES);
5436 		status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5437 		if (status)
5438 			goto err_unroll;
5439 
5440 		status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5441 						      (u8 *)r_bitmap,
5442 						      NULL);
5443 		ice_release_change_lock(hw);
5444 
5445 		if (status)
5446 			goto err_unroll;
5447 
5448 		/* Update profile to recipe bitmap array */
5449 		bitmap_copy(profile_to_recipe[fvit->profile_id], r_bitmap,
5450 			    ICE_MAX_NUM_RECIPES);
5451 
5452 		/* Update recipe to profile bitmap array */
5453 		for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
5454 			set_bit((u16)fvit->profile_id, recipe_to_profile[j]);
5455 	}
5456 
5457 	*rid = rm->root_rid;
5458 	memcpy(&hw->switch_info->recp_list[*rid].lkup_exts, lkup_exts,
5459 	       sizeof(*lkup_exts));
5460 err_unroll:
5461 	list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) {
5462 		list_del(&r_entry->l_entry);
5463 		devm_kfree(ice_hw_to_dev(hw), r_entry);
5464 	}
5465 
5466 	list_for_each_entry_safe(fvit, tmp, &rm->fv_list, list_entry) {
5467 		list_del(&fvit->list_entry);
5468 		devm_kfree(ice_hw_to_dev(hw), fvit);
5469 	}
5470 
5471 	if (rm->root_buf)
5472 		devm_kfree(ice_hw_to_dev(hw), rm->root_buf);
5473 
5474 	kfree(rm);
5475 
5476 err_free_lkup_exts:
5477 	kfree(lkup_exts);
5478 
5479 	return status;
5480 }
5481 
5482 /**
5483  * ice_dummy_packet_add_vlan - insert VLAN header to dummy pkt
5484  *
5485  * @dummy_pkt: dummy packet profile pattern to which VLAN tag(s) will be added
5486  * @num_vlan: number of VLAN tags
5487  */
5488 static struct ice_dummy_pkt_profile *
5489 ice_dummy_packet_add_vlan(const struct ice_dummy_pkt_profile *dummy_pkt,
5490 			  u32 num_vlan)
5491 {
5492 	struct ice_dummy_pkt_profile *profile;
5493 	struct ice_dummy_pkt_offsets *offsets;
5494 	u32 buf_len, off, etype_off, i;
5495 	u8 *pkt;
5496 
5497 	if (num_vlan < 1 || num_vlan > 2)
5498 		return ERR_PTR(-EINVAL);
5499 
5500 	off = num_vlan * VLAN_HLEN;
5501 
5502 	buf_len = array_size(num_vlan, sizeof(ice_dummy_vlan_packet_offsets)) +
5503 		  dummy_pkt->offsets_len;
5504 	offsets = kzalloc(buf_len, GFP_KERNEL);
5505 	if (!offsets)
5506 		return ERR_PTR(-ENOMEM);
5507 
5508 	offsets[0] = dummy_pkt->offsets[0];
5509 	if (num_vlan == 2) {
5510 		offsets[1] = ice_dummy_qinq_packet_offsets[0];
5511 		offsets[2] = ice_dummy_qinq_packet_offsets[1];
5512 	} else if (num_vlan == 1) {
5513 		offsets[1] = ice_dummy_vlan_packet_offsets[0];
5514 	}
5515 
5516 	for (i = 1; dummy_pkt->offsets[i].type != ICE_PROTOCOL_LAST; i++) {
5517 		offsets[i + num_vlan].type = dummy_pkt->offsets[i].type;
5518 		offsets[i + num_vlan].offset =
5519 			dummy_pkt->offsets[i].offset + off;
5520 	}
5521 	offsets[i + num_vlan] = dummy_pkt->offsets[i];
5522 
5523 	etype_off = dummy_pkt->offsets[1].offset;
5524 
5525 	buf_len = array_size(num_vlan, sizeof(ice_dummy_vlan_packet)) +
5526 		  dummy_pkt->pkt_len;
5527 	pkt = kzalloc(buf_len, GFP_KERNEL);
5528 	if (!pkt) {
5529 		kfree(offsets);
5530 		return ERR_PTR(-ENOMEM);
5531 	}
5532 
5533 	memcpy(pkt, dummy_pkt->pkt, etype_off);
5534 	memcpy(pkt + etype_off,
5535 	       num_vlan == 2 ? ice_dummy_qinq_packet : ice_dummy_vlan_packet,
5536 	       off);
5537 	memcpy(pkt + etype_off + off, dummy_pkt->pkt + etype_off,
5538 	       dummy_pkt->pkt_len - etype_off);
5539 
5540 	profile = kzalloc(sizeof(*profile), GFP_KERNEL);
5541 	if (!profile) {
5542 		kfree(offsets);
5543 		kfree(pkt);
5544 		return ERR_PTR(-ENOMEM);
5545 	}
5546 
5547 	profile->offsets = offsets;
5548 	profile->pkt = pkt;
5549 	profile->pkt_len = buf_len;
5550 	profile->match |= ICE_PKT_KMALLOC;
5551 
5552 	return profile;
5553 }
5554 
5555 /**
5556  * ice_find_dummy_packet - find dummy packet
5557  *
5558  * @lkups: lookup elements or match criteria for the advanced recipe, one
5559  *	   structure per protocol header
5560  * @lkups_cnt: number of protocols
5561  * @tun_type: tunnel type
5562  *
5563  * Returns the &ice_dummy_pkt_profile corresponding to these lookup params.
5564  */
5565 static const struct ice_dummy_pkt_profile *
5566 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5567 		      enum ice_sw_tunnel_type tun_type)
5568 {
5569 	const struct ice_dummy_pkt_profile *ret = ice_dummy_pkt_profiles;
5570 	u32 match = 0, vlan_count = 0;
5571 	u16 i;
5572 
5573 	switch (tun_type) {
5574 	case ICE_SW_TUN_GTPC:
5575 		match |= ICE_PKT_TUN_GTPC;
5576 		break;
5577 	case ICE_SW_TUN_GTPU:
5578 		match |= ICE_PKT_TUN_GTPU;
5579 		break;
5580 	case ICE_SW_TUN_NVGRE:
5581 		match |= ICE_PKT_TUN_NVGRE;
5582 		break;
5583 	case ICE_SW_TUN_GENEVE:
5584 	case ICE_SW_TUN_VXLAN:
5585 		match |= ICE_PKT_TUN_UDP;
5586 		break;
5587 	default:
5588 		break;
5589 	}
5590 
5591 	for (i = 0; i < lkups_cnt; i++) {
5592 		if (lkups[i].type == ICE_UDP_ILOS)
5593 			match |= ICE_PKT_INNER_UDP;
5594 		else if (lkups[i].type == ICE_TCP_IL)
5595 			match |= ICE_PKT_INNER_TCP;
5596 		else if (lkups[i].type == ICE_IPV6_OFOS)
5597 			match |= ICE_PKT_OUTER_IPV6;
5598 		else if (lkups[i].type == ICE_VLAN_OFOS ||
5599 			 lkups[i].type == ICE_VLAN_EX)
5600 			vlan_count++;
5601 		else if (lkups[i].type == ICE_VLAN_IN)
5602 			vlan_count++;
5603 		else if (lkups[i].type == ICE_ETYPE_OL &&
5604 			 lkups[i].h_u.ethertype.ethtype_id ==
5605 				cpu_to_be16(ICE_IPV6_ETHER_ID) &&
5606 			 lkups[i].m_u.ethertype.ethtype_id ==
5607 				cpu_to_be16(0xFFFF))
5608 			match |= ICE_PKT_OUTER_IPV6;
5609 		else if (lkups[i].type == ICE_ETYPE_IL &&
5610 			 lkups[i].h_u.ethertype.ethtype_id ==
5611 				cpu_to_be16(ICE_IPV6_ETHER_ID) &&
5612 			 lkups[i].m_u.ethertype.ethtype_id ==
5613 				cpu_to_be16(0xFFFF))
5614 			match |= ICE_PKT_INNER_IPV6;
5615 		else if (lkups[i].type == ICE_IPV6_IL)
5616 			match |= ICE_PKT_INNER_IPV6;
5617 		else if (lkups[i].type == ICE_GTP_NO_PAY)
5618 			match |= ICE_PKT_GTP_NOPAY;
5619 		else if (lkups[i].type == ICE_PPPOE) {
5620 			match |= ICE_PKT_PPPOE;
5621 			if (lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
5622 			    htons(PPP_IPV6))
5623 				match |= ICE_PKT_OUTER_IPV6;
5624 		} else if (lkups[i].type == ICE_L2TPV3)
5625 			match |= ICE_PKT_L2TPV3;
5626 	}
5627 
5628 	while (ret->match && (match & ret->match) != ret->match)
5629 		ret++;
5630 
5631 	if (vlan_count != 0)
5632 		ret = ice_dummy_packet_add_vlan(ret, vlan_count);
5633 
5634 	return ret;
5635 }
5636 
5637 /**
5638  * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
5639  *
5640  * @lkups: lookup elements or match criteria for the advanced recipe, one
5641  *	   structure per protocol header
5642  * @lkups_cnt: number of protocols
5643  * @s_rule: stores rule information from the match criteria
5644  * @profile: dummy packet profile (the template, its size and header offsets)
5645  */
5646 static int
5647 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5648 			  struct ice_sw_rule_lkup_rx_tx *s_rule,
5649 			  const struct ice_dummy_pkt_profile *profile)
5650 {
5651 	u8 *pkt;
5652 	u16 i;
5653 
5654 	/* Start with a packet with a pre-defined/dummy content. Then, fill
5655 	 * in the header values to be looked up or matched.
5656 	 */
5657 	pkt = s_rule->hdr_data;
5658 
5659 	memcpy(pkt, profile->pkt, profile->pkt_len);
5660 
5661 	for (i = 0; i < lkups_cnt; i++) {
5662 		const struct ice_dummy_pkt_offsets *offsets = profile->offsets;
5663 		enum ice_protocol_type type;
5664 		u16 offset = 0, len = 0, j;
5665 		bool found = false;
5666 
5667 		/* find the start of this layer; it should be found since this
5668 		 * was already checked when search for the dummy packet
5669 		 */
5670 		type = lkups[i].type;
5671 		/* metadata isn't present in the packet */
5672 		if (type == ICE_HW_METADATA)
5673 			continue;
5674 
5675 		for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
5676 			if (type == offsets[j].type) {
5677 				offset = offsets[j].offset;
5678 				found = true;
5679 				break;
5680 			}
5681 		}
5682 		/* this should never happen in a correct calling sequence */
5683 		if (!found)
5684 			return -EINVAL;
5685 
5686 		switch (lkups[i].type) {
5687 		case ICE_MAC_OFOS:
5688 		case ICE_MAC_IL:
5689 			len = sizeof(struct ice_ether_hdr);
5690 			break;
5691 		case ICE_ETYPE_OL:
5692 		case ICE_ETYPE_IL:
5693 			len = sizeof(struct ice_ethtype_hdr);
5694 			break;
5695 		case ICE_VLAN_OFOS:
5696 		case ICE_VLAN_EX:
5697 		case ICE_VLAN_IN:
5698 			len = sizeof(struct ice_vlan_hdr);
5699 			break;
5700 		case ICE_IPV4_OFOS:
5701 		case ICE_IPV4_IL:
5702 			len = sizeof(struct ice_ipv4_hdr);
5703 			break;
5704 		case ICE_IPV6_OFOS:
5705 		case ICE_IPV6_IL:
5706 			len = sizeof(struct ice_ipv6_hdr);
5707 			break;
5708 		case ICE_TCP_IL:
5709 		case ICE_UDP_OF:
5710 		case ICE_UDP_ILOS:
5711 			len = sizeof(struct ice_l4_hdr);
5712 			break;
5713 		case ICE_SCTP_IL:
5714 			len = sizeof(struct ice_sctp_hdr);
5715 			break;
5716 		case ICE_NVGRE:
5717 			len = sizeof(struct ice_nvgre_hdr);
5718 			break;
5719 		case ICE_VXLAN:
5720 		case ICE_GENEVE:
5721 			len = sizeof(struct ice_udp_tnl_hdr);
5722 			break;
5723 		case ICE_GTP_NO_PAY:
5724 		case ICE_GTP:
5725 			len = sizeof(struct ice_udp_gtp_hdr);
5726 			break;
5727 		case ICE_PPPOE:
5728 			len = sizeof(struct ice_pppoe_hdr);
5729 			break;
5730 		case ICE_L2TPV3:
5731 			len = sizeof(struct ice_l2tpv3_sess_hdr);
5732 			break;
5733 		default:
5734 			return -EINVAL;
5735 		}
5736 
5737 		/* the length should be a word multiple */
5738 		if (len % ICE_BYTES_PER_WORD)
5739 			return -EIO;
5740 
5741 		/* We have the offset to the header start, the length, the
5742 		 * caller's header values and mask. Use this information to
5743 		 * copy the data into the dummy packet appropriately based on
5744 		 * the mask. Note that we need to only write the bits as
5745 		 * indicated by the mask to make sure we don't improperly write
5746 		 * over any significant packet data.
5747 		 */
5748 		for (j = 0; j < len / sizeof(u16); j++) {
5749 			u16 *ptr = (u16 *)(pkt + offset);
5750 			u16 mask = lkups[i].m_raw[j];
5751 
5752 			if (!mask)
5753 				continue;
5754 
5755 			ptr[j] = (ptr[j] & ~mask) | (lkups[i].h_raw[j] & mask);
5756 		}
5757 	}
5758 
5759 	s_rule->hdr_len = cpu_to_le16(profile->pkt_len);
5760 
5761 	return 0;
5762 }
5763 
5764 /**
5765  * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
5766  * @hw: pointer to the hardware structure
5767  * @tun_type: tunnel type
5768  * @pkt: dummy packet to fill in
5769  * @offsets: offset info for the dummy packet
5770  */
5771 static int
5772 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
5773 			u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
5774 {
5775 	u16 open_port, i;
5776 
5777 	switch (tun_type) {
5778 	case ICE_SW_TUN_VXLAN:
5779 		if (!ice_get_open_tunnel_port(hw, &open_port, TNL_VXLAN))
5780 			return -EIO;
5781 		break;
5782 	case ICE_SW_TUN_GENEVE:
5783 		if (!ice_get_open_tunnel_port(hw, &open_port, TNL_GENEVE))
5784 			return -EIO;
5785 		break;
5786 	default:
5787 		/* Nothing needs to be done for this tunnel type */
5788 		return 0;
5789 	}
5790 
5791 	/* Find the outer UDP protocol header and insert the port number */
5792 	for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
5793 		if (offsets[i].type == ICE_UDP_OF) {
5794 			struct ice_l4_hdr *hdr;
5795 			u16 offset;
5796 
5797 			offset = offsets[i].offset;
5798 			hdr = (struct ice_l4_hdr *)&pkt[offset];
5799 			hdr->dst_port = cpu_to_be16(open_port);
5800 
5801 			return 0;
5802 		}
5803 	}
5804 
5805 	return -EIO;
5806 }
5807 
5808 /**
5809  * ice_fill_adv_packet_vlan - fill dummy packet with VLAN tag type
5810  * @hw: pointer to hw structure
5811  * @vlan_type: VLAN tag type
5812  * @pkt: dummy packet to fill in
5813  * @offsets: offset info for the dummy packet
5814  */
5815 static int
5816 ice_fill_adv_packet_vlan(struct ice_hw *hw, u16 vlan_type, u8 *pkt,
5817 			 const struct ice_dummy_pkt_offsets *offsets)
5818 {
5819 	u16 i;
5820 
5821 	/* Check if there is something to do */
5822 	if (!vlan_type || !ice_is_dvm_ena(hw))
5823 		return 0;
5824 
5825 	/* Find VLAN header and insert VLAN TPID */
5826 	for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
5827 		if (offsets[i].type == ICE_VLAN_OFOS ||
5828 		    offsets[i].type == ICE_VLAN_EX) {
5829 			struct ice_vlan_hdr *hdr;
5830 			u16 offset;
5831 
5832 			offset = offsets[i].offset;
5833 			hdr = (struct ice_vlan_hdr *)&pkt[offset];
5834 			hdr->type = cpu_to_be16(vlan_type);
5835 
5836 			return 0;
5837 		}
5838 	}
5839 
5840 	return -EIO;
5841 }
5842 
5843 static bool ice_rules_equal(const struct ice_adv_rule_info *first,
5844 			    const struct ice_adv_rule_info *second)
5845 {
5846 	return first->sw_act.flag == second->sw_act.flag &&
5847 	       first->tun_type == second->tun_type &&
5848 	       first->vlan_type == second->vlan_type &&
5849 	       first->src_vsi == second->src_vsi;
5850 }
5851 
5852 /**
5853  * ice_find_adv_rule_entry - Search a rule entry
5854  * @hw: pointer to the hardware structure
5855  * @lkups: lookup elements or match criteria for the advanced recipe, one
5856  *	   structure per protocol header
5857  * @lkups_cnt: number of protocols
5858  * @recp_id: recipe ID for which we are finding the rule
5859  * @rinfo: other information regarding the rule e.g. priority and action info
5860  *
5861  * Helper function to search for a given advance rule entry
5862  * Returns pointer to entry storing the rule if found
5863  */
5864 static struct ice_adv_fltr_mgmt_list_entry *
5865 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5866 			u16 lkups_cnt, u16 recp_id,
5867 			struct ice_adv_rule_info *rinfo)
5868 {
5869 	struct ice_adv_fltr_mgmt_list_entry *list_itr;
5870 	struct ice_switch_info *sw = hw->switch_info;
5871 	int i;
5872 
5873 	list_for_each_entry(list_itr, &sw->recp_list[recp_id].filt_rules,
5874 			    list_entry) {
5875 		bool lkups_matched = true;
5876 
5877 		if (lkups_cnt != list_itr->lkups_cnt)
5878 			continue;
5879 		for (i = 0; i < list_itr->lkups_cnt; i++)
5880 			if (memcmp(&list_itr->lkups[i], &lkups[i],
5881 				   sizeof(*lkups))) {
5882 				lkups_matched = false;
5883 				break;
5884 			}
5885 		if (ice_rules_equal(rinfo, &list_itr->rule_info) &&
5886 		    lkups_matched)
5887 			return list_itr;
5888 	}
5889 	return NULL;
5890 }
5891 
5892 /**
5893  * ice_adv_add_update_vsi_list
5894  * @hw: pointer to the hardware structure
5895  * @m_entry: pointer to current adv filter management list entry
5896  * @cur_fltr: filter information from the book keeping entry
5897  * @new_fltr: filter information with the new VSI to be added
5898  *
5899  * Call AQ command to add or update previously created VSI list with new VSI.
5900  *
5901  * Helper function to do book keeping associated with adding filter information
5902  * The algorithm to do the booking keeping is described below :
5903  * When a VSI needs to subscribe to a given advanced filter
5904  *	if only one VSI has been added till now
5905  *		Allocate a new VSI list and add two VSIs
5906  *		to this list using switch rule command
5907  *		Update the previously created switch rule with the
5908  *		newly created VSI list ID
5909  *	if a VSI list was previously created
5910  *		Add the new VSI to the previously created VSI list set
5911  *		using the update switch rule command
5912  */
5913 static int
5914 ice_adv_add_update_vsi_list(struct ice_hw *hw,
5915 			    struct ice_adv_fltr_mgmt_list_entry *m_entry,
5916 			    struct ice_adv_rule_info *cur_fltr,
5917 			    struct ice_adv_rule_info *new_fltr)
5918 {
5919 	u16 vsi_list_id = 0;
5920 	int status;
5921 
5922 	if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5923 	    cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
5924 	    cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
5925 		return -EOPNOTSUPP;
5926 
5927 	if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5928 	     new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
5929 	    (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5930 	     cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
5931 		return -EOPNOTSUPP;
5932 
5933 	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
5934 		 /* Only one entry existed in the mapping and it was not already
5935 		  * a part of a VSI list. So, create a VSI list with the old and
5936 		  * new VSIs.
5937 		  */
5938 		struct ice_fltr_info tmp_fltr;
5939 		u16 vsi_handle_arr[2];
5940 
5941 		/* A rule already exists with the new VSI being added */
5942 		if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
5943 		    new_fltr->sw_act.fwd_id.hw_vsi_id)
5944 			return -EEXIST;
5945 
5946 		vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
5947 		vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
5948 		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
5949 						  &vsi_list_id,
5950 						  ICE_SW_LKUP_LAST);
5951 		if (status)
5952 			return status;
5953 
5954 		memset(&tmp_fltr, 0, sizeof(tmp_fltr));
5955 		tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
5956 		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
5957 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
5958 		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
5959 		tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
5960 
5961 		/* Update the previous switch rule of "forward to VSI" to
5962 		 * "fwd to VSI list"
5963 		 */
5964 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
5965 		if (status)
5966 			return status;
5967 
5968 		cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
5969 		cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
5970 		m_entry->vsi_list_info =
5971 			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
5972 						vsi_list_id);
5973 	} else {
5974 		u16 vsi_handle = new_fltr->sw_act.vsi_handle;
5975 
5976 		if (!m_entry->vsi_list_info)
5977 			return -EIO;
5978 
5979 		/* A rule already exists with the new VSI being added */
5980 		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
5981 			return 0;
5982 
5983 		/* Update the previously created VSI list set with
5984 		 * the new VSI ID passed in
5985 		 */
5986 		vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
5987 
5988 		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
5989 						  vsi_list_id, false,
5990 						  ice_aqc_opc_update_sw_rules,
5991 						  ICE_SW_LKUP_LAST);
5992 		/* update VSI list mapping info with new VSI ID */
5993 		if (!status)
5994 			set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
5995 	}
5996 	if (!status)
5997 		m_entry->vsi_count++;
5998 	return status;
5999 }
6000 
6001 void ice_rule_add_tunnel_metadata(struct ice_adv_lkup_elem *lkup)
6002 {
6003 	lkup->type = ICE_HW_METADATA;
6004 	lkup->m_u.metadata.flags[ICE_PKT_FLAGS_TUNNEL] =
6005 		cpu_to_be16(ICE_PKT_TUNNEL_MASK);
6006 }
6007 
6008 void ice_rule_add_vlan_metadata(struct ice_adv_lkup_elem *lkup)
6009 {
6010 	lkup->type = ICE_HW_METADATA;
6011 	lkup->m_u.metadata.flags[ICE_PKT_FLAGS_VLAN] =
6012 		cpu_to_be16(ICE_PKT_VLAN_MASK);
6013 }
6014 
6015 void ice_rule_add_src_vsi_metadata(struct ice_adv_lkup_elem *lkup)
6016 {
6017 	lkup->type = ICE_HW_METADATA;
6018 	lkup->m_u.metadata.source_vsi = cpu_to_be16(ICE_MDID_SOURCE_VSI_MASK);
6019 }
6020 
6021 /**
6022  * ice_add_adv_rule - helper function to create an advanced switch rule
6023  * @hw: pointer to the hardware structure
6024  * @lkups: information on the words that needs to be looked up. All words
6025  * together makes one recipe
6026  * @lkups_cnt: num of entries in the lkups array
6027  * @rinfo: other information related to the rule that needs to be programmed
6028  * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
6029  *               ignored is case of error.
6030  *
6031  * This function can program only 1 rule at a time. The lkups is used to
6032  * describe the all the words that forms the "lookup" portion of the recipe.
6033  * These words can span multiple protocols. Callers to this function need to
6034  * pass in a list of protocol headers with lookup information along and mask
6035  * that determines which words are valid from the given protocol header.
6036  * rinfo describes other information related to this rule such as forwarding
6037  * IDs, priority of this rule, etc.
6038  */
6039 int
6040 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6041 		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
6042 		 struct ice_rule_query_data *added_entry)
6043 {
6044 	struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
6045 	struct ice_sw_rule_lkup_rx_tx *s_rule = NULL;
6046 	const struct ice_dummy_pkt_profile *profile;
6047 	u16 rid = 0, i, rule_buf_sz, vsi_handle;
6048 	struct list_head *rule_head;
6049 	struct ice_switch_info *sw;
6050 	u16 word_cnt;
6051 	u32 act = 0;
6052 	int status;
6053 	u8 q_rgn;
6054 
6055 	/* Initialize profile to result index bitmap */
6056 	if (!hw->switch_info->prof_res_bm_init) {
6057 		hw->switch_info->prof_res_bm_init = 1;
6058 		ice_init_prof_result_bm(hw);
6059 	}
6060 
6061 	if (!lkups_cnt)
6062 		return -EINVAL;
6063 
6064 	/* get # of words we need to match */
6065 	word_cnt = 0;
6066 	for (i = 0; i < lkups_cnt; i++) {
6067 		u16 j;
6068 
6069 		for (j = 0; j < ARRAY_SIZE(lkups->m_raw); j++)
6070 			if (lkups[i].m_raw[j])
6071 				word_cnt++;
6072 	}
6073 
6074 	if (!word_cnt)
6075 		return -EINVAL;
6076 
6077 	if (word_cnt > ICE_MAX_CHAIN_WORDS)
6078 		return -ENOSPC;
6079 
6080 	/* locate a dummy packet */
6081 	profile = ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type);
6082 	if (IS_ERR(profile))
6083 		return PTR_ERR(profile);
6084 
6085 	if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6086 	      rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
6087 	      rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6088 	      rinfo->sw_act.fltr_act == ICE_DROP_PACKET)) {
6089 		status = -EIO;
6090 		goto free_pkt_profile;
6091 	}
6092 
6093 	vsi_handle = rinfo->sw_act.vsi_handle;
6094 	if (!ice_is_vsi_valid(hw, vsi_handle)) {
6095 		status =  -EINVAL;
6096 		goto free_pkt_profile;
6097 	}
6098 
6099 	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6100 		rinfo->sw_act.fwd_id.hw_vsi_id =
6101 			ice_get_hw_vsi_num(hw, vsi_handle);
6102 
6103 	if (rinfo->src_vsi)
6104 		rinfo->sw_act.src = ice_get_hw_vsi_num(hw, rinfo->src_vsi);
6105 	else
6106 		rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
6107 
6108 	status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
6109 	if (status)
6110 		goto free_pkt_profile;
6111 	m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6112 	if (m_entry) {
6113 		/* we have to add VSI to VSI_LIST and increment vsi_count.
6114 		 * Also Update VSI list so that we can change forwarding rule
6115 		 * if the rule already exists, we will check if it exists with
6116 		 * same vsi_id, if not then add it to the VSI list if it already
6117 		 * exists if not then create a VSI list and add the existing VSI
6118 		 * ID and the new VSI ID to the list
6119 		 * We will add that VSI to the list
6120 		 */
6121 		status = ice_adv_add_update_vsi_list(hw, m_entry,
6122 						     &m_entry->rule_info,
6123 						     rinfo);
6124 		if (added_entry) {
6125 			added_entry->rid = rid;
6126 			added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
6127 			added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
6128 		}
6129 		goto free_pkt_profile;
6130 	}
6131 	rule_buf_sz = ICE_SW_RULE_RX_TX_HDR_SIZE(s_rule, profile->pkt_len);
6132 	s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
6133 	if (!s_rule) {
6134 		status = -ENOMEM;
6135 		goto free_pkt_profile;
6136 	}
6137 	if (!rinfo->flags_info.act_valid) {
6138 		act |= ICE_SINGLE_ACT_LAN_ENABLE;
6139 		act |= ICE_SINGLE_ACT_LB_ENABLE;
6140 	} else {
6141 		act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE |
6142 						ICE_SINGLE_ACT_LB_ENABLE);
6143 	}
6144 
6145 	switch (rinfo->sw_act.fltr_act) {
6146 	case ICE_FWD_TO_VSI:
6147 		act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
6148 			ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
6149 		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
6150 		break;
6151 	case ICE_FWD_TO_Q:
6152 		act |= ICE_SINGLE_ACT_TO_Q;
6153 		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6154 		       ICE_SINGLE_ACT_Q_INDEX_M;
6155 		break;
6156 	case ICE_FWD_TO_QGRP:
6157 		q_rgn = rinfo->sw_act.qgrp_size > 0 ?
6158 			(u8)ilog2(rinfo->sw_act.qgrp_size) : 0;
6159 		act |= ICE_SINGLE_ACT_TO_Q;
6160 		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6161 		       ICE_SINGLE_ACT_Q_INDEX_M;
6162 		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
6163 		       ICE_SINGLE_ACT_Q_REGION_M;
6164 		break;
6165 	case ICE_DROP_PACKET:
6166 		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
6167 		       ICE_SINGLE_ACT_VALID_BIT;
6168 		break;
6169 	default:
6170 		status = -EIO;
6171 		goto err_ice_add_adv_rule;
6172 	}
6173 
6174 	/* If there is no matching criteria for direction there
6175 	 * is only one difference between Rx and Tx:
6176 	 * - get switch id base on VSI number from source field (Tx)
6177 	 * - get switch id base on port number (Rx)
6178 	 *
6179 	 * If matching on direction metadata is chose rule direction is
6180 	 * extracted from type value set here.
6181 	 */
6182 	if (rinfo->sw_act.flag & ICE_FLTR_TX) {
6183 		s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
6184 		s_rule->src = cpu_to_le16(rinfo->sw_act.src);
6185 	} else {
6186 		s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
6187 		s_rule->src = cpu_to_le16(hw->port_info->lport);
6188 	}
6189 
6190 	s_rule->recipe_id = cpu_to_le16(rid);
6191 	s_rule->act = cpu_to_le32(act);
6192 
6193 	status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, profile);
6194 	if (status)
6195 		goto err_ice_add_adv_rule;
6196 
6197 	status = ice_fill_adv_packet_tun(hw, rinfo->tun_type, s_rule->hdr_data,
6198 					 profile->offsets);
6199 	if (status)
6200 		goto err_ice_add_adv_rule;
6201 
6202 	status = ice_fill_adv_packet_vlan(hw, rinfo->vlan_type,
6203 					  s_rule->hdr_data,
6204 					  profile->offsets);
6205 	if (status)
6206 		goto err_ice_add_adv_rule;
6207 
6208 	status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6209 				 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
6210 				 NULL);
6211 	if (status)
6212 		goto err_ice_add_adv_rule;
6213 	adv_fltr = devm_kzalloc(ice_hw_to_dev(hw),
6214 				sizeof(struct ice_adv_fltr_mgmt_list_entry),
6215 				GFP_KERNEL);
6216 	if (!adv_fltr) {
6217 		status = -ENOMEM;
6218 		goto err_ice_add_adv_rule;
6219 	}
6220 
6221 	adv_fltr->lkups = devm_kmemdup(ice_hw_to_dev(hw), lkups,
6222 				       lkups_cnt * sizeof(*lkups), GFP_KERNEL);
6223 	if (!adv_fltr->lkups) {
6224 		status = -ENOMEM;
6225 		goto err_ice_add_adv_rule;
6226 	}
6227 
6228 	adv_fltr->lkups_cnt = lkups_cnt;
6229 	adv_fltr->rule_info = *rinfo;
6230 	adv_fltr->rule_info.fltr_rule_id = le16_to_cpu(s_rule->index);
6231 	sw = hw->switch_info;
6232 	sw->recp_list[rid].adv_rule = true;
6233 	rule_head = &sw->recp_list[rid].filt_rules;
6234 
6235 	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6236 		adv_fltr->vsi_count = 1;
6237 
6238 	/* Add rule entry to book keeping list */
6239 	list_add(&adv_fltr->list_entry, rule_head);
6240 	if (added_entry) {
6241 		added_entry->rid = rid;
6242 		added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
6243 		added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
6244 	}
6245 err_ice_add_adv_rule:
6246 	if (status && adv_fltr) {
6247 		devm_kfree(ice_hw_to_dev(hw), adv_fltr->lkups);
6248 		devm_kfree(ice_hw_to_dev(hw), adv_fltr);
6249 	}
6250 
6251 	kfree(s_rule);
6252 
6253 free_pkt_profile:
6254 	if (profile->match & ICE_PKT_KMALLOC) {
6255 		kfree(profile->offsets);
6256 		kfree(profile->pkt);
6257 		kfree(profile);
6258 	}
6259 
6260 	return status;
6261 }
6262 
6263 /**
6264  * ice_replay_vsi_fltr - Replay filters for requested VSI
6265  * @hw: pointer to the hardware structure
6266  * @vsi_handle: driver VSI handle
6267  * @recp_id: Recipe ID for which rules need to be replayed
6268  * @list_head: list for which filters need to be replayed
6269  *
6270  * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
6271  * It is required to pass valid VSI handle.
6272  */
6273 static int
6274 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
6275 		    struct list_head *list_head)
6276 {
6277 	struct ice_fltr_mgmt_list_entry *itr;
6278 	int status = 0;
6279 	u16 hw_vsi_id;
6280 
6281 	if (list_empty(list_head))
6282 		return status;
6283 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
6284 
6285 	list_for_each_entry(itr, list_head, list_entry) {
6286 		struct ice_fltr_list_entry f_entry;
6287 
6288 		f_entry.fltr_info = itr->fltr_info;
6289 		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
6290 		    itr->fltr_info.vsi_handle == vsi_handle) {
6291 			/* update the src in case it is VSI num */
6292 			if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6293 				f_entry.fltr_info.src = hw_vsi_id;
6294 			status = ice_add_rule_internal(hw, recp_id, &f_entry);
6295 			if (status)
6296 				goto end;
6297 			continue;
6298 		}
6299 		if (!itr->vsi_list_info ||
6300 		    !test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
6301 			continue;
6302 		/* Clearing it so that the logic can add it back */
6303 		clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6304 		f_entry.fltr_info.vsi_handle = vsi_handle;
6305 		f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6306 		/* update the src in case it is VSI num */
6307 		if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6308 			f_entry.fltr_info.src = hw_vsi_id;
6309 		if (recp_id == ICE_SW_LKUP_VLAN)
6310 			status = ice_add_vlan_internal(hw, &f_entry);
6311 		else
6312 			status = ice_add_rule_internal(hw, recp_id, &f_entry);
6313 		if (status)
6314 			goto end;
6315 	}
6316 end:
6317 	return status;
6318 }
6319 
6320 /**
6321  * ice_adv_rem_update_vsi_list
6322  * @hw: pointer to the hardware structure
6323  * @vsi_handle: VSI handle of the VSI to remove
6324  * @fm_list: filter management entry for which the VSI list management needs to
6325  *	     be done
6326  */
6327 static int
6328 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
6329 			    struct ice_adv_fltr_mgmt_list_entry *fm_list)
6330 {
6331 	struct ice_vsi_list_map_info *vsi_list_info;
6332 	enum ice_sw_lkup_type lkup_type;
6333 	u16 vsi_list_id;
6334 	int status;
6335 
6336 	if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
6337 	    fm_list->vsi_count == 0)
6338 		return -EINVAL;
6339 
6340 	/* A rule with the VSI being removed does not exist */
6341 	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
6342 		return -ENOENT;
6343 
6344 	lkup_type = ICE_SW_LKUP_LAST;
6345 	vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
6346 	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
6347 					  ice_aqc_opc_update_sw_rules,
6348 					  lkup_type);
6349 	if (status)
6350 		return status;
6351 
6352 	fm_list->vsi_count--;
6353 	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
6354 	vsi_list_info = fm_list->vsi_list_info;
6355 	if (fm_list->vsi_count == 1) {
6356 		struct ice_fltr_info tmp_fltr;
6357 		u16 rem_vsi_handle;
6358 
6359 		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
6360 						ICE_MAX_VSI);
6361 		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
6362 			return -EIO;
6363 
6364 		/* Make sure VSI list is empty before removing it below */
6365 		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
6366 						  vsi_list_id, true,
6367 						  ice_aqc_opc_update_sw_rules,
6368 						  lkup_type);
6369 		if (status)
6370 			return status;
6371 
6372 		memset(&tmp_fltr, 0, sizeof(tmp_fltr));
6373 		tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
6374 		tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
6375 		fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
6376 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6377 		tmp_fltr.fwd_id.hw_vsi_id =
6378 			ice_get_hw_vsi_num(hw, rem_vsi_handle);
6379 		fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
6380 			ice_get_hw_vsi_num(hw, rem_vsi_handle);
6381 		fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
6382 
6383 		/* Update the previous switch rule of "MAC forward to VSI" to
6384 		 * "MAC fwd to VSI list"
6385 		 */
6386 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6387 		if (status) {
6388 			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
6389 				  tmp_fltr.fwd_id.hw_vsi_id, status);
6390 			return status;
6391 		}
6392 		fm_list->vsi_list_info->ref_cnt--;
6393 
6394 		/* Remove the VSI list since it is no longer used */
6395 		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
6396 		if (status) {
6397 			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
6398 				  vsi_list_id, status);
6399 			return status;
6400 		}
6401 
6402 		list_del(&vsi_list_info->list_entry);
6403 		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
6404 		fm_list->vsi_list_info = NULL;
6405 	}
6406 
6407 	return status;
6408 }
6409 
6410 /**
6411  * ice_rem_adv_rule - removes existing advanced switch rule
6412  * @hw: pointer to the hardware structure
6413  * @lkups: information on the words that needs to be looked up. All words
6414  *         together makes one recipe
6415  * @lkups_cnt: num of entries in the lkups array
6416  * @rinfo: Its the pointer to the rule information for the rule
6417  *
6418  * This function can be used to remove 1 rule at a time. The lkups is
6419  * used to describe all the words that forms the "lookup" portion of the
6420  * rule. These words can span multiple protocols. Callers to this function
6421  * need to pass in a list of protocol headers with lookup information along
6422  * and mask that determines which words are valid from the given protocol
6423  * header. rinfo describes other information related to this rule such as
6424  * forwarding IDs, priority of this rule, etc.
6425  */
6426 static int
6427 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6428 		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
6429 {
6430 	struct ice_adv_fltr_mgmt_list_entry *list_elem;
6431 	struct ice_prot_lkup_ext lkup_exts;
6432 	bool remove_rule = false;
6433 	struct mutex *rule_lock; /* Lock to protect filter rule list */
6434 	u16 i, rid, vsi_handle;
6435 	int status = 0;
6436 
6437 	memset(&lkup_exts, 0, sizeof(lkup_exts));
6438 	for (i = 0; i < lkups_cnt; i++) {
6439 		u16 count;
6440 
6441 		if (lkups[i].type >= ICE_PROTOCOL_LAST)
6442 			return -EIO;
6443 
6444 		count = ice_fill_valid_words(&lkups[i], &lkup_exts);
6445 		if (!count)
6446 			return -EIO;
6447 	}
6448 
6449 	rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
6450 	/* If did not find a recipe that match the existing criteria */
6451 	if (rid == ICE_MAX_NUM_RECIPES)
6452 		return -EINVAL;
6453 
6454 	rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
6455 	list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6456 	/* the rule is already removed */
6457 	if (!list_elem)
6458 		return 0;
6459 	mutex_lock(rule_lock);
6460 	if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
6461 		remove_rule = true;
6462 	} else if (list_elem->vsi_count > 1) {
6463 		remove_rule = false;
6464 		vsi_handle = rinfo->sw_act.vsi_handle;
6465 		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6466 	} else {
6467 		vsi_handle = rinfo->sw_act.vsi_handle;
6468 		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6469 		if (status) {
6470 			mutex_unlock(rule_lock);
6471 			return status;
6472 		}
6473 		if (list_elem->vsi_count == 0)
6474 			remove_rule = true;
6475 	}
6476 	mutex_unlock(rule_lock);
6477 	if (remove_rule) {
6478 		struct ice_sw_rule_lkup_rx_tx *s_rule;
6479 		u16 rule_buf_sz;
6480 
6481 		rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule);
6482 		s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
6483 		if (!s_rule)
6484 			return -ENOMEM;
6485 		s_rule->act = 0;
6486 		s_rule->index = cpu_to_le16(list_elem->rule_info.fltr_rule_id);
6487 		s_rule->hdr_len = 0;
6488 		status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6489 					 rule_buf_sz, 1,
6490 					 ice_aqc_opc_remove_sw_rules, NULL);
6491 		if (!status || status == -ENOENT) {
6492 			struct ice_switch_info *sw = hw->switch_info;
6493 
6494 			mutex_lock(rule_lock);
6495 			list_del(&list_elem->list_entry);
6496 			devm_kfree(ice_hw_to_dev(hw), list_elem->lkups);
6497 			devm_kfree(ice_hw_to_dev(hw), list_elem);
6498 			mutex_unlock(rule_lock);
6499 			if (list_empty(&sw->recp_list[rid].filt_rules))
6500 				sw->recp_list[rid].adv_rule = false;
6501 		}
6502 		kfree(s_rule);
6503 	}
6504 	return status;
6505 }
6506 
6507 /**
6508  * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
6509  * @hw: pointer to the hardware structure
6510  * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
6511  *
6512  * This function is used to remove 1 rule at a time. The removal is based on
6513  * the remove_entry parameter. This function will remove rule for a given
6514  * vsi_handle with a given rule_id which is passed as parameter in remove_entry
6515  */
6516 int
6517 ice_rem_adv_rule_by_id(struct ice_hw *hw,
6518 		       struct ice_rule_query_data *remove_entry)
6519 {
6520 	struct ice_adv_fltr_mgmt_list_entry *list_itr;
6521 	struct list_head *list_head;
6522 	struct ice_adv_rule_info rinfo;
6523 	struct ice_switch_info *sw;
6524 
6525 	sw = hw->switch_info;
6526 	if (!sw->recp_list[remove_entry->rid].recp_created)
6527 		return -EINVAL;
6528 	list_head = &sw->recp_list[remove_entry->rid].filt_rules;
6529 	list_for_each_entry(list_itr, list_head, list_entry) {
6530 		if (list_itr->rule_info.fltr_rule_id ==
6531 		    remove_entry->rule_id) {
6532 			rinfo = list_itr->rule_info;
6533 			rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
6534 			return ice_rem_adv_rule(hw, list_itr->lkups,
6535 						list_itr->lkups_cnt, &rinfo);
6536 		}
6537 	}
6538 	/* either list is empty or unable to find rule */
6539 	return -ENOENT;
6540 }
6541 
6542 /**
6543  * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
6544  *                            given VSI handle
6545  * @hw: pointer to the hardware structure
6546  * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
6547  *
6548  * This function is used to remove all the rules for a given VSI and as soon
6549  * as removing a rule fails, it will return immediately with the error code,
6550  * else it will return success.
6551  */
6552 int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
6553 {
6554 	struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
6555 	struct ice_vsi_list_map_info *map_info;
6556 	struct ice_adv_rule_info rinfo;
6557 	struct list_head *list_head;
6558 	struct ice_switch_info *sw;
6559 	int status;
6560 	u8 rid;
6561 
6562 	sw = hw->switch_info;
6563 	for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
6564 		if (!sw->recp_list[rid].recp_created)
6565 			continue;
6566 		if (!sw->recp_list[rid].adv_rule)
6567 			continue;
6568 
6569 		list_head = &sw->recp_list[rid].filt_rules;
6570 		list_for_each_entry_safe(list_itr, tmp_entry, list_head,
6571 					 list_entry) {
6572 			rinfo = list_itr->rule_info;
6573 
6574 			if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
6575 				map_info = list_itr->vsi_list_info;
6576 				if (!map_info)
6577 					continue;
6578 
6579 				if (!test_bit(vsi_handle, map_info->vsi_map))
6580 					continue;
6581 			} else if (rinfo.sw_act.vsi_handle != vsi_handle) {
6582 				continue;
6583 			}
6584 
6585 			rinfo.sw_act.vsi_handle = vsi_handle;
6586 			status = ice_rem_adv_rule(hw, list_itr->lkups,
6587 						  list_itr->lkups_cnt, &rinfo);
6588 			if (status)
6589 				return status;
6590 		}
6591 	}
6592 	return 0;
6593 }
6594 
6595 /**
6596  * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
6597  * @hw: pointer to the hardware structure
6598  * @vsi_handle: driver VSI handle
6599  * @list_head: list for which filters need to be replayed
6600  *
6601  * Replay the advanced rule for the given VSI.
6602  */
6603 static int
6604 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
6605 			struct list_head *list_head)
6606 {
6607 	struct ice_rule_query_data added_entry = { 0 };
6608 	struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
6609 	int status = 0;
6610 
6611 	if (list_empty(list_head))
6612 		return status;
6613 	list_for_each_entry(adv_fltr, list_head, list_entry) {
6614 		struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
6615 		u16 lk_cnt = adv_fltr->lkups_cnt;
6616 
6617 		if (vsi_handle != rinfo->sw_act.vsi_handle)
6618 			continue;
6619 		status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
6620 					  &added_entry);
6621 		if (status)
6622 			break;
6623 	}
6624 	return status;
6625 }
6626 
6627 /**
6628  * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
6629  * @hw: pointer to the hardware structure
6630  * @vsi_handle: driver VSI handle
6631  *
6632  * Replays filters for requested VSI via vsi_handle.
6633  */
6634 int ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
6635 {
6636 	struct ice_switch_info *sw = hw->switch_info;
6637 	int status;
6638 	u8 i;
6639 
6640 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6641 		struct list_head *head;
6642 
6643 		head = &sw->recp_list[i].filt_replay_rules;
6644 		if (!sw->recp_list[i].adv_rule)
6645 			status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
6646 		else
6647 			status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
6648 		if (status)
6649 			return status;
6650 	}
6651 	return status;
6652 }
6653 
6654 /**
6655  * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
6656  * @hw: pointer to the HW struct
6657  *
6658  * Deletes the filter replay rules.
6659  */
6660 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
6661 {
6662 	struct ice_switch_info *sw = hw->switch_info;
6663 	u8 i;
6664 
6665 	if (!sw)
6666 		return;
6667 
6668 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6669 		if (!list_empty(&sw->recp_list[i].filt_replay_rules)) {
6670 			struct list_head *l_head;
6671 
6672 			l_head = &sw->recp_list[i].filt_replay_rules;
6673 			if (!sw->recp_list[i].adv_rule)
6674 				ice_rem_sw_rule_info(hw, l_head);
6675 			else
6676 				ice_rem_adv_rule_info(hw, l_head);
6677 		}
6678 	}
6679 }
6680