xref: /openbmc/linux/drivers/net/ethernet/intel/ice/ice_switch.c (revision 3d40aed862874db14e1dd41fd6f12636dcfdcc3e)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 #include "ice_lib.h"
5 #include "ice_switch.h"
6 
7 #define ICE_ETH_DA_OFFSET		0	/* dest MAC offset within dummy_eth_header */
8 #define ICE_ETH_ETHTYPE_OFFSET		12	/* Ethertype offset (no VLAN tag present) */
9 #define ICE_ETH_VLAN_TCI_OFFSET		14	/* VLAN TCI offset when tag is present */
10 #define ICE_MAX_VLAN_ID			0xFFF	/* VLAN ID is a 12-bit field */
11 #define ICE_IPV6_ETHER_ID		0x86DD	/* Ethertype for IPv6 */
12 
13 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
14  * struct to configure any switch filter rules.
15  * {DA (6 bytes), SA(6 bytes),
16  * Ether type (2 bytes for header without VLAN tag) OR
17  * VLAN tag (4 bytes for header with VLAN tag) }
18  *
19  * Word on Hardcoded values
20  * byte 0 = 0x2: to identify it as locally administered DA MAC
21  * byte 6 = 0x2: to identify it as locally administered SA MAC
22  * byte 12 = 0x81 & byte 13 = 0x00:
23  *	In case of VLAN filter first two bytes defines ether type (0x8100)
24  *	and remaining two bytes are placeholder for programming a given VLAN ID
25  *	In case of Ether type filter it is treated as header without VLAN tag
26  *	and byte 12 and 13 is used to program a given Ether type instead
27  */
/* 16 = 6 (DA) + 6 (SA) + 4 (Ethertype-or-VLAN placeholder) bytes */
28 #define DUMMY_ETH_HDR_LEN		16
29 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
30 							0x2, 0, 0, 0, 0, 0,
31 							0x81, 0, 0, 0};
32 
/* Bit flags characterizing a dummy packet's header stack; a rule's
 * requested headers are folded into a mask of these bits and matched
 * against each profile's .match field to select a template.
 */
33 enum {
34 	ICE_PKT_OUTER_IPV6	= BIT(0),
35 	ICE_PKT_TUN_GTPC	= BIT(1),
36 	ICE_PKT_TUN_GTPU	= BIT(2),
37 	ICE_PKT_TUN_NVGRE	= BIT(3),
38 	ICE_PKT_TUN_UDP		= BIT(4),
39 	ICE_PKT_INNER_IPV6	= BIT(5),
40 	ICE_PKT_INNER_TCP	= BIT(6),
41 	ICE_PKT_INNER_UDP	= BIT(7),
42 	ICE_PKT_GTP_NOPAY	= BIT(8),
43 	ICE_PKT_KMALLOC		= BIT(9),
44 	ICE_PKT_PPPOE		= BIT(10),
45 	ICE_PKT_L2TPV3		= BIT(11),
46 };
47 
/* (protocol, byte offset) pair locating one header inside a dummy packet */
48 struct ice_dummy_pkt_offsets {
49 	enum ice_protocol_type type;
50 	u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
51 };
52 
/* One selectable dummy-packet profile: the raw template bytes, the table
 * of header offsets within it, and the ICE_PKT_* flag mask it satisfies.
 */
53 struct ice_dummy_pkt_profile {
54 	const struct ice_dummy_pkt_offsets *offsets;
55 	const u8 *pkt;
56 	u32 match;	/* mask of ICE_PKT_* bits this template covers */
57 	u16 pkt_len;
58 	u16 offsets_len;
59 };
60 
/* Declare the offsets table for dummy packet <type> */
61 #define ICE_DECLARE_PKT_OFFSETS(type)					\
62 	static const struct ice_dummy_pkt_offsets			\
63 	ice_dummy_##type##_packet_offsets[]
64 
/* Declare the raw byte template for dummy packet <type> */
65 #define ICE_DECLARE_PKT_TEMPLATE(type)					\
66 	static const u8 ice_dummy_##type##_packet[]
67 
/* Build an ice_dummy_pkt_profile entry tying template, offsets and
 * match mask m together; sizes are computed from the declared arrays.
 */
68 #define ICE_PKT_PROFILE(type, m) {					\
69 	.match		= (m),						\
70 	.pkt		= ice_dummy_##type##_packet,			\
71 	.pkt_len	= sizeof(ice_dummy_##type##_packet),		\
72 	.offsets	= ice_dummy_##type##_packet_offsets,		\
73 	.offsets_len	= sizeof(ice_dummy_##type##_packet_offsets),	\
74 }
75 
/* Single VLAN tag fragment (inserted at the Ethertype position, byte 12).
 * NOTE(review): unlike the full templates below there is no
 * ICE_PROTOCOL_LAST terminator — this appears to be a fragment spliced
 * into other dummy packets rather than iterated to a terminator; confirm
 * against the consumers before relying on that.
 */
76 ICE_DECLARE_PKT_OFFSETS(vlan) = {
77 	{ ICE_VLAN_OFOS,        12 },
78 };
79 
80 ICE_DECLARE_PKT_TEMPLATE(vlan) = {
81 	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
82 };
83 
/* QinQ (double VLAN) fragment: outer 802.1ad-style tag (0x9100) followed
 * by an inner 802.1Q tag (0x8100). Like the vlan fragment above, no
 * ICE_PROTOCOL_LAST terminator — presumably spliced into other templates.
 */
84 ICE_DECLARE_PKT_OFFSETS(qinq) = {
85 	{ ICE_VLAN_EX,          12 },
86 	{ ICE_VLAN_IN,          16 },
87 };
88 
89 ICE_DECLARE_PKT_TEMPLATE(qinq) = {
90 	0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
91 	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
92 };
93 
/* MAC + IPv4 + NVGRE + inner MAC + inner IPv4 + inner TCP dummy packet */
94 ICE_DECLARE_PKT_OFFSETS(gre_tcp) = {
95 	{ ICE_MAC_OFOS,		0 },
96 	{ ICE_ETYPE_OL,		12 },
97 	{ ICE_IPV4_OFOS,	14 },
98 	{ ICE_NVGRE,		34 },
99 	{ ICE_MAC_IL,		42 },
100 	{ ICE_ETYPE_IL,		54 },
101 	{ ICE_IPV4_IL,		56 },
102 	{ ICE_TCP_IL,		76 },
103 	{ ICE_PROTOCOL_LAST,	0 },
104 };
105 
106 ICE_DECLARE_PKT_TEMPLATE(gre_tcp) = {
107 	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
108 	0x00, 0x00, 0x00, 0x00,
109 	0x00, 0x00, 0x00, 0x00,
110 
111 	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
112 
113 	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
114 	0x00, 0x00, 0x00, 0x00,
115 	0x00, 0x2F, 0x00, 0x00,	/* protocol 0x2F = GRE (47) */
116 	0x00, 0x00, 0x00, 0x00,
117 	0x00, 0x00, 0x00, 0x00,
118 
119 	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
120 	0x00, 0x00, 0x00, 0x00,
121 
122 	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
123 	0x00, 0x00, 0x00, 0x00,
124 	0x00, 0x00, 0x00, 0x00,
125 
126 	0x08, 0x00,		/* ICE_ETYPE_IL 54 */
127 
128 	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
129 	0x00, 0x00, 0x00, 0x00,
130 	0x00, 0x06, 0x00, 0x00,	/* protocol 0x06 = TCP */
131 	0x00, 0x00, 0x00, 0x00,
132 	0x00, 0x00, 0x00, 0x00,
133 
134 	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 76 */
135 	0x00, 0x00, 0x00, 0x00,
136 	0x00, 0x00, 0x00, 0x00,
137 	0x50, 0x02, 0x20, 0x00,
138 	0x00, 0x00, 0x00, 0x00
139 };
140 
/* MAC + IPv4 + NVGRE + inner MAC + inner IPv4 + inner UDP dummy packet */
141 ICE_DECLARE_PKT_OFFSETS(gre_udp) = {
142 	{ ICE_MAC_OFOS,		0 },
143 	{ ICE_ETYPE_OL,		12 },
144 	{ ICE_IPV4_OFOS,	14 },
145 	{ ICE_NVGRE,		34 },
146 	{ ICE_MAC_IL,		42 },
147 	{ ICE_ETYPE_IL,		54 },
148 	{ ICE_IPV4_IL,		56 },
149 	{ ICE_UDP_ILOS,		76 },
150 	{ ICE_PROTOCOL_LAST,	0 },
151 };
152 
153 ICE_DECLARE_PKT_TEMPLATE(gre_udp) = {
154 	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
155 	0x00, 0x00, 0x00, 0x00,
156 	0x00, 0x00, 0x00, 0x00,
157 
158 	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
159 
160 	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
161 	0x00, 0x00, 0x00, 0x00,
162 	0x00, 0x2F, 0x00, 0x00,	/* protocol 0x2F = GRE (47) */
163 	0x00, 0x00, 0x00, 0x00,
164 	0x00, 0x00, 0x00, 0x00,
165 
166 	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
167 	0x00, 0x00, 0x00, 0x00,
168 
169 	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
170 	0x00, 0x00, 0x00, 0x00,
171 	0x00, 0x00, 0x00, 0x00,
172 
173 	0x08, 0x00,		/* ICE_ETYPE_IL 54 */
174 
175 	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
176 	0x00, 0x00, 0x00, 0x00,
177 	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
178 	0x00, 0x00, 0x00, 0x00,
179 	0x00, 0x00, 0x00, 0x00,
180 
181 	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 76 */
182 	0x00, 0x08, 0x00, 0x00,
183 };
184 
/* MAC + IPv4 + UDP tunnel (VXLAN/GENEVE/VXLAN-GPE share offset 42) +
 * inner MAC + inner IPv4 + inner TCP dummy packet
 */
185 ICE_DECLARE_PKT_OFFSETS(udp_tun_tcp) = {
186 	{ ICE_MAC_OFOS,		0 },
187 	{ ICE_ETYPE_OL,		12 },
188 	{ ICE_IPV4_OFOS,	14 },
189 	{ ICE_UDP_OF,		34 },
190 	{ ICE_VXLAN,		42 },
191 	{ ICE_GENEVE,		42 },
192 	{ ICE_VXLAN_GPE,	42 },
193 	{ ICE_MAC_IL,		50 },
194 	{ ICE_ETYPE_IL,		62 },
195 	{ ICE_IPV4_IL,		64 },
196 	{ ICE_TCP_IL,		84 },
197 	{ ICE_PROTOCOL_LAST,	0 },
198 };
199 
200 ICE_DECLARE_PKT_TEMPLATE(udp_tun_tcp) = {
201 	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
202 	0x00, 0x00, 0x00, 0x00,
203 	0x00, 0x00, 0x00, 0x00,
204 
205 	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
206 
207 	0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
208 	0x00, 0x01, 0x00, 0x00,
209 	0x40, 0x11, 0x00, 0x00,
210 	0x00, 0x00, 0x00, 0x00,
211 	0x00, 0x00, 0x00, 0x00,
212 
213 	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34, dst port 0x12b5 = 4789 (VXLAN) */
214 	0x00, 0x46, 0x00, 0x00,
215 
216 	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
217 	0x00, 0x00, 0x00, 0x00,
218 
219 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
220 	0x00, 0x00, 0x00, 0x00,
221 	0x00, 0x00, 0x00, 0x00,
222 
223 	0x08, 0x00,		/* ICE_ETYPE_IL 62 */
224 
225 	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
226 	0x00, 0x01, 0x00, 0x00,
227 	0x40, 0x06, 0x00, 0x00,
228 	0x00, 0x00, 0x00, 0x00,
229 	0x00, 0x00, 0x00, 0x00,
230 
231 	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
232 	0x00, 0x00, 0x00, 0x00,
233 	0x00, 0x00, 0x00, 0x00,
234 	0x50, 0x02, 0x20, 0x00,
235 	0x00, 0x00, 0x00, 0x00
236 };
237 
/* MAC + IPv4 + UDP tunnel (VXLAN/GENEVE/VXLAN-GPE share offset 42) +
 * inner MAC + inner IPv4 + inner UDP dummy packet
 */
238 ICE_DECLARE_PKT_OFFSETS(udp_tun_udp) = {
239 	{ ICE_MAC_OFOS,		0 },
240 	{ ICE_ETYPE_OL,		12 },
241 	{ ICE_IPV4_OFOS,	14 },
242 	{ ICE_UDP_OF,		34 },
243 	{ ICE_VXLAN,		42 },
244 	{ ICE_GENEVE,		42 },
245 	{ ICE_VXLAN_GPE,	42 },
246 	{ ICE_MAC_IL,		50 },
247 	{ ICE_ETYPE_IL,		62 },
248 	{ ICE_IPV4_IL,		64 },
249 	{ ICE_UDP_ILOS,		84 },
250 	{ ICE_PROTOCOL_LAST,	0 },
251 };
252 
253 ICE_DECLARE_PKT_TEMPLATE(udp_tun_udp) = {
254 	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
255 	0x00, 0x00, 0x00, 0x00,
256 	0x00, 0x00, 0x00, 0x00,
257 
258 	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
259 
260 	0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
261 	0x00, 0x01, 0x00, 0x00,
262 	0x00, 0x11, 0x00, 0x00,
263 	0x00, 0x00, 0x00, 0x00,
264 	0x00, 0x00, 0x00, 0x00,
265 
266 	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34, dst port 0x12b5 = 4789 (VXLAN) */
267 	0x00, 0x3a, 0x00, 0x00,
268 
269 	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
270 	0x00, 0x00, 0x00, 0x00,
271 
272 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
273 	0x00, 0x00, 0x00, 0x00,
274 	0x00, 0x00, 0x00, 0x00,
275 
276 	0x08, 0x00,		/* ICE_ETYPE_IL 62 */
277 
278 	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
279 	0x00, 0x01, 0x00, 0x00,
280 	0x00, 0x11, 0x00, 0x00,
281 	0x00, 0x00, 0x00, 0x00,
282 	0x00, 0x00, 0x00, 0x00,
283 
284 	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
285 	0x00, 0x08, 0x00, 0x00,
286 };
287 
/* MAC + IPv4 + NVGRE + inner MAC + inner IPv6 + inner TCP dummy packet */
288 ICE_DECLARE_PKT_OFFSETS(gre_ipv6_tcp) = {
289 	{ ICE_MAC_OFOS,		0 },
290 	{ ICE_ETYPE_OL,		12 },
291 	{ ICE_IPV4_OFOS,	14 },
292 	{ ICE_NVGRE,		34 },
293 	{ ICE_MAC_IL,		42 },
294 	{ ICE_ETYPE_IL,		54 },
295 	{ ICE_IPV6_IL,		56 },
296 	{ ICE_TCP_IL,		96 },
297 	{ ICE_PROTOCOL_LAST,	0 },
298 };
299 
300 ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_tcp) = {
301 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
302 	0x00, 0x00, 0x00, 0x00,
303 	0x00, 0x00, 0x00, 0x00,
304 
305 	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
306 
307 	0x45, 0x00, 0x00, 0x66, /* ICE_IPV4_OFOS 14 */
308 	0x00, 0x00, 0x00, 0x00,
309 	0x00, 0x2F, 0x00, 0x00,	/* protocol 0x2F = GRE (47) */
310 	0x00, 0x00, 0x00, 0x00,
311 	0x00, 0x00, 0x00, 0x00,
312 
313 	0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
314 	0x00, 0x00, 0x00, 0x00,
315 
316 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
317 	0x00, 0x00, 0x00, 0x00,
318 	0x00, 0x00, 0x00, 0x00,
319 
320 	0x86, 0xdd,		/* ICE_ETYPE_IL 54 */
321 
322 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */
323 	0x00, 0x08, 0x06, 0x40,	/* next header 0x06 = TCP */
324 	0x00, 0x00, 0x00, 0x00,
325 	0x00, 0x00, 0x00, 0x00,
326 	0x00, 0x00, 0x00, 0x00,
327 	0x00, 0x00, 0x00, 0x00,
328 	0x00, 0x00, 0x00, 0x00,
329 	0x00, 0x00, 0x00, 0x00,
330 	0x00, 0x00, 0x00, 0x00,
331 	0x00, 0x00, 0x00, 0x00,
332 
333 	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 96 */
334 	0x00, 0x00, 0x00, 0x00,
335 	0x00, 0x00, 0x00, 0x00,
336 	0x50, 0x02, 0x20, 0x00,
337 	0x00, 0x00, 0x00, 0x00
338 };
339 
/* MAC + IPv4 + NVGRE + inner MAC + inner IPv6 + inner UDP dummy packet */
340 ICE_DECLARE_PKT_OFFSETS(gre_ipv6_udp) = {
341 	{ ICE_MAC_OFOS,		0 },
342 	{ ICE_ETYPE_OL,		12 },
343 	{ ICE_IPV4_OFOS,	14 },
344 	{ ICE_NVGRE,		34 },
345 	{ ICE_MAC_IL,		42 },
346 	{ ICE_ETYPE_IL,		54 },
347 	{ ICE_IPV6_IL,		56 },
348 	{ ICE_UDP_ILOS,		96 },
349 	{ ICE_PROTOCOL_LAST,	0 },
350 };
351 
352 ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_udp) = {
353 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
354 	0x00, 0x00, 0x00, 0x00,
355 	0x00, 0x00, 0x00, 0x00,
356 
357 	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
358 
359 	0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
360 	0x00, 0x00, 0x00, 0x00,
361 	0x00, 0x2F, 0x00, 0x00,	/* protocol 0x2F = GRE (47) */
362 	0x00, 0x00, 0x00, 0x00,
363 	0x00, 0x00, 0x00, 0x00,
364 
365 	0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
366 	0x00, 0x00, 0x00, 0x00,
367 
368 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
369 	0x00, 0x00, 0x00, 0x00,
370 	0x00, 0x00, 0x00, 0x00,
371 
372 	0x86, 0xdd,		/* ICE_ETYPE_IL 54 */
373 
374 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */
375 	0x00, 0x08, 0x11, 0x40,	/* next header 0x11 = UDP */
376 	0x00, 0x00, 0x00, 0x00,
377 	0x00, 0x00, 0x00, 0x00,
378 	0x00, 0x00, 0x00, 0x00,
379 	0x00, 0x00, 0x00, 0x00,
380 	0x00, 0x00, 0x00, 0x00,
381 	0x00, 0x00, 0x00, 0x00,
382 	0x00, 0x00, 0x00, 0x00,
383 	0x00, 0x00, 0x00, 0x00,
384 
385 	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 96 */
386 	0x00, 0x08, 0x00, 0x00,
387 };
388 
/* MAC + IPv4 + UDP tunnel (VXLAN/GENEVE/VXLAN-GPE) + inner MAC +
 * inner IPv6 + inner TCP dummy packet
 */
389 ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_tcp) = {
390 	{ ICE_MAC_OFOS,		0 },
391 	{ ICE_ETYPE_OL,		12 },
392 	{ ICE_IPV4_OFOS,	14 },
393 	{ ICE_UDP_OF,		34 },
394 	{ ICE_VXLAN,		42 },
395 	{ ICE_GENEVE,		42 },
396 	{ ICE_VXLAN_GPE,	42 },
397 	{ ICE_MAC_IL,		50 },
398 	{ ICE_ETYPE_IL,		62 },
399 	{ ICE_IPV6_IL,		64 },
400 	{ ICE_TCP_IL,		104 },
401 	{ ICE_PROTOCOL_LAST,	0 },
402 };
403 
404 ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_tcp) = {
405 	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
406 	0x00, 0x00, 0x00, 0x00,
407 	0x00, 0x00, 0x00, 0x00,
408 
409 	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
410 
411 	0x45, 0x00, 0x00, 0x6e, /* ICE_IPV4_OFOS 14 */
412 	0x00, 0x01, 0x00, 0x00,
413 	0x40, 0x11, 0x00, 0x00,
414 	0x00, 0x00, 0x00, 0x00,
415 	0x00, 0x00, 0x00, 0x00,
416 
417 	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34, dst port 0x12b5 = 4789 (VXLAN) */
418 	0x00, 0x5a, 0x00, 0x00,
419 
420 	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
421 	0x00, 0x00, 0x00, 0x00,
422 
423 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
424 	0x00, 0x00, 0x00, 0x00,
425 	0x00, 0x00, 0x00, 0x00,
426 
427 	0x86, 0xdd,		/* ICE_ETYPE_IL 62 */
428 
429 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */
430 	0x00, 0x08, 0x06, 0x40,	/* next header 0x06 = TCP */
431 	0x00, 0x00, 0x00, 0x00,
432 	0x00, 0x00, 0x00, 0x00,
433 	0x00, 0x00, 0x00, 0x00,
434 	0x00, 0x00, 0x00, 0x00,
435 	0x00, 0x00, 0x00, 0x00,
436 	0x00, 0x00, 0x00, 0x00,
437 	0x00, 0x00, 0x00, 0x00,
438 	0x00, 0x00, 0x00, 0x00,
439 
440 	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 104 */
441 	0x00, 0x00, 0x00, 0x00,
442 	0x00, 0x00, 0x00, 0x00,
443 	0x50, 0x02, 0x20, 0x00,
444 	0x00, 0x00, 0x00, 0x00
445 };
446 
/* MAC + IPv4 + UDP tunnel (VXLAN/GENEVE/VXLAN-GPE) + inner MAC +
 * inner IPv6 + inner UDP dummy packet
 */
447 ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_udp) = {
448 	{ ICE_MAC_OFOS,		0 },
449 	{ ICE_ETYPE_OL,		12 },
450 	{ ICE_IPV4_OFOS,	14 },
451 	{ ICE_UDP_OF,		34 },
452 	{ ICE_VXLAN,		42 },
453 	{ ICE_GENEVE,		42 },
454 	{ ICE_VXLAN_GPE,	42 },
455 	{ ICE_MAC_IL,		50 },
456 	{ ICE_ETYPE_IL,		62 },
457 	{ ICE_IPV6_IL,		64 },
458 	{ ICE_UDP_ILOS,		104 },
459 	{ ICE_PROTOCOL_LAST,	0 },
460 };
461 
462 ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_udp) = {
463 	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
464 	0x00, 0x00, 0x00, 0x00,
465 	0x00, 0x00, 0x00, 0x00,
466 
467 	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
468 
469 	0x45, 0x00, 0x00, 0x62, /* ICE_IPV4_OFOS 14 */
470 	0x00, 0x01, 0x00, 0x00,
471 	0x00, 0x11, 0x00, 0x00,
472 	0x00, 0x00, 0x00, 0x00,
473 	0x00, 0x00, 0x00, 0x00,
474 
475 	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34, dst port 0x12b5 = 4789 (VXLAN) */
476 	0x00, 0x4e, 0x00, 0x00,
477 
478 	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
479 	0x00, 0x00, 0x00, 0x00,
480 
481 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
482 	0x00, 0x00, 0x00, 0x00,
483 	0x00, 0x00, 0x00, 0x00,
484 
485 	0x86, 0xdd,		/* ICE_ETYPE_IL 62 */
486 
487 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */
488 	0x00, 0x08, 0x11, 0x40,	/* next header 0x11 = UDP */
489 	0x00, 0x00, 0x00, 0x00,
490 	0x00, 0x00, 0x00, 0x00,
491 	0x00, 0x00, 0x00, 0x00,
492 	0x00, 0x00, 0x00, 0x00,
493 	0x00, 0x00, 0x00, 0x00,
494 	0x00, 0x00, 0x00, 0x00,
495 	0x00, 0x00, 0x00, 0x00,
496 	0x00, 0x00, 0x00, 0x00,
497 
498 	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 104 */
499 	0x00, 0x08, 0x00, 0x00,
500 };
501 
502 /* offset info for MAC + IPv4 + UDP dummy packet */
503 ICE_DECLARE_PKT_OFFSETS(udp) = {
504 	{ ICE_MAC_OFOS,		0 },
505 	{ ICE_ETYPE_OL,		12 },
506 	{ ICE_IPV4_OFOS,	14 },
507 	{ ICE_UDP_ILOS,		34 },
508 	{ ICE_PROTOCOL_LAST,	0 },
509 };
510 
511 /* Dummy packet for MAC + IPv4 + UDP */
512 ICE_DECLARE_PKT_TEMPLATE(udp) = {
513 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
514 	0x00, 0x00, 0x00, 0x00,
515 	0x00, 0x00, 0x00, 0x00,
516 
517 	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
518 
519 	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
520 	0x00, 0x01, 0x00, 0x00,
521 	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
522 	0x00, 0x00, 0x00, 0x00,
523 	0x00, 0x00, 0x00, 0x00,
524 
525 	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
526 	0x00, 0x08, 0x00, 0x00,
527 
528 	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
529 };
530 
531 /* offset info for MAC + IPv4 + TCP dummy packet */
532 ICE_DECLARE_PKT_OFFSETS(tcp) = {
533 	{ ICE_MAC_OFOS,		0 },
534 	{ ICE_ETYPE_OL,		12 },
535 	{ ICE_IPV4_OFOS,	14 },
536 	{ ICE_TCP_IL,		34 },
537 	{ ICE_PROTOCOL_LAST,	0 },
538 };
539 
540 /* Dummy packet for MAC + IPv4 + TCP */
541 ICE_DECLARE_PKT_TEMPLATE(tcp) = {
542 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
543 	0x00, 0x00, 0x00, 0x00,
544 	0x00, 0x00, 0x00, 0x00,
545 
546 	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
547 
548 	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
549 	0x00, 0x01, 0x00, 0x00,
550 	0x00, 0x06, 0x00, 0x00,	/* protocol 0x06 = TCP */
551 	0x00, 0x00, 0x00, 0x00,
552 	0x00, 0x00, 0x00, 0x00,
553 
554 	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
555 	0x00, 0x00, 0x00, 0x00,
556 	0x00, 0x00, 0x00, 0x00,
557 	0x50, 0x00, 0x00, 0x00,
558 	0x00, 0x00, 0x00, 0x00,
559 
560 	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
561 };
562 
/* offset info / dummy packet for MAC + IPv6 + TCP */
563 ICE_DECLARE_PKT_OFFSETS(tcp_ipv6) = {
564 	{ ICE_MAC_OFOS,		0 },
565 	{ ICE_ETYPE_OL,		12 },
566 	{ ICE_IPV6_OFOS,	14 },
567 	{ ICE_TCP_IL,		54 },
568 	{ ICE_PROTOCOL_LAST,	0 },
569 };
570 
571 ICE_DECLARE_PKT_TEMPLATE(tcp_ipv6) = {
572 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
573 	0x00, 0x00, 0x00, 0x00,
574 	0x00, 0x00, 0x00, 0x00,
575 
576 	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */
577 
578 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
579 	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
580 	0x00, 0x00, 0x00, 0x00,
581 	0x00, 0x00, 0x00, 0x00,
582 	0x00, 0x00, 0x00, 0x00,
583 	0x00, 0x00, 0x00, 0x00,
584 	0x00, 0x00, 0x00, 0x00,
585 	0x00, 0x00, 0x00, 0x00,
586 	0x00, 0x00, 0x00, 0x00,
587 	0x00, 0x00, 0x00, 0x00,
588 
589 	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
590 	0x00, 0x00, 0x00, 0x00,
591 	0x00, 0x00, 0x00, 0x00,
592 	0x50, 0x00, 0x00, 0x00,
593 	0x00, 0x00, 0x00, 0x00,
594 
595 	0x00, 0x00, /* 2 bytes for 4 byte alignment */
596 };
597 
598 /* IPv6 + UDP */
599 ICE_DECLARE_PKT_OFFSETS(udp_ipv6) = {
600 	{ ICE_MAC_OFOS,		0 },
601 	{ ICE_ETYPE_OL,		12 },
602 	{ ICE_IPV6_OFOS,	14 },
603 	{ ICE_UDP_ILOS,		54 },
604 	{ ICE_PROTOCOL_LAST,	0 },
605 };
606 
607 /* IPv6 + UDP dummy packet */
608 ICE_DECLARE_PKT_TEMPLATE(udp_ipv6) = {
609 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
610 	0x00, 0x00, 0x00, 0x00,
611 	0x00, 0x00, 0x00, 0x00,
612 
613 	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */
614 
615 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
616 	0x00, 0x10, 0x11, 0x00, /* Next header UDP */
617 	0x00, 0x00, 0x00, 0x00,
618 	0x00, 0x00, 0x00, 0x00,
619 	0x00, 0x00, 0x00, 0x00,
620 	0x00, 0x00, 0x00, 0x00,
621 	0x00, 0x00, 0x00, 0x00,
622 	0x00, 0x00, 0x00, 0x00,
623 	0x00, 0x00, 0x00, 0x00,
624 	0x00, 0x00, 0x00, 0x00,
625 
626 	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
627 	0x00, 0x10, 0x00, 0x00,
628 
629 	0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
630 	0x00, 0x00, 0x00, 0x00,
631 
632 	0x00, 0x00, /* 2 bytes for 4 byte alignment */
633 };
634 
635 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
636 ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_tcp) = {
637 	{ ICE_MAC_OFOS,		0 },
638 	{ ICE_IPV4_OFOS,	14 },
639 	{ ICE_UDP_OF,		34 },
640 	{ ICE_GTP,		42 },
641 	{ ICE_IPV4_IL,		62 },
642 	{ ICE_TCP_IL,		82 },
643 	{ ICE_PROTOCOL_LAST,	0 },
644 };
645 
646 ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_tcp) = {
647 	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
648 	0x00, 0x00, 0x00, 0x00,
649 	0x00, 0x00, 0x00, 0x00,
650 	0x08, 0x00,
651 
652 	0x45, 0x00, 0x00, 0x58, /* IP 14 */
653 	0x00, 0x00, 0x00, 0x00,
654 	0x00, 0x11, 0x00, 0x00,
655 	0x00, 0x00, 0x00, 0x00,
656 	0x00, 0x00, 0x00, 0x00,
657 
658 	0x00, 0x00, 0x08, 0x68, /* UDP 34, dst port 0x0868 = 2152 (GTP-U) */
659 	0x00, 0x44, 0x00, 0x00,
660 
661 	0x34, 0xff, 0x00, 0x34, /* ICE_GTP Header 42 */
662 	0x00, 0x00, 0x00, 0x00,
663 	0x00, 0x00, 0x00, 0x85,
664 
665 	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
666 	0x00, 0x00, 0x00, 0x00,
667 
668 	0x45, 0x00, 0x00, 0x28, /* IP 62 */
669 	0x00, 0x00, 0x00, 0x00,
670 	0x00, 0x06, 0x00, 0x00,
671 	0x00, 0x00, 0x00, 0x00,
672 	0x00, 0x00, 0x00, 0x00,
673 
674 	0x00, 0x00, 0x00, 0x00, /* TCP 82 */
675 	0x00, 0x00, 0x00, 0x00,
676 	0x00, 0x00, 0x00, 0x00,
677 	0x50, 0x00, 0x00, 0x00,
678 	0x00, 0x00, 0x00, 0x00,
679 
680 	0x00, 0x00, /* 2 bytes for 4 byte alignment */
681 };
682 
683 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
684 ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_udp) = {
685 	{ ICE_MAC_OFOS,		0 },
686 	{ ICE_IPV4_OFOS,	14 },
687 	{ ICE_UDP_OF,		34 },
688 	{ ICE_GTP,		42 },
689 	{ ICE_IPV4_IL,		62 },
690 	{ ICE_UDP_ILOS,		82 },
691 	{ ICE_PROTOCOL_LAST,	0 },
692 };
693 
694 ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_udp) = {
695 	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
696 	0x00, 0x00, 0x00, 0x00,
697 	0x00, 0x00, 0x00, 0x00,
698 	0x08, 0x00,
699 
700 	0x45, 0x00, 0x00, 0x4c, /* IP 14 */
701 	0x00, 0x00, 0x00, 0x00,
702 	0x00, 0x11, 0x00, 0x00,
703 	0x00, 0x00, 0x00, 0x00,
704 	0x00, 0x00, 0x00, 0x00,
705 
706 	0x00, 0x00, 0x08, 0x68, /* UDP 34, dst port 0x0868 = 2152 (GTP-U) */
707 	0x00, 0x38, 0x00, 0x00,
708 
709 	0x34, 0xff, 0x00, 0x28, /* ICE_GTP Header 42 */
710 	0x00, 0x00, 0x00, 0x00,
711 	0x00, 0x00, 0x00, 0x85,
712 
713 	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
714 	0x00, 0x00, 0x00, 0x00,
715 
716 	0x45, 0x00, 0x00, 0x1c, /* IP 62 */
717 	0x00, 0x00, 0x00, 0x00,
718 	0x00, 0x11, 0x00, 0x00,
719 	0x00, 0x00, 0x00, 0x00,
720 	0x00, 0x00, 0x00, 0x00,
721 
722 	0x00, 0x00, 0x00, 0x00, /* UDP 82 */
723 	0x00, 0x08, 0x00, 0x00,
724 
725 	0x00, 0x00, /* 2 bytes for 4 byte alignment */
726 };
727 
728 /* Outer IPv4 + Outer UDP + GTP + Inner IPv6 + Inner TCP */
729 ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_tcp) = {
730 	{ ICE_MAC_OFOS,		0 },
731 	{ ICE_IPV4_OFOS,	14 },
732 	{ ICE_UDP_OF,		34 },
733 	{ ICE_GTP,		42 },
734 	{ ICE_IPV6_IL,		62 },
735 	{ ICE_TCP_IL,		102 },
736 	{ ICE_PROTOCOL_LAST,	0 },
737 };
738 
739 ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_tcp) = {
740 	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
741 	0x00, 0x00, 0x00, 0x00,
742 	0x00, 0x00, 0x00, 0x00,
743 	0x08, 0x00,
744 
745 	0x45, 0x00, 0x00, 0x6c, /* IP 14 */
746 	0x00, 0x00, 0x00, 0x00,
747 	0x00, 0x11, 0x00, 0x00,
748 	0x00, 0x00, 0x00, 0x00,
749 	0x00, 0x00, 0x00, 0x00,
750 
751 	0x00, 0x00, 0x08, 0x68, /* UDP 34, dst port 0x0868 = 2152 (GTP-U) */
752 	0x00, 0x58, 0x00, 0x00,
753 
754 	0x34, 0xff, 0x00, 0x48, /* ICE_GTP Header 42 */
755 	0x00, 0x00, 0x00, 0x00,
756 	0x00, 0x00, 0x00, 0x85,
757 
758 	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
759 	0x00, 0x00, 0x00, 0x00,
760 
761 	0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
762 	0x00, 0x14, 0x06, 0x00,
763 	0x00, 0x00, 0x00, 0x00,
764 	0x00, 0x00, 0x00, 0x00,
765 	0x00, 0x00, 0x00, 0x00,
766 	0x00, 0x00, 0x00, 0x00,
767 	0x00, 0x00, 0x00, 0x00,
768 	0x00, 0x00, 0x00, 0x00,
769 	0x00, 0x00, 0x00, 0x00,
770 	0x00, 0x00, 0x00, 0x00,
771 
772 	0x00, 0x00, 0x00, 0x00, /* TCP 102 */
773 	0x00, 0x00, 0x00, 0x00,
774 	0x00, 0x00, 0x00, 0x00,
775 	0x50, 0x00, 0x00, 0x00,
776 	0x00, 0x00, 0x00, 0x00,
777 
778 	0x00, 0x00, /* 2 bytes for 4 byte alignment */
779 };
780 
/* Outer IPv4 + Outer UDP + GTP + Inner IPv6 + Inner UDP */
781 ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_udp) = {
782 	{ ICE_MAC_OFOS,		0 },
783 	{ ICE_IPV4_OFOS,	14 },
784 	{ ICE_UDP_OF,		34 },
785 	{ ICE_GTP,		42 },
786 	{ ICE_IPV6_IL,		62 },
787 	{ ICE_UDP_ILOS,		102 },
788 	{ ICE_PROTOCOL_LAST,	0 },
789 };
790 
791 ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_udp) = {
792 	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
793 	0x00, 0x00, 0x00, 0x00,
794 	0x00, 0x00, 0x00, 0x00,
795 	0x08, 0x00,
796 
797 	0x45, 0x00, 0x00, 0x60, /* IP 14 */
798 	0x00, 0x00, 0x00, 0x00,
799 	0x00, 0x11, 0x00, 0x00,
800 	0x00, 0x00, 0x00, 0x00,
801 	0x00, 0x00, 0x00, 0x00,
802 
803 	0x00, 0x00, 0x08, 0x68, /* UDP 34, dst port 0x0868 = 2152 (GTP-U) */
804 	0x00, 0x4c, 0x00, 0x00,
805 
806 	0x34, 0xff, 0x00, 0x3c, /* ICE_GTP Header 42 */
807 	0x00, 0x00, 0x00, 0x00,
808 	0x00, 0x00, 0x00, 0x85,
809 
810 	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
811 	0x00, 0x00, 0x00, 0x00,
812 
813 	0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
814 	0x00, 0x08, 0x11, 0x00,
815 	0x00, 0x00, 0x00, 0x00,
816 	0x00, 0x00, 0x00, 0x00,
817 	0x00, 0x00, 0x00, 0x00,
818 	0x00, 0x00, 0x00, 0x00,
819 	0x00, 0x00, 0x00, 0x00,
820 	0x00, 0x00, 0x00, 0x00,
821 	0x00, 0x00, 0x00, 0x00,
822 	0x00, 0x00, 0x00, 0x00,
823 
824 	0x00, 0x00, 0x00, 0x00, /* UDP 102 */
825 	0x00, 0x08, 0x00, 0x00,
826 
827 	0x00, 0x00, /* 2 bytes for 4 byte alignment */
828 };
829 
/* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
830 ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_tcp) = {
831 	{ ICE_MAC_OFOS,		0 },
832 	{ ICE_IPV6_OFOS,	14 },
833 	{ ICE_UDP_OF,		54 },
834 	{ ICE_GTP,		62 },
835 	{ ICE_IPV4_IL,		82 },
836 	{ ICE_TCP_IL,		102 },
837 	{ ICE_PROTOCOL_LAST,	0 },
838 };
839 
840 ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_tcp) = {
841 	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
842 	0x00, 0x00, 0x00, 0x00,
843 	0x00, 0x00, 0x00, 0x00,
844 	0x86, 0xdd,
845 
846 	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
847 	0x00, 0x44, 0x11, 0x00,
848 	0x00, 0x00, 0x00, 0x00,
849 	0x00, 0x00, 0x00, 0x00,
850 	0x00, 0x00, 0x00, 0x00,
851 	0x00, 0x00, 0x00, 0x00,
852 	0x00, 0x00, 0x00, 0x00,
853 	0x00, 0x00, 0x00, 0x00,
854 	0x00, 0x00, 0x00, 0x00,
855 	0x00, 0x00, 0x00, 0x00,
856 
857 	0x00, 0x00, 0x08, 0x68, /* UDP 54, dst port 0x0868 = 2152 (GTP-U) */
858 	0x00, 0x44, 0x00, 0x00,
859 
860 	0x34, 0xff, 0x00, 0x34, /* ICE_GTP Header 62 */
861 	0x00, 0x00, 0x00, 0x00,
862 	0x00, 0x00, 0x00, 0x85,
863 
864 	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
865 	0x00, 0x00, 0x00, 0x00,
866 
867 	0x45, 0x00, 0x00, 0x28, /* IP 82 */
868 	0x00, 0x00, 0x00, 0x00,
869 	0x00, 0x06, 0x00, 0x00,
870 	0x00, 0x00, 0x00, 0x00,
871 	0x00, 0x00, 0x00, 0x00,
872 
873 	0x00, 0x00, 0x00, 0x00, /* TCP 102 */
874 	0x00, 0x00, 0x00, 0x00,
875 	0x00, 0x00, 0x00, 0x00,
876 	0x50, 0x00, 0x00, 0x00,
877 	0x00, 0x00, 0x00, 0x00,
878 
879 	0x00, 0x00, /* 2 bytes for 4 byte alignment */
880 };
881 
/* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
882 ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_udp) = {
883 	{ ICE_MAC_OFOS,		0 },
884 	{ ICE_IPV6_OFOS,	14 },
885 	{ ICE_UDP_OF,		54 },
886 	{ ICE_GTP,		62 },
887 	{ ICE_IPV4_IL,		82 },
888 	{ ICE_UDP_ILOS,		102 },
889 	{ ICE_PROTOCOL_LAST,	0 },
890 };
891 
892 ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_udp) = {
893 	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
894 	0x00, 0x00, 0x00, 0x00,
895 	0x00, 0x00, 0x00, 0x00,
896 	0x86, 0xdd,
897 
898 	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
899 	0x00, 0x38, 0x11, 0x00,
900 	0x00, 0x00, 0x00, 0x00,
901 	0x00, 0x00, 0x00, 0x00,
902 	0x00, 0x00, 0x00, 0x00,
903 	0x00, 0x00, 0x00, 0x00,
904 	0x00, 0x00, 0x00, 0x00,
905 	0x00, 0x00, 0x00, 0x00,
906 	0x00, 0x00, 0x00, 0x00,
907 	0x00, 0x00, 0x00, 0x00,
908 
909 	0x00, 0x00, 0x08, 0x68, /* UDP 54, dst port 0x0868 = 2152 (GTP-U) */
910 	0x00, 0x38, 0x00, 0x00,
911 
912 	0x34, 0xff, 0x00, 0x28, /* ICE_GTP Header 62 */
913 	0x00, 0x00, 0x00, 0x00,
914 	0x00, 0x00, 0x00, 0x85,
915 
916 	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
917 	0x00, 0x00, 0x00, 0x00,
918 
919 	0x45, 0x00, 0x00, 0x1c, /* IP 82 */
920 	0x00, 0x00, 0x00, 0x00,
921 	0x00, 0x11, 0x00, 0x00,
922 	0x00, 0x00, 0x00, 0x00,
923 	0x00, 0x00, 0x00, 0x00,
924 
925 	0x00, 0x00, 0x00, 0x00, /* UDP 102 */
926 	0x00, 0x08, 0x00, 0x00,
927 
928 	0x00, 0x00, /* 2 bytes for 4 byte alignment */
929 };
930 
/* Outer IPv6 + Outer UDP + GTP + Inner IPv6 + Inner TCP */
931 ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_tcp) = {
932 	{ ICE_MAC_OFOS,		0 },
933 	{ ICE_IPV6_OFOS,	14 },
934 	{ ICE_UDP_OF,		54 },
935 	{ ICE_GTP,		62 },
936 	{ ICE_IPV6_IL,		82 },
937 	{ ICE_TCP_IL,		122 },
938 	{ ICE_PROTOCOL_LAST,	0 },
939 };
940 
941 ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_tcp) = {
942 	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
943 	0x00, 0x00, 0x00, 0x00,
944 	0x00, 0x00, 0x00, 0x00,
945 	0x86, 0xdd,
946 
947 	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
948 	0x00, 0x58, 0x11, 0x00,
949 	0x00, 0x00, 0x00, 0x00,
950 	0x00, 0x00, 0x00, 0x00,
951 	0x00, 0x00, 0x00, 0x00,
952 	0x00, 0x00, 0x00, 0x00,
953 	0x00, 0x00, 0x00, 0x00,
954 	0x00, 0x00, 0x00, 0x00,
955 	0x00, 0x00, 0x00, 0x00,
956 	0x00, 0x00, 0x00, 0x00,
957 
958 	0x00, 0x00, 0x08, 0x68, /* UDP 54, dst port 0x0868 = 2152 (GTP-U) */
959 	0x00, 0x58, 0x00, 0x00,
960 
961 	0x34, 0xff, 0x00, 0x48, /* ICE_GTP Header 62 */
962 	0x00, 0x00, 0x00, 0x00,
963 	0x00, 0x00, 0x00, 0x85,
964 
965 	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
966 	0x00, 0x00, 0x00, 0x00,
967 
968 	0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
969 	0x00, 0x14, 0x06, 0x00,
970 	0x00, 0x00, 0x00, 0x00,
971 	0x00, 0x00, 0x00, 0x00,
972 	0x00, 0x00, 0x00, 0x00,
973 	0x00, 0x00, 0x00, 0x00,
974 	0x00, 0x00, 0x00, 0x00,
975 	0x00, 0x00, 0x00, 0x00,
976 	0x00, 0x00, 0x00, 0x00,
977 	0x00, 0x00, 0x00, 0x00,
978 
979 	0x00, 0x00, 0x00, 0x00, /* TCP 122 */
980 	0x00, 0x00, 0x00, 0x00,
981 	0x00, 0x00, 0x00, 0x00,
982 	0x50, 0x00, 0x00, 0x00,
983 	0x00, 0x00, 0x00, 0x00,
984 
985 	0x00, 0x00, /* 2 bytes for 4 byte alignment */
986 };
987 
/* Outer IPv6 + Outer UDP + GTP + Inner IPv6 + Inner UDP */
988 ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_udp) = {
989 	{ ICE_MAC_OFOS,		0 },
990 	{ ICE_IPV6_OFOS,	14 },
991 	{ ICE_UDP_OF,		54 },
992 	{ ICE_GTP,		62 },
993 	{ ICE_IPV6_IL,		82 },
994 	{ ICE_UDP_ILOS,		122 },
995 	{ ICE_PROTOCOL_LAST,	0 },
996 };
997 
998 ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_udp) = {
999 	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
1000 	0x00, 0x00, 0x00, 0x00,
1001 	0x00, 0x00, 0x00, 0x00,
1002 	0x86, 0xdd,
1003 
1004 	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
1005 	0x00, 0x4c, 0x11, 0x00,
1006 	0x00, 0x00, 0x00, 0x00,
1007 	0x00, 0x00, 0x00, 0x00,
1008 	0x00, 0x00, 0x00, 0x00,
1009 	0x00, 0x00, 0x00, 0x00,
1010 	0x00, 0x00, 0x00, 0x00,
1011 	0x00, 0x00, 0x00, 0x00,
1012 	0x00, 0x00, 0x00, 0x00,
1013 	0x00, 0x00, 0x00, 0x00,
1014 
1015 	0x00, 0x00, 0x08, 0x68, /* UDP 54, dst port 0x0868 = 2152 (GTP-U) */
1016 	0x00, 0x4c, 0x00, 0x00,
1017 
1018 	0x34, 0xff, 0x00, 0x3c, /* ICE_GTP Header 62 */
1019 	0x00, 0x00, 0x00, 0x00,
1020 	0x00, 0x00, 0x00, 0x85,
1021 
1022 	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
1023 	0x00, 0x00, 0x00, 0x00,
1024 
1025 	0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
1026 	0x00, 0x08, 0x11, 0x00,
1027 	0x00, 0x00, 0x00, 0x00,
1028 	0x00, 0x00, 0x00, 0x00,
1029 	0x00, 0x00, 0x00, 0x00,
1030 	0x00, 0x00, 0x00, 0x00,
1031 	0x00, 0x00, 0x00, 0x00,
1032 	0x00, 0x00, 0x00, 0x00,
1033 	0x00, 0x00, 0x00, 0x00,
1034 	0x00, 0x00, 0x00, 0x00,
1035 
1036 	0x00, 0x00, 0x00, 0x00, /* UDP 122 */
1037 	0x00, 0x08, 0x00, 0x00,
1038 
1039 	0x00, 0x00, /* 2 bytes for 4 byte alignment */
1040 };
1041 
/* Outer IPv4 + Outer UDP + GTP-U with no inner-payload match
 * (ICE_GTP_NO_PAY); the trailing inner IPv4 header bytes are present in
 * the template but not listed in the offsets table.
 */
1042 ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4) = {
1043 	{ ICE_MAC_OFOS,		0 },
1044 	{ ICE_IPV4_OFOS,	14 },
1045 	{ ICE_UDP_OF,		34 },
1046 	{ ICE_GTP_NO_PAY,	42 },
1047 	{ ICE_PROTOCOL_LAST,	0 },
1048 };
1049 
1050 ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4) = {
1051 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1052 	0x00, 0x00, 0x00, 0x00,
1053 	0x00, 0x00, 0x00, 0x00,
1054 	0x08, 0x00,
1055 
1056 	0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
1057 	0x00, 0x00, 0x40, 0x00,
1058 	0x40, 0x11, 0x00, 0x00,
1059 	0x00, 0x00, 0x00, 0x00,
1060 	0x00, 0x00, 0x00, 0x00,
1061 
1062 	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
1063 	0x00, 0x00, 0x00, 0x00,
1064 
1065 	0x34, 0xff, 0x00, 0x28, /* ICE_GTP_NO_PAY 42 */
1066 	0x00, 0x00, 0x00, 0x00,
1067 	0x00, 0x00, 0x00, 0x85,
1068 
1069 	0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1070 	0x00, 0x00, 0x00, 0x00,
1071 
1072 	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
1073 	0x00, 0x00, 0x40, 0x00,
1074 	0x40, 0x00, 0x00, 0x00,
1075 	0x00, 0x00, 0x00, 0x00,
1076 	0x00, 0x00, 0x00, 0x00,
1077 	0x00, 0x00,
1078 };
1079 
/* Outer IPv6 + Outer UDP + GTP with no inner-payload match (ICE_GTP_NO_PAY) */
1080 ICE_DECLARE_PKT_OFFSETS(ipv6_gtp) = {
1081 	{ ICE_MAC_OFOS,		0 },
1082 	{ ICE_IPV6_OFOS,	14 },
1083 	{ ICE_UDP_OF,		54 },
1084 	{ ICE_GTP_NO_PAY,	62 },
1085 	{ ICE_PROTOCOL_LAST,	0 },
1086 };
1087 
1088 ICE_DECLARE_PKT_TEMPLATE(ipv6_gtp) = {
1089 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1090 	0x00, 0x00, 0x00, 0x00,
1091 	0x00, 0x00, 0x00, 0x00,
1092 	0x86, 0xdd,
1093 
1094 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1095 	0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
1096 	0x00, 0x00, 0x00, 0x00,
1097 	0x00, 0x00, 0x00, 0x00,
1098 	0x00, 0x00, 0x00, 0x00,
1099 	0x00, 0x00, 0x00, 0x00,
1100 	0x00, 0x00, 0x00, 0x00,
1101 	0x00, 0x00, 0x00, 0x00,
1102 	0x00, 0x00, 0x00, 0x00,
1103 	0x00, 0x00, 0x00, 0x00,
1104 
1105 	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
1106 	0x00, 0x00, 0x00, 0x00,
1107 
1108 	0x30, 0x00, 0x00, 0x28, /* ICE_GTP_NO_PAY 62 */
1109 	0x00, 0x00, 0x00, 0x00,
1110 
1111 	0x00, 0x00,
1112 };
1113 
/* MAC + PPPoE session (Ethertype 0x8864) + IPv4 + TCP dummy packet */
1114 ICE_DECLARE_PKT_OFFSETS(pppoe_ipv4_tcp) = {
1115 	{ ICE_MAC_OFOS,		0 },
1116 	{ ICE_ETYPE_OL,		12 },
1117 	{ ICE_PPPOE,		14 },
1118 	{ ICE_IPV4_OFOS,	22 },
1119 	{ ICE_TCP_IL,		42 },
1120 	{ ICE_PROTOCOL_LAST,	0 },
1121 };
1122 
1123 ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv4_tcp) = {
1124 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1125 	0x00, 0x00, 0x00, 0x00,
1126 	0x00, 0x00, 0x00, 0x00,
1127 
1128 	0x88, 0x64,		/* ICE_ETYPE_OL 12 */
1129 
1130 	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
1131 	0x00, 0x16,
1132 
1133 	0x00, 0x21,		/* PPP Link Layer 20, protocol 0x0021 = IPv4 */
1134 
1135 	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 22 */
1136 	0x00, 0x01, 0x00, 0x00,
1137 	0x00, 0x06, 0x00, 0x00,
1138 	0x00, 0x00, 0x00, 0x00,
1139 	0x00, 0x00, 0x00, 0x00,
1140 
1141 	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 42 */
1142 	0x00, 0x00, 0x00, 0x00,
1143 	0x00, 0x00, 0x00, 0x00,
1144 	0x50, 0x00, 0x00, 0x00,
1145 	0x00, 0x00, 0x00, 0x00,
1146 
1147 	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
1148 };
1149 
/* offsets of each header in the PPPoE + IPv4 + UDP dummy packet */
ICE_DECLARE_PKT_OFFSETS(pppoe_ipv4_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_PPPOE,		14 },
	{ ICE_IPV4_OFOS,	22 },
	{ ICE_UDP_ILOS,		42 },
	{ ICE_PROTOCOL_LAST,	0 },
};
1158 
/* dummy packet bytes matching the pppoe_ipv4_udp offsets table */
ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv4_udp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x88, 0x64,		/* ICE_ETYPE_OL 12 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
	0x00, 0x16,

	0x00, 0x21,		/* PPP Link Layer 20 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1182 
/* offsets of each header in the PPPoE + IPv6 + TCP dummy packet */
ICE_DECLARE_PKT_OFFSETS(pppoe_ipv6_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_PPPOE,		14 },
	{ ICE_IPV6_OFOS,	22 },
	{ ICE_TCP_IL,		62 },
	{ ICE_PROTOCOL_LAST,	0 },
};
1191 
/* dummy packet bytes matching the pppoe_ipv6_tcp offsets table */
ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv6_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x88, 0x64,		/* ICE_ETYPE_OL 12 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
	0x00, 0x2a,

	0x00, 0x57,		/* PPP Link Layer 20 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1223 
/* offsets of each header in the PPPoE + IPv6 + UDP dummy packet */
ICE_DECLARE_PKT_OFFSETS(pppoe_ipv6_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_PPPOE,		14 },
	{ ICE_IPV6_OFOS,	22 },
	{ ICE_UDP_ILOS,		62 },
	{ ICE_PROTOCOL_LAST,	0 },
};
1232 
/* dummy packet bytes matching the pppoe_ipv6_udp offsets table */
ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv6_udp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x88, 0x64,		/* ICE_ETYPE_OL 12 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
	0x00, 0x2a,

	0x00, 0x57,		/* PPP Link Layer 20 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
	0x00, 0x08, 0x11, 0x00, /* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1261 
/* offsets of each header in the IPv4 L2TPv3-over-IP dummy packet */
ICE_DECLARE_PKT_OFFSETS(ipv4_l2tpv3) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_L2TPV3,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};
1269 
/* dummy packet bytes matching the ipv4_l2tpv3 offsets table */
ICE_DECLARE_PKT_TEMPLATE(ipv4_l2tpv3) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x73, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1288 
/* offsets of each header in the IPv6 L2TPv3-over-IP dummy packet */
ICE_DECLARE_PKT_OFFSETS(ipv6_l2tpv3) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_L2TPV3,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};
1296 
/* dummy packet bytes matching the ipv6_l2tpv3 offsets table */
ICE_DECLARE_PKT_TEMPLATE(ipv6_l2tpv3) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x0c, 0x73, 0x40,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1320 
/* Table of all known dummy packet profiles, keyed by their ICE_PKT_* flag
 * combinations.
 * NOTE(review): entries appear ordered from most-specific flag set to least
 * specific (bare TCP last) — presumably the lookup code picks the first
 * matching entry; confirm against the profile search logic.
 */
static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
	ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPU | ICE_PKT_OUTER_IPV6 |
				  ICE_PKT_GTP_NOPAY),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6 |
					    ICE_PKT_INNER_IPV6 |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6 |
					    ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6 |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPU | ICE_PKT_GTP_NOPAY),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_INNER_IPV6 |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU),
	ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPC | ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPC),
	ICE_PKT_PROFILE(pppoe_ipv6_udp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6 |
					ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(pppoe_ipv6_tcp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(pppoe_ipv4_udp, ICE_PKT_PPPOE | ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(pppoe_ipv4_tcp, ICE_PKT_PPPOE),
	ICE_PKT_PROFILE(gre_ipv6_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6 |
				      ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(gre_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(gre_ipv6_udp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(gre_udp, ICE_PKT_TUN_NVGRE),
	ICE_PKT_PROFILE(udp_tun_ipv6_tcp, ICE_PKT_TUN_UDP |
					  ICE_PKT_INNER_IPV6 |
					  ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(ipv6_l2tpv3, ICE_PKT_L2TPV3 | ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(ipv4_l2tpv3, ICE_PKT_L2TPV3),
	ICE_PKT_PROFILE(udp_tun_tcp, ICE_PKT_TUN_UDP | ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(udp_tun_ipv6_udp, ICE_PKT_TUN_UDP |
					  ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(udp_tun_udp, ICE_PKT_TUN_UDP),
	ICE_PKT_PROFILE(udp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(udp, ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(tcp_ipv6, ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(tcp, 0),
};
1371 
/* byte size of a switch rule element whose packet header is 'l' bytes long */
#define ICE_SW_RULE_RX_TX_HDR_SIZE(s, l)	struct_size((s), hdr_data, (l))
/* rule element sized for the 16-byte dummy Ethernet header */
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s)	\
	ICE_SW_RULE_RX_TX_HDR_SIZE((s), DUMMY_ETH_HDR_LEN)
/* rule element carrying no packet header at all */
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s)	\
	ICE_SW_RULE_RX_TX_HDR_SIZE((s), 0)
/* byte sizes of large-action / VSI-list elements with 'n' trailing entries */
#define ICE_SW_RULE_LG_ACT_SIZE(s, n)		struct_size((s), act, (n))
#define ICE_SW_RULE_VSI_LIST_SIZE(s, n)		struct_size((s), vsi, (n))

/* this is a recipe to profile association bitmap */
static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES],
			  ICE_MAX_NUM_PROFILES);

/* this is a profile to recipe association bitmap */
static DECLARE_BITMAP(profile_to_recipe[ICE_MAX_NUM_PROFILES],
			  ICE_MAX_NUM_RECIPES);
1387 
1388 /**
1389  * ice_init_def_sw_recp - initialize the recipe book keeping tables
1390  * @hw: pointer to the HW struct
1391  *
1392  * Allocate memory for the entire recipe table and initialize the structures/
1393  * entries corresponding to basic recipes.
1394  */
1395 int ice_init_def_sw_recp(struct ice_hw *hw)
1396 {
1397 	struct ice_sw_recipe *recps;
1398 	u8 i;
1399 
1400 	recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
1401 			     sizeof(*recps), GFP_KERNEL);
1402 	if (!recps)
1403 		return -ENOMEM;
1404 
1405 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
1406 		recps[i].root_rid = i;
1407 		INIT_LIST_HEAD(&recps[i].filt_rules);
1408 		INIT_LIST_HEAD(&recps[i].filt_replay_rules);
1409 		INIT_LIST_HEAD(&recps[i].rg_list);
1410 		mutex_init(&recps[i].filt_rule_lock);
1411 	}
1412 
1413 	hw->switch_info->recp_list = recps;
1414 
1415 	return 0;
1416 }
1417 
1418 /**
1419  * ice_aq_get_sw_cfg - get switch configuration
1420  * @hw: pointer to the hardware structure
1421  * @buf: pointer to the result buffer
1422  * @buf_size: length of the buffer available for response
1423  * @req_desc: pointer to requested descriptor
1424  * @num_elems: pointer to number of elements
1425  * @cd: pointer to command details structure or NULL
1426  *
1427  * Get switch configuration (0x0200) to be placed in buf.
1428  * This admin command returns information such as initial VSI/port number
1429  * and switch ID it belongs to.
1430  *
1431  * NOTE: *req_desc is both an input/output parameter.
 * The caller first calls this function with *req_desc set
1433  * to 0. If the response from f/w has *req_desc set to 0, all the switch
1434  * configuration information has been returned; if non-zero (meaning not all
1435  * the information was returned), the caller should call this function again
1436  * with *req_desc set to the previous value returned by f/w to get the
1437  * next block of switch configuration information.
1438  *
1439  * *num_elems is output only parameter. This reflects the number of elements
1440  * in response buffer. The caller of this function to use *num_elems while
1441  * parsing the response buffer.
1442  */
1443 static int
1444 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
1445 		  u16 buf_size, u16 *req_desc, u16 *num_elems,
1446 		  struct ice_sq_cd *cd)
1447 {
1448 	struct ice_aqc_get_sw_cfg *cmd;
1449 	struct ice_aq_desc desc;
1450 	int status;
1451 
1452 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
1453 	cmd = &desc.params.get_sw_conf;
1454 	cmd->element = cpu_to_le16(*req_desc);
1455 
1456 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1457 	if (!status) {
1458 		*req_desc = le16_to_cpu(cmd->element);
1459 		*num_elems = le16_to_cpu(cmd->num_elems);
1460 	}
1461 
1462 	return status;
1463 }
1464 
1465 /**
1466  * ice_aq_add_vsi
1467  * @hw: pointer to the HW struct
1468  * @vsi_ctx: pointer to a VSI context struct
1469  * @cd: pointer to command details structure or NULL
1470  *
1471  * Add a VSI context to the hardware (0x0210)
1472  */
1473 static int
1474 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1475 	       struct ice_sq_cd *cd)
1476 {
1477 	struct ice_aqc_add_update_free_vsi_resp *res;
1478 	struct ice_aqc_add_get_update_free_vsi *cmd;
1479 	struct ice_aq_desc desc;
1480 	int status;
1481 
1482 	cmd = &desc.params.vsi_cmd;
1483 	res = &desc.params.add_update_free_vsi_res;
1484 
1485 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
1486 
1487 	if (!vsi_ctx->alloc_from_pool)
1488 		cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
1489 					   ICE_AQ_VSI_IS_VALID);
1490 	cmd->vf_id = vsi_ctx->vf_num;
1491 
1492 	cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
1493 
1494 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1495 
1496 	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1497 				 sizeof(vsi_ctx->info), cd);
1498 
1499 	if (!status) {
1500 		vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M;
1501 		vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used);
1502 		vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free);
1503 	}
1504 
1505 	return status;
1506 }
1507 
1508 /**
1509  * ice_aq_free_vsi
1510  * @hw: pointer to the HW struct
1511  * @vsi_ctx: pointer to a VSI context struct
1512  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1513  * @cd: pointer to command details structure or NULL
1514  *
1515  * Free VSI context info from hardware (0x0213)
1516  */
1517 static int
1518 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1519 		bool keep_vsi_alloc, struct ice_sq_cd *cd)
1520 {
1521 	struct ice_aqc_add_update_free_vsi_resp *resp;
1522 	struct ice_aqc_add_get_update_free_vsi *cmd;
1523 	struct ice_aq_desc desc;
1524 	int status;
1525 
1526 	cmd = &desc.params.vsi_cmd;
1527 	resp = &desc.params.add_update_free_vsi_res;
1528 
1529 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
1530 
1531 	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1532 	if (keep_vsi_alloc)
1533 		cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);
1534 
1535 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1536 	if (!status) {
1537 		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
1538 		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
1539 	}
1540 
1541 	return status;
1542 }
1543 
1544 /**
1545  * ice_aq_update_vsi
1546  * @hw: pointer to the HW struct
1547  * @vsi_ctx: pointer to a VSI context struct
1548  * @cd: pointer to command details structure or NULL
1549  *
1550  * Update VSI context in the hardware (0x0211)
1551  */
1552 static int
1553 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1554 		  struct ice_sq_cd *cd)
1555 {
1556 	struct ice_aqc_add_update_free_vsi_resp *resp;
1557 	struct ice_aqc_add_get_update_free_vsi *cmd;
1558 	struct ice_aq_desc desc;
1559 	int status;
1560 
1561 	cmd = &desc.params.vsi_cmd;
1562 	resp = &desc.params.add_update_free_vsi_res;
1563 
1564 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
1565 
1566 	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1567 
1568 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1569 
1570 	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1571 				 sizeof(vsi_ctx->info), cd);
1572 
1573 	if (!status) {
1574 		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
1575 		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
1576 	}
1577 
1578 	return status;
1579 }
1580 
1581 /**
1582  * ice_is_vsi_valid - check whether the VSI is valid or not
1583  * @hw: pointer to the HW struct
1584  * @vsi_handle: VSI handle
1585  *
1586  * check whether the VSI is valid or not
1587  */
1588 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1589 {
1590 	return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1591 }
1592 
1593 /**
1594  * ice_get_hw_vsi_num - return the HW VSI number
1595  * @hw: pointer to the HW struct
1596  * @vsi_handle: VSI handle
1597  *
1598  * return the HW VSI number
1599  * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1600  */
1601 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1602 {
1603 	return hw->vsi_ctx[vsi_handle]->vsi_num;
1604 }
1605 
1606 /**
1607  * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1608  * @hw: pointer to the HW struct
1609  * @vsi_handle: VSI handle
1610  *
1611  * return the VSI context entry for a given VSI handle
1612  */
1613 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1614 {
1615 	return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1616 }
1617 
1618 /**
1619  * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1620  * @hw: pointer to the HW struct
1621  * @vsi_handle: VSI handle
1622  * @vsi: VSI context pointer
1623  *
1624  * save the VSI context entry for a given VSI handle
1625  */
static void
ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
{
	/* the table slot takes ownership of @vsi; the entry is later freed
	 * by ice_clear_vsi_ctx()
	 */
	hw->vsi_ctx[vsi_handle] = vsi;
}
1631 
1632 /**
1633  * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1634  * @hw: pointer to the HW struct
1635  * @vsi_handle: VSI handle
1636  */
1637 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1638 {
1639 	struct ice_vsi_ctx *vsi = ice_get_vsi_ctx(hw, vsi_handle);
1640 	u8 i;
1641 
1642 	if (!vsi)
1643 		return;
1644 	ice_for_each_traffic_class(i) {
1645 		devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
1646 		vsi->lan_q_ctx[i] = NULL;
1647 		devm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]);
1648 		vsi->rdma_q_ctx[i] = NULL;
1649 	}
1650 }
1651 
1652 /**
1653  * ice_clear_vsi_ctx - clear the VSI context entry
1654  * @hw: pointer to the HW struct
1655  * @vsi_handle: VSI handle
1656  *
1657  * clear the VSI context entry
1658  */
1659 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1660 {
1661 	struct ice_vsi_ctx *vsi;
1662 
1663 	vsi = ice_get_vsi_ctx(hw, vsi_handle);
1664 	if (vsi) {
1665 		ice_clear_vsi_q_ctx(hw, vsi_handle);
1666 		devm_kfree(ice_hw_to_dev(hw), vsi);
1667 		hw->vsi_ctx[vsi_handle] = NULL;
1668 	}
1669 }
1670 
1671 /**
1672  * ice_clear_all_vsi_ctx - clear all the VSI context entries
1673  * @hw: pointer to the HW struct
1674  */
1675 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1676 {
1677 	u16 i;
1678 
1679 	for (i = 0; i < ICE_MAX_VSI; i++)
1680 		ice_clear_vsi_ctx(hw, i);
1681 }
1682 
1683 /**
1684  * ice_add_vsi - add VSI context to the hardware and VSI handle list
1685  * @hw: pointer to the HW struct
1686  * @vsi_handle: unique VSI handle provided by drivers
1687  * @vsi_ctx: pointer to a VSI context struct
1688  * @cd: pointer to command details structure or NULL
1689  *
1690  * Add a VSI context to the hardware also add it into the VSI handle list.
1691  * If this function gets called after reset for existing VSIs then update
1692  * with the new HW VSI number in the corresponding VSI handle list entry.
1693  */
1694 int
1695 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1696 	    struct ice_sq_cd *cd)
1697 {
1698 	struct ice_vsi_ctx *tmp_vsi_ctx;
1699 	int status;
1700 
1701 	if (vsi_handle >= ICE_MAX_VSI)
1702 		return -EINVAL;
1703 	status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1704 	if (status)
1705 		return status;
1706 	tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1707 	if (!tmp_vsi_ctx) {
1708 		/* Create a new VSI context */
1709 		tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
1710 					   sizeof(*tmp_vsi_ctx), GFP_KERNEL);
1711 		if (!tmp_vsi_ctx) {
1712 			ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1713 			return -ENOMEM;
1714 		}
1715 		*tmp_vsi_ctx = *vsi_ctx;
1716 		ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1717 	} else {
1718 		/* update with new HW VSI num */
1719 		tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1720 	}
1721 
1722 	return 0;
1723 }
1724 
1725 /**
1726  * ice_free_vsi- free VSI context from hardware and VSI handle list
1727  * @hw: pointer to the HW struct
1728  * @vsi_handle: unique VSI handle
1729  * @vsi_ctx: pointer to a VSI context struct
1730  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1731  * @cd: pointer to command details structure or NULL
1732  *
1733  * Free VSI context info from hardware as well as from VSI handle list
1734  */
1735 int
1736 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1737 	     bool keep_vsi_alloc, struct ice_sq_cd *cd)
1738 {
1739 	int status;
1740 
1741 	if (!ice_is_vsi_valid(hw, vsi_handle))
1742 		return -EINVAL;
1743 	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1744 	status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1745 	if (!status)
1746 		ice_clear_vsi_ctx(hw, vsi_handle);
1747 	return status;
1748 }
1749 
1750 /**
1751  * ice_update_vsi
1752  * @hw: pointer to the HW struct
1753  * @vsi_handle: unique VSI handle
1754  * @vsi_ctx: pointer to a VSI context struct
1755  * @cd: pointer to command details structure or NULL
1756  *
1757  * Update VSI context in the hardware
1758  */
1759 int
1760 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1761 	       struct ice_sq_cd *cd)
1762 {
1763 	if (!ice_is_vsi_valid(hw, vsi_handle))
1764 		return -EINVAL;
1765 	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1766 	return ice_aq_update_vsi(hw, vsi_ctx, cd);
1767 }
1768 
1769 /**
1770  * ice_cfg_rdma_fltr - enable/disable RDMA filtering on VSI
1771  * @hw: pointer to HW struct
1772  * @vsi_handle: VSI SW index
1773  * @enable: boolean for enable/disable
1774  */
1775 int
1776 ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
1777 {
1778 	struct ice_vsi_ctx *ctx, *cached_ctx;
1779 	int status;
1780 
1781 	cached_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1782 	if (!cached_ctx)
1783 		return -ENOENT;
1784 
1785 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1786 	if (!ctx)
1787 		return -ENOMEM;
1788 
1789 	ctx->info.q_opt_rss = cached_ctx->info.q_opt_rss;
1790 	ctx->info.q_opt_tc = cached_ctx->info.q_opt_tc;
1791 	ctx->info.q_opt_flags = cached_ctx->info.q_opt_flags;
1792 
1793 	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
1794 
1795 	if (enable)
1796 		ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
1797 	else
1798 		ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
1799 
1800 	status = ice_update_vsi(hw, vsi_handle, ctx, NULL);
1801 	if (!status) {
1802 		cached_ctx->info.q_opt_flags = ctx->info.q_opt_flags;
1803 		cached_ctx->info.valid_sections |= ctx->info.valid_sections;
1804 	}
1805 
1806 	kfree(ctx);
1807 	return status;
1808 }
1809 
1810 /**
1811  * ice_aq_alloc_free_vsi_list
1812  * @hw: pointer to the HW struct
1813  * @vsi_list_id: VSI list ID returned or used for lookup
1814  * @lkup_type: switch rule filter lookup type
1815  * @opc: switch rules population command type - pass in the command opcode
1816  *
1817  * allocates or free a VSI list resource
1818  */
1819 static int
1820 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1821 			   enum ice_sw_lkup_type lkup_type,
1822 			   enum ice_adminq_opc opc)
1823 {
1824 	struct ice_aqc_alloc_free_res_elem *sw_buf;
1825 	struct ice_aqc_res_elem *vsi_ele;
1826 	u16 buf_len;
1827 	int status;
1828 
1829 	buf_len = struct_size(sw_buf, elem, 1);
1830 	sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
1831 	if (!sw_buf)
1832 		return -ENOMEM;
1833 	sw_buf->num_elems = cpu_to_le16(1);
1834 
1835 	if (lkup_type == ICE_SW_LKUP_MAC ||
1836 	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1837 	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1838 	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1839 	    lkup_type == ICE_SW_LKUP_PROMISC ||
1840 	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1841 	    lkup_type == ICE_SW_LKUP_DFLT) {
1842 		sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1843 	} else if (lkup_type == ICE_SW_LKUP_VLAN) {
1844 		sw_buf->res_type =
1845 			cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1846 	} else {
1847 		status = -EINVAL;
1848 		goto ice_aq_alloc_free_vsi_list_exit;
1849 	}
1850 
1851 	if (opc == ice_aqc_opc_free_res)
1852 		sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);
1853 
1854 	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1855 	if (status)
1856 		goto ice_aq_alloc_free_vsi_list_exit;
1857 
1858 	if (opc == ice_aqc_opc_alloc_res) {
1859 		vsi_ele = &sw_buf->elem[0];
1860 		*vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
1861 	}
1862 
1863 ice_aq_alloc_free_vsi_list_exit:
1864 	devm_kfree(ice_hw_to_dev(hw), sw_buf);
1865 	return status;
1866 }
1867 
1868 /**
1869  * ice_aq_sw_rules - add/update/remove switch rules
1870  * @hw: pointer to the HW struct
1871  * @rule_list: pointer to switch rule population list
1872  * @rule_list_sz: total size of the rule list in bytes
1873  * @num_rules: number of switch rules in the rule_list
1874  * @opc: switch rules population command type - pass in the command opcode
1875  * @cd: pointer to command details structure or NULL
1876  *
1877  * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1878  */
1879 int
1880 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1881 		u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1882 {
1883 	struct ice_aq_desc desc;
1884 	int status;
1885 
1886 	if (opc != ice_aqc_opc_add_sw_rules &&
1887 	    opc != ice_aqc_opc_update_sw_rules &&
1888 	    opc != ice_aqc_opc_remove_sw_rules)
1889 		return -EINVAL;
1890 
1891 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
1892 
1893 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1894 	desc.params.sw_rules.num_rules_fltr_entry_index =
1895 		cpu_to_le16(num_rules);
1896 	status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1897 	if (opc != ice_aqc_opc_add_sw_rules &&
1898 	    hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
1899 		status = -ENOENT;
1900 
1901 	return status;
1902 }
1903 
1904 /**
1905  * ice_aq_add_recipe - add switch recipe
1906  * @hw: pointer to the HW struct
1907  * @s_recipe_list: pointer to switch rule population list
1908  * @num_recipes: number of switch recipes in the list
1909  * @cd: pointer to command details structure or NULL
1910  *
1911  * Add(0x0290)
1912  */
1913 static int
1914 ice_aq_add_recipe(struct ice_hw *hw,
1915 		  struct ice_aqc_recipe_data_elem *s_recipe_list,
1916 		  u16 num_recipes, struct ice_sq_cd *cd)
1917 {
1918 	struct ice_aqc_add_get_recipe *cmd;
1919 	struct ice_aq_desc desc;
1920 	u16 buf_size;
1921 
1922 	cmd = &desc.params.add_get_recipe;
1923 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1924 
1925 	cmd->num_sub_recipes = cpu_to_le16(num_recipes);
1926 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1927 
1928 	buf_size = num_recipes * sizeof(*s_recipe_list);
1929 
1930 	return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1931 }
1932 
1933 /**
1934  * ice_aq_get_recipe - get switch recipe
1935  * @hw: pointer to the HW struct
1936  * @s_recipe_list: pointer to switch rule population list
1937  * @num_recipes: pointer to the number of recipes (input and output)
1938  * @recipe_root: root recipe number of recipe(s) to retrieve
1939  * @cd: pointer to command details structure or NULL
1940  *
1941  * Get(0x0292)
1942  *
1943  * On input, *num_recipes should equal the number of entries in s_recipe_list.
1944  * On output, *num_recipes will equal the number of entries returned in
1945  * s_recipe_list.
1946  *
1947  * The caller must supply enough space in s_recipe_list to hold all possible
1948  * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1949  */
1950 static int
1951 ice_aq_get_recipe(struct ice_hw *hw,
1952 		  struct ice_aqc_recipe_data_elem *s_recipe_list,
1953 		  u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1954 {
1955 	struct ice_aqc_add_get_recipe *cmd;
1956 	struct ice_aq_desc desc;
1957 	u16 buf_size;
1958 	int status;
1959 
1960 	if (*num_recipes != ICE_MAX_NUM_RECIPES)
1961 		return -EINVAL;
1962 
1963 	cmd = &desc.params.add_get_recipe;
1964 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1965 
1966 	cmd->return_index = cpu_to_le16(recipe_root);
1967 	cmd->num_sub_recipes = 0;
1968 
1969 	buf_size = *num_recipes * sizeof(*s_recipe_list);
1970 
1971 	status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1972 	*num_recipes = le16_to_cpu(cmd->num_sub_recipes);
1973 
1974 	return status;
1975 }
1976 
1977 /**
1978  * ice_update_recipe_lkup_idx - update a default recipe based on the lkup_idx
1979  * @hw: pointer to the HW struct
1980  * @params: parameters used to update the default recipe
1981  *
1982  * This function only supports updating default recipes and it only supports
1983  * updating a single recipe based on the lkup_idx at a time.
1984  *
1985  * This is done as a read-modify-write operation. First, get the current recipe
1986  * contents based on the recipe's ID. Then modify the field vector index and
1987  * mask if it's valid at the lkup_idx. Finally, use the add recipe AQ to update
1988  * the pre-existing recipe with the modifications.
1989  */
1990 int
1991 ice_update_recipe_lkup_idx(struct ice_hw *hw,
1992 			   struct ice_update_recipe_lkup_idx_params *params)
1993 {
1994 	struct ice_aqc_recipe_data_elem *rcp_list;
1995 	u16 num_recps = ICE_MAX_NUM_RECIPES;
1996 	int status;
1997 
1998 	rcp_list = kcalloc(num_recps, sizeof(*rcp_list), GFP_KERNEL);
1999 	if (!rcp_list)
2000 		return -ENOMEM;
2001 
2002 	/* read current recipe list from firmware */
2003 	rcp_list->recipe_indx = params->rid;
2004 	status = ice_aq_get_recipe(hw, rcp_list, &num_recps, params->rid, NULL);
2005 	if (status) {
2006 		ice_debug(hw, ICE_DBG_SW, "Failed to get recipe %d, status %d\n",
2007 			  params->rid, status);
2008 		goto error_out;
2009 	}
2010 
2011 	/* only modify existing recipe's lkup_idx and mask if valid, while
2012 	 * leaving all other fields the same, then update the recipe firmware
2013 	 */
2014 	rcp_list->content.lkup_indx[params->lkup_idx] = params->fv_idx;
2015 	if (params->mask_valid)
2016 		rcp_list->content.mask[params->lkup_idx] =
2017 			cpu_to_le16(params->mask);
2018 
2019 	if (params->ignore_valid)
2020 		rcp_list->content.lkup_indx[params->lkup_idx] |=
2021 			ICE_AQ_RECIPE_LKUP_IGNORE;
2022 
2023 	status = ice_aq_add_recipe(hw, &rcp_list[0], 1, NULL);
2024 	if (status)
2025 		ice_debug(hw, ICE_DBG_SW, "Failed to update recipe %d lkup_idx %d fv_idx %d mask %d mask_valid %s, status %d\n",
2026 			  params->rid, params->lkup_idx, params->fv_idx,
2027 			  params->mask, params->mask_valid ? "true" : "false",
2028 			  status);
2029 
2030 error_out:
2031 	kfree(rcp_list);
2032 	return status;
2033 }
2034 
2035 /**
2036  * ice_aq_map_recipe_to_profile - Map recipe to packet profile
2037  * @hw: pointer to the HW struct
2038  * @profile_id: package profile ID to associate the recipe with
2039  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2040  * @cd: pointer to command details structure or NULL
2041  * Recipe to profile association (0x0291)
2042  */
2043 static int
2044 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2045 			     struct ice_sq_cd *cd)
2046 {
2047 	struct ice_aqc_recipe_to_profile *cmd;
2048 	struct ice_aq_desc desc;
2049 
2050 	cmd = &desc.params.recipe_to_profile;
2051 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
2052 	cmd->profile_id = cpu_to_le16(profile_id);
2053 	/* Set the recipe ID bit in the bitmask to let the device know which
2054 	 * profile we are associating the recipe to
2055 	 */
2056 	memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc));
2057 
2058 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2059 }
2060 
2061 /**
2062  * ice_aq_get_recipe_to_profile - Map recipe to packet profile
2063  * @hw: pointer to the HW struct
2064  * @profile_id: package profile ID to associate the recipe with
2065  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2066  * @cd: pointer to command details structure or NULL
2067  * Associate profile ID with given recipe (0x0293)
2068  */
2069 static int
2070 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2071 			     struct ice_sq_cd *cd)
2072 {
2073 	struct ice_aqc_recipe_to_profile *cmd;
2074 	struct ice_aq_desc desc;
2075 	int status;
2076 
2077 	cmd = &desc.params.recipe_to_profile;
2078 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
2079 	cmd->profile_id = cpu_to_le16(profile_id);
2080 
2081 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2082 	if (!status)
2083 		memcpy(r_bitmap, cmd->recipe_assoc, sizeof(cmd->recipe_assoc));
2084 
2085 	return status;
2086 }
2087 
2088 /**
2089  * ice_alloc_recipe - add recipe resource
2090  * @hw: pointer to the hardware structure
2091  * @rid: recipe ID returned as response to AQ call
2092  */
2093 static int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
2094 {
2095 	struct ice_aqc_alloc_free_res_elem *sw_buf;
2096 	u16 buf_len;
2097 	int status;
2098 
2099 	buf_len = struct_size(sw_buf, elem, 1);
2100 	sw_buf = kzalloc(buf_len, GFP_KERNEL);
2101 	if (!sw_buf)
2102 		return -ENOMEM;
2103 
2104 	sw_buf->num_elems = cpu_to_le16(1);
2105 	sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
2106 					ICE_AQC_RES_TYPE_S) |
2107 					ICE_AQC_RES_TYPE_FLAG_SHARED);
2108 	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2109 				       ice_aqc_opc_alloc_res, NULL);
2110 	if (!status)
2111 		*rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp);
2112 	kfree(sw_buf);
2113 
2114 	return status;
2115 }
2116 
2117 /**
2118  * ice_get_recp_to_prof_map - updates recipe to profile mapping
2119  * @hw: pointer to hardware structure
2120  *
2121  * This function is used to populate recipe_to_profile matrix where index to
2122  * this array is the recipe ID and the element is the mapping of which profiles
2123  * is this recipe mapped to.
2124  */
2125 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
2126 {
2127 	DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
2128 	u16 i;
2129 
2130 	for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
2131 		u16 j;
2132 
2133 		bitmap_zero(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
2134 		bitmap_zero(r_bitmap, ICE_MAX_NUM_RECIPES);
2135 		if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
2136 			continue;
2137 		bitmap_copy(profile_to_recipe[i], r_bitmap,
2138 			    ICE_MAX_NUM_RECIPES);
2139 		for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
2140 			set_bit(i, recipe_to_profile[j]);
2141 	}
2142 }
2143 
2144 /**
2145  * ice_collect_result_idx - copy result index values
2146  * @buf: buffer that contains the result index
2147  * @recp: the recipe struct to copy data into
2148  */
2149 static void
2150 ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
2151 		       struct ice_sw_recipe *recp)
2152 {
2153 	if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
2154 		set_bit(buf->content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
2155 			recp->res_idxs);
2156 }
2157 
/**
 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
 * @hw: pointer to hardware structure
 * @recps: struct that we need to populate
 * @rid: recipe ID that we are populating
 * @refresh_required: true if we should get recipe to profile mapping from FW
 *
 * This function is used to populate all the necessary entries into our
 * bookkeeping so that we have a current list of all the recipes that are
 * programmed in the firmware.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, or the non-zero
 * status of ice_aq_get_recipe() when the recipe does not exist in FW.
 */
static int
ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
		    bool *refresh_required)
{
	DECLARE_BITMAP(result_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_data_elem *tmp;
	u16 num_recps = ICE_MAX_NUM_RECIPES;
	struct ice_prot_lkup_ext *lkup_exts;
	u8 fv_word_idx = 0;
	u16 sub_recps;
	int status;

	bitmap_zero(result_bm, ICE_MAX_FV_WORDS);

	/* we need a buffer big enough to accommodate all the recipes */
	tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	/* seed the query with the requested recipe ID; FW returns the
	 * root recipe and any chained sub-recipes in tmp[0..num_recps-1]
	 */
	tmp[0].recipe_indx = rid;
	status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
	/* non-zero status meaning recipe doesn't exist */
	if (status)
		goto err_unroll;

	/* Get recipe to profile map so that we can get the fv from lkups that
	 * we read for a recipe from FW. Since we want to minimize the number of
	 * times we make this FW call, just make one call and cache the copy
	 * until a new recipe is added. This operation is only required the
	 * first time to get the changes from FW. Then to search existing
	 * entries we don't need to update the cache again until another recipe
	 * gets added.
	 */
	if (*refresh_required) {
		ice_get_recp_to_prof_map(hw);
		*refresh_required = false;
	}

	/* Start populating all the entries for recps[rid] based on lkups from
	 * firmware. Note that we are only creating the root recipe in our
	 * database.
	 */
	lkup_exts = &recps[rid].lkup_exts;

	for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
		struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
		struct ice_recp_grp_entry *rg_entry;
		u8 i, prof, idx, prot = 0;
		bool is_root;
		u16 off = 0;

		/* devm-managed: released with the device if not freed when
		 * the recipe is torn down
		 */
		rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry),
					GFP_KERNEL);
		if (!rg_entry) {
			status = -ENOMEM;
			goto err_unroll;
		}

		idx = root_bufs.recipe_indx;
		is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;

		/* Mark all result indices in this chain */
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
			set_bit(root_bufs.content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
				result_bm);

		/* get the first profile that is associated with rid */
		prof = find_first_bit(recipe_to_profile[idx],
				      ICE_MAX_NUM_PROFILES);
		/* NOTE(review): lkup_indx[0]/mask[0] are intentionally
		 * skipped below; only entries 1..ICE_NUM_WORDS_RECIPE are
		 * treated as field-vector words — confirm against AQ spec
		 */
		for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
			u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];

			rg_entry->fv_idx[i] = lkup_indx;
			rg_entry->fv_mask[i] =
				le16_to_cpu(root_bufs.content.mask[i + 1]);

			/* If the recipe is a chained recipe then all its
			 * child recipe's result will have a result index.
			 * To fill fv_words we should not use those result
			 * index, we only need the protocol ids and offsets.
			 * We will skip all the fv_idx which stores result
			 * index in them. We also need to skip any fv_idx which
			 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
			 * valid offset value.
			 */
			if (test_bit(rg_entry->fv_idx[i], hw->switch_info->prof_res_bm[prof]) ||
			    rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
			    rg_entry->fv_idx[i] == 0)
				continue;

			/* translate the FV index into a protocol ID and
			 * offset pair for the root recipe's lookup extension
			 */
			ice_find_prot_off(hw, ICE_BLK_SW, prof,
					  rg_entry->fv_idx[i], &prot, &off);
			lkup_exts->fv_words[fv_word_idx].prot_id = prot;
			lkup_exts->fv_words[fv_word_idx].off = off;
			lkup_exts->field_mask[fv_word_idx] =
				rg_entry->fv_mask[i];
			fv_word_idx++;
		}
		/* populate rg_list with the data from the child entry of this
		 * recipe
		 */
		list_add(&rg_entry->l_entry, &recps[rid].rg_list);

		/* Propagate some data to the recipe database */
		recps[idx].is_root = !!is_root;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
		recps[idx].need_pass_l2 = root_bufs.content.act_ctrl &
					  ICE_AQ_RECIPE_ACT_NEED_PASS_L2;
		recps[idx].allow_pass_l2 = root_bufs.content.act_ctrl &
					   ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2;
		bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
			recps[idx].chain_idx = root_bufs.content.result_indx &
				~ICE_AQ_RECIPE_RESULT_EN;
			set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
		} else {
			recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
		}

		if (!is_root)
			continue;

		/* Only do the following for root recipes entries */
		memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
		       sizeof(recps[idx].r_bitmap));
		recps[idx].root_rid = root_bufs.content.rid &
			~ICE_AQ_RECIPE_ID_IS_ROOT;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
	}

	/* Complete initialization of the root recipe entry */
	lkup_exts->n_val_words = fv_word_idx;
	recps[rid].big_recp = (num_recps > 1);
	recps[rid].n_grp_count = (u8)num_recps;
	/* keep a devm-managed copy of the raw FW recipe data */
	recps[rid].root_buf = devm_kmemdup(ice_hw_to_dev(hw), tmp,
					   recps[rid].n_grp_count * sizeof(*recps[rid].root_buf),
					   GFP_KERNEL);
	if (!recps[rid].root_buf) {
		status = -ENOMEM;
		goto err_unroll;
	}

	/* Copy result indexes */
	bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
	recps[rid].recp_created = true;

err_unroll:
	kfree(tmp);
	return status;
}
2319 
2320 /* ice_init_port_info - Initialize port_info with switch configuration data
2321  * @pi: pointer to port_info
2322  * @vsi_port_num: VSI number or port number
2323  * @type: Type of switch element (port or VSI)
2324  * @swid: switch ID of the switch the element is attached to
2325  * @pf_vf_num: PF or VF number
2326  * @is_vf: true if the element is a VF, false otherwise
2327  */
2328 static void
2329 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
2330 		   u16 swid, u16 pf_vf_num, bool is_vf)
2331 {
2332 	switch (type) {
2333 	case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2334 		pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
2335 		pi->sw_id = swid;
2336 		pi->pf_vf_num = pf_vf_num;
2337 		pi->is_vf = is_vf;
2338 		break;
2339 	default:
2340 		ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
2341 		break;
2342 	}
2343 }
2344 
2345 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
2346  * @hw: pointer to the hardware structure
2347  */
2348 int ice_get_initial_sw_cfg(struct ice_hw *hw)
2349 {
2350 	struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
2351 	u16 req_desc = 0;
2352 	u16 num_elems;
2353 	int status;
2354 	u16 i;
2355 
2356 	rbuf = kzalloc(ICE_SW_CFG_MAX_BUF_LEN, GFP_KERNEL);
2357 	if (!rbuf)
2358 		return -ENOMEM;
2359 
2360 	/* Multiple calls to ice_aq_get_sw_cfg may be required
2361 	 * to get all the switch configuration information. The need
2362 	 * for additional calls is indicated by ice_aq_get_sw_cfg
2363 	 * writing a non-zero value in req_desc
2364 	 */
2365 	do {
2366 		struct ice_aqc_get_sw_cfg_resp_elem *ele;
2367 
2368 		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
2369 					   &req_desc, &num_elems, NULL);
2370 
2371 		if (status)
2372 			break;
2373 
2374 		for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
2375 			u16 pf_vf_num, swid, vsi_port_num;
2376 			bool is_vf = false;
2377 			u8 res_type;
2378 
2379 			vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
2380 				ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
2381 
2382 			pf_vf_num = le16_to_cpu(ele->pf_vf_num) &
2383 				ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
2384 
2385 			swid = le16_to_cpu(ele->swid);
2386 
2387 			if (le16_to_cpu(ele->pf_vf_num) &
2388 			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
2389 				is_vf = true;
2390 
2391 			res_type = (u8)(le16_to_cpu(ele->vsi_port_num) >>
2392 					ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
2393 
2394 			if (res_type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
2395 				/* FW VSI is not needed. Just continue. */
2396 				continue;
2397 			}
2398 
2399 			ice_init_port_info(hw->port_info, vsi_port_num,
2400 					   res_type, swid, pf_vf_num, is_vf);
2401 		}
2402 	} while (req_desc && !status);
2403 
2404 	kfree(rbuf);
2405 	return status;
2406 }
2407 
2408 /**
2409  * ice_fill_sw_info - Helper function to populate lb_en and lan_en
2410  * @hw: pointer to the hardware structure
2411  * @fi: filter info structure to fill/update
2412  *
2413  * This helper function populates the lb_en and lan_en elements of the provided
2414  * ice_fltr_info struct using the switch's type and characteristics of the
2415  * switch rule being configured.
2416  */
2417 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
2418 {
2419 	fi->lb_en = false;
2420 	fi->lan_en = false;
2421 	if ((fi->flag & ICE_FLTR_TX) &&
2422 	    (fi->fltr_act == ICE_FWD_TO_VSI ||
2423 	     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2424 	     fi->fltr_act == ICE_FWD_TO_Q ||
2425 	     fi->fltr_act == ICE_FWD_TO_QGRP)) {
2426 		/* Setting LB for prune actions will result in replicated
2427 		 * packets to the internal switch that will be dropped.
2428 		 */
2429 		if (fi->lkup_type != ICE_SW_LKUP_VLAN)
2430 			fi->lb_en = true;
2431 
2432 		/* Set lan_en to TRUE if
2433 		 * 1. The switch is a VEB AND
2434 		 * 2
2435 		 * 2.1 The lookup is a directional lookup like ethertype,
2436 		 * promiscuous, ethertype-MAC, promiscuous-VLAN
2437 		 * and default-port OR
2438 		 * 2.2 The lookup is VLAN, OR
2439 		 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
2440 		 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
2441 		 *
2442 		 * OR
2443 		 *
2444 		 * The switch is a VEPA.
2445 		 *
2446 		 * In all other cases, the LAN enable has to be set to false.
2447 		 */
2448 		if (hw->evb_veb) {
2449 			if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2450 			    fi->lkup_type == ICE_SW_LKUP_PROMISC ||
2451 			    fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2452 			    fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2453 			    fi->lkup_type == ICE_SW_LKUP_DFLT ||
2454 			    fi->lkup_type == ICE_SW_LKUP_VLAN ||
2455 			    (fi->lkup_type == ICE_SW_LKUP_MAC &&
2456 			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
2457 			    (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
2458 			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)))
2459 				fi->lan_en = true;
2460 		} else {
2461 			fi->lan_en = true;
2462 		}
2463 	}
2464 }
2465 
/**
 * ice_fill_sw_rule - Helper function to fill switch rule structure
 * @hw: pointer to the hardware structure
 * @f_info: entry containing packet forwarding information
 * @s_rule: switch rule structure to be filled in based on mac_entry
 * @opc: switch rules population command type - pass in the command opcode
 *
 * Builds the lookup Rx/Tx rule: action bits from the forwarding action,
 * plus a dummy Ethernet header patched with the DA / ethertype / VLAN
 * fields relevant to the lookup type.
 */
static void
ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
		 struct ice_sw_rule_lkup_rx_tx *s_rule,
		 enum ice_adminq_opc opc)
{
	/* a value above ICE_MAX_VLAN_ID doubles as a "no VLAN" sentinel */
	u16 vlan_id = ICE_MAX_VLAN_ID + 1;
	u16 vlan_tpid = ETH_P_8021Q;
	void *daddr = NULL;
	u16 eth_hdr_sz;
	u8 *eth_hdr;
	u32 act = 0;
	__be16 *off;
	u8 q_rgn;

	/* removal only needs the rule index; no action or header data */
	if (opc == ice_aqc_opc_remove_sw_rules) {
		s_rule->act = 0;
		s_rule->index = cpu_to_le16(f_info->fltr_rule_id);
		s_rule->hdr_len = 0;
		return;
	}

	eth_hdr_sz = sizeof(dummy_eth_header);
	eth_hdr = s_rule->hdr_data;

	/* initialize the ether header with a dummy header */
	memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);
	ice_fill_sw_info(hw, f_info);

	/* translate the forwarding action into single-action bits */
	switch (f_info->fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
			ICE_SINGLE_ACT_VSI_ID_M;
		/* NOTE(review): VLAN lookups skip the forwarding/valid bits
		 * here — presumably because they become prune rules below;
		 * confirm against HW spec
		 */
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_VSI_LIST:
		act |= ICE_SINGLE_ACT_VSI_LIST;
		act |= (f_info->fwd_id.vsi_list_id <<
			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
			ICE_SINGLE_ACT_VSI_LIST_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
			ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_QGRP:
		/* the queue region field encodes log2 of the group size */
		q_rgn = f_info->qgrp_size > 0 ?
			(u8)ilog2(f_info->qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
			ICE_SINGLE_ACT_Q_REGION_M;
		break;
	default:
		/* unknown action: leave the rule untouched */
		return;
	}

	if (f_info->lb_en)
		act |= ICE_SINGLE_ACT_LB_ENABLE;
	if (f_info->lan_en)
		act |= ICE_SINGLE_ACT_LAN_ENABLE;

	/* pick the header fields (DA / VLAN / ethertype) to patch into the
	 * dummy header, based on the lookup type
	 */
	switch (f_info->lkup_type) {
	case ICE_SW_LKUP_MAC:
		daddr = f_info->l_data.mac.mac_addr;
		break;
	case ICE_SW_LKUP_VLAN:
		vlan_id = f_info->l_data.vlan.vlan_id;
		if (f_info->l_data.vlan.tpid_valid)
			vlan_tpid = f_info->l_data.vlan.tpid;
		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			act |= ICE_SINGLE_ACT_PRUNE;
			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
		}
		break;
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		daddr = f_info->l_data.ethertype_mac.mac_addr;
		fallthrough;
	case ICE_SW_LKUP_ETHERTYPE:
		off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		break;
	case ICE_SW_LKUP_PROMISC_VLAN:
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		fallthrough;
	case ICE_SW_LKUP_PROMISC:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		break;
	default:
		break;
	}

	s_rule->hdr.type = (f_info->flag & ICE_FLTR_RX) ?
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);

	/* Recipe set depending on lookup type */
	s_rule->recipe_id = cpu_to_le16(f_info->lkup_type);
	s_rule->src = cpu_to_le16(f_info->src);
	s_rule->act = cpu_to_le32(act);

	if (daddr)
		ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);

	/* program the VLAN TCI; this also writes the TPID at the ethertype
	 * offset (bytes 12-13), matching the dummy-header layout described
	 * at the top of this file
	 */
	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
		off = (__force __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
		*off = cpu_to_be16(vlan_id);
		off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = cpu_to_be16(vlan_tpid);
	}

	/* Create the switch rule with the final dummy Ethernet header */
	if (opc != ice_aqc_opc_update_sw_rules)
		s_rule->hdr_len = cpu_to_le16(eth_hdr_sz);
}
2603 
/**
 * ice_add_marker_act
 * @hw: pointer to the hardware structure
 * @m_ent: the management entry for which sw marker needs to be added
 * @sw_marker: sw marker to tag the Rx descriptor with
 * @l_id: large action resource ID
 *
 * Create a large action to hold software marker and update the switch rule
 * entry pointed by m_ent with newly created large action
 *
 * Only ICE_SW_LKUP_MAC entries are supported; anything else returns
 * -EINVAL.
 */
static int
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		   u16 sw_marker, u16 l_id)
{
	struct ice_sw_rule_lkup_rx_tx *rx_tx;
	struct ice_sw_rule_lg_act *lg_act;
	/* For software marker we need 3 large actions
	 * 1. FWD action: FWD TO VSI or VSI LIST
	 * 2. GENERIC VALUE action to hold the profile ID
	 * 3. GENERIC VALUE action to hold the software marker ID
	 */
	const u16 num_lg_acts = 3;
	u16 lg_act_size;
	u16 rules_size;
	int status;
	u32 act;
	u16 id;

	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return -EINVAL;

	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	 *    1. Large Action
	 *    2. Look up Tx Rx
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(lg_act, num_lg_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(rx_tx);
	lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
	if (!lg_act)
		return -ENOMEM;

	/* the lookup rule sits immediately after the large action in the
	 * same buffer
	 */
	rx_tx = (typeof(rx_tx))((u8 *)lg_act + lg_act_size);

	/* Fill in the first switch rule i.e. large action */
	lg_act->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->index = cpu_to_le16(l_id);
	lg_act->size = cpu_to_le16(num_lg_acts);

	/* First action VSI forwarding or VSI list forwarding depending on how
	 * many VSIs
	 */
	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;

	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->act[0] = cpu_to_le32(act);

	/* Second action descriptor type */
	act = ICE_LG_ACT_GENERIC;

	/* NOTE(review): generic value 1 here — per the comment block above,
	 * this action holds the profile ID; confirm the constant's meaning
	 * against the HW spec
	 */
	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->act[1] = cpu_to_le32(act);

	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;

	/* Third action Marker value */
	act |= ICE_LG_ACT_GENERIC;
	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
		ICE_LG_ACT_GENERIC_VALUE_M;

	lg_act->act[2] = cpu_to_le32(act);

	/* call the fill switch rule to fill the lookup Tx Rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);

	/* Update the action to point to the large action ID */
	rx_tx->act = cpu_to_le32(ICE_SINGLE_ACT_PTR |
				 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
				  ICE_SINGLE_ACT_PTR_VAL_M));

	/* Use the filter rule ID of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 * action
	 */
	rx_tx->index = cpu_to_le16(m_ent->fltr_info.fltr_rule_id);

	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	if (!status) {
		/* record the marker association on success */
		m_ent->lg_act_idx = l_id;
		m_ent->sw_marker_id = sw_marker;
	}

	devm_kfree(ice_hw_to_dev(hw), lg_act);
	return status;
}
2706 
2707 /**
2708  * ice_create_vsi_list_map
2709  * @hw: pointer to the hardware structure
2710  * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2711  * @num_vsi: number of VSI handles in the array
2712  * @vsi_list_id: VSI list ID generated as part of allocate resource
2713  *
2714  * Helper function to create a new entry of VSI list ID to VSI mapping
2715  * using the given VSI list ID
2716  */
2717 static struct ice_vsi_list_map_info *
2718 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2719 			u16 vsi_list_id)
2720 {
2721 	struct ice_switch_info *sw = hw->switch_info;
2722 	struct ice_vsi_list_map_info *v_map;
2723 	int i;
2724 
2725 	v_map = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*v_map), GFP_KERNEL);
2726 	if (!v_map)
2727 		return NULL;
2728 
2729 	v_map->vsi_list_id = vsi_list_id;
2730 	v_map->ref_cnt = 1;
2731 	for (i = 0; i < num_vsi; i++)
2732 		set_bit(vsi_handle_arr[i], v_map->vsi_map);
2733 
2734 	list_add(&v_map->list_entry, &sw->vsi_list_map_head);
2735 	return v_map;
2736 }
2737 
2738 /**
2739  * ice_update_vsi_list_rule
2740  * @hw: pointer to the hardware structure
2741  * @vsi_handle_arr: array of VSI handles to form a VSI list
2742  * @num_vsi: number of VSI handles in the array
2743  * @vsi_list_id: VSI list ID generated as part of allocate resource
2744  * @remove: Boolean value to indicate if this is a remove action
2745  * @opc: switch rules population command type - pass in the command opcode
2746  * @lkup_type: lookup type of the filter
2747  *
2748  * Call AQ command to add a new switch rule or update existing switch rule
2749  * using the given VSI list ID
2750  */
2751 static int
2752 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2753 			 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2754 			 enum ice_sw_lkup_type lkup_type)
2755 {
2756 	struct ice_sw_rule_vsi_list *s_rule;
2757 	u16 s_rule_size;
2758 	u16 rule_type;
2759 	int status;
2760 	int i;
2761 
2762 	if (!num_vsi)
2763 		return -EINVAL;
2764 
2765 	if (lkup_type == ICE_SW_LKUP_MAC ||
2766 	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2767 	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2768 	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2769 	    lkup_type == ICE_SW_LKUP_PROMISC ||
2770 	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2771 	    lkup_type == ICE_SW_LKUP_DFLT)
2772 		rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2773 			ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2774 	else if (lkup_type == ICE_SW_LKUP_VLAN)
2775 		rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2776 			ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2777 	else
2778 		return -EINVAL;
2779 
2780 	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, num_vsi);
2781 	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
2782 	if (!s_rule)
2783 		return -ENOMEM;
2784 	for (i = 0; i < num_vsi; i++) {
2785 		if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2786 			status = -EINVAL;
2787 			goto exit;
2788 		}
2789 		/* AQ call requires hw_vsi_id(s) */
2790 		s_rule->vsi[i] =
2791 			cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2792 	}
2793 
2794 	s_rule->hdr.type = cpu_to_le16(rule_type);
2795 	s_rule->number_vsi = cpu_to_le16(num_vsi);
2796 	s_rule->index = cpu_to_le16(vsi_list_id);
2797 
2798 	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2799 
2800 exit:
2801 	devm_kfree(ice_hw_to_dev(hw), s_rule);
2802 	return status;
2803 }
2804 
2805 /**
2806  * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2807  * @hw: pointer to the HW struct
2808  * @vsi_handle_arr: array of VSI handles to form a VSI list
2809  * @num_vsi: number of VSI handles in the array
2810  * @vsi_list_id: stores the ID of the VSI list to be created
2811  * @lkup_type: switch rule filter's lookup type
2812  */
2813 static int
2814 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2815 			 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2816 {
2817 	int status;
2818 
2819 	status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2820 					    ice_aqc_opc_alloc_res);
2821 	if (status)
2822 		return status;
2823 
2824 	/* Update the newly created VSI list to include the specified VSIs */
2825 	return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2826 					*vsi_list_id, false,
2827 					ice_aqc_opc_add_sw_rules, lkup_type);
2828 }
2829 
2830 /**
2831  * ice_create_pkt_fwd_rule
2832  * @hw: pointer to the hardware structure
2833  * @f_entry: entry containing packet forwarding information
2834  *
2835  * Create switch rule with given filter information and add an entry
2836  * to the corresponding filter management list to track this switch rule
2837  * and VSI mapping
2838  */
2839 static int
2840 ice_create_pkt_fwd_rule(struct ice_hw *hw,
2841 			struct ice_fltr_list_entry *f_entry)
2842 {
2843 	struct ice_fltr_mgmt_list_entry *fm_entry;
2844 	struct ice_sw_rule_lkup_rx_tx *s_rule;
2845 	enum ice_sw_lkup_type l_type;
2846 	struct ice_sw_recipe *recp;
2847 	int status;
2848 
2849 	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
2850 			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule),
2851 			      GFP_KERNEL);
2852 	if (!s_rule)
2853 		return -ENOMEM;
2854 	fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
2855 				GFP_KERNEL);
2856 	if (!fm_entry) {
2857 		status = -ENOMEM;
2858 		goto ice_create_pkt_fwd_rule_exit;
2859 	}
2860 
2861 	fm_entry->fltr_info = f_entry->fltr_info;
2862 
2863 	/* Initialize all the fields for the management entry */
2864 	fm_entry->vsi_count = 1;
2865 	fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2866 	fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2867 	fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2868 
2869 	ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2870 			 ice_aqc_opc_add_sw_rules);
2871 
2872 	status = ice_aq_sw_rules(hw, s_rule,
2873 				 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), 1,
2874 				 ice_aqc_opc_add_sw_rules, NULL);
2875 	if (status) {
2876 		devm_kfree(ice_hw_to_dev(hw), fm_entry);
2877 		goto ice_create_pkt_fwd_rule_exit;
2878 	}
2879 
2880 	f_entry->fltr_info.fltr_rule_id = le16_to_cpu(s_rule->index);
2881 	fm_entry->fltr_info.fltr_rule_id = le16_to_cpu(s_rule->index);
2882 
2883 	/* The book keeping entries will get removed when base driver
2884 	 * calls remove filter AQ command
2885 	 */
2886 	l_type = fm_entry->fltr_info.lkup_type;
2887 	recp = &hw->switch_info->recp_list[l_type];
2888 	list_add(&fm_entry->list_entry, &recp->filt_rules);
2889 
2890 ice_create_pkt_fwd_rule_exit:
2891 	devm_kfree(ice_hw_to_dev(hw), s_rule);
2892 	return status;
2893 }
2894 
2895 /**
2896  * ice_update_pkt_fwd_rule
2897  * @hw: pointer to the hardware structure
2898  * @f_info: filter information for switch rule
2899  *
2900  * Call AQ command to update a previously created switch rule with a
2901  * VSI list ID
2902  */
2903 static int
2904 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2905 {
2906 	struct ice_sw_rule_lkup_rx_tx *s_rule;
2907 	int status;
2908 
2909 	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
2910 			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule),
2911 			      GFP_KERNEL);
2912 	if (!s_rule)
2913 		return -ENOMEM;
2914 
2915 	ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
2916 
2917 	s_rule->index = cpu_to_le16(f_info->fltr_rule_id);
2918 
2919 	/* Update switch rule with new rule set to forward VSI list */
2920 	status = ice_aq_sw_rules(hw, s_rule,
2921 				 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), 1,
2922 				 ice_aqc_opc_update_sw_rules, NULL);
2923 
2924 	devm_kfree(ice_hw_to_dev(hw), s_rule);
2925 	return status;
2926 }
2927 
2928 /**
2929  * ice_update_sw_rule_bridge_mode
2930  * @hw: pointer to the HW struct
2931  *
2932  * Updates unicast switch filter rules based on VEB/VEPA mode
2933  */
2934 int ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2935 {
2936 	struct ice_switch_info *sw = hw->switch_info;
2937 	struct ice_fltr_mgmt_list_entry *fm_entry;
2938 	struct list_head *rule_head;
2939 	struct mutex *rule_lock; /* Lock to protect filter rule list */
2940 	int status = 0;
2941 
2942 	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2943 	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2944 
2945 	mutex_lock(rule_lock);
2946 	list_for_each_entry(fm_entry, rule_head, list_entry) {
2947 		struct ice_fltr_info *fi = &fm_entry->fltr_info;
2948 		u8 *addr = fi->l_data.mac.mac_addr;
2949 
2950 		/* Update unicast Tx rules to reflect the selected
2951 		 * VEB/VEPA mode
2952 		 */
2953 		if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) &&
2954 		    (fi->fltr_act == ICE_FWD_TO_VSI ||
2955 		     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2956 		     fi->fltr_act == ICE_FWD_TO_Q ||
2957 		     fi->fltr_act == ICE_FWD_TO_QGRP)) {
2958 			status = ice_update_pkt_fwd_rule(hw, fi);
2959 			if (status)
2960 				break;
2961 		}
2962 	}
2963 
2964 	mutex_unlock(rule_lock);
2965 
2966 	return status;
2967 }
2968 
/**
 * ice_add_update_vsi_list
 * @hw: pointer to the hardware structure
 * @m_entry: pointer to current filter management list entry
 * @cur_fltr: filter information from the book keeping entry
 * @new_fltr: filter information with the new VSI to be added
 *
 * Call AQ command to add or update previously created VSI list with new VSI.
 *
 * Helper function to do book keeping associated with adding filter information
 * The algorithm to do the book keeping is described below :
 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
 *	if only one VSI has been added till now
 *		Allocate a new VSI list and add two VSIs
 *		to this list using switch rule command
 *		Update the previously created switch rule with the
 *		newly created VSI list ID
 *	if a VSI list was previously created
 *		Add the new VSI to the previously created VSI list set
 *		using the update switch rule command
 */
static int
ice_add_update_vsi_list(struct ice_hw *hw,
			struct ice_fltr_mgmt_list_entry *m_entry,
			struct ice_fltr_info *cur_fltr,
			struct ice_fltr_info *new_fltr)
{
	u16 vsi_list_id = 0;
	int status = 0;

	/* Rules that forward to a queue or queue group cannot be merged
	 * into (or combined with) a VSI list.
	 */
	if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
	     cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
		return -EOPNOTSUPP;

	if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
		return -EOPNOTSUPP;

	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		/* Only one entry existed in the mapping and it was not already
		 * a part of a VSI list. So, create a VSI list with the old and
		 * new VSIs.
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];

		/* A rule already exists with the new VSI being added */
		if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
			return -EEXIST;

		vsi_handle_arr[0] = cur_fltr->vsi_handle;
		vsi_handle_arr[1] = new_fltr->vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id,
						  new_fltr->lkup_type);
		if (status)
			return status;

		tmp_fltr = *new_fltr;
		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		/* Update the previous switch rule of "MAC forward to VSI" to
		 * "MAC fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			return status;

		cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);

		/* NOTE(review): on this -ENOMEM path the VSI list rule
		 * created above is not rolled back - confirm the leak is
		 * acceptable.
		 */
		if (!m_entry->vsi_list_info)
			return -ENOMEM;

		/* If this entry was large action then the large action needs
		 * to be updated to point to FWD to VSI list
		 */
		if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
			status =
			    ice_add_marker_act(hw, m_entry,
					       m_entry->sw_marker_id,
					       m_entry->lg_act_idx);
	} else {
		u16 vsi_handle = new_fltr->vsi_handle;
		enum ice_adminq_opc opcode;

		if (!m_entry->vsi_list_info)
			return -EIO;

		/* A rule already exists with the new VSI being added */
		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
			return 0;

		/* Update the previously created VSI list set with
		 * the new VSI ID passed in
		 */
		vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
		opcode = ice_aqc_opc_update_sw_rules;

		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
						  vsi_list_id, false, opcode,
						  new_fltr->lkup_type);
		/* update VSI list mapping info with new VSI ID */
		if (!status)
			set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
	}
	/* Only count the VSI after the rule/list update succeeded */
	if (!status)
		m_entry->vsi_count++;
	return status;
}
3085 
3086 /**
3087  * ice_find_rule_entry - Search a rule entry
3088  * @hw: pointer to the hardware structure
3089  * @recp_id: lookup type for which the specified rule needs to be searched
3090  * @f_info: rule information
3091  *
3092  * Helper function to search for a given rule entry
3093  * Returns pointer to entry storing the rule if found
3094  */
3095 static struct ice_fltr_mgmt_list_entry *
3096 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
3097 {
3098 	struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
3099 	struct ice_switch_info *sw = hw->switch_info;
3100 	struct list_head *list_head;
3101 
3102 	list_head = &sw->recp_list[recp_id].filt_rules;
3103 	list_for_each_entry(list_itr, list_head, list_entry) {
3104 		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3105 			    sizeof(f_info->l_data)) &&
3106 		    f_info->flag == list_itr->fltr_info.flag) {
3107 			ret = list_itr;
3108 			break;
3109 		}
3110 	}
3111 	return ret;
3112 }
3113 
3114 /**
3115  * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
3116  * @hw: pointer to the hardware structure
3117  * @recp_id: lookup type for which VSI lists needs to be searched
3118  * @vsi_handle: VSI handle to be found in VSI list
3119  * @vsi_list_id: VSI list ID found containing vsi_handle
3120  *
3121  * Helper function to search a VSI list with single entry containing given VSI
3122  * handle element. This can be extended further to search VSI list with more
3123  * than 1 vsi_count. Returns pointer to VSI list entry if found.
3124  */
3125 static struct ice_vsi_list_map_info *
3126 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
3127 			u16 *vsi_list_id)
3128 {
3129 	struct ice_vsi_list_map_info *map_info = NULL;
3130 	struct ice_switch_info *sw = hw->switch_info;
3131 	struct ice_fltr_mgmt_list_entry *list_itr;
3132 	struct list_head *list_head;
3133 
3134 	list_head = &sw->recp_list[recp_id].filt_rules;
3135 	list_for_each_entry(list_itr, list_head, list_entry) {
3136 		if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
3137 			map_info = list_itr->vsi_list_info;
3138 			if (test_bit(vsi_handle, map_info->vsi_map)) {
3139 				*vsi_list_id = map_info->vsi_list_id;
3140 				return map_info;
3141 			}
3142 		}
3143 	}
3144 	return NULL;
3145 }
3146 
3147 /**
3148  * ice_add_rule_internal - add rule for a given lookup type
3149  * @hw: pointer to the hardware structure
3150  * @recp_id: lookup type (recipe ID) for which rule has to be added
3151  * @f_entry: structure containing MAC forwarding information
3152  *
3153  * Adds or updates the rule lists for a given recipe
3154  */
3155 static int
3156 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
3157 		      struct ice_fltr_list_entry *f_entry)
3158 {
3159 	struct ice_switch_info *sw = hw->switch_info;
3160 	struct ice_fltr_info *new_fltr, *cur_fltr;
3161 	struct ice_fltr_mgmt_list_entry *m_entry;
3162 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3163 	int status = 0;
3164 
3165 	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3166 		return -EINVAL;
3167 	f_entry->fltr_info.fwd_id.hw_vsi_id =
3168 		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3169 
3170 	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
3171 
3172 	mutex_lock(rule_lock);
3173 	new_fltr = &f_entry->fltr_info;
3174 	if (new_fltr->flag & ICE_FLTR_RX)
3175 		new_fltr->src = hw->port_info->lport;
3176 	else if (new_fltr->flag & ICE_FLTR_TX)
3177 		new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id;
3178 
3179 	m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
3180 	if (!m_entry) {
3181 		mutex_unlock(rule_lock);
3182 		return ice_create_pkt_fwd_rule(hw, f_entry);
3183 	}
3184 
3185 	cur_fltr = &m_entry->fltr_info;
3186 	status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
3187 	mutex_unlock(rule_lock);
3188 
3189 	return status;
3190 }
3191 
3192 /**
3193  * ice_remove_vsi_list_rule
3194  * @hw: pointer to the hardware structure
3195  * @vsi_list_id: VSI list ID generated as part of allocate resource
3196  * @lkup_type: switch rule filter lookup type
3197  *
3198  * The VSI list should be emptied before this function is called to remove the
3199  * VSI list.
3200  */
3201 static int
3202 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
3203 			 enum ice_sw_lkup_type lkup_type)
3204 {
3205 	struct ice_sw_rule_vsi_list *s_rule;
3206 	u16 s_rule_size;
3207 	int status;
3208 
3209 	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, 0);
3210 	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
3211 	if (!s_rule)
3212 		return -ENOMEM;
3213 
3214 	s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
3215 	s_rule->index = cpu_to_le16(vsi_list_id);
3216 
3217 	/* Free the vsi_list resource that we allocated. It is assumed that the
3218 	 * list is empty at this point.
3219 	 */
3220 	status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
3221 					    ice_aqc_opc_free_res);
3222 
3223 	devm_kfree(ice_hw_to_dev(hw), s_rule);
3224 	return status;
3225 }
3226 
/**
 * ice_rem_update_vsi_list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry for which the VSI list management needs to
 *           be done
 *
 * Removes @vsi_handle from the VSI list used by @fm_list. When only one VSI
 * remains afterwards (for non-VLAN lookups) the rule is converted back to a
 * direct ICE_FWD_TO_VSI rule and the now-unneeded VSI list is freed.
 */
static int
ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			struct ice_fltr_mgmt_list_entry *fm_list)
{
	enum ice_sw_lkup_type lkup_type;
	u16 vsi_list_id;
	int status = 0;

	/* Only rules that forward to a VSI list with at least one member
	 * can be updated here.
	 */
	if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return -EINVAL;

	/* A rule with the VSI being removed does not exist */
	/* NOTE(review): assumes fm_list->vsi_list_info is non-NULL once
	 * fltr_act == ICE_FWD_TO_VSI_LIST - confirm against callers.
	 */
	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
		return -ENOENT;

	lkup_type = fm_list->fltr_info.lkup_type;
	vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
	/* ask FW to drop this VSI from the list (remove flag = true) */
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	fm_list->vsi_count--;
	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);

	if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
		struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;
		u16 rem_vsi_handle;

		/* locate the single VSI still on the list */
		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
						ICE_MAX_VSI);
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return -EIO;

		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		/* convert the rule back to forwarding directly to the
		 * remaining VSI
		 */
		tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr_info.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		tmp_fltr_info.vsi_handle = rem_vsi_handle;
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr_info.fwd_id.hw_vsi_id, status);
			return status;
		}

		fm_list->fltr_info = tmp_fltr_info;
	}

	/* Non-VLAN lists are freed once a single member remains (the rule
	 * was converted to FWD_TO_VSI above); VLAN lists are only freed when
	 * they are completely empty.
	 */
	if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
	    (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
			return status;
		}

		list_del(&vsi_list_info->list_entry);
		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}
3314 
/**
 * ice_remove_rule_internal - Remove a filter rule of a given type
 * @hw: pointer to the hardware structure
 * @recp_id: recipe ID for which the rule needs to removed
 * @f_entry: rule entry containing filter information
 *
 * Removes the VSI in @f_entry from the matching rule's VSI list, and removes
 * the switch rule itself (from both hardware and the bookkeeping list) once
 * no VSI, or no other shared-list reference, still uses it.
 */
static int
ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
			 struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *list_elem;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	bool remove_rule = false;
	u16 vsi_handle;
	int status = 0;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return -EINVAL;
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
	mutex_lock(rule_lock);
	list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
	if (!list_elem) {
		status = -ENOENT;
		goto exit;
	}

	if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
		/* rule forwards directly (VSI/queue): always remove it */
		remove_rule = true;
	} else if (!list_elem->vsi_list_info) {
		/* FWD_TO_VSI_LIST without list info is inconsistent state */
		status = -ENOENT;
		goto exit;
	} else if (list_elem->vsi_list_info->ref_cnt > 1) {
		/* a ref_cnt > 1 indicates that the vsi_list is being
		 * shared by multiple rules. Decrement the ref_cnt and
		 * remove this rule, but do not modify the list, as it
		 * is in-use by other rules.
		 */
		list_elem->vsi_list_info->ref_cnt--;
		remove_rule = true;
	} else {
		/* a ref_cnt of 1 indicates the vsi_list is only used
		 * by one rule. However, the original removal request is only
		 * for a single VSI. Update the vsi_list first, and only
		 * remove the rule if there are no further VSIs in this list.
		 */
		vsi_handle = f_entry->fltr_info.vsi_handle;
		status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
		if (status)
			goto exit;
		/* if VSI count goes to zero after updating the VSI list */
		if (list_elem->vsi_count == 0)
			remove_rule = true;
	}

	if (remove_rule) {
		/* Remove the lookup rule */
		struct ice_sw_rule_lkup_rx_tx *s_rule;

		s_rule = devm_kzalloc(ice_hw_to_dev(hw),
				      ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule),
				      GFP_KERNEL);
		if (!s_rule) {
			status = -ENOMEM;
			goto exit;
		}

		ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
				 ice_aqc_opc_remove_sw_rules);

		status = ice_aq_sw_rules(hw, s_rule,
					 ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule),
					 1, ice_aqc_opc_remove_sw_rules, NULL);

		/* Remove a book keeping from the list */
		devm_kfree(ice_hw_to_dev(hw), s_rule);

		/* keep the bookkeeping entry if the AQ removal failed */
		if (status)
			goto exit;

		list_del(&list_elem->list_entry);
		devm_kfree(ice_hw_to_dev(hw), list_elem);
	}
exit:
	mutex_unlock(rule_lock);
	return status;
}
3405 
3406 /**
3407  * ice_mac_fltr_exist - does this MAC filter exist for given VSI
3408  * @hw: pointer to the hardware structure
3409  * @mac: MAC address to be checked (for MAC filter)
3410  * @vsi_handle: check MAC filter for this VSI
3411  */
3412 bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle)
3413 {
3414 	struct ice_fltr_mgmt_list_entry *entry;
3415 	struct list_head *rule_head;
3416 	struct ice_switch_info *sw;
3417 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3418 	u16 hw_vsi_id;
3419 
3420 	if (!ice_is_vsi_valid(hw, vsi_handle))
3421 		return false;
3422 
3423 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3424 	sw = hw->switch_info;
3425 	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3426 	if (!rule_head)
3427 		return false;
3428 
3429 	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3430 	mutex_lock(rule_lock);
3431 	list_for_each_entry(entry, rule_head, list_entry) {
3432 		struct ice_fltr_info *f_info = &entry->fltr_info;
3433 		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3434 
3435 		if (is_zero_ether_addr(mac_addr))
3436 			continue;
3437 
3438 		if (f_info->flag != ICE_FLTR_TX ||
3439 		    f_info->src_id != ICE_SRC_ID_VSI ||
3440 		    f_info->lkup_type != ICE_SW_LKUP_MAC ||
3441 		    f_info->fltr_act != ICE_FWD_TO_VSI ||
3442 		    hw_vsi_id != f_info->fwd_id.hw_vsi_id)
3443 			continue;
3444 
3445 		if (ether_addr_equal(mac, mac_addr)) {
3446 			mutex_unlock(rule_lock);
3447 			return true;
3448 		}
3449 	}
3450 	mutex_unlock(rule_lock);
3451 	return false;
3452 }
3453 
3454 /**
3455  * ice_vlan_fltr_exist - does this VLAN filter exist for given VSI
3456  * @hw: pointer to the hardware structure
3457  * @vlan_id: VLAN ID
3458  * @vsi_handle: check MAC filter for this VSI
3459  */
3460 bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle)
3461 {
3462 	struct ice_fltr_mgmt_list_entry *entry;
3463 	struct list_head *rule_head;
3464 	struct ice_switch_info *sw;
3465 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3466 	u16 hw_vsi_id;
3467 
3468 	if (vlan_id > ICE_MAX_VLAN_ID)
3469 		return false;
3470 
3471 	if (!ice_is_vsi_valid(hw, vsi_handle))
3472 		return false;
3473 
3474 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3475 	sw = hw->switch_info;
3476 	rule_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
3477 	if (!rule_head)
3478 		return false;
3479 
3480 	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3481 	mutex_lock(rule_lock);
3482 	list_for_each_entry(entry, rule_head, list_entry) {
3483 		struct ice_fltr_info *f_info = &entry->fltr_info;
3484 		u16 entry_vlan_id = f_info->l_data.vlan.vlan_id;
3485 		struct ice_vsi_list_map_info *map_info;
3486 
3487 		if (entry_vlan_id > ICE_MAX_VLAN_ID)
3488 			continue;
3489 
3490 		if (f_info->flag != ICE_FLTR_TX ||
3491 		    f_info->src_id != ICE_SRC_ID_VSI ||
3492 		    f_info->lkup_type != ICE_SW_LKUP_VLAN)
3493 			continue;
3494 
3495 		/* Only allowed filter action are FWD_TO_VSI/_VSI_LIST */
3496 		if (f_info->fltr_act != ICE_FWD_TO_VSI &&
3497 		    f_info->fltr_act != ICE_FWD_TO_VSI_LIST)
3498 			continue;
3499 
3500 		if (f_info->fltr_act == ICE_FWD_TO_VSI) {
3501 			if (hw_vsi_id != f_info->fwd_id.hw_vsi_id)
3502 				continue;
3503 		} else if (f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
3504 			/* If filter_action is FWD_TO_VSI_LIST, make sure
3505 			 * that VSI being checked is part of VSI list
3506 			 */
3507 			if (entry->vsi_count == 1 &&
3508 			    entry->vsi_list_info) {
3509 				map_info = entry->vsi_list_info;
3510 				if (!test_bit(vsi_handle, map_info->vsi_map))
3511 					continue;
3512 			}
3513 		}
3514 
3515 		if (vlan_id == entry_vlan_id) {
3516 			mutex_unlock(rule_lock);
3517 			return true;
3518 		}
3519 	}
3520 	mutex_unlock(rule_lock);
3521 
3522 	return false;
3523 }
3524 
3525 /**
3526  * ice_add_mac - Add a MAC address based filter rule
3527  * @hw: pointer to the hardware structure
3528  * @m_list: list of MAC addresses and forwarding information
3529  */
3530 int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
3531 {
3532 	struct ice_fltr_list_entry *m_list_itr;
3533 	int status = 0;
3534 
3535 	if (!m_list || !hw)
3536 		return -EINVAL;
3537 
3538 	list_for_each_entry(m_list_itr, m_list, list_entry) {
3539 		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3540 		u16 vsi_handle;
3541 		u16 hw_vsi_id;
3542 
3543 		m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3544 		vsi_handle = m_list_itr->fltr_info.vsi_handle;
3545 		if (!ice_is_vsi_valid(hw, vsi_handle))
3546 			return -EINVAL;
3547 		hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3548 		m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3549 		/* update the src in case it is VSI num */
3550 		if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3551 			return -EINVAL;
3552 		m_list_itr->fltr_info.src = hw_vsi_id;
3553 		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3554 		    is_zero_ether_addr(add))
3555 			return -EINVAL;
3556 
3557 		m_list_itr->status = ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
3558 							   m_list_itr);
3559 		if (m_list_itr->status)
3560 			return m_list_itr->status;
3561 	}
3562 
3563 	return status;
3564 }
3565 
3566 /**
3567  * ice_add_vlan_internal - Add one VLAN based filter rule
3568  * @hw: pointer to the hardware structure
3569  * @f_entry: filter entry containing one VLAN information
3570  */
3571 static int
3572 ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
3573 {
3574 	struct ice_switch_info *sw = hw->switch_info;
3575 	struct ice_fltr_mgmt_list_entry *v_list_itr;
3576 	struct ice_fltr_info *new_fltr, *cur_fltr;
3577 	enum ice_sw_lkup_type lkup_type;
3578 	u16 vsi_list_id = 0, vsi_handle;
3579 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3580 	int status = 0;
3581 
3582 	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3583 		return -EINVAL;
3584 
3585 	f_entry->fltr_info.fwd_id.hw_vsi_id =
3586 		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3587 	new_fltr = &f_entry->fltr_info;
3588 
3589 	/* VLAN ID should only be 12 bits */
3590 	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3591 		return -EINVAL;
3592 
3593 	if (new_fltr->src_id != ICE_SRC_ID_VSI)
3594 		return -EINVAL;
3595 
3596 	new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3597 	lkup_type = new_fltr->lkup_type;
3598 	vsi_handle = new_fltr->vsi_handle;
3599 	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3600 	mutex_lock(rule_lock);
3601 	v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
3602 	if (!v_list_itr) {
3603 		struct ice_vsi_list_map_info *map_info = NULL;
3604 
3605 		if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3606 			/* All VLAN pruning rules use a VSI list. Check if
3607 			 * there is already a VSI list containing VSI that we
3608 			 * want to add. If found, use the same vsi_list_id for
3609 			 * this new VLAN rule or else create a new list.
3610 			 */
3611 			map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
3612 							   vsi_handle,
3613 							   &vsi_list_id);
3614 			if (!map_info) {
3615 				status = ice_create_vsi_list_rule(hw,
3616 								  &vsi_handle,
3617 								  1,
3618 								  &vsi_list_id,
3619 								  lkup_type);
3620 				if (status)
3621 					goto exit;
3622 			}
3623 			/* Convert the action to forwarding to a VSI list. */
3624 			new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3625 			new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3626 		}
3627 
3628 		status = ice_create_pkt_fwd_rule(hw, f_entry);
3629 		if (!status) {
3630 			v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
3631 							 new_fltr);
3632 			if (!v_list_itr) {
3633 				status = -ENOENT;
3634 				goto exit;
3635 			}
3636 			/* reuse VSI list for new rule and increment ref_cnt */
3637 			if (map_info) {
3638 				v_list_itr->vsi_list_info = map_info;
3639 				map_info->ref_cnt++;
3640 			} else {
3641 				v_list_itr->vsi_list_info =
3642 					ice_create_vsi_list_map(hw, &vsi_handle,
3643 								1, vsi_list_id);
3644 			}
3645 		}
3646 	} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3647 		/* Update existing VSI list to add new VSI ID only if it used
3648 		 * by one VLAN rule.
3649 		 */
3650 		cur_fltr = &v_list_itr->fltr_info;
3651 		status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3652 						 new_fltr);
3653 	} else {
3654 		/* If VLAN rule exists and VSI list being used by this rule is
3655 		 * referenced by more than 1 VLAN rule. Then create a new VSI
3656 		 * list appending previous VSI with new VSI and update existing
3657 		 * VLAN rule to point to new VSI list ID
3658 		 */
3659 		struct ice_fltr_info tmp_fltr;
3660 		u16 vsi_handle_arr[2];
3661 		u16 cur_handle;
3662 
3663 		/* Current implementation only supports reusing VSI list with
3664 		 * one VSI count. We should never hit below condition
3665 		 */
3666 		if (v_list_itr->vsi_count > 1 &&
3667 		    v_list_itr->vsi_list_info->ref_cnt > 1) {
3668 			ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3669 			status = -EIO;
3670 			goto exit;
3671 		}
3672 
3673 		cur_handle =
3674 			find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3675 				       ICE_MAX_VSI);
3676 
3677 		/* A rule already exists with the new VSI being added */
3678 		if (cur_handle == vsi_handle) {
3679 			status = -EEXIST;
3680 			goto exit;
3681 		}
3682 
3683 		vsi_handle_arr[0] = cur_handle;
3684 		vsi_handle_arr[1] = vsi_handle;
3685 		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3686 						  &vsi_list_id, lkup_type);
3687 		if (status)
3688 			goto exit;
3689 
3690 		tmp_fltr = v_list_itr->fltr_info;
3691 		tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3692 		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3693 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3694 		/* Update the previous switch rule to a new VSI list which
3695 		 * includes current VSI that is requested
3696 		 */
3697 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3698 		if (status)
3699 			goto exit;
3700 
3701 		/* before overriding VSI list map info. decrement ref_cnt of
3702 		 * previous VSI list
3703 		 */
3704 		v_list_itr->vsi_list_info->ref_cnt--;
3705 
3706 		/* now update to newly created list */
3707 		v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3708 		v_list_itr->vsi_list_info =
3709 			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3710 						vsi_list_id);
3711 		v_list_itr->vsi_count++;
3712 	}
3713 
3714 exit:
3715 	mutex_unlock(rule_lock);
3716 	return status;
3717 }
3718 
3719 /**
3720  * ice_add_vlan - Add VLAN based filter rule
3721  * @hw: pointer to the hardware structure
3722  * @v_list: list of VLAN entries and forwarding information
3723  */
3724 int ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
3725 {
3726 	struct ice_fltr_list_entry *v_list_itr;
3727 
3728 	if (!v_list || !hw)
3729 		return -EINVAL;
3730 
3731 	list_for_each_entry(v_list_itr, v_list, list_entry) {
3732 		if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3733 			return -EINVAL;
3734 		v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3735 		v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
3736 		if (v_list_itr->status)
3737 			return v_list_itr->status;
3738 	}
3739 	return 0;
3740 }
3741 
3742 /**
3743  * ice_add_eth_mac - Add ethertype and MAC based filter rule
3744  * @hw: pointer to the hardware structure
3745  * @em_list: list of ether type MAC filter, MAC is optional
3746  *
3747  * This function requires the caller to populate the entries in
3748  * the filter list with the necessary fields (including flags to
3749  * indicate Tx or Rx rules).
3750  */
3751 int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
3752 {
3753 	struct ice_fltr_list_entry *em_list_itr;
3754 
3755 	if (!em_list || !hw)
3756 		return -EINVAL;
3757 
3758 	list_for_each_entry(em_list_itr, em_list, list_entry) {
3759 		enum ice_sw_lkup_type l_type =
3760 			em_list_itr->fltr_info.lkup_type;
3761 
3762 		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3763 		    l_type != ICE_SW_LKUP_ETHERTYPE)
3764 			return -EINVAL;
3765 
3766 		em_list_itr->status = ice_add_rule_internal(hw, l_type,
3767 							    em_list_itr);
3768 		if (em_list_itr->status)
3769 			return em_list_itr->status;
3770 	}
3771 	return 0;
3772 }
3773 
3774 /**
3775  * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
3776  * @hw: pointer to the hardware structure
3777  * @em_list: list of ethertype or ethertype MAC entries
3778  */
3779 int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
3780 {
3781 	struct ice_fltr_list_entry *em_list_itr, *tmp;
3782 
3783 	if (!em_list || !hw)
3784 		return -EINVAL;
3785 
3786 	list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) {
3787 		enum ice_sw_lkup_type l_type =
3788 			em_list_itr->fltr_info.lkup_type;
3789 
3790 		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3791 		    l_type != ICE_SW_LKUP_ETHERTYPE)
3792 			return -EINVAL;
3793 
3794 		em_list_itr->status = ice_remove_rule_internal(hw, l_type,
3795 							       em_list_itr);
3796 		if (em_list_itr->status)
3797 			return em_list_itr->status;
3798 	}
3799 	return 0;
3800 }
3801 
3802 /**
3803  * ice_rem_sw_rule_info
3804  * @hw: pointer to the hardware structure
3805  * @rule_head: pointer to the switch list structure that we want to delete
3806  */
3807 static void
3808 ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
3809 {
3810 	if (!list_empty(rule_head)) {
3811 		struct ice_fltr_mgmt_list_entry *entry;
3812 		struct ice_fltr_mgmt_list_entry *tmp;
3813 
3814 		list_for_each_entry_safe(entry, tmp, rule_head, list_entry) {
3815 			list_del(&entry->list_entry);
3816 			devm_kfree(ice_hw_to_dev(hw), entry);
3817 		}
3818 	}
3819 }
3820 
3821 /**
3822  * ice_rem_adv_rule_info
3823  * @hw: pointer to the hardware structure
3824  * @rule_head: pointer to the switch list structure that we want to delete
3825  */
3826 static void
3827 ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
3828 {
3829 	struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3830 	struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3831 
3832 	if (list_empty(rule_head))
3833 		return;
3834 
3835 	list_for_each_entry_safe(lst_itr, tmp_entry, rule_head, list_entry) {
3836 		list_del(&lst_itr->list_entry);
3837 		devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
3838 		devm_kfree(ice_hw_to_dev(hw), lst_itr);
3839 	}
3840 }
3841 
3842 /**
3843  * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3844  * @pi: pointer to the port_info structure
3845  * @vsi_handle: VSI handle to set as default
3846  * @set: true to add the above mentioned switch rule, false to remove it
3847  * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3848  *
3849  * add filter rule to set/unset given VSI as default VSI for the switch
3850  * (represented by swid)
3851  */
3852 int
3853 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3854 		 u8 direction)
3855 {
3856 	struct ice_fltr_list_entry f_list_entry;
3857 	struct ice_fltr_info f_info;
3858 	struct ice_hw *hw = pi->hw;
3859 	u16 hw_vsi_id;
3860 	int status;
3861 
3862 	if (!ice_is_vsi_valid(hw, vsi_handle))
3863 		return -EINVAL;
3864 
3865 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3866 
3867 	memset(&f_info, 0, sizeof(f_info));
3868 
3869 	f_info.lkup_type = ICE_SW_LKUP_DFLT;
3870 	f_info.flag = direction;
3871 	f_info.fltr_act = ICE_FWD_TO_VSI;
3872 	f_info.fwd_id.hw_vsi_id = hw_vsi_id;
3873 	f_info.vsi_handle = vsi_handle;
3874 
3875 	if (f_info.flag & ICE_FLTR_RX) {
3876 		f_info.src = hw->port_info->lport;
3877 		f_info.src_id = ICE_SRC_ID_LPORT;
3878 	} else if (f_info.flag & ICE_FLTR_TX) {
3879 		f_info.src_id = ICE_SRC_ID_VSI;
3880 		f_info.src = hw_vsi_id;
3881 	}
3882 	f_list_entry.fltr_info = f_info;
3883 
3884 	if (set)
3885 		status = ice_add_rule_internal(hw, ICE_SW_LKUP_DFLT,
3886 					       &f_list_entry);
3887 	else
3888 		status = ice_remove_rule_internal(hw, ICE_SW_LKUP_DFLT,
3889 						  &f_list_entry);
3890 
3891 	return status;
3892 }
3893 
3894 /**
3895  * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3896  * @fm_entry: filter entry to inspect
3897  * @vsi_handle: VSI handle to compare with filter info
3898  */
3899 static bool
3900 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3901 {
3902 	return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3903 		 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3904 		(fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3905 		 fm_entry->vsi_list_info &&
3906 		 (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
3907 }
3908 
3909 /**
3910  * ice_check_if_dflt_vsi - check if VSI is default VSI
3911  * @pi: pointer to the port_info structure
3912  * @vsi_handle: vsi handle to check for in filter list
3913  * @rule_exists: indicates if there are any VSI's in the rule list
3914  *
3915  * checks if the VSI is in a default VSI list, and also indicates
3916  * if the default VSI list is empty
3917  */
3918 bool
3919 ice_check_if_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle,
3920 		      bool *rule_exists)
3921 {
3922 	struct ice_fltr_mgmt_list_entry *fm_entry;
3923 	struct ice_sw_recipe *recp_list;
3924 	struct list_head *rule_head;
3925 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3926 	bool ret = false;
3927 
3928 	recp_list = &pi->hw->switch_info->recp_list[ICE_SW_LKUP_DFLT];
3929 	rule_lock = &recp_list->filt_rule_lock;
3930 	rule_head = &recp_list->filt_rules;
3931 
3932 	mutex_lock(rule_lock);
3933 
3934 	if (rule_exists && !list_empty(rule_head))
3935 		*rule_exists = true;
3936 
3937 	list_for_each_entry(fm_entry, rule_head, list_entry) {
3938 		if (ice_vsi_uses_fltr(fm_entry, vsi_handle)) {
3939 			ret = true;
3940 			break;
3941 		}
3942 	}
3943 
3944 	mutex_unlock(rule_lock);
3945 
3946 	return ret;
3947 }
3948 
3949 /**
3950  * ice_remove_mac - remove a MAC address based filter rule
3951  * @hw: pointer to the hardware structure
3952  * @m_list: list of MAC addresses and forwarding information
3953  *
3954  * This function removes either a MAC filter rule or a specific VSI from a
3955  * VSI list for a multicast MAC address.
3956  *
3957  * Returns -ENOENT if a given entry was not added by ice_add_mac. Caller should
3958  * be aware that this call will only work if all the entries passed into m_list
3959  * were added previously. It will not attempt to do a partial remove of entries
3960  * that were found.
3961  */
3962 int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
3963 {
3964 	struct ice_fltr_list_entry *list_itr, *tmp;
3965 
3966 	if (!m_list)
3967 		return -EINVAL;
3968 
3969 	list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
3970 		enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3971 		u16 vsi_handle;
3972 
3973 		if (l_type != ICE_SW_LKUP_MAC)
3974 			return -EINVAL;
3975 
3976 		vsi_handle = list_itr->fltr_info.vsi_handle;
3977 		if (!ice_is_vsi_valid(hw, vsi_handle))
3978 			return -EINVAL;
3979 
3980 		list_itr->fltr_info.fwd_id.hw_vsi_id =
3981 					ice_get_hw_vsi_num(hw, vsi_handle);
3982 
3983 		list_itr->status = ice_remove_rule_internal(hw,
3984 							    ICE_SW_LKUP_MAC,
3985 							    list_itr);
3986 		if (list_itr->status)
3987 			return list_itr->status;
3988 	}
3989 	return 0;
3990 }
3991 
3992 /**
3993  * ice_remove_vlan - Remove VLAN based filter rule
3994  * @hw: pointer to the hardware structure
3995  * @v_list: list of VLAN entries and forwarding information
3996  */
3997 int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
3998 {
3999 	struct ice_fltr_list_entry *v_list_itr, *tmp;
4000 
4001 	if (!v_list || !hw)
4002 		return -EINVAL;
4003 
4004 	list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
4005 		enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4006 
4007 		if (l_type != ICE_SW_LKUP_VLAN)
4008 			return -EINVAL;
4009 		v_list_itr->status = ice_remove_rule_internal(hw,
4010 							      ICE_SW_LKUP_VLAN,
4011 							      v_list_itr);
4012 		if (v_list_itr->status)
4013 			return v_list_itr->status;
4014 	}
4015 	return 0;
4016 }
4017 
4018 /**
4019  * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
4020  * @hw: pointer to the hardware structure
4021  * @vsi_handle: VSI handle to remove filters from
4022  * @vsi_list_head: pointer to the list to add entry to
4023  * @fi: pointer to fltr_info of filter entry to copy & add
4024  *
4025  * Helper function, used when creating a list of filters to remove from
4026  * a specific VSI. The entry added to vsi_list_head is a COPY of the
4027  * original filter entry, with the exception of fltr_info.fltr_act and
4028  * fltr_info.fwd_id fields. These are set such that later logic can
4029  * extract which VSI to remove the fltr from, and pass on that information.
4030  */
4031 static int
4032 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4033 			       struct list_head *vsi_list_head,
4034 			       struct ice_fltr_info *fi)
4035 {
4036 	struct ice_fltr_list_entry *tmp;
4037 
4038 	/* this memory is freed up in the caller function
4039 	 * once filters for this VSI are removed
4040 	 */
4041 	tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL);
4042 	if (!tmp)
4043 		return -ENOMEM;
4044 
4045 	tmp->fltr_info = *fi;
4046 
4047 	/* Overwrite these fields to indicate which VSI to remove filter from,
4048 	 * so find and remove logic can extract the information from the
4049 	 * list entries. Note that original entries will still have proper
4050 	 * values.
4051 	 */
4052 	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
4053 	tmp->fltr_info.vsi_handle = vsi_handle;
4054 	tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4055 
4056 	list_add(&tmp->list_entry, vsi_list_head);
4057 
4058 	return 0;
4059 }
4060 
4061 /**
4062  * ice_add_to_vsi_fltr_list - Add VSI filters to the list
4063  * @hw: pointer to the hardware structure
4064  * @vsi_handle: VSI handle to remove filters from
4065  * @lkup_list_head: pointer to the list that has certain lookup type filters
4066  * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
4067  *
4068  * Locates all filters in lkup_list_head that are used by the given VSI,
4069  * and adds COPIES of those entries to vsi_list_head (intended to be used
4070  * to remove the listed filters).
4071  * Note that this means all entries in vsi_list_head must be explicitly
4072  * deallocated by the caller when done with list.
4073  */
4074 static int
4075 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4076 			 struct list_head *lkup_list_head,
4077 			 struct list_head *vsi_list_head)
4078 {
4079 	struct ice_fltr_mgmt_list_entry *fm_entry;
4080 	int status = 0;
4081 
4082 	/* check to make sure VSI ID is valid and within boundary */
4083 	if (!ice_is_vsi_valid(hw, vsi_handle))
4084 		return -EINVAL;
4085 
4086 	list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
4087 		if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
4088 			continue;
4089 
4090 		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4091 							vsi_list_head,
4092 							&fm_entry->fltr_info);
4093 		if (status)
4094 			return status;
4095 	}
4096 	return status;
4097 }
4098 
4099 /**
4100  * ice_determine_promisc_mask
4101  * @fi: filter info to parse
4102  *
4103  * Helper function to determine which ICE_PROMISC_ mask corresponds
4104  * to given filter into.
4105  */
4106 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
4107 {
4108 	u16 vid = fi->l_data.mac_vlan.vlan_id;
4109 	u8 *macaddr = fi->l_data.mac.mac_addr;
4110 	bool is_tx_fltr = false;
4111 	u8 promisc_mask = 0;
4112 
4113 	if (fi->flag == ICE_FLTR_TX)
4114 		is_tx_fltr = true;
4115 
4116 	if (is_broadcast_ether_addr(macaddr))
4117 		promisc_mask |= is_tx_fltr ?
4118 			ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
4119 	else if (is_multicast_ether_addr(macaddr))
4120 		promisc_mask |= is_tx_fltr ?
4121 			ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
4122 	else if (is_unicast_ether_addr(macaddr))
4123 		promisc_mask |= is_tx_fltr ?
4124 			ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
4125 	if (vid)
4126 		promisc_mask |= is_tx_fltr ?
4127 			ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
4128 
4129 	return promisc_mask;
4130 }
4131 
4132 /**
4133  * ice_remove_promisc - Remove promisc based filter rules
4134  * @hw: pointer to the hardware structure
4135  * @recp_id: recipe ID for which the rule needs to removed
4136  * @v_list: list of promisc entries
4137  */
4138 static int
4139 ice_remove_promisc(struct ice_hw *hw, u8 recp_id, struct list_head *v_list)
4140 {
4141 	struct ice_fltr_list_entry *v_list_itr, *tmp;
4142 
4143 	list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
4144 		v_list_itr->status =
4145 			ice_remove_rule_internal(hw, recp_id, v_list_itr);
4146 		if (v_list_itr->status)
4147 			return v_list_itr->status;
4148 	}
4149 	return 0;
4150 }
4151 
4152 /**
4153  * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
4154  * @hw: pointer to the hardware structure
4155  * @vsi_handle: VSI handle to clear mode
4156  * @promisc_mask: mask of promiscuous config bits to clear
4157  * @vid: VLAN ID to clear VLAN promiscuous
4158  */
4159 int
4160 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4161 		      u16 vid)
4162 {
4163 	struct ice_switch_info *sw = hw->switch_info;
4164 	struct ice_fltr_list_entry *fm_entry, *tmp;
4165 	struct list_head remove_list_head;
4166 	struct ice_fltr_mgmt_list_entry *itr;
4167 	struct list_head *rule_head;
4168 	struct mutex *rule_lock;	/* Lock to protect filter rule list */
4169 	int status = 0;
4170 	u8 recipe_id;
4171 
4172 	if (!ice_is_vsi_valid(hw, vsi_handle))
4173 		return -EINVAL;
4174 
4175 	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
4176 		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4177 	else
4178 		recipe_id = ICE_SW_LKUP_PROMISC;
4179 
4180 	rule_head = &sw->recp_list[recipe_id].filt_rules;
4181 	rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
4182 
4183 	INIT_LIST_HEAD(&remove_list_head);
4184 
4185 	mutex_lock(rule_lock);
4186 	list_for_each_entry(itr, rule_head, list_entry) {
4187 		struct ice_fltr_info *fltr_info;
4188 		u8 fltr_promisc_mask = 0;
4189 
4190 		if (!ice_vsi_uses_fltr(itr, vsi_handle))
4191 			continue;
4192 		fltr_info = &itr->fltr_info;
4193 
4194 		if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4195 		    vid != fltr_info->l_data.mac_vlan.vlan_id)
4196 			continue;
4197 
4198 		fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4199 
4200 		/* Skip if filter is not completely specified by given mask */
4201 		if (fltr_promisc_mask & ~promisc_mask)
4202 			continue;
4203 
4204 		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4205 							&remove_list_head,
4206 							fltr_info);
4207 		if (status) {
4208 			mutex_unlock(rule_lock);
4209 			goto free_fltr_list;
4210 		}
4211 	}
4212 	mutex_unlock(rule_lock);
4213 
4214 	status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
4215 
4216 free_fltr_list:
4217 	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
4218 		list_del(&fm_entry->list_entry);
4219 		devm_kfree(ice_hw_to_dev(hw), fm_entry);
4220 	}
4221 
4222 	return status;
4223 }
4224 
4225 /**
4226  * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4227  * @hw: pointer to the hardware structure
4228  * @vsi_handle: VSI handle to configure
4229  * @promisc_mask: mask of promiscuous config bits
4230  * @vid: VLAN ID to set VLAN promiscuous
4231  */
4232 int
4233 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
4234 {
4235 	enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4236 	struct ice_fltr_list_entry f_list_entry;
4237 	struct ice_fltr_info new_fltr;
4238 	bool is_tx_fltr;
4239 	int status = 0;
4240 	u16 hw_vsi_id;
4241 	int pkt_type;
4242 	u8 recipe_id;
4243 
4244 	if (!ice_is_vsi_valid(hw, vsi_handle))
4245 		return -EINVAL;
4246 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4247 
4248 	memset(&new_fltr, 0, sizeof(new_fltr));
4249 
4250 	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4251 		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4252 		new_fltr.l_data.mac_vlan.vlan_id = vid;
4253 		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4254 	} else {
4255 		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4256 		recipe_id = ICE_SW_LKUP_PROMISC;
4257 	}
4258 
4259 	/* Separate filters must be set for each direction/packet type
4260 	 * combination, so we will loop over the mask value, store the
4261 	 * individual type, and clear it out in the input mask as it
4262 	 * is found.
4263 	 */
4264 	while (promisc_mask) {
4265 		u8 *mac_addr;
4266 
4267 		pkt_type = 0;
4268 		is_tx_fltr = false;
4269 
4270 		if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4271 			promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4272 			pkt_type = UCAST_FLTR;
4273 		} else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4274 			promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4275 			pkt_type = UCAST_FLTR;
4276 			is_tx_fltr = true;
4277 		} else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4278 			promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4279 			pkt_type = MCAST_FLTR;
4280 		} else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4281 			promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4282 			pkt_type = MCAST_FLTR;
4283 			is_tx_fltr = true;
4284 		} else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4285 			promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4286 			pkt_type = BCAST_FLTR;
4287 		} else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4288 			promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4289 			pkt_type = BCAST_FLTR;
4290 			is_tx_fltr = true;
4291 		}
4292 
4293 		/* Check for VLAN promiscuous flag */
4294 		if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4295 			promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4296 		} else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4297 			promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4298 			is_tx_fltr = true;
4299 		}
4300 
4301 		/* Set filter DA based on packet type */
4302 		mac_addr = new_fltr.l_data.mac.mac_addr;
4303 		if (pkt_type == BCAST_FLTR) {
4304 			eth_broadcast_addr(mac_addr);
4305 		} else if (pkt_type == MCAST_FLTR ||
4306 			   pkt_type == UCAST_FLTR) {
4307 			/* Use the dummy ether header DA */
4308 			ether_addr_copy(mac_addr, dummy_eth_header);
4309 			if (pkt_type == MCAST_FLTR)
4310 				mac_addr[0] |= 0x1;	/* Set multicast bit */
4311 		}
4312 
4313 		/* Need to reset this to zero for all iterations */
4314 		new_fltr.flag = 0;
4315 		if (is_tx_fltr) {
4316 			new_fltr.flag |= ICE_FLTR_TX;
4317 			new_fltr.src = hw_vsi_id;
4318 		} else {
4319 			new_fltr.flag |= ICE_FLTR_RX;
4320 			new_fltr.src = hw->port_info->lport;
4321 		}
4322 
4323 		new_fltr.fltr_act = ICE_FWD_TO_VSI;
4324 		new_fltr.vsi_handle = vsi_handle;
4325 		new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4326 		f_list_entry.fltr_info = new_fltr;
4327 
4328 		status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
4329 		if (status)
4330 			goto set_promisc_exit;
4331 	}
4332 
4333 set_promisc_exit:
4334 	return status;
4335 }
4336 
4337 /**
4338  * ice_set_vlan_vsi_promisc
4339  * @hw: pointer to the hardware structure
4340  * @vsi_handle: VSI handle to configure
4341  * @promisc_mask: mask of promiscuous config bits
4342  * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4343  *
4344  * Configure VSI with all associated VLANs to given promiscuous mode(s)
4345  */
4346 int
4347 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4348 			 bool rm_vlan_promisc)
4349 {
4350 	struct ice_switch_info *sw = hw->switch_info;
4351 	struct ice_fltr_list_entry *list_itr, *tmp;
4352 	struct list_head vsi_list_head;
4353 	struct list_head *vlan_head;
4354 	struct mutex *vlan_lock; /* Lock to protect filter rule list */
4355 	u16 vlan_id;
4356 	int status;
4357 
4358 	INIT_LIST_HEAD(&vsi_list_head);
4359 	vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4360 	vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
4361 	mutex_lock(vlan_lock);
4362 	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4363 					  &vsi_list_head);
4364 	mutex_unlock(vlan_lock);
4365 	if (status)
4366 		goto free_fltr_list;
4367 
4368 	list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
4369 		/* Avoid enabling or disabling VLAN zero twice when in double
4370 		 * VLAN mode
4371 		 */
4372 		if (ice_is_dvm_ena(hw) &&
4373 		    list_itr->fltr_info.l_data.vlan.tpid == 0)
4374 			continue;
4375 
4376 		vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4377 		if (rm_vlan_promisc)
4378 			status = ice_clear_vsi_promisc(hw, vsi_handle,
4379 						       promisc_mask, vlan_id);
4380 		else
4381 			status = ice_set_vsi_promisc(hw, vsi_handle,
4382 						     promisc_mask, vlan_id);
4383 		if (status && status != -EEXIST)
4384 			break;
4385 	}
4386 
4387 free_fltr_list:
4388 	list_for_each_entry_safe(list_itr, tmp, &vsi_list_head, list_entry) {
4389 		list_del(&list_itr->list_entry);
4390 		devm_kfree(ice_hw_to_dev(hw), list_itr);
4391 	}
4392 	return status;
4393 }
4394 
4395 /**
4396  * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4397  * @hw: pointer to the hardware structure
4398  * @vsi_handle: VSI handle to remove filters from
4399  * @lkup: switch rule filter lookup type
4400  */
4401 static void
4402 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4403 			 enum ice_sw_lkup_type lkup)
4404 {
4405 	struct ice_switch_info *sw = hw->switch_info;
4406 	struct ice_fltr_list_entry *fm_entry;
4407 	struct list_head remove_list_head;
4408 	struct list_head *rule_head;
4409 	struct ice_fltr_list_entry *tmp;
4410 	struct mutex *rule_lock;	/* Lock to protect filter rule list */
4411 	int status;
4412 
4413 	INIT_LIST_HEAD(&remove_list_head);
4414 	rule_lock = &sw->recp_list[lkup].filt_rule_lock;
4415 	rule_head = &sw->recp_list[lkup].filt_rules;
4416 	mutex_lock(rule_lock);
4417 	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4418 					  &remove_list_head);
4419 	mutex_unlock(rule_lock);
4420 	if (status)
4421 		goto free_fltr_list;
4422 
4423 	switch (lkup) {
4424 	case ICE_SW_LKUP_MAC:
4425 		ice_remove_mac(hw, &remove_list_head);
4426 		break;
4427 	case ICE_SW_LKUP_VLAN:
4428 		ice_remove_vlan(hw, &remove_list_head);
4429 		break;
4430 	case ICE_SW_LKUP_PROMISC:
4431 	case ICE_SW_LKUP_PROMISC_VLAN:
4432 		ice_remove_promisc(hw, lkup, &remove_list_head);
4433 		break;
4434 	case ICE_SW_LKUP_MAC_VLAN:
4435 	case ICE_SW_LKUP_ETHERTYPE:
4436 	case ICE_SW_LKUP_ETHERTYPE_MAC:
4437 	case ICE_SW_LKUP_DFLT:
4438 	case ICE_SW_LKUP_LAST:
4439 	default:
4440 		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
4441 		break;
4442 	}
4443 
4444 free_fltr_list:
4445 	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
4446 		list_del(&fm_entry->list_entry);
4447 		devm_kfree(ice_hw_to_dev(hw), fm_entry);
4448 	}
4449 }
4450 
4451 /**
4452  * ice_remove_vsi_fltr - Remove all filters for a VSI
4453  * @hw: pointer to the hardware structure
4454  * @vsi_handle: VSI handle to remove filters from
4455  */
4456 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4457 {
4458 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
4459 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
4460 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
4461 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
4462 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
4463 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
4464 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
4465 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
4466 }
4467 
4468 /**
4469  * ice_alloc_res_cntr - allocating resource counter
4470  * @hw: pointer to the hardware structure
4471  * @type: type of resource
4472  * @alloc_shared: if set it is shared else dedicated
4473  * @num_items: number of entries requested for FD resource type
4474  * @counter_id: counter index returned by AQ call
4475  */
4476 int
4477 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4478 		   u16 *counter_id)
4479 {
4480 	struct ice_aqc_alloc_free_res_elem *buf;
4481 	u16 buf_len;
4482 	int status;
4483 
4484 	/* Allocate resource */
4485 	buf_len = struct_size(buf, elem, 1);
4486 	buf = kzalloc(buf_len, GFP_KERNEL);
4487 	if (!buf)
4488 		return -ENOMEM;
4489 
4490 	buf->num_elems = cpu_to_le16(num_items);
4491 	buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
4492 				      ICE_AQC_RES_TYPE_M) | alloc_shared);
4493 
4494 	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4495 				       ice_aqc_opc_alloc_res, NULL);
4496 	if (status)
4497 		goto exit;
4498 
4499 	*counter_id = le16_to_cpu(buf->elem[0].e.sw_resp);
4500 
4501 exit:
4502 	kfree(buf);
4503 	return status;
4504 }
4505 
4506 /**
4507  * ice_free_res_cntr - free resource counter
4508  * @hw: pointer to the hardware structure
4509  * @type: type of resource
4510  * @alloc_shared: if set it is shared else dedicated
4511  * @num_items: number of entries to be freed for FD resource type
4512  * @counter_id: counter ID resource which needs to be freed
4513  */
4514 int
4515 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4516 		  u16 counter_id)
4517 {
4518 	struct ice_aqc_alloc_free_res_elem *buf;
4519 	u16 buf_len;
4520 	int status;
4521 
4522 	/* Free resource */
4523 	buf_len = struct_size(buf, elem, 1);
4524 	buf = kzalloc(buf_len, GFP_KERNEL);
4525 	if (!buf)
4526 		return -ENOMEM;
4527 
4528 	buf->num_elems = cpu_to_le16(num_items);
4529 	buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
4530 				      ICE_AQC_RES_TYPE_M) | alloc_shared);
4531 	buf->elem[0].e.sw_resp = cpu_to_le16(counter_id);
4532 
4533 	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4534 				       ice_aqc_opc_free_res, NULL);
4535 	if (status)
4536 		ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
4537 
4538 	kfree(buf);
4539 	return status;
4540 }
4541 
/* Build one extraction-table entry: protocol type plus the byte offsets of
 * each 16-bit word within that protocol's header.
 */
#define ICE_PROTOCOL_ENTRY(id, ...) {		\
	.prot_type	= id,			\
	.offs		= {__VA_ARGS__},	\
}

/* This is mapping table entry that maps every word within a given protocol
 * structure to the real byte offset as per the specification of that
 * protocol header.
 * for example dst address is 3 words in ethertype header and corresponding
 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
 * matching entry describing its field. This needs to be updated if new
 * structure is added to that union.
 */
static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
	ICE_PROTOCOL_ENTRY(ICE_MAC_OFOS, 0, 2, 4, 6, 8, 10, 12),
	ICE_PROTOCOL_ENTRY(ICE_MAC_IL, 0, 2, 4, 6, 8, 10, 12),
	ICE_PROTOCOL_ENTRY(ICE_ETYPE_OL, 0),
	ICE_PROTOCOL_ENTRY(ICE_ETYPE_IL, 0),
	ICE_PROTOCOL_ENTRY(ICE_VLAN_OFOS, 2, 0),
	ICE_PROTOCOL_ENTRY(ICE_IPV4_OFOS, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18),
	ICE_PROTOCOL_ENTRY(ICE_IPV4_IL,	0, 2, 4, 6, 8, 10, 12, 14, 16, 18),
	ICE_PROTOCOL_ENTRY(ICE_IPV6_OFOS, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18,
			   20, 22, 24, 26, 28, 30, 32, 34, 36, 38),
	ICE_PROTOCOL_ENTRY(ICE_IPV6_IL, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20,
			   22, 24, 26, 28, 30, 32, 34, 36, 38),
	ICE_PROTOCOL_ENTRY(ICE_TCP_IL, 0, 2),
	ICE_PROTOCOL_ENTRY(ICE_UDP_OF, 0, 2),
	ICE_PROTOCOL_ENTRY(ICE_UDP_ILOS, 0, 2),
	ICE_PROTOCOL_ENTRY(ICE_VXLAN, 8, 10, 12, 14),
	ICE_PROTOCOL_ENTRY(ICE_GENEVE, 8, 10, 12, 14),
	ICE_PROTOCOL_ENTRY(ICE_NVGRE, 0, 2, 4, 6),
	ICE_PROTOCOL_ENTRY(ICE_GTP, 8, 10, 12, 14, 16, 18, 20, 22),
	ICE_PROTOCOL_ENTRY(ICE_GTP_NO_PAY, 8, 10, 12, 14),
	ICE_PROTOCOL_ENTRY(ICE_PPPOE, 0, 2, 4, 6),
	ICE_PROTOCOL_ENTRY(ICE_L2TPV3, 0, 2, 4, 6, 8, 10),
	ICE_PROTOCOL_ENTRY(ICE_VLAN_EX, 2, 0),
	ICE_PROTOCOL_ENTRY(ICE_VLAN_IN, 2, 0),
	ICE_PROTOCOL_ENTRY(ICE_HW_METADATA,
			   ICE_SOURCE_PORT_MDID_OFFSET,
			   ICE_PTYPE_MDID_OFFSET,
			   ICE_PACKET_LENGTH_MDID_OFFSET,
			   ICE_SOURCE_VSI_MDID_OFFSET,
			   ICE_PKT_VLAN_MDID_OFFSET,
			   ICE_PKT_TUNNEL_MDID_OFFSET,
			   ICE_PKT_TCP_MDID_OFFSET,
			   ICE_PKT_ERROR_MDID_OFFSET),
};
4590 
/* Map each software protocol type to its hardware protocol ID. Non-const on
 * purpose: ice_change_proto_id_to_dvm() rewrites the ICE_VLAN_OFOS entry
 * when double VLAN mode is enabled.
 */
static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		ICE_MAC_OFOS_HW },
	{ ICE_MAC_IL,		ICE_MAC_IL_HW },
	{ ICE_ETYPE_OL,		ICE_ETYPE_OL_HW },
	{ ICE_ETYPE_IL,		ICE_ETYPE_IL_HW },
	{ ICE_VLAN_OFOS,	ICE_VLAN_OL_HW },
	{ ICE_IPV4_OFOS,	ICE_IPV4_OFOS_HW },
	{ ICE_IPV4_IL,		ICE_IPV4_IL_HW },
	{ ICE_IPV6_OFOS,	ICE_IPV6_OFOS_HW },
	{ ICE_IPV6_IL,		ICE_IPV6_IL_HW },
	{ ICE_TCP_IL,		ICE_TCP_IL_HW },
	{ ICE_UDP_OF,		ICE_UDP_OF_HW },
	{ ICE_UDP_ILOS,		ICE_UDP_ILOS_HW },
	{ ICE_VXLAN,		ICE_UDP_OF_HW },
	{ ICE_GENEVE,		ICE_UDP_OF_HW },
	{ ICE_NVGRE,		ICE_GRE_OF_HW },
	{ ICE_GTP,		ICE_UDP_OF_HW },
	{ ICE_GTP_NO_PAY,	ICE_UDP_ILOS_HW },
	{ ICE_PPPOE,		ICE_PPPOE_HW },
	{ ICE_L2TPV3,		ICE_L2TPV3_HW },
	{ ICE_VLAN_EX,          ICE_VLAN_OF_HW },
	{ ICE_VLAN_IN,          ICE_VLAN_OL_HW },
	{ ICE_HW_METADATA,      ICE_META_DATA_ID_HW },
};
4615 
4616 /**
4617  * ice_find_recp - find a recipe
4618  * @hw: pointer to the hardware structure
4619  * @lkup_exts: extension sequence to match
4620  * @rinfo: information regarding the rule e.g. priority and action info
4621  *
4622  * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
4623  */
4624 static u16
4625 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
4626 	      const struct ice_adv_rule_info *rinfo)
4627 {
4628 	bool refresh_required = true;
4629 	struct ice_sw_recipe *recp;
4630 	u8 i;
4631 
4632 	/* Walk through existing recipes to find a match */
4633 	recp = hw->switch_info->recp_list;
4634 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4635 		/* If recipe was not created for this ID, in SW bookkeeping,
4636 		 * check if FW has an entry for this recipe. If the FW has an
4637 		 * entry update it in our SW bookkeeping and continue with the
4638 		 * matching.
4639 		 */
4640 		if (!recp[i].recp_created)
4641 			if (ice_get_recp_frm_fw(hw,
4642 						hw->switch_info->recp_list, i,
4643 						&refresh_required))
4644 				continue;
4645 
4646 		/* Skip inverse action recipes */
4647 		if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
4648 		    ICE_AQ_RECIPE_ACT_INV_ACT)
4649 			continue;
4650 
4651 		/* if number of words we are looking for match */
4652 		if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
4653 			struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
4654 			struct ice_fv_word *be = lkup_exts->fv_words;
4655 			u16 *cr = recp[i].lkup_exts.field_mask;
4656 			u16 *de = lkup_exts->field_mask;
4657 			bool found = true;
4658 			u8 pe, qr;
4659 
4660 			/* ar, cr, and qr are related to the recipe words, while
4661 			 * be, de, and pe are related to the lookup words
4662 			 */
4663 			for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
4664 				for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
4665 				     qr++) {
4666 					if (ar[qr].off == be[pe].off &&
4667 					    ar[qr].prot_id == be[pe].prot_id &&
4668 					    cr[qr] == de[pe])
4669 						/* Found the "pe"th word in the
4670 						 * given recipe
4671 						 */
4672 						break;
4673 				}
4674 				/* After walking through all the words in the
4675 				 * "i"th recipe if "p"th word was not found then
4676 				 * this recipe is not what we are looking for.
4677 				 * So break out from this loop and try the next
4678 				 * recipe
4679 				 */
4680 				if (qr >= recp[i].lkup_exts.n_val_words) {
4681 					found = false;
4682 					break;
4683 				}
4684 			}
4685 			/* If for "i"th recipe the found was never set to false
4686 			 * then it means we found our match
4687 			 * Also tun type and *_pass_l2 of recipe needs to be
4688 			 * checked
4689 			 */
4690 			if (found && recp[i].tun_type == rinfo->tun_type &&
4691 			    recp[i].need_pass_l2 == rinfo->need_pass_l2 &&
4692 			    recp[i].allow_pass_l2 == rinfo->allow_pass_l2)
4693 				return i; /* Return the recipe ID */
4694 		}
4695 	}
4696 	return ICE_MAX_NUM_RECIPES;
4697 }
4698 
4699 /**
4700  * ice_change_proto_id_to_dvm - change proto id in prot_id_tbl
4701  *
4702  * As protocol id for outer vlan is different in dvm and svm, if dvm is
4703  * supported protocol array record for outer vlan has to be modified to
4704  * reflect the value proper for DVM.
4705  */
4706 void ice_change_proto_id_to_dvm(void)
4707 {
4708 	u8 i;
4709 
4710 	for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
4711 		if (ice_prot_id_tbl[i].type == ICE_VLAN_OFOS &&
4712 		    ice_prot_id_tbl[i].protocol_id != ICE_VLAN_OF_HW)
4713 			ice_prot_id_tbl[i].protocol_id = ICE_VLAN_OF_HW;
4714 }
4715 
4716 /**
4717  * ice_prot_type_to_id - get protocol ID from protocol type
4718  * @type: protocol type
4719  * @id: pointer to variable that will receive the ID
4720  *
4721  * Returns true if found, false otherwise
4722  */
4723 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
4724 {
4725 	u8 i;
4726 
4727 	for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
4728 		if (ice_prot_id_tbl[i].type == type) {
4729 			*id = ice_prot_id_tbl[i].protocol_id;
4730 			return true;
4731 		}
4732 	return false;
4733 }
4734 
4735 /**
4736  * ice_fill_valid_words - count valid words
4737  * @rule: advanced rule with lookup information
4738  * @lkup_exts: byte offset extractions of the words that are valid
4739  *
4740  * calculate valid words in a lookup rule using mask value
4741  */
4742 static u8
4743 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
4744 		     struct ice_prot_lkup_ext *lkup_exts)
4745 {
4746 	u8 j, word, prot_id, ret_val;
4747 
4748 	if (!ice_prot_type_to_id(rule->type, &prot_id))
4749 		return 0;
4750 
4751 	word = lkup_exts->n_val_words;
4752 
4753 	for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
4754 		if (((u16 *)&rule->m_u)[j] &&
4755 		    rule->type < ARRAY_SIZE(ice_prot_ext)) {
4756 			/* No more space to accommodate */
4757 			if (word >= ICE_MAX_CHAIN_WORDS)
4758 				return 0;
4759 			lkup_exts->fv_words[word].off =
4760 				ice_prot_ext[rule->type].offs[j];
4761 			lkup_exts->fv_words[word].prot_id =
4762 				ice_prot_id_tbl[rule->type].protocol_id;
4763 			lkup_exts->field_mask[word] =
4764 				be16_to_cpu(((__force __be16 *)&rule->m_u)[j]);
4765 			word++;
4766 		}
4767 
4768 	ret_val = word - lkup_exts->n_val_words;
4769 	lkup_exts->n_val_words = word;
4770 
4771 	return ret_val;
4772 }
4773 
4774 /**
4775  * ice_create_first_fit_recp_def - Create a recipe grouping
4776  * @hw: pointer to the hardware structure
4777  * @lkup_exts: an array of protocol header extractions
4778  * @rg_list: pointer to a list that stores new recipe groups
4779  * @recp_cnt: pointer to a variable that stores returned number of recipe groups
4780  *
4781  * Using first fit algorithm, take all the words that are still not done
4782  * and start grouping them in 4-word groups. Each group makes up one
4783  * recipe.
4784  */
4785 static int
4786 ice_create_first_fit_recp_def(struct ice_hw *hw,
4787 			      struct ice_prot_lkup_ext *lkup_exts,
4788 			      struct list_head *rg_list,
4789 			      u8 *recp_cnt)
4790 {
4791 	struct ice_pref_recipe_group *grp = NULL;
4792 	u8 j;
4793 
4794 	*recp_cnt = 0;
4795 
4796 	/* Walk through every word in the rule to check if it is not done. If so
4797 	 * then this word needs to be part of a new recipe.
4798 	 */
4799 	for (j = 0; j < lkup_exts->n_val_words; j++)
4800 		if (!test_bit(j, lkup_exts->done)) {
4801 			if (!grp ||
4802 			    grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
4803 				struct ice_recp_grp_entry *entry;
4804 
4805 				entry = devm_kzalloc(ice_hw_to_dev(hw),
4806 						     sizeof(*entry),
4807 						     GFP_KERNEL);
4808 				if (!entry)
4809 					return -ENOMEM;
4810 				list_add(&entry->l_entry, rg_list);
4811 				grp = &entry->r_group;
4812 				(*recp_cnt)++;
4813 			}
4814 
4815 			grp->pairs[grp->n_val_pairs].prot_id =
4816 				lkup_exts->fv_words[j].prot_id;
4817 			grp->pairs[grp->n_val_pairs].off =
4818 				lkup_exts->fv_words[j].off;
4819 			grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
4820 			grp->n_val_pairs++;
4821 		}
4822 
4823 	return 0;
4824 }
4825 
4826 /**
4827  * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
4828  * @hw: pointer to the hardware structure
4829  * @fv_list: field vector with the extraction sequence information
4830  * @rg_list: recipe groupings with protocol-offset pairs
4831  *
4832  * Helper function to fill in the field vector indices for protocol-offset
4833  * pairs. These indexes are then ultimately programmed into a recipe.
4834  */
4835 static int
4836 ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list,
4837 		       struct list_head *rg_list)
4838 {
4839 	struct ice_sw_fv_list_entry *fv;
4840 	struct ice_recp_grp_entry *rg;
4841 	struct ice_fv_word *fv_ext;
4842 
4843 	if (list_empty(fv_list))
4844 		return 0;
4845 
4846 	fv = list_first_entry(fv_list, struct ice_sw_fv_list_entry,
4847 			      list_entry);
4848 	fv_ext = fv->fv_ptr->ew;
4849 
4850 	list_for_each_entry(rg, rg_list, l_entry) {
4851 		u8 i;
4852 
4853 		for (i = 0; i < rg->r_group.n_val_pairs; i++) {
4854 			struct ice_fv_word *pr;
4855 			bool found = false;
4856 			u16 mask;
4857 			u8 j;
4858 
4859 			pr = &rg->r_group.pairs[i];
4860 			mask = rg->r_group.mask[i];
4861 
4862 			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
4863 				if (fv_ext[j].prot_id == pr->prot_id &&
4864 				    fv_ext[j].off == pr->off) {
4865 					found = true;
4866 
4867 					/* Store index of field vector */
4868 					rg->fv_idx[i] = j;
4869 					rg->fv_mask[i] = mask;
4870 					break;
4871 				}
4872 
4873 			/* Protocol/offset could not be found, caller gave an
4874 			 * invalid pair
4875 			 */
4876 			if (!found)
4877 				return -EINVAL;
4878 		}
4879 	}
4880 
4881 	return 0;
4882 }
4883 
4884 /**
4885  * ice_find_free_recp_res_idx - find free result indexes for recipe
4886  * @hw: pointer to hardware structure
4887  * @profiles: bitmap of profiles that will be associated with the new recipe
4888  * @free_idx: pointer to variable to receive the free index bitmap
4889  *
4890  * The algorithm used here is:
4891  *	1. When creating a new recipe, create a set P which contains all
4892  *	   Profiles that will be associated with our new recipe
4893  *
4894  *	2. For each Profile p in set P:
4895  *	    a. Add all recipes associated with Profile p into set R
4896  *	    b. Optional : PossibleIndexes &= profile[p].possibleIndexes
4897  *		[initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
4898  *		i. Or just assume they all have the same possible indexes:
4899  *			44, 45, 46, 47
4900  *			i.e., PossibleIndexes = 0x0000F00000000000
4901  *
4902  *	3. For each Recipe r in set R:
4903  *	    a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
4904  *	    b. FreeIndexes = UsedIndexes ^ PossibleIndexes
4905  *
4906  *	FreeIndexes will contain the bits indicating the indexes free for use,
4907  *      then the code needs to update the recipe[r].used_result_idx_bits to
4908  *      indicate which indexes were selected for use by this recipe.
4909  */
4910 static u16
4911 ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
4912 			   unsigned long *free_idx)
4913 {
4914 	DECLARE_BITMAP(possible_idx, ICE_MAX_FV_WORDS);
4915 	DECLARE_BITMAP(recipes, ICE_MAX_NUM_RECIPES);
4916 	DECLARE_BITMAP(used_idx, ICE_MAX_FV_WORDS);
4917 	u16 bit;
4918 
4919 	bitmap_zero(recipes, ICE_MAX_NUM_RECIPES);
4920 	bitmap_zero(used_idx, ICE_MAX_FV_WORDS);
4921 
4922 	bitmap_fill(possible_idx, ICE_MAX_FV_WORDS);
4923 
4924 	/* For each profile we are going to associate the recipe with, add the
4925 	 * recipes that are associated with that profile. This will give us
4926 	 * the set of recipes that our recipe may collide with. Also, determine
4927 	 * what possible result indexes are usable given this set of profiles.
4928 	 */
4929 	for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
4930 		bitmap_or(recipes, recipes, profile_to_recipe[bit],
4931 			  ICE_MAX_NUM_RECIPES);
4932 		bitmap_and(possible_idx, possible_idx,
4933 			   hw->switch_info->prof_res_bm[bit],
4934 			   ICE_MAX_FV_WORDS);
4935 	}
4936 
4937 	/* For each recipe that our new recipe may collide with, determine
4938 	 * which indexes have been used.
4939 	 */
4940 	for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
4941 		bitmap_or(used_idx, used_idx,
4942 			  hw->switch_info->recp_list[bit].res_idxs,
4943 			  ICE_MAX_FV_WORDS);
4944 
4945 	bitmap_xor(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
4946 
4947 	/* return number of free indexes */
4948 	return (u16)bitmap_weight(free_idx, ICE_MAX_FV_WORDS);
4949 }
4950 
/**
 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
 * @hw: pointer to hardware structure
 * @rm: recipe management list entry
 * @profiles: bitmap of profiles that will be associated.
 *
 * Allocates a recipe ID for every group in @rm->rg_list, fills in the recipe
 * content (switch-ID lookup, field vector indexes/masks, chaining result
 * indexes), programs the recipes through the admin queue, and mirrors the
 * created recipes into the software bookkeeping list in hw->switch_info.
 * When more than one group exists, an extra root recipe is created that
 * chains the sub-recipes via their result indexes.
 *
 * Return: 0 on success; -ENOSPC, -ENOMEM, -EINVAL, -EIO or an AQ error
 * status on failure.
 */
static int
ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
		  unsigned long *profiles)
{
	DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_content *content;
	struct ice_aqc_recipe_data_elem *tmp;
	struct ice_aqc_recipe_data_elem *buf;
	struct ice_recp_grp_entry *entry;
	u16 free_res_idx;
	u16 recipe_count;
	u8 chain_idx;
	u8 recps = 0;
	int status;

	/* When more than one recipe are required, another recipe is needed to
	 * chain them together. Matching a tunnel metadata ID takes up one of
	 * the match fields in the chaining recipe reducing the number of
	 * chained recipes by one.
	 */
	 /* check number of free result indices */
	bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS);
	free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);

	ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
		  free_res_idx, rm->n_grp_count);

	if (rm->n_grp_count > 1) {
		if (rm->n_grp_count > free_res_idx)
			return -ENOSPC;

		/* account for the extra root recipe that chains the others */
		rm->n_grp_count++;
	}

	if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
		return -ENOSPC;

	/* scratch buffer for reading back currently-programmed recipes */
	tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf),
			   GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err_mem;
	}

	bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
	recipe_count = ICE_MAX_NUM_RECIPES;
	status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
				   NULL);
	if (status || recipe_count == 0)
		goto err_unroll;

	/* Allocate the recipe resources, and configure them according to the
	 * match fields from protocol headers and extracted field vectors.
	 */
	chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
	list_for_each_entry(entry, &rm->rg_list, l_entry) {
		u8 i;

		status = ice_alloc_recipe(hw, &entry->rid);
		if (status)
			goto err_unroll;

		content = &buf[recps].content;

		/* Clear the result index of the located recipe, as this will be
		 * updated, if needed, later in the recipe creation process.
		 */
		tmp[0].content.result_indx = 0;

		/* start from the read-back template, then override fields */
		buf[recps] = tmp[0];
		buf[recps].recipe_indx = (u8)entry->rid;
		/* if the recipe is a non-root recipe RID should be programmed
		 * as 0 for the rules to be applied correctly.
		 */
		content->rid = 0;
		memset(&content->lkup_indx, 0,
		       sizeof(content->lkup_indx));

		/* All recipes use look-up index 0 to match switch ID. */
		content->lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
		content->mask[0] = cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
		/* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
		 * to be 0
		 */
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
			content->lkup_indx[i] = 0x80;
			content->mask[i] = 0;
		}

		/* program this group's field vector indexes and masks */
		for (i = 0; i < entry->r_group.n_val_pairs; i++) {
			content->lkup_indx[i + 1] = entry->fv_idx[i];
			content->mask[i + 1] = cpu_to_le16(entry->fv_mask[i]);
		}

		if (rm->n_grp_count > 1) {
			/* Checks to see if there really is a valid result index
			 * that can be used.
			 */
			if (chain_idx >= ICE_MAX_FV_WORDS) {
				ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
				status = -ENOSPC;
				goto err_unroll;
			}

			/* publish this sub-recipe's match result at chain_idx
			 * so the root recipe can match on it
			 */
			entry->chain_idx = chain_idx;
			content->result_indx =
				ICE_AQ_RECIPE_RESULT_EN |
				((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
				 ICE_AQ_RECIPE_RESULT_DATA_M);
			clear_bit(chain_idx, result_idx_bm);
			chain_idx = find_first_bit(result_idx_bm,
						   ICE_MAX_FV_WORDS);
		}

		/* fill recipe dependencies */
		bitmap_zero((unsigned long *)buf[recps].recipe_bitmap,
			    ICE_MAX_NUM_RECIPES);
		set_bit(buf[recps].recipe_indx,
			(unsigned long *)buf[recps].recipe_bitmap);
		content->act_ctrl_fwd_priority = rm->priority;

		if (rm->need_pass_l2)
			content->act_ctrl |= ICE_AQ_RECIPE_ACT_NEED_PASS_L2;

		if (rm->allow_pass_l2)
			content->act_ctrl |= ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2;
		recps++;
	}

	if (rm->n_grp_count == 1) {
		/* single group: that recipe is the root itself */
		rm->root_rid = buf[0].recipe_indx;
		set_bit(buf[0].recipe_indx, rm->r_bitmap);
		buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
		if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
			memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
			       sizeof(buf[0].recipe_bitmap));
		} else {
			status = -EINVAL;
			goto err_unroll;
		}
		/* Applicable only for ROOT_RECIPE, set the fwd_priority for
		 * the recipe which is getting created if specified
		 * by user. Usually any advanced switch filter, which results
		 * into new extraction sequence, ended up creating a new recipe
		 * of type ROOT and usually recipes are associated with profiles
		 * Switch rule referring newly created recipe, needs to have
		 * either/or 'fwd' or 'join' priority, otherwise switch rule
		 * evaluation will not happen correctly. In other words, if
		 * switch rule to be evaluated on priority basis, then recipe
		 * needs to have priority, otherwise it will be evaluated last.
		 */
		buf[0].content.act_ctrl_fwd_priority = rm->priority;
	} else {
		struct ice_recp_grp_entry *last_chain_entry;
		u16 rid, i;

		/* Allocate the last recipe that will chain the outcomes of the
		 * other recipes together
		 */
		status = ice_alloc_recipe(hw, &rid);
		if (status)
			goto err_unroll;

		content = &buf[recps].content;

		buf[recps].recipe_indx = (u8)rid;
		content->rid = (u8)rid;
		content->rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
		/* the new entry created should also be part of rg_list to
		 * make sure we have complete recipe
		 */
		last_chain_entry = devm_kzalloc(ice_hw_to_dev(hw),
						sizeof(*last_chain_entry),
						GFP_KERNEL);
		if (!last_chain_entry) {
			status = -ENOMEM;
			goto err_unroll;
		}
		last_chain_entry->rid = rid;
		memset(&content->lkup_indx, 0, sizeof(content->lkup_indx));
		/* All recipes use look-up index 0 to match switch ID. */
		content->lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
		content->mask[0] = cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
			content->lkup_indx[i] = ICE_AQ_RECIPE_LKUP_IGNORE;
			content->mask[i] = 0;
		}

		i = 1;
		/* update r_bitmap with the recp that is used for chaining */
		set_bit(rid, rm->r_bitmap);
		/* this is the recipe that chains all the other recipes so it
		 * should not have a chaining ID to indicate the same
		 */
		last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
		/* match each sub-recipe's published result index with a full
		 * 16-bit mask
		 */
		list_for_each_entry(entry, &rm->rg_list, l_entry) {
			last_chain_entry->fv_idx[i] = entry->chain_idx;
			content->lkup_indx[i] = entry->chain_idx;
			content->mask[i++] = cpu_to_le16(0xFFFF);
			set_bit(entry->rid, rm->r_bitmap);
		}
		list_add(&last_chain_entry->l_entry, &rm->rg_list);
		if (sizeof(buf[recps].recipe_bitmap) >=
		    sizeof(rm->r_bitmap)) {
			memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
			       sizeof(buf[recps].recipe_bitmap));
		} else {
			status = -EINVAL;
			goto err_unroll;
		}
		content->act_ctrl_fwd_priority = rm->priority;

		recps++;
		rm->root_rid = (u8)rid;
	}
	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
	if (status)
		goto err_unroll;

	status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
	ice_release_change_lock(hw);
	if (status)
		goto err_unroll;

	/* Every recipe that just got created add it to the recipe
	 * book keeping list
	 */
	list_for_each_entry(entry, &rm->rg_list, l_entry) {
		struct ice_switch_info *sw = hw->switch_info;
		bool is_root, idx_found = false;
		struct ice_sw_recipe *recp;
		u16 idx, buf_idx = 0;

		/* find buffer index for copying some data */
		for (idx = 0; idx < rm->n_grp_count; idx++)
			if (buf[idx].recipe_indx == entry->rid) {
				buf_idx = idx;
				idx_found = true;
			}

		if (!idx_found) {
			status = -EIO;
			goto err_unroll;
		}

		recp = &sw->recp_list[entry->rid];
		is_root = (rm->root_rid == entry->rid);
		recp->is_root = is_root;

		recp->root_rid = entry->rid;
		recp->big_recp = (is_root && rm->n_grp_count > 1);

		memcpy(&recp->ext_words, entry->r_group.pairs,
		       entry->r_group.n_val_pairs * sizeof(struct ice_fv_word));

		memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
		       sizeof(recp->r_bitmap));

		/* Copy non-result fv index values and masks to recipe. This
		 * call will also update the result recipe bitmask.
		 */
		ice_collect_result_idx(&buf[buf_idx], recp);

		/* for non-root recipes, also copy to the root, this allows
		 * easier matching of a complete chained recipe
		 */
		if (!is_root)
			ice_collect_result_idx(&buf[buf_idx],
					       &sw->recp_list[rm->root_rid]);

		recp->n_ext_words = entry->r_group.n_val_pairs;
		recp->chain_idx = entry->chain_idx;
		recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
		recp->n_grp_count = rm->n_grp_count;
		recp->tun_type = rm->tun_type;
		recp->need_pass_l2 = rm->need_pass_l2;
		recp->allow_pass_l2 = rm->allow_pass_l2;
		recp->recp_created = true;
	}
	/* buf ownership transfers to rm->root_buf; freed when rm is torn down */
	rm->root_buf = buf;
	kfree(tmp);
	return status;

err_unroll:
err_mem:
	kfree(tmp);
	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}
5250 
5251 /**
5252  * ice_create_recipe_group - creates recipe group
5253  * @hw: pointer to hardware structure
5254  * @rm: recipe management list entry
5255  * @lkup_exts: lookup elements
5256  */
5257 static int
5258 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5259 			struct ice_prot_lkup_ext *lkup_exts)
5260 {
5261 	u8 recp_count = 0;
5262 	int status;
5263 
5264 	rm->n_grp_count = 0;
5265 
5266 	/* Create recipes for words that are marked not done by packing them
5267 	 * as best fit.
5268 	 */
5269 	status = ice_create_first_fit_recp_def(hw, lkup_exts,
5270 					       &rm->rg_list, &recp_count);
5271 	if (!status) {
5272 		rm->n_grp_count += recp_count;
5273 		rm->n_ext_words = lkup_exts->n_val_words;
5274 		memcpy(&rm->ext_words, lkup_exts->fv_words,
5275 		       sizeof(rm->ext_words));
5276 		memcpy(rm->word_masks, lkup_exts->field_mask,
5277 		       sizeof(rm->word_masks));
5278 	}
5279 
5280 	return status;
5281 }
5282 
5283 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
5284  * @hw: pointer to hardware structure
5285  * @rinfo: other information regarding the rule e.g. priority and action info
5286  * @bm: pointer to memory for returning the bitmap of field vectors
5287  */
5288 static void
5289 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
5290 			 unsigned long *bm)
5291 {
5292 	enum ice_prof_type prof_type;
5293 
5294 	bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
5295 
5296 	switch (rinfo->tun_type) {
5297 	case ICE_NON_TUN:
5298 		prof_type = ICE_PROF_NON_TUN;
5299 		break;
5300 	case ICE_ALL_TUNNELS:
5301 		prof_type = ICE_PROF_TUN_ALL;
5302 		break;
5303 	case ICE_SW_TUN_GENEVE:
5304 	case ICE_SW_TUN_VXLAN:
5305 		prof_type = ICE_PROF_TUN_UDP;
5306 		break;
5307 	case ICE_SW_TUN_NVGRE:
5308 		prof_type = ICE_PROF_TUN_GRE;
5309 		break;
5310 	case ICE_SW_TUN_GTPU:
5311 		prof_type = ICE_PROF_TUN_GTPU;
5312 		break;
5313 	case ICE_SW_TUN_GTPC:
5314 		prof_type = ICE_PROF_TUN_GTPC;
5315 		break;
5316 	case ICE_SW_TUN_AND_NON_TUN:
5317 	default:
5318 		prof_type = ICE_PROF_ALL;
5319 		break;
5320 	}
5321 
5322 	ice_get_sw_fv_bitmap(hw, prof_type, bm);
5323 }
5324 
/**
 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
 * @hw: pointer to hardware structure
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *  structure per protocol header
 * @lkups_cnt: number of protocols
 * @rinfo: other information regarding the rule e.g. priority and action info
 * @rid: return the recipe ID of the recipe created
 *
 * Extracts the valid match words from @lkups, collects the compatible field
 * vectors, groups the words into recipe groups, and either reuses an
 * existing recipe that already matches the request or creates a new one and
 * associates it with all matching profiles. On success *@rid holds the
 * (root) recipe ID.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int
ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		   u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
{
	DECLARE_BITMAP(fv_bitmap, ICE_MAX_NUM_PROFILES);
	DECLARE_BITMAP(profiles, ICE_MAX_NUM_PROFILES);
	struct ice_prot_lkup_ext *lkup_exts;
	struct ice_recp_grp_entry *r_entry;
	struct ice_sw_fv_list_entry *fvit;
	struct ice_recp_grp_entry *r_tmp;
	struct ice_sw_fv_list_entry *tmp;
	struct ice_sw_recipe *rm;
	int status = 0;
	u8 i;

	if (!lkups_cnt)
		return -EINVAL;

	lkup_exts = kzalloc(sizeof(*lkup_exts), GFP_KERNEL);
	if (!lkup_exts)
		return -ENOMEM;

	/* Determine the number of words to be matched and if it exceeds a
	 * recipe's restrictions
	 */
	for (i = 0; i < lkups_cnt; i++) {
		u16 count;

		if (lkups[i].type >= ICE_PROTOCOL_LAST) {
			status = -EIO;
			goto err_free_lkup_exts;
		}

		/* a lookup element that contributes no words is invalid */
		count = ice_fill_valid_words(&lkups[i], lkup_exts);
		if (!count) {
			status = -EIO;
			goto err_free_lkup_exts;
		}
	}

	rm = kzalloc(sizeof(*rm), GFP_KERNEL);
	if (!rm) {
		status = -ENOMEM;
		goto err_free_lkup_exts;
	}

	/* Get field vectors that contain fields extracted from all the protocol
	 * headers being programmed.
	 */
	INIT_LIST_HEAD(&rm->fv_list);
	INIT_LIST_HEAD(&rm->rg_list);

	/* Get bitmap of field vectors (profiles) that are compatible with the
	 * rule request; only these will be searched in the subsequent call to
	 * ice_get_sw_fv_list.
	 */
	ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);

	status = ice_get_sw_fv_list(hw, lkup_exts, fv_bitmap, &rm->fv_list);
	if (status)
		goto err_unroll;

	/* Group match words into recipes using preferred recipe grouping
	 * criteria.
	 */
	status = ice_create_recipe_group(hw, rm, lkup_exts);
	if (status)
		goto err_unroll;

	/* set the recipe priority if specified */
	rm->priority = (u8)rinfo->priority;

	rm->need_pass_l2 = rinfo->need_pass_l2;
	rm->allow_pass_l2 = rinfo->allow_pass_l2;

	/* Find offsets from the field vector. Pick the first one for all the
	 * recipes.
	 */
	status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
	if (status)
		goto err_unroll;

	/* get bitmap of all profiles the recipe will be associated with */
	bitmap_zero(profiles, ICE_MAX_NUM_PROFILES);
	list_for_each_entry(fvit, &rm->fv_list, list_entry) {
		ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
		set_bit((u16)fvit->profile_id, profiles);
	}

	/* Look for a recipe which matches our requested fv / mask list */
	*rid = ice_find_recp(hw, lkup_exts, rinfo);
	if (*rid < ICE_MAX_NUM_RECIPES)
		/* Success if found a recipe that match the existing criteria;
		 * status stays 0 and the unroll label below doubles as the
		 * normal cleanup path for the temporary lists
		 */
		goto err_unroll;

	rm->tun_type = rinfo->tun_type;
	/* Recipe we need does not exist, add a recipe */
	status = ice_add_sw_recipe(hw, rm, profiles);
	if (status)
		goto err_unroll;

	/* Associate all the recipes created with all the profiles in the
	 * common field vector.
	 */
	list_for_each_entry(fvit, &rm->fv_list, list_entry) {
		DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
		u16 j;

		/* read the profile's current recipe associations, OR in the
		 * new recipes, then write the merged bitmap back
		 */
		status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
						      (u8 *)r_bitmap, NULL);
		if (status)
			goto err_unroll;

		bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap,
			  ICE_MAX_NUM_RECIPES);
		status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
		if (status)
			goto err_unroll;

		status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
						      (u8 *)r_bitmap,
						      NULL);
		ice_release_change_lock(hw);

		if (status)
			goto err_unroll;

		/* Update profile to recipe bitmap array */
		bitmap_copy(profile_to_recipe[fvit->profile_id], r_bitmap,
			    ICE_MAX_NUM_RECIPES);

		/* Update recipe to profile bitmap array */
		for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
			set_bit((u16)fvit->profile_id, recipe_to_profile[j]);
	}

	*rid = rm->root_rid;
	memcpy(&hw->switch_info->recp_list[*rid].lkup_exts, lkup_exts,
	       sizeof(*lkup_exts));
err_unroll:
	/* release the temporary recipe-group and field-vector lists;
	 * reached on both success and failure
	 */
	list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) {
		list_del(&r_entry->l_entry);
		devm_kfree(ice_hw_to_dev(hw), r_entry);
	}

	list_for_each_entry_safe(fvit, tmp, &rm->fv_list, list_entry) {
		list_del(&fvit->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fvit);
	}

	devm_kfree(ice_hw_to_dev(hw), rm->root_buf);
	kfree(rm);

err_free_lkup_exts:
	kfree(lkup_exts);

	return status;
}
5492 
/**
 * ice_dummy_packet_add_vlan - insert VLAN header to dummy pkt
 *
 * @dummy_pkt: dummy packet profile pattern to which VLAN tag(s) will be added
 * @num_vlan: number of VLAN tags (1 for a single tag, 2 for QinQ)
 *
 * Builds a new heap-allocated &ice_dummy_pkt_profile by splicing one or two
 * VLAN headers into @dummy_pkt at the offset of its second protocol entry
 * and shifting every subsequent protocol offset by the inserted length.
 *
 * Return: the new profile (flagged ICE_PKT_KMALLOC so the caller knows it
 * and its buffers must be freed), or an ERR_PTR() on failure.
 */
static struct ice_dummy_pkt_profile *
ice_dummy_packet_add_vlan(const struct ice_dummy_pkt_profile *dummy_pkt,
			  u32 num_vlan)
{
	struct ice_dummy_pkt_profile *profile;
	struct ice_dummy_pkt_offsets *offsets;
	u32 buf_len, off, etype_off, i;
	u8 *pkt;

	if (num_vlan < 1 || num_vlan > 2)
		return ERR_PTR(-EINVAL);

	/* total bytes the inserted VLAN tag(s) add to the packet */
	off = num_vlan * VLAN_HLEN;

	/* new offsets table: room for the original entries plus one offset
	 * entry per inserted VLAN tag
	 */
	buf_len = array_size(num_vlan, sizeof(ice_dummy_vlan_packet_offsets)) +
		  dummy_pkt->offsets_len;
	offsets = kzalloc(buf_len, GFP_KERNEL);
	if (!offsets)
		return ERR_PTR(-ENOMEM);

	/* keep the first entry, then splice in the VLAN offset entries */
	offsets[0] = dummy_pkt->offsets[0];
	if (num_vlan == 2) {
		offsets[1] = ice_dummy_qinq_packet_offsets[0];
		offsets[2] = ice_dummy_qinq_packet_offsets[1];
	} else if (num_vlan == 1) {
		offsets[1] = ice_dummy_vlan_packet_offsets[0];
	}

	/* shift all remaining protocol offsets by the inserted length */
	for (i = 1; dummy_pkt->offsets[i].type != ICE_PROTOCOL_LAST; i++) {
		offsets[i + num_vlan].type = dummy_pkt->offsets[i].type;
		offsets[i + num_vlan].offset =
			dummy_pkt->offsets[i].offset + off;
	}
	/* copy the ICE_PROTOCOL_LAST terminator entry */
	offsets[i + num_vlan] = dummy_pkt->offsets[i];

	/* the VLAN tag(s) go in where the second protocol entry started */
	etype_off = dummy_pkt->offsets[1].offset;

	buf_len = array_size(num_vlan, sizeof(ice_dummy_vlan_packet)) +
		  dummy_pkt->pkt_len;
	pkt = kzalloc(buf_len, GFP_KERNEL);
	if (!pkt) {
		kfree(offsets);
		return ERR_PTR(-ENOMEM);
	}

	/* assemble: bytes before the splice point, the VLAN tag template,
	 * then the remainder of the original packet
	 */
	memcpy(pkt, dummy_pkt->pkt, etype_off);
	memcpy(pkt + etype_off,
	       num_vlan == 2 ? ice_dummy_qinq_packet : ice_dummy_vlan_packet,
	       off);
	memcpy(pkt + etype_off + off, dummy_pkt->pkt + etype_off,
	       dummy_pkt->pkt_len - etype_off);

	profile = kzalloc(sizeof(*profile), GFP_KERNEL);
	if (!profile) {
		kfree(offsets);
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}

	profile->offsets = offsets;
	profile->pkt = pkt;
	profile->pkt_len = buf_len;
	/* mark as dynamically allocated so the caller frees it */
	profile->match |= ICE_PKT_KMALLOC;

	return profile;
}
5565 
5566 /**
5567  * ice_find_dummy_packet - find dummy packet
5568  *
5569  * @lkups: lookup elements or match criteria for the advanced recipe, one
5570  *	   structure per protocol header
5571  * @lkups_cnt: number of protocols
5572  * @tun_type: tunnel type
5573  *
5574  * Returns the &ice_dummy_pkt_profile corresponding to these lookup params.
5575  */
5576 static const struct ice_dummy_pkt_profile *
5577 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5578 		      enum ice_sw_tunnel_type tun_type)
5579 {
5580 	const struct ice_dummy_pkt_profile *ret = ice_dummy_pkt_profiles;
5581 	u32 match = 0, vlan_count = 0;
5582 	u16 i;
5583 
5584 	switch (tun_type) {
5585 	case ICE_SW_TUN_GTPC:
5586 		match |= ICE_PKT_TUN_GTPC;
5587 		break;
5588 	case ICE_SW_TUN_GTPU:
5589 		match |= ICE_PKT_TUN_GTPU;
5590 		break;
5591 	case ICE_SW_TUN_NVGRE:
5592 		match |= ICE_PKT_TUN_NVGRE;
5593 		break;
5594 	case ICE_SW_TUN_GENEVE:
5595 	case ICE_SW_TUN_VXLAN:
5596 		match |= ICE_PKT_TUN_UDP;
5597 		break;
5598 	default:
5599 		break;
5600 	}
5601 
5602 	for (i = 0; i < lkups_cnt; i++) {
5603 		if (lkups[i].type == ICE_UDP_ILOS)
5604 			match |= ICE_PKT_INNER_UDP;
5605 		else if (lkups[i].type == ICE_TCP_IL)
5606 			match |= ICE_PKT_INNER_TCP;
5607 		else if (lkups[i].type == ICE_IPV6_OFOS)
5608 			match |= ICE_PKT_OUTER_IPV6;
5609 		else if (lkups[i].type == ICE_VLAN_OFOS ||
5610 			 lkups[i].type == ICE_VLAN_EX)
5611 			vlan_count++;
5612 		else if (lkups[i].type == ICE_VLAN_IN)
5613 			vlan_count++;
5614 		else if (lkups[i].type == ICE_ETYPE_OL &&
5615 			 lkups[i].h_u.ethertype.ethtype_id ==
5616 				cpu_to_be16(ICE_IPV6_ETHER_ID) &&
5617 			 lkups[i].m_u.ethertype.ethtype_id ==
5618 				cpu_to_be16(0xFFFF))
5619 			match |= ICE_PKT_OUTER_IPV6;
5620 		else if (lkups[i].type == ICE_ETYPE_IL &&
5621 			 lkups[i].h_u.ethertype.ethtype_id ==
5622 				cpu_to_be16(ICE_IPV6_ETHER_ID) &&
5623 			 lkups[i].m_u.ethertype.ethtype_id ==
5624 				cpu_to_be16(0xFFFF))
5625 			match |= ICE_PKT_INNER_IPV6;
5626 		else if (lkups[i].type == ICE_IPV6_IL)
5627 			match |= ICE_PKT_INNER_IPV6;
5628 		else if (lkups[i].type == ICE_GTP_NO_PAY)
5629 			match |= ICE_PKT_GTP_NOPAY;
5630 		else if (lkups[i].type == ICE_PPPOE) {
5631 			match |= ICE_PKT_PPPOE;
5632 			if (lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
5633 			    htons(PPP_IPV6))
5634 				match |= ICE_PKT_OUTER_IPV6;
5635 		} else if (lkups[i].type == ICE_L2TPV3)
5636 			match |= ICE_PKT_L2TPV3;
5637 	}
5638 
5639 	while (ret->match && (match & ret->match) != ret->match)
5640 		ret++;
5641 
5642 	if (vlan_count != 0)
5643 		ret = ice_dummy_packet_add_vlan(ret, vlan_count);
5644 
5645 	return ret;
5646 }
5647 
/**
 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
 *
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *	   structure per protocol header
 * @lkups_cnt: number of protocols
 * @s_rule: stores rule information from the match criteria
 * @profile: dummy packet profile (the template, its size and header offsets)
 *
 * Copies the profile's packet template into the rule's header data, then
 * overlays each lookup element's header values onto the template, writing
 * only the bits selected by the element's mask so surrounding template
 * bytes stay intact.
 *
 * Return: 0 on success, -EINVAL if a lookup type cannot be located in the
 * profile's offsets or is unknown, -EIO if a header length is not a
 * multiple of ICE_BYTES_PER_WORD.
 */
static int
ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
			  struct ice_sw_rule_lkup_rx_tx *s_rule,
			  const struct ice_dummy_pkt_profile *profile)
{
	u8 *pkt;
	u16 i;

	/* Start with a packet with a pre-defined/dummy content. Then, fill
	 * in the header values to be looked up or matched.
	 */
	pkt = s_rule->hdr_data;

	memcpy(pkt, profile->pkt, profile->pkt_len);

	for (i = 0; i < lkups_cnt; i++) {
		const struct ice_dummy_pkt_offsets *offsets = profile->offsets;
		enum ice_protocol_type type;
		u16 offset = 0, len = 0, j;
		bool found = false;

		/* find the start of this layer; it should be found since this
		 * was already checked when search for the dummy packet
		 */
		type = lkups[i].type;
		/* metadata isn't present in the packet */
		if (type == ICE_HW_METADATA)
			continue;

		for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
			if (type == offsets[j].type) {
				offset = offsets[j].offset;
				found = true;
				break;
			}
		}
		/* this should never happen in a correct calling sequence */
		if (!found)
			return -EINVAL;

		/* header length (bytes to overlay) for this protocol type */
		switch (lkups[i].type) {
		case ICE_MAC_OFOS:
		case ICE_MAC_IL:
			len = sizeof(struct ice_ether_hdr);
			break;
		case ICE_ETYPE_OL:
		case ICE_ETYPE_IL:
			len = sizeof(struct ice_ethtype_hdr);
			break;
		case ICE_VLAN_OFOS:
		case ICE_VLAN_EX:
		case ICE_VLAN_IN:
			len = sizeof(struct ice_vlan_hdr);
			break;
		case ICE_IPV4_OFOS:
		case ICE_IPV4_IL:
			len = sizeof(struct ice_ipv4_hdr);
			break;
		case ICE_IPV6_OFOS:
		case ICE_IPV6_IL:
			len = sizeof(struct ice_ipv6_hdr);
			break;
		case ICE_TCP_IL:
		case ICE_UDP_OF:
		case ICE_UDP_ILOS:
			len = sizeof(struct ice_l4_hdr);
			break;
		case ICE_SCTP_IL:
			len = sizeof(struct ice_sctp_hdr);
			break;
		case ICE_NVGRE:
			len = sizeof(struct ice_nvgre_hdr);
			break;
		case ICE_VXLAN:
		case ICE_GENEVE:
			len = sizeof(struct ice_udp_tnl_hdr);
			break;
		case ICE_GTP_NO_PAY:
		case ICE_GTP:
			len = sizeof(struct ice_udp_gtp_hdr);
			break;
		case ICE_PPPOE:
			len = sizeof(struct ice_pppoe_hdr);
			break;
		case ICE_L2TPV3:
			len = sizeof(struct ice_l2tpv3_sess_hdr);
			break;
		default:
			return -EINVAL;
		}

		/* the length should be a word multiple */
		if (len % ICE_BYTES_PER_WORD)
			return -EIO;

		/* We have the offset to the header start, the length, the
		 * caller's header values and mask. Use this information to
		 * copy the data into the dummy packet appropriately based on
		 * the mask. Note that we need to only write the bits as
		 * indicated by the mask to make sure we don't improperly write
		 * over any significant packet data.
		 */
		for (j = 0; j < len / sizeof(u16); j++) {
			u16 *ptr = (u16 *)(pkt + offset);
			u16 mask = lkups[i].m_raw[j];

			if (!mask)
				continue;

			/* masked merge: keep template bits outside the mask */
			ptr[j] = (ptr[j] & ~mask) | (lkups[i].h_raw[j] & mask);
		}
	}

	s_rule->hdr_len = cpu_to_le16(profile->pkt_len);

	return 0;
}
5774 
5775 /**
5776  * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
5777  * @hw: pointer to the hardware structure
5778  * @tun_type: tunnel type
5779  * @pkt: dummy packet to fill in
5780  * @offsets: offset info for the dummy packet
5781  */
5782 static int
5783 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
5784 			u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
5785 {
5786 	u16 open_port, i;
5787 
5788 	switch (tun_type) {
5789 	case ICE_SW_TUN_VXLAN:
5790 		if (!ice_get_open_tunnel_port(hw, &open_port, TNL_VXLAN))
5791 			return -EIO;
5792 		break;
5793 	case ICE_SW_TUN_GENEVE:
5794 		if (!ice_get_open_tunnel_port(hw, &open_port, TNL_GENEVE))
5795 			return -EIO;
5796 		break;
5797 	default:
5798 		/* Nothing needs to be done for this tunnel type */
5799 		return 0;
5800 	}
5801 
5802 	/* Find the outer UDP protocol header and insert the port number */
5803 	for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
5804 		if (offsets[i].type == ICE_UDP_OF) {
5805 			struct ice_l4_hdr *hdr;
5806 			u16 offset;
5807 
5808 			offset = offsets[i].offset;
5809 			hdr = (struct ice_l4_hdr *)&pkt[offset];
5810 			hdr->dst_port = cpu_to_be16(open_port);
5811 
5812 			return 0;
5813 		}
5814 	}
5815 
5816 	return -EIO;
5817 }
5818 
5819 /**
5820  * ice_fill_adv_packet_vlan - fill dummy packet with VLAN tag type
5821  * @hw: pointer to hw structure
5822  * @vlan_type: VLAN tag type
5823  * @pkt: dummy packet to fill in
5824  * @offsets: offset info for the dummy packet
5825  */
5826 static int
5827 ice_fill_adv_packet_vlan(struct ice_hw *hw, u16 vlan_type, u8 *pkt,
5828 			 const struct ice_dummy_pkt_offsets *offsets)
5829 {
5830 	u16 i;
5831 
5832 	/* Check if there is something to do */
5833 	if (!vlan_type || !ice_is_dvm_ena(hw))
5834 		return 0;
5835 
5836 	/* Find VLAN header and insert VLAN TPID */
5837 	for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
5838 		if (offsets[i].type == ICE_VLAN_OFOS ||
5839 		    offsets[i].type == ICE_VLAN_EX) {
5840 			struct ice_vlan_hdr *hdr;
5841 			u16 offset;
5842 
5843 			offset = offsets[i].offset;
5844 			hdr = (struct ice_vlan_hdr *)&pkt[offset];
5845 			hdr->type = cpu_to_be16(vlan_type);
5846 
5847 			return 0;
5848 		}
5849 	}
5850 
5851 	return -EIO;
5852 }
5853 
5854 static bool ice_rules_equal(const struct ice_adv_rule_info *first,
5855 			    const struct ice_adv_rule_info *second)
5856 {
5857 	return first->sw_act.flag == second->sw_act.flag &&
5858 	       first->tun_type == second->tun_type &&
5859 	       first->vlan_type == second->vlan_type &&
5860 	       first->src_vsi == second->src_vsi &&
5861 	       first->need_pass_l2 == second->need_pass_l2 &&
5862 	       first->allow_pass_l2 == second->allow_pass_l2;
5863 }
5864 
5865 /**
5866  * ice_find_adv_rule_entry - Search a rule entry
5867  * @hw: pointer to the hardware structure
5868  * @lkups: lookup elements or match criteria for the advanced recipe, one
5869  *	   structure per protocol header
5870  * @lkups_cnt: number of protocols
5871  * @recp_id: recipe ID for which we are finding the rule
5872  * @rinfo: other information regarding the rule e.g. priority and action info
5873  *
5874  * Helper function to search for a given advance rule entry
5875  * Returns pointer to entry storing the rule if found
5876  */
5877 static struct ice_adv_fltr_mgmt_list_entry *
5878 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5879 			u16 lkups_cnt, u16 recp_id,
5880 			struct ice_adv_rule_info *rinfo)
5881 {
5882 	struct ice_adv_fltr_mgmt_list_entry *list_itr;
5883 	struct ice_switch_info *sw = hw->switch_info;
5884 	int i;
5885 
5886 	list_for_each_entry(list_itr, &sw->recp_list[recp_id].filt_rules,
5887 			    list_entry) {
5888 		bool lkups_matched = true;
5889 
5890 		if (lkups_cnt != list_itr->lkups_cnt)
5891 			continue;
5892 		for (i = 0; i < list_itr->lkups_cnt; i++)
5893 			if (memcmp(&list_itr->lkups[i], &lkups[i],
5894 				   sizeof(*lkups))) {
5895 				lkups_matched = false;
5896 				break;
5897 			}
5898 		if (ice_rules_equal(rinfo, &list_itr->rule_info) &&
5899 		    lkups_matched)
5900 			return list_itr;
5901 	}
5902 	return NULL;
5903 }
5904 
5905 /**
5906  * ice_adv_add_update_vsi_list
5907  * @hw: pointer to the hardware structure
5908  * @m_entry: pointer to current adv filter management list entry
5909  * @cur_fltr: filter information from the book keeping entry
5910  * @new_fltr: filter information with the new VSI to be added
5911  *
5912  * Call AQ command to add or update previously created VSI list with new VSI.
5913  *
5914  * Helper function to do book keeping associated with adding filter information
5915  * The algorithm to do the booking keeping is described below :
5916  * When a VSI needs to subscribe to a given advanced filter
5917  *	if only one VSI has been added till now
5918  *		Allocate a new VSI list and add two VSIs
5919  *		to this list using switch rule command
5920  *		Update the previously created switch rule with the
5921  *		newly created VSI list ID
5922  *	if a VSI list was previously created
5923  *		Add the new VSI to the previously created VSI list set
5924  *		using the update switch rule command
5925  */
5926 static int
5927 ice_adv_add_update_vsi_list(struct ice_hw *hw,
5928 			    struct ice_adv_fltr_mgmt_list_entry *m_entry,
5929 			    struct ice_adv_rule_info *cur_fltr,
5930 			    struct ice_adv_rule_info *new_fltr)
5931 {
5932 	u16 vsi_list_id = 0;
5933 	int status;
5934 
5935 	if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5936 	    cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
5937 	    cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
5938 		return -EOPNOTSUPP;
5939 
5940 	if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5941 	     new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
5942 	    (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5943 	     cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
5944 		return -EOPNOTSUPP;
5945 
5946 	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
5947 		 /* Only one entry existed in the mapping and it was not already
5948 		  * a part of a VSI list. So, create a VSI list with the old and
5949 		  * new VSIs.
5950 		  */
5951 		struct ice_fltr_info tmp_fltr;
5952 		u16 vsi_handle_arr[2];
5953 
5954 		/* A rule already exists with the new VSI being added */
5955 		if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
5956 		    new_fltr->sw_act.fwd_id.hw_vsi_id)
5957 			return -EEXIST;
5958 
5959 		vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
5960 		vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
5961 		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
5962 						  &vsi_list_id,
5963 						  ICE_SW_LKUP_LAST);
5964 		if (status)
5965 			return status;
5966 
5967 		memset(&tmp_fltr, 0, sizeof(tmp_fltr));
5968 		tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
5969 		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
5970 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
5971 		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
5972 		tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
5973 
5974 		/* Update the previous switch rule of "forward to VSI" to
5975 		 * "fwd to VSI list"
5976 		 */
5977 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
5978 		if (status)
5979 			return status;
5980 
5981 		cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
5982 		cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
5983 		m_entry->vsi_list_info =
5984 			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
5985 						vsi_list_id);
5986 	} else {
5987 		u16 vsi_handle = new_fltr->sw_act.vsi_handle;
5988 
5989 		if (!m_entry->vsi_list_info)
5990 			return -EIO;
5991 
5992 		/* A rule already exists with the new VSI being added */
5993 		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
5994 			return 0;
5995 
5996 		/* Update the previously created VSI list set with
5997 		 * the new VSI ID passed in
5998 		 */
5999 		vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
6000 
6001 		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
6002 						  vsi_list_id, false,
6003 						  ice_aqc_opc_update_sw_rules,
6004 						  ICE_SW_LKUP_LAST);
6005 		/* update VSI list mapping info with new VSI ID */
6006 		if (!status)
6007 			set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
6008 	}
6009 	if (!status)
6010 		m_entry->vsi_count++;
6011 	return status;
6012 }
6013 
6014 void ice_rule_add_tunnel_metadata(struct ice_adv_lkup_elem *lkup)
6015 {
6016 	lkup->type = ICE_HW_METADATA;
6017 	lkup->m_u.metadata.flags[ICE_PKT_FLAGS_TUNNEL] =
6018 		cpu_to_be16(ICE_PKT_TUNNEL_MASK);
6019 }
6020 
6021 void ice_rule_add_vlan_metadata(struct ice_adv_lkup_elem *lkup)
6022 {
6023 	lkup->type = ICE_HW_METADATA;
6024 	lkup->m_u.metadata.flags[ICE_PKT_FLAGS_VLAN] =
6025 		cpu_to_be16(ICE_PKT_VLAN_MASK);
6026 }
6027 
6028 void ice_rule_add_src_vsi_metadata(struct ice_adv_lkup_elem *lkup)
6029 {
6030 	lkup->type = ICE_HW_METADATA;
6031 	lkup->m_u.metadata.source_vsi = cpu_to_be16(ICE_MDID_SOURCE_VSI_MASK);
6032 }
6033 
6034 /**
6035  * ice_add_adv_rule - helper function to create an advanced switch rule
6036  * @hw: pointer to the hardware structure
6037  * @lkups: information on the words that needs to be looked up. All words
6038  * together makes one recipe
6039  * @lkups_cnt: num of entries in the lkups array
6040  * @rinfo: other information related to the rule that needs to be programmed
6041  * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
6042  *               ignored is case of error.
6043  *
6044  * This function can program only 1 rule at a time. The lkups is used to
6045  * describe the all the words that forms the "lookup" portion of the recipe.
6046  * These words can span multiple protocols. Callers to this function need to
6047  * pass in a list of protocol headers with lookup information along and mask
6048  * that determines which words are valid from the given protocol header.
6049  * rinfo describes other information related to this rule such as forwarding
6050  * IDs, priority of this rule, etc.
6051  */
6052 int
6053 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6054 		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
6055 		 struct ice_rule_query_data *added_entry)
6056 {
6057 	struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
6058 	struct ice_sw_rule_lkup_rx_tx *s_rule = NULL;
6059 	const struct ice_dummy_pkt_profile *profile;
6060 	u16 rid = 0, i, rule_buf_sz, vsi_handle;
6061 	struct list_head *rule_head;
6062 	struct ice_switch_info *sw;
6063 	u16 word_cnt;
6064 	u32 act = 0;
6065 	int status;
6066 	u8 q_rgn;
6067 
6068 	/* Initialize profile to result index bitmap */
6069 	if (!hw->switch_info->prof_res_bm_init) {
6070 		hw->switch_info->prof_res_bm_init = 1;
6071 		ice_init_prof_result_bm(hw);
6072 	}
6073 
6074 	if (!lkups_cnt)
6075 		return -EINVAL;
6076 
6077 	/* get # of words we need to match */
6078 	word_cnt = 0;
6079 	for (i = 0; i < lkups_cnt; i++) {
6080 		u16 j;
6081 
6082 		for (j = 0; j < ARRAY_SIZE(lkups->m_raw); j++)
6083 			if (lkups[i].m_raw[j])
6084 				word_cnt++;
6085 	}
6086 
6087 	if (!word_cnt)
6088 		return -EINVAL;
6089 
6090 	if (word_cnt > ICE_MAX_CHAIN_WORDS)
6091 		return -ENOSPC;
6092 
6093 	/* locate a dummy packet */
6094 	profile = ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type);
6095 	if (IS_ERR(profile))
6096 		return PTR_ERR(profile);
6097 
6098 	if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6099 	      rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
6100 	      rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6101 	      rinfo->sw_act.fltr_act == ICE_DROP_PACKET ||
6102 	      rinfo->sw_act.fltr_act == ICE_NOP)) {
6103 		status = -EIO;
6104 		goto free_pkt_profile;
6105 	}
6106 
6107 	vsi_handle = rinfo->sw_act.vsi_handle;
6108 	if (!ice_is_vsi_valid(hw, vsi_handle)) {
6109 		status =  -EINVAL;
6110 		goto free_pkt_profile;
6111 	}
6112 
6113 	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6114 	    rinfo->sw_act.fltr_act == ICE_NOP)
6115 		rinfo->sw_act.fwd_id.hw_vsi_id =
6116 			ice_get_hw_vsi_num(hw, vsi_handle);
6117 
6118 	if (rinfo->src_vsi)
6119 		rinfo->sw_act.src = ice_get_hw_vsi_num(hw, rinfo->src_vsi);
6120 	else
6121 		rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
6122 
6123 	status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
6124 	if (status)
6125 		goto free_pkt_profile;
6126 	m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6127 	if (m_entry) {
6128 		/* we have to add VSI to VSI_LIST and increment vsi_count.
6129 		 * Also Update VSI list so that we can change forwarding rule
6130 		 * if the rule already exists, we will check if it exists with
6131 		 * same vsi_id, if not then add it to the VSI list if it already
6132 		 * exists if not then create a VSI list and add the existing VSI
6133 		 * ID and the new VSI ID to the list
6134 		 * We will add that VSI to the list
6135 		 */
6136 		status = ice_adv_add_update_vsi_list(hw, m_entry,
6137 						     &m_entry->rule_info,
6138 						     rinfo);
6139 		if (added_entry) {
6140 			added_entry->rid = rid;
6141 			added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
6142 			added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
6143 		}
6144 		goto free_pkt_profile;
6145 	}
6146 	rule_buf_sz = ICE_SW_RULE_RX_TX_HDR_SIZE(s_rule, profile->pkt_len);
6147 	s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
6148 	if (!s_rule) {
6149 		status = -ENOMEM;
6150 		goto free_pkt_profile;
6151 	}
6152 	if (!rinfo->flags_info.act_valid) {
6153 		act |= ICE_SINGLE_ACT_LAN_ENABLE;
6154 		act |= ICE_SINGLE_ACT_LB_ENABLE;
6155 	} else {
6156 		act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE |
6157 						ICE_SINGLE_ACT_LB_ENABLE);
6158 	}
6159 
6160 	switch (rinfo->sw_act.fltr_act) {
6161 	case ICE_FWD_TO_VSI:
6162 		act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
6163 			ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
6164 		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
6165 		break;
6166 	case ICE_FWD_TO_Q:
6167 		act |= ICE_SINGLE_ACT_TO_Q;
6168 		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6169 		       ICE_SINGLE_ACT_Q_INDEX_M;
6170 		break;
6171 	case ICE_FWD_TO_QGRP:
6172 		q_rgn = rinfo->sw_act.qgrp_size > 0 ?
6173 			(u8)ilog2(rinfo->sw_act.qgrp_size) : 0;
6174 		act |= ICE_SINGLE_ACT_TO_Q;
6175 		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6176 		       ICE_SINGLE_ACT_Q_INDEX_M;
6177 		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
6178 		       ICE_SINGLE_ACT_Q_REGION_M;
6179 		break;
6180 	case ICE_DROP_PACKET:
6181 		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
6182 		       ICE_SINGLE_ACT_VALID_BIT;
6183 		break;
6184 	case ICE_NOP:
6185 		act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M,
6186 				  rinfo->sw_act.fwd_id.hw_vsi_id);
6187 		act &= ~ICE_SINGLE_ACT_VALID_BIT;
6188 		break;
6189 	default:
6190 		status = -EIO;
6191 		goto err_ice_add_adv_rule;
6192 	}
6193 
6194 	/* If there is no matching criteria for direction there
6195 	 * is only one difference between Rx and Tx:
6196 	 * - get switch id base on VSI number from source field (Tx)
6197 	 * - get switch id base on port number (Rx)
6198 	 *
6199 	 * If matching on direction metadata is chose rule direction is
6200 	 * extracted from type value set here.
6201 	 */
6202 	if (rinfo->sw_act.flag & ICE_FLTR_TX) {
6203 		s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
6204 		s_rule->src = cpu_to_le16(rinfo->sw_act.src);
6205 	} else {
6206 		s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
6207 		s_rule->src = cpu_to_le16(hw->port_info->lport);
6208 	}
6209 
6210 	s_rule->recipe_id = cpu_to_le16(rid);
6211 	s_rule->act = cpu_to_le32(act);
6212 
6213 	status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, profile);
6214 	if (status)
6215 		goto err_ice_add_adv_rule;
6216 
6217 	status = ice_fill_adv_packet_tun(hw, rinfo->tun_type, s_rule->hdr_data,
6218 					 profile->offsets);
6219 	if (status)
6220 		goto err_ice_add_adv_rule;
6221 
6222 	status = ice_fill_adv_packet_vlan(hw, rinfo->vlan_type,
6223 					  s_rule->hdr_data,
6224 					  profile->offsets);
6225 	if (status)
6226 		goto err_ice_add_adv_rule;
6227 
6228 	status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6229 				 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
6230 				 NULL);
6231 	if (status)
6232 		goto err_ice_add_adv_rule;
6233 	adv_fltr = devm_kzalloc(ice_hw_to_dev(hw),
6234 				sizeof(struct ice_adv_fltr_mgmt_list_entry),
6235 				GFP_KERNEL);
6236 	if (!adv_fltr) {
6237 		status = -ENOMEM;
6238 		goto err_ice_add_adv_rule;
6239 	}
6240 
6241 	adv_fltr->lkups = devm_kmemdup(ice_hw_to_dev(hw), lkups,
6242 				       lkups_cnt * sizeof(*lkups), GFP_KERNEL);
6243 	if (!adv_fltr->lkups) {
6244 		status = -ENOMEM;
6245 		goto err_ice_add_adv_rule;
6246 	}
6247 
6248 	adv_fltr->lkups_cnt = lkups_cnt;
6249 	adv_fltr->rule_info = *rinfo;
6250 	adv_fltr->rule_info.fltr_rule_id = le16_to_cpu(s_rule->index);
6251 	sw = hw->switch_info;
6252 	sw->recp_list[rid].adv_rule = true;
6253 	rule_head = &sw->recp_list[rid].filt_rules;
6254 
6255 	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6256 		adv_fltr->vsi_count = 1;
6257 
6258 	/* Add rule entry to book keeping list */
6259 	list_add(&adv_fltr->list_entry, rule_head);
6260 	if (added_entry) {
6261 		added_entry->rid = rid;
6262 		added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
6263 		added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
6264 	}
6265 err_ice_add_adv_rule:
6266 	if (status && adv_fltr) {
6267 		devm_kfree(ice_hw_to_dev(hw), adv_fltr->lkups);
6268 		devm_kfree(ice_hw_to_dev(hw), adv_fltr);
6269 	}
6270 
6271 	kfree(s_rule);
6272 
6273 free_pkt_profile:
6274 	if (profile->match & ICE_PKT_KMALLOC) {
6275 		kfree(profile->offsets);
6276 		kfree(profile->pkt);
6277 		kfree(profile);
6278 	}
6279 
6280 	return status;
6281 }
6282 
6283 /**
6284  * ice_replay_vsi_fltr - Replay filters for requested VSI
6285  * @hw: pointer to the hardware structure
6286  * @vsi_handle: driver VSI handle
6287  * @recp_id: Recipe ID for which rules need to be replayed
6288  * @list_head: list for which filters need to be replayed
6289  *
6290  * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
6291  * It is required to pass valid VSI handle.
6292  */
6293 static int
6294 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
6295 		    struct list_head *list_head)
6296 {
6297 	struct ice_fltr_mgmt_list_entry *itr;
6298 	int status = 0;
6299 	u16 hw_vsi_id;
6300 
6301 	if (list_empty(list_head))
6302 		return status;
6303 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
6304 
6305 	list_for_each_entry(itr, list_head, list_entry) {
6306 		struct ice_fltr_list_entry f_entry;
6307 
6308 		f_entry.fltr_info = itr->fltr_info;
6309 		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
6310 		    itr->fltr_info.vsi_handle == vsi_handle) {
6311 			/* update the src in case it is VSI num */
6312 			if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6313 				f_entry.fltr_info.src = hw_vsi_id;
6314 			status = ice_add_rule_internal(hw, recp_id, &f_entry);
6315 			if (status)
6316 				goto end;
6317 			continue;
6318 		}
6319 		if (!itr->vsi_list_info ||
6320 		    !test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
6321 			continue;
6322 		/* Clearing it so that the logic can add it back */
6323 		clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6324 		f_entry.fltr_info.vsi_handle = vsi_handle;
6325 		f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6326 		/* update the src in case it is VSI num */
6327 		if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6328 			f_entry.fltr_info.src = hw_vsi_id;
6329 		if (recp_id == ICE_SW_LKUP_VLAN)
6330 			status = ice_add_vlan_internal(hw, &f_entry);
6331 		else
6332 			status = ice_add_rule_internal(hw, recp_id, &f_entry);
6333 		if (status)
6334 			goto end;
6335 	}
6336 end:
6337 	return status;
6338 }
6339 
6340 /**
6341  * ice_adv_rem_update_vsi_list
6342  * @hw: pointer to the hardware structure
6343  * @vsi_handle: VSI handle of the VSI to remove
6344  * @fm_list: filter management entry for which the VSI list management needs to
6345  *	     be done
6346  */
6347 static int
6348 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
6349 			    struct ice_adv_fltr_mgmt_list_entry *fm_list)
6350 {
6351 	struct ice_vsi_list_map_info *vsi_list_info;
6352 	enum ice_sw_lkup_type lkup_type;
6353 	u16 vsi_list_id;
6354 	int status;
6355 
6356 	if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
6357 	    fm_list->vsi_count == 0)
6358 		return -EINVAL;
6359 
6360 	/* A rule with the VSI being removed does not exist */
6361 	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
6362 		return -ENOENT;
6363 
6364 	lkup_type = ICE_SW_LKUP_LAST;
6365 	vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
6366 	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
6367 					  ice_aqc_opc_update_sw_rules,
6368 					  lkup_type);
6369 	if (status)
6370 		return status;
6371 
6372 	fm_list->vsi_count--;
6373 	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
6374 	vsi_list_info = fm_list->vsi_list_info;
6375 	if (fm_list->vsi_count == 1) {
6376 		struct ice_fltr_info tmp_fltr;
6377 		u16 rem_vsi_handle;
6378 
6379 		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
6380 						ICE_MAX_VSI);
6381 		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
6382 			return -EIO;
6383 
6384 		/* Make sure VSI list is empty before removing it below */
6385 		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
6386 						  vsi_list_id, true,
6387 						  ice_aqc_opc_update_sw_rules,
6388 						  lkup_type);
6389 		if (status)
6390 			return status;
6391 
6392 		memset(&tmp_fltr, 0, sizeof(tmp_fltr));
6393 		tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
6394 		tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
6395 		fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
6396 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6397 		tmp_fltr.fwd_id.hw_vsi_id =
6398 			ice_get_hw_vsi_num(hw, rem_vsi_handle);
6399 		fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
6400 			ice_get_hw_vsi_num(hw, rem_vsi_handle);
6401 		fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
6402 
6403 		/* Update the previous switch rule of "MAC forward to VSI" to
6404 		 * "MAC fwd to VSI list"
6405 		 */
6406 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6407 		if (status) {
6408 			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
6409 				  tmp_fltr.fwd_id.hw_vsi_id, status);
6410 			return status;
6411 		}
6412 		fm_list->vsi_list_info->ref_cnt--;
6413 
6414 		/* Remove the VSI list since it is no longer used */
6415 		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
6416 		if (status) {
6417 			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
6418 				  vsi_list_id, status);
6419 			return status;
6420 		}
6421 
6422 		list_del(&vsi_list_info->list_entry);
6423 		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
6424 		fm_list->vsi_list_info = NULL;
6425 	}
6426 
6427 	return status;
6428 }
6429 
6430 /**
6431  * ice_rem_adv_rule - removes existing advanced switch rule
6432  * @hw: pointer to the hardware structure
6433  * @lkups: information on the words that needs to be looked up. All words
6434  *         together makes one recipe
6435  * @lkups_cnt: num of entries in the lkups array
6436  * @rinfo: Its the pointer to the rule information for the rule
6437  *
6438  * This function can be used to remove 1 rule at a time. The lkups is
6439  * used to describe all the words that forms the "lookup" portion of the
6440  * rule. These words can span multiple protocols. Callers to this function
6441  * need to pass in a list of protocol headers with lookup information along
6442  * and mask that determines which words are valid from the given protocol
6443  * header. rinfo describes other information related to this rule such as
6444  * forwarding IDs, priority of this rule, etc.
6445  */
6446 static int
6447 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6448 		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
6449 {
6450 	struct ice_adv_fltr_mgmt_list_entry *list_elem;
6451 	struct ice_prot_lkup_ext lkup_exts;
6452 	bool remove_rule = false;
6453 	struct mutex *rule_lock; /* Lock to protect filter rule list */
6454 	u16 i, rid, vsi_handle;
6455 	int status = 0;
6456 
6457 	memset(&lkup_exts, 0, sizeof(lkup_exts));
6458 	for (i = 0; i < lkups_cnt; i++) {
6459 		u16 count;
6460 
6461 		if (lkups[i].type >= ICE_PROTOCOL_LAST)
6462 			return -EIO;
6463 
6464 		count = ice_fill_valid_words(&lkups[i], &lkup_exts);
6465 		if (!count)
6466 			return -EIO;
6467 	}
6468 
6469 	rid = ice_find_recp(hw, &lkup_exts, rinfo);
6470 	/* If did not find a recipe that match the existing criteria */
6471 	if (rid == ICE_MAX_NUM_RECIPES)
6472 		return -EINVAL;
6473 
6474 	rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
6475 	list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6476 	/* the rule is already removed */
6477 	if (!list_elem)
6478 		return 0;
6479 	mutex_lock(rule_lock);
6480 	if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
6481 		remove_rule = true;
6482 	} else if (list_elem->vsi_count > 1) {
6483 		remove_rule = false;
6484 		vsi_handle = rinfo->sw_act.vsi_handle;
6485 		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6486 	} else {
6487 		vsi_handle = rinfo->sw_act.vsi_handle;
6488 		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6489 		if (status) {
6490 			mutex_unlock(rule_lock);
6491 			return status;
6492 		}
6493 		if (list_elem->vsi_count == 0)
6494 			remove_rule = true;
6495 	}
6496 	mutex_unlock(rule_lock);
6497 	if (remove_rule) {
6498 		struct ice_sw_rule_lkup_rx_tx *s_rule;
6499 		u16 rule_buf_sz;
6500 
6501 		rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule);
6502 		s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
6503 		if (!s_rule)
6504 			return -ENOMEM;
6505 		s_rule->act = 0;
6506 		s_rule->index = cpu_to_le16(list_elem->rule_info.fltr_rule_id);
6507 		s_rule->hdr_len = 0;
6508 		status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6509 					 rule_buf_sz, 1,
6510 					 ice_aqc_opc_remove_sw_rules, NULL);
6511 		if (!status || status == -ENOENT) {
6512 			struct ice_switch_info *sw = hw->switch_info;
6513 
6514 			mutex_lock(rule_lock);
6515 			list_del(&list_elem->list_entry);
6516 			devm_kfree(ice_hw_to_dev(hw), list_elem->lkups);
6517 			devm_kfree(ice_hw_to_dev(hw), list_elem);
6518 			mutex_unlock(rule_lock);
6519 			if (list_empty(&sw->recp_list[rid].filt_rules))
6520 				sw->recp_list[rid].adv_rule = false;
6521 		}
6522 		kfree(s_rule);
6523 	}
6524 	return status;
6525 }
6526 
6527 /**
6528  * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
6529  * @hw: pointer to the hardware structure
6530  * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
6531  *
6532  * This function is used to remove 1 rule at a time. The removal is based on
6533  * the remove_entry parameter. This function will remove rule for a given
6534  * vsi_handle with a given rule_id which is passed as parameter in remove_entry
6535  */
6536 int
6537 ice_rem_adv_rule_by_id(struct ice_hw *hw,
6538 		       struct ice_rule_query_data *remove_entry)
6539 {
6540 	struct ice_adv_fltr_mgmt_list_entry *list_itr;
6541 	struct list_head *list_head;
6542 	struct ice_adv_rule_info rinfo;
6543 	struct ice_switch_info *sw;
6544 
6545 	sw = hw->switch_info;
6546 	if (!sw->recp_list[remove_entry->rid].recp_created)
6547 		return -EINVAL;
6548 	list_head = &sw->recp_list[remove_entry->rid].filt_rules;
6549 	list_for_each_entry(list_itr, list_head, list_entry) {
6550 		if (list_itr->rule_info.fltr_rule_id ==
6551 		    remove_entry->rule_id) {
6552 			rinfo = list_itr->rule_info;
6553 			rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
6554 			return ice_rem_adv_rule(hw, list_itr->lkups,
6555 						list_itr->lkups_cnt, &rinfo);
6556 		}
6557 	}
6558 	/* either list is empty or unable to find rule */
6559 	return -ENOENT;
6560 }
6561 
6562 /**
6563  * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
6564  * @hw: pointer to the hardware structure
6565  * @vsi_handle: driver VSI handle
6566  * @list_head: list for which filters need to be replayed
6567  *
6568  * Replay the advanced rule for the given VSI.
6569  */
6570 static int
6571 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
6572 			struct list_head *list_head)
6573 {
6574 	struct ice_rule_query_data added_entry = { 0 };
6575 	struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
6576 	int status = 0;
6577 
6578 	if (list_empty(list_head))
6579 		return status;
6580 	list_for_each_entry(adv_fltr, list_head, list_entry) {
6581 		struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
6582 		u16 lk_cnt = adv_fltr->lkups_cnt;
6583 
6584 		if (vsi_handle != rinfo->sw_act.vsi_handle)
6585 			continue;
6586 		status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
6587 					  &added_entry);
6588 		if (status)
6589 			break;
6590 	}
6591 	return status;
6592 }
6593 
6594 /**
6595  * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
6596  * @hw: pointer to the hardware structure
6597  * @vsi_handle: driver VSI handle
6598  *
6599  * Replays filters for requested VSI via vsi_handle.
6600  */
6601 int ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
6602 {
6603 	struct ice_switch_info *sw = hw->switch_info;
6604 	int status;
6605 	u8 i;
6606 
6607 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6608 		struct list_head *head;
6609 
6610 		head = &sw->recp_list[i].filt_replay_rules;
6611 		if (!sw->recp_list[i].adv_rule)
6612 			status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
6613 		else
6614 			status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
6615 		if (status)
6616 			return status;
6617 	}
6618 	return status;
6619 }
6620 
6621 /**
6622  * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
6623  * @hw: pointer to the HW struct
6624  *
6625  * Deletes the filter replay rules.
6626  */
6627 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
6628 {
6629 	struct ice_switch_info *sw = hw->switch_info;
6630 	u8 i;
6631 
6632 	if (!sw)
6633 		return;
6634 
6635 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6636 		if (!list_empty(&sw->recp_list[i].filt_replay_rules)) {
6637 			struct list_head *l_head;
6638 
6639 			l_head = &sw->recp_list[i].filt_replay_rules;
6640 			if (!sw->recp_list[i].adv_rule)
6641 				ice_rem_sw_rule_info(hw, l_head);
6642 			else
6643 				ice_rem_adv_rule_info(hw, l_head);
6644 		}
6645 	}
6646 }
6647