1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 #include "ice_lib.h"
5 #include "ice_switch.h"
6 
7 #define ICE_ETH_DA_OFFSET		0
8 #define ICE_ETH_ETHTYPE_OFFSET		12
9 #define ICE_ETH_VLAN_TCI_OFFSET		14
10 #define ICE_MAX_VLAN_ID			0xFFF
11 #define ICE_IPV6_ETHER_ID		0x86DD
12 
13 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
14  * struct to configure any switch filter rules.
15  * {DA (6 bytes), SA(6 bytes),
16  * Ether type (2 bytes for header without VLAN tag) OR
17  * VLAN tag (4 bytes for header with VLAN tag) }
18  *
19  * Word on Hardcoded values
20  * byte 0 = 0x2: to identify it as locally administered DA MAC
21  * byte 6 = 0x2: to identify it as locally administered SA MAC
22  * byte 12 = 0x81 & byte 13 = 0x00:
23  *	In case of VLAN filter first two bytes defines ether type (0x8100)
24  *	and remaining two bytes are placeholder for programming a given VLAN ID
25  *	In case of Ether type filter it is treated as header without VLAN tag
26  *	and byte 12 and 13 is used to program a given Ether type instead
27  */
/* Length of the dummy L2 header above: DA (6) + SA (6) + ethertype/VLAN (4) */
#define DUMMY_ETH_HDR_LEN		16
static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
							0x2, 0, 0, 0, 0, 0,
							0x81, 0, 0, 0};
32 
/* Traits of a dummy packet, used as match flags when selecting the
 * appropriate packet profile (see struct ice_dummy_pkt_profile::match).
 */
enum {
	ICE_PKT_OUTER_IPV6	= BIT(0),	/* outer IP header is IPv6 */
	ICE_PKT_TUN_GTPC	= BIT(1),	/* GTP-C tunneled */
	ICE_PKT_TUN_GTPU	= BIT(2),	/* GTP-U tunneled */
	ICE_PKT_TUN_NVGRE	= BIT(3),	/* NVGRE tunneled */
	ICE_PKT_TUN_UDP		= BIT(4),	/* UDP-based tunnel (VXLAN/GENEVE) */
	ICE_PKT_INNER_IPV6	= BIT(5),	/* inner IP header is IPv6 */
	ICE_PKT_INNER_TCP	= BIT(6),	/* inner L4 is TCP */
	ICE_PKT_INNER_UDP	= BIT(7),	/* inner L4 is UDP */
	ICE_PKT_GTP_NOPAY	= BIT(8),	/* GTP without payload matching */
	ICE_PKT_KMALLOC		= BIT(9),	/* profile was kmalloc'ed, must be freed */
	ICE_PKT_PPPOE		= BIT(10),	/* PPPoE encapsulated */
	ICE_PKT_L2TPV3		= BIT(11),	/* L2TPv3 encapsulated */
};
47 
/* Maps one protocol header to its byte offset within a dummy packet */
struct ice_dummy_pkt_offsets {
	enum ice_protocol_type type;	/* protocol header at this offset */
	u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
};
52 
/* Ties a dummy packet template to its offset list and match criteria;
 * instances are built with the ICE_PKT_PROFILE() macro below.
 */
struct ice_dummy_pkt_profile {
	const struct ice_dummy_pkt_offsets *offsets;	/* protocol offset list */
	const u8 *pkt;		/* raw dummy packet bytes */
	u32 match;		/* ICE_PKT_* flags this profile satisfies */
	u16 pkt_len;		/* sizeof(*pkt) array, in bytes */
	u16 offsets_len;	/* sizeof(*offsets) array, in bytes */
};
60 
/* Declare the protocol offset table for dummy packet 'type' */
#define ICE_DECLARE_PKT_OFFSETS(type)					\
	static const struct ice_dummy_pkt_offsets			\
	ice_dummy_##type##_packet_offsets[]

/* Declare the raw byte template for dummy packet 'type' */
#define ICE_DECLARE_PKT_TEMPLATE(type)					\
	static const u8 ice_dummy_##type##_packet[]

/* Build an ice_dummy_pkt_profile initializer for 'type' with match flags 'm';
 * lengths are taken from the arrays declared by the two macros above.
 */
#define ICE_PKT_PROFILE(type, m) {					\
	.match		= (m),						\
	.pkt		= ice_dummy_##type##_packet,			\
	.pkt_len	= sizeof(ice_dummy_##type##_packet),		\
	.offsets	= ice_dummy_##type##_packet_offsets,		\
	.offsets_len	= sizeof(ice_dummy_##type##_packet_offsets),	\
}
75 
/* Single 802.1Q VLAN tag fragment.
 * NOTE(review): no ICE_PROTOCOL_LAST terminator — presumably spliced into
 * another offset list rather than used standalone; confirm with callers.
 */
ICE_DECLARE_PKT_OFFSETS(vlan) = {
	{ ICE_VLAN_OFOS,        12 },
};

ICE_DECLARE_PKT_TEMPLATE(vlan) = {
	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
};
83 
/* Double VLAN (QinQ) tag fragment: outer 0x9100 tag + inner 0x8100 tag.
 * NOTE(review): no ICE_PROTOCOL_LAST terminator — presumably spliced into
 * another offset list rather than used standalone; confirm with callers.
 */
ICE_DECLARE_PKT_OFFSETS(qinq) = {
	{ ICE_VLAN_EX,          12 },
	{ ICE_VLAN_IN,          16 },
};

ICE_DECLARE_PKT_TEMPLATE(qinq) = {
	0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
};
93 
/* offset info for MAC + IPv4 + NVGRE + inner MAC + IPv4 + TCP dummy packet */
ICE_DECLARE_PKT_OFFSETS(gre_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_ETYPE_IL,		54 },
	{ ICE_IPV4_IL,		56 },
	{ ICE_TCP_IL,		76 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + NVGRE + inner MAC + IPv4 + TCP */
ICE_DECLARE_PKT_TEMPLATE(gre_tcp) = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,	/* protocol 0x2F = GRE */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_IL 54 */

	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,	/* protocol 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 76 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};
140 
/* offset info for MAC + IPv4 + NVGRE + inner MAC + IPv4 + UDP dummy packet */
ICE_DECLARE_PKT_OFFSETS(gre_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_ETYPE_IL,		54 },
	{ ICE_IPV4_IL,		56 },
	{ ICE_UDP_ILOS,		76 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + NVGRE + inner MAC + IPv4 + UDP */
ICE_DECLARE_PKT_TEMPLATE(gre_udp) = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,	/* protocol 0x2F = GRE */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_IL 54 */

	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 76 */
	0x00, 0x08, 0x00, 0x00,
};
184 
/* offset info for MAC + IPv4 + UDP tunnel (VXLAN/GENEVE/VXLAN-GPE) +
 * inner MAC + IPv4 + TCP dummy packet
 */
ICE_DECLARE_PKT_OFFSETS(udp_tun_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_ETYPE_IL,		62 },
	{ ICE_IPV4_IL,		64 },
	{ ICE_TCP_IL,		84 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + UDP tunnel + inner MAC + IPv4 + TCP */
ICE_DECLARE_PKT_TEMPLATE(udp_tun_tcp) = {
	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
	0x00, 0x46, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_IL 62 */

	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};
237 
/* offset info for MAC + IPv4 + UDP tunnel (VXLAN/GENEVE/VXLAN-GPE) +
 * inner MAC + IPv4 + UDP dummy packet
 */
ICE_DECLARE_PKT_OFFSETS(udp_tun_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_ETYPE_IL,		62 },
	{ ICE_IPV4_IL,		64 },
	{ ICE_UDP_ILOS,		84 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + UDP tunnel + inner MAC + IPv4 + UDP */
ICE_DECLARE_PKT_TEMPLATE(udp_tun_udp) = {
	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
	0x00, 0x3a, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_IL 62 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
	0x00, 0x08, 0x00, 0x00,
};
287 
/* offset info for MAC + IPv4 + NVGRE + inner MAC + IPv6 + TCP dummy packet */
ICE_DECLARE_PKT_OFFSETS(gre_ipv6_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_ETYPE_IL,		54 },
	{ ICE_IPV6_IL,		56 },
	{ ICE_TCP_IL,		96 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + NVGRE + inner MAC + IPv6 + TCP */
ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x66, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,	/* protocol 0x2F = GRE */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xdd,		/* ICE_ETYPE_IL 54 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */
	0x00, 0x08, 0x06, 0x40,	/* next header 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 96 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};
339 
/* offset info for MAC + IPv4 + NVGRE + inner MAC + IPv6 + UDP dummy packet */
ICE_DECLARE_PKT_OFFSETS(gre_ipv6_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_ETYPE_IL,		54 },
	{ ICE_IPV6_IL,		56 },
	{ ICE_UDP_ILOS,		96 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + NVGRE + inner MAC + IPv6 + UDP */
ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_udp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,	/* protocol 0x2F = GRE */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xdd,		/* ICE_ETYPE_IL 54 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */
	0x00, 0x08, 0x11, 0x40,	/* next header 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 96 */
	0x00, 0x08, 0x00, 0x00,
};
388 
/* offset info for MAC + IPv4 + UDP tunnel (VXLAN/GENEVE/VXLAN-GPE) +
 * inner MAC + IPv6 + TCP dummy packet
 */
ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_ETYPE_IL,		62 },
	{ ICE_IPV6_IL,		64 },
	{ ICE_TCP_IL,		104 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + UDP tunnel + inner MAC + IPv6 + TCP */
ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_tcp) = {
	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x6e, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
	0x00, 0x5a, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xdd,		/* ICE_ETYPE_IL 62 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */
	0x00, 0x08, 0x06, 0x40,	/* next header 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 104 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};
446 
/* offset info for MAC + IPv4 + UDP tunnel (VXLAN/GENEVE/VXLAN-GPE) +
 * inner MAC + IPv6 + UDP dummy packet
 */
ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_ETYPE_IL,		62 },
	{ ICE_IPV6_IL,		64 },
	{ ICE_UDP_ILOS,		104 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + UDP tunnel + inner MAC + IPv6 + UDP */
ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_udp) = {
	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x62, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
	0x00, 0x4e, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xdd,		/* ICE_ETYPE_IL 62 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */
	0x00, 0x08, 0x11, 0x00,	/* next header 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 104 */
	0x00, 0x08, 0x00, 0x00,
};
501 
/* offset info for MAC + IPv4 + UDP dummy packet */
ICE_DECLARE_PKT_OFFSETS(udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_ILOS,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + UDP */
ICE_DECLARE_PKT_TEMPLATE(udp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
530 
/* offset info for MAC + IPv4 + TCP dummy packet */
ICE_DECLARE_PKT_OFFSETS(tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_TCP_IL,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + TCP */
ICE_DECLARE_PKT_TEMPLATE(tcp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,	/* protocol 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
562 
/* offset info for MAC + IPv6 + TCP dummy packet */
ICE_DECLARE_PKT_OFFSETS(tcp_ipv6) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_TCP_IL,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv6 + TCP */
ICE_DECLARE_PKT_TEMPLATE(tcp_ipv6) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
597 
/* offset info for MAC + IPv6 + UDP dummy packet */
ICE_DECLARE_PKT_OFFSETS(udp_ipv6) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_ILOS,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv6 + UDP */
ICE_DECLARE_PKT_TEMPLATE(udp_ipv6) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x10, 0x11, 0x00, /* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
	0x00, 0x10, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
634 
/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV4_IL,		62 },
	{ ICE_TCP_IL,		82 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x58, /* IP 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 34 */
	0x00, 0x44, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x34, /* ICE_GTP Header 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x28, /* IP 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* TCP 82 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
682 
/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV4_IL,		62 },
	{ ICE_UDP_ILOS,		82 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_udp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x4c, /* IP 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 34 */
	0x00, 0x38, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28, /* ICE_GTP Header 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x1c, /* IP 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* UDP 82 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
727 
/* Outer IPv4 + Outer UDP + GTP + Inner IPv6 + Inner TCP */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV6_IL,		62 },
	{ ICE_TCP_IL,		102 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for Outer IPv4 + Outer UDP + GTP + Inner IPv6 + Inner TCP */
ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x6c, /* IP 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 34 */
	0x00, 0x58, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x48, /* ICE_GTP Header 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
	0x00, 0x14, 0x06, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* TCP 102 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
780 
/* Outer IPv4 + Outer UDP + GTP + Inner IPv6 + Inner UDP */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV6_IL,		62 },
	{ ICE_UDP_ILOS,		102 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for Outer IPv4 + Outer UDP + GTP + Inner IPv6 + Inner UDP */
ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_udp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x60, /* IP 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 34 */
	0x00, 0x4c, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x3c, /* ICE_GTP Header 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
	0x00, 0x08, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* UDP 102 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
829 
/* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV4_IL,		82 },
	{ ICE_TCP_IL,		102 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
	0x00, 0x44, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 54 */
	0x00, 0x44, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x34, /* ICE_GTP Header 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x28, /* IP 82 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* TCP 102 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
881 
/* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV4_IL,		82 },
	{ ICE_UDP_ILOS,		102 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_udp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
	0x00, 0x38, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 54 */
	0x00, 0x38, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28, /* ICE_GTP Header 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x1c, /* IP 82 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* UDP 102 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
930 
/* Outer IPv6 + Outer UDP + GTP + Inner IPv6 + Inner TCP */
ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV6_IL,		82 },
	{ ICE_TCP_IL,		122 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for Outer IPv6 + Outer UDP + GTP + Inner IPv6 + Inner TCP */
ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
	0x00, 0x58, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 54 */
	0x00, 0x58, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x48, /* ICE_GTP Header 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
	0x00, 0x14, 0x06, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* TCP 122 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
987 
/* Outer IPv6 + Outer UDP + GTP + Inner IPv6 + Inner UDP */
ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV6_IL,		82 },
	{ ICE_UDP_ILOS,		122 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for Outer IPv6 + Outer UDP + GTP + Inner IPv6 + Inner UDP */
ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_udp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
	0x00, 0x4c, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 54 */
	0x00, 0x4c, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x3c, /* ICE_GTP Header 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
	0x00, 0x08, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* UDP 122 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
1041 
/* Outer IPv4 + Outer UDP + GTP, no inner-payload matching (ICE_GTP_NO_PAY) */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP_NO_PAY,	42 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for Outer IPv4 + Outer UDP + GTP + Inner IPv4 (no payload match) */
ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
	0x00, 0x00, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,
};
1079 
/* Outer IPv6 + Outer UDP + GTP, no inner-payload matching (ICE_GTP_NO_PAY) */
ICE_DECLARE_PKT_OFFSETS(ipv6_gtp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP_NO_PAY,	62 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for Outer IPv6 + Outer UDP + GTP (no payload match) */
ICE_DECLARE_PKT_TEMPLATE(ipv6_gtp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
	0x00, 0x00, 0x00, 0x00,

	0x30, 0x00, 0x00, 0x28, /* ICE_GTP 62 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,
};
1113 
/* offset info for MAC + PPPoE + IPv4 + TCP dummy packet */
ICE_DECLARE_PKT_OFFSETS(pppoe_ipv4_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_PPPOE,		14 },
	{ ICE_IPV4_OFOS,	22 },
	{ ICE_TCP_IL,		42 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + PPPoE + IPv4 + TCP */
ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv4_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x88, 0x64,		/* ICE_ETYPE_OL 12 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
	0x00, 0x16,

	0x00, 0x21,		/* PPP Link Layer 20 */

	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 22 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1149 
/* byte offsets of each header within the pppoe_ipv4_udp dummy packet below */
ICE_DECLARE_PKT_OFFSETS(pppoe_ipv4_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_PPPOE,		14 },
	{ ICE_IPV4_OFOS,	22 },
	{ ICE_UDP_ILOS,		42 },
	{ ICE_PROTOCOL_LAST,	0 },
};
1158 
/* dummy packet: Ethernet / PPPoE session (0x8864) / PPP (IPv4) / IPv4 / UDP */
ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv4_udp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x88, 0x64,		/* ICE_ETYPE_OL 12 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
	0x00, 0x16,

	0x00, 0x21,		/* PPP Link Layer 20 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1182 
/* byte offsets of each header within the pppoe_ipv6_tcp dummy packet below */
ICE_DECLARE_PKT_OFFSETS(pppoe_ipv6_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_PPPOE,		14 },
	{ ICE_IPV6_OFOS,	22 },
	{ ICE_TCP_IL,		62 },
	{ ICE_PROTOCOL_LAST,	0 },
};
1191 
/* dummy packet: Ethernet / PPPoE session (0x8864) / PPP (IPv6) / IPv6 / TCP */
ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv6_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x88, 0x64,		/* ICE_ETYPE_OL 12 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
	0x00, 0x2a,

	0x00, 0x57,		/* PPP Link Layer 20 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1223 
/* byte offsets of each header within the pppoe_ipv6_udp dummy packet below */
ICE_DECLARE_PKT_OFFSETS(pppoe_ipv6_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_PPPOE,		14 },
	{ ICE_IPV6_OFOS,	22 },
	{ ICE_UDP_ILOS,		62 },
	{ ICE_PROTOCOL_LAST,	0 },
};
1232 
/* dummy packet: Ethernet / PPPoE session (0x8864) / PPP (IPv6) / IPv6 / UDP */
ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv6_udp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x88, 0x64,		/* ICE_ETYPE_OL 12 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
	0x00, 0x2a,

	0x00, 0x57,		/* PPP Link Layer 20 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
	0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1261 
/* byte offsets of each header within the ipv4_l2tpv3 dummy packet below */
ICE_DECLARE_PKT_OFFSETS(ipv4_l2tpv3) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_L2TPV3,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};
1269 
/* dummy packet: Ethernet / IPv4 (protocol 115, L2TPv3) / L2TPv3 session hdr */
ICE_DECLARE_PKT_TEMPLATE(ipv4_l2tpv3) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x73, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1288 
/* byte offsets of each header within the ipv6_l2tpv3 dummy packet below */
ICE_DECLARE_PKT_OFFSETS(ipv6_l2tpv3) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_L2TPV3,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};
1296 
/* dummy packet: Ethernet / IPv6 (next header 115, L2TPv3) / L2TPv3 session */
ICE_DECLARE_PKT_TEMPLATE(ipv6_l2tpv3) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x0c, 0x73, 0x40,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1320 
/* Table of dummy packet profiles keyed by ICE_PKT_* match flags.
 * NOTE(review): entries appear ordered most-specific first (more match flags
 * before fewer) — presumably the lookup stops at the first matching entry, so
 * keep that ordering when adding profiles; confirm against the profile-match
 * logic elsewhere in this file.
 */
static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
	ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPU | ICE_PKT_OUTER_IPV6 |
				  ICE_PKT_GTP_NOPAY),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6 |
					    ICE_PKT_INNER_IPV6 |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6 |
					    ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6 |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPU | ICE_PKT_GTP_NOPAY),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_INNER_IPV6 |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU),
	ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPC | ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPC),
	ICE_PKT_PROFILE(pppoe_ipv6_udp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6 |
					ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(pppoe_ipv6_tcp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(pppoe_ipv4_udp, ICE_PKT_PPPOE | ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(pppoe_ipv4_tcp, ICE_PKT_PPPOE),
	ICE_PKT_PROFILE(gre_ipv6_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6 |
				      ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(gre_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(gre_ipv6_udp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(gre_udp, ICE_PKT_TUN_NVGRE),
	ICE_PKT_PROFILE(udp_tun_ipv6_tcp, ICE_PKT_TUN_UDP |
					  ICE_PKT_INNER_IPV6 |
					  ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(ipv6_l2tpv3, ICE_PKT_L2TPV3 | ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(ipv4_l2tpv3, ICE_PKT_L2TPV3),
	ICE_PKT_PROFILE(udp_tun_tcp, ICE_PKT_TUN_UDP | ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(udp_tun_ipv6_udp, ICE_PKT_TUN_UDP |
					  ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(udp_tun_udp, ICE_PKT_TUN_UDP),
	ICE_PKT_PROFILE(udp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(udp, ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(tcp_ipv6, ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(tcp, 0),
};
1371 
/* size of a switch rule element carrying an l-byte packet header */
#define ICE_SW_RULE_RX_TX_HDR_SIZE(s, l)	struct_size((s), hdr_data, (l))
/* size of a switch rule element carrying the dummy Ethernet header */
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s)	\
	ICE_SW_RULE_RX_TX_HDR_SIZE((s), DUMMY_ETH_HDR_LEN)
/* size of a switch rule element with no packet header attached */
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s)	\
	ICE_SW_RULE_RX_TX_HDR_SIZE((s), 0)
/* size of a large-action rule element with n actions */
#define ICE_SW_RULE_LG_ACT_SIZE(s, n)		struct_size((s), act, (n))
/* size of a VSI list rule element with n VSIs */
#define ICE_SW_RULE_VSI_LIST_SIZE(s, n)		struct_size((s), vsi, (n))

/* this is a recipe to profile association bitmap */
static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES],
			  ICE_MAX_NUM_PROFILES);

/* this is a profile to recipe association bitmap */
static DECLARE_BITMAP(profile_to_recipe[ICE_MAX_NUM_PROFILES],
			  ICE_MAX_NUM_RECIPES);
1387 
1388 /**
1389  * ice_init_def_sw_recp - initialize the recipe book keeping tables
1390  * @hw: pointer to the HW struct
1391  *
1392  * Allocate memory for the entire recipe table and initialize the structures/
1393  * entries corresponding to basic recipes.
1394  */
1395 int ice_init_def_sw_recp(struct ice_hw *hw)
1396 {
1397 	struct ice_sw_recipe *recps;
1398 	u8 i;
1399 
1400 	recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
1401 			     sizeof(*recps), GFP_KERNEL);
1402 	if (!recps)
1403 		return -ENOMEM;
1404 
1405 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
1406 		recps[i].root_rid = i;
1407 		INIT_LIST_HEAD(&recps[i].filt_rules);
1408 		INIT_LIST_HEAD(&recps[i].filt_replay_rules);
1409 		INIT_LIST_HEAD(&recps[i].rg_list);
1410 		mutex_init(&recps[i].filt_rule_lock);
1411 	}
1412 
1413 	hw->switch_info->recp_list = recps;
1414 
1415 	return 0;
1416 }
1417 
1418 /**
1419  * ice_aq_get_sw_cfg - get switch configuration
1420  * @hw: pointer to the hardware structure
1421  * @buf: pointer to the result buffer
1422  * @buf_size: length of the buffer available for response
1423  * @req_desc: pointer to requested descriptor
1424  * @num_elems: pointer to number of elements
1425  * @cd: pointer to command details structure or NULL
1426  *
1427  * Get switch configuration (0x0200) to be placed in buf.
1428  * This admin command returns information such as initial VSI/port number
1429  * and switch ID it belongs to.
1430  *
1431  * NOTE: *req_desc is both an input/output parameter.
1432  * The caller of this function first calls this function with *request_desc set
1433  * to 0. If the response from f/w has *req_desc set to 0, all the switch
1434  * configuration information has been returned; if non-zero (meaning not all
1435  * the information was returned), the caller should call this function again
1436  * with *req_desc set to the previous value returned by f/w to get the
1437  * next block of switch configuration information.
1438  *
1439  * *num_elems is output only parameter. This reflects the number of elements
1440  * in response buffer. The caller of this function to use *num_elems while
1441  * parsing the response buffer.
1442  */
1443 static int
1444 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
1445 		  u16 buf_size, u16 *req_desc, u16 *num_elems,
1446 		  struct ice_sq_cd *cd)
1447 {
1448 	struct ice_aqc_get_sw_cfg *cmd;
1449 	struct ice_aq_desc desc;
1450 	int status;
1451 
1452 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
1453 	cmd = &desc.params.get_sw_conf;
1454 	cmd->element = cpu_to_le16(*req_desc);
1455 
1456 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1457 	if (!status) {
1458 		*req_desc = le16_to_cpu(cmd->element);
1459 		*num_elems = le16_to_cpu(cmd->num_elems);
1460 	}
1461 
1462 	return status;
1463 }
1464 
1465 /**
1466  * ice_aq_add_vsi
1467  * @hw: pointer to the HW struct
1468  * @vsi_ctx: pointer to a VSI context struct
1469  * @cd: pointer to command details structure or NULL
1470  *
1471  * Add a VSI context to the hardware (0x0210)
1472  */
1473 static int
1474 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1475 	       struct ice_sq_cd *cd)
1476 {
1477 	struct ice_aqc_add_update_free_vsi_resp *res;
1478 	struct ice_aqc_add_get_update_free_vsi *cmd;
1479 	struct ice_aq_desc desc;
1480 	int status;
1481 
1482 	cmd = &desc.params.vsi_cmd;
1483 	res = &desc.params.add_update_free_vsi_res;
1484 
1485 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
1486 
1487 	if (!vsi_ctx->alloc_from_pool)
1488 		cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
1489 					   ICE_AQ_VSI_IS_VALID);
1490 	cmd->vf_id = vsi_ctx->vf_num;
1491 
1492 	cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
1493 
1494 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1495 
1496 	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1497 				 sizeof(vsi_ctx->info), cd);
1498 
1499 	if (!status) {
1500 		vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M;
1501 		vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used);
1502 		vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free);
1503 	}
1504 
1505 	return status;
1506 }
1507 
1508 /**
1509  * ice_aq_free_vsi
1510  * @hw: pointer to the HW struct
1511  * @vsi_ctx: pointer to a VSI context struct
1512  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1513  * @cd: pointer to command details structure or NULL
1514  *
1515  * Free VSI context info from hardware (0x0213)
1516  */
1517 static int
1518 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1519 		bool keep_vsi_alloc, struct ice_sq_cd *cd)
1520 {
1521 	struct ice_aqc_add_update_free_vsi_resp *resp;
1522 	struct ice_aqc_add_get_update_free_vsi *cmd;
1523 	struct ice_aq_desc desc;
1524 	int status;
1525 
1526 	cmd = &desc.params.vsi_cmd;
1527 	resp = &desc.params.add_update_free_vsi_res;
1528 
1529 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
1530 
1531 	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1532 	if (keep_vsi_alloc)
1533 		cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);
1534 
1535 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1536 	if (!status) {
1537 		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
1538 		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
1539 	}
1540 
1541 	return status;
1542 }
1543 
1544 /**
1545  * ice_aq_update_vsi
1546  * @hw: pointer to the HW struct
1547  * @vsi_ctx: pointer to a VSI context struct
1548  * @cd: pointer to command details structure or NULL
1549  *
1550  * Update VSI context in the hardware (0x0211)
1551  */
1552 static int
1553 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1554 		  struct ice_sq_cd *cd)
1555 {
1556 	struct ice_aqc_add_update_free_vsi_resp *resp;
1557 	struct ice_aqc_add_get_update_free_vsi *cmd;
1558 	struct ice_aq_desc desc;
1559 	int status;
1560 
1561 	cmd = &desc.params.vsi_cmd;
1562 	resp = &desc.params.add_update_free_vsi_res;
1563 
1564 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
1565 
1566 	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1567 
1568 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1569 
1570 	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1571 				 sizeof(vsi_ctx->info), cd);
1572 
1573 	if (!status) {
1574 		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
1575 		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
1576 	}
1577 
1578 	return status;
1579 }
1580 
1581 /**
1582  * ice_is_vsi_valid - check whether the VSI is valid or not
1583  * @hw: pointer to the HW struct
1584  * @vsi_handle: VSI handle
1585  *
1586  * check whether the VSI is valid or not
1587  */
1588 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1589 {
1590 	return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1591 }
1592 
1593 /**
1594  * ice_get_hw_vsi_num - return the HW VSI number
1595  * @hw: pointer to the HW struct
1596  * @vsi_handle: VSI handle
1597  *
1598  * return the HW VSI number
1599  * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1600  */
1601 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1602 {
1603 	return hw->vsi_ctx[vsi_handle]->vsi_num;
1604 }
1605 
1606 /**
1607  * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1608  * @hw: pointer to the HW struct
1609  * @vsi_handle: VSI handle
1610  *
1611  * return the VSI context entry for a given VSI handle
1612  */
1613 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1614 {
1615 	return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1616 }
1617 
1618 /**
1619  * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1620  * @hw: pointer to the HW struct
1621  * @vsi_handle: VSI handle
1622  * @vsi: VSI context pointer
1623  *
1624  * save the VSI context entry for a given VSI handle
1625  */
1626 static void
1627 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1628 {
1629 	hw->vsi_ctx[vsi_handle] = vsi;
1630 }
1631 
1632 /**
1633  * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1634  * @hw: pointer to the HW struct
1635  * @vsi_handle: VSI handle
1636  */
1637 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1638 {
1639 	struct ice_vsi_ctx *vsi = ice_get_vsi_ctx(hw, vsi_handle);
1640 	u8 i;
1641 
1642 	if (!vsi)
1643 		return;
1644 	ice_for_each_traffic_class(i) {
1645 		devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
1646 		vsi->lan_q_ctx[i] = NULL;
1647 		devm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]);
1648 		vsi->rdma_q_ctx[i] = NULL;
1649 	}
1650 }
1651 
1652 /**
1653  * ice_clear_vsi_ctx - clear the VSI context entry
1654  * @hw: pointer to the HW struct
1655  * @vsi_handle: VSI handle
1656  *
1657  * clear the VSI context entry
1658  */
1659 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1660 {
1661 	struct ice_vsi_ctx *vsi;
1662 
1663 	vsi = ice_get_vsi_ctx(hw, vsi_handle);
1664 	if (vsi) {
1665 		ice_clear_vsi_q_ctx(hw, vsi_handle);
1666 		devm_kfree(ice_hw_to_dev(hw), vsi);
1667 		hw->vsi_ctx[vsi_handle] = NULL;
1668 	}
1669 }
1670 
1671 /**
1672  * ice_clear_all_vsi_ctx - clear all the VSI context entries
1673  * @hw: pointer to the HW struct
1674  */
1675 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1676 {
1677 	u16 i;
1678 
1679 	for (i = 0; i < ICE_MAX_VSI; i++)
1680 		ice_clear_vsi_ctx(hw, i);
1681 }
1682 
1683 /**
1684  * ice_add_vsi - add VSI context to the hardware and VSI handle list
1685  * @hw: pointer to the HW struct
1686  * @vsi_handle: unique VSI handle provided by drivers
1687  * @vsi_ctx: pointer to a VSI context struct
1688  * @cd: pointer to command details structure or NULL
1689  *
1690  * Add a VSI context to the hardware also add it into the VSI handle list.
1691  * If this function gets called after reset for existing VSIs then update
1692  * with the new HW VSI number in the corresponding VSI handle list entry.
1693  */
1694 int
1695 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1696 	    struct ice_sq_cd *cd)
1697 {
1698 	struct ice_vsi_ctx *tmp_vsi_ctx;
1699 	int status;
1700 
1701 	if (vsi_handle >= ICE_MAX_VSI)
1702 		return -EINVAL;
1703 	status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1704 	if (status)
1705 		return status;
1706 	tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1707 	if (!tmp_vsi_ctx) {
1708 		/* Create a new VSI context */
1709 		tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
1710 					   sizeof(*tmp_vsi_ctx), GFP_KERNEL);
1711 		if (!tmp_vsi_ctx) {
1712 			ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1713 			return -ENOMEM;
1714 		}
1715 		*tmp_vsi_ctx = *vsi_ctx;
1716 		ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1717 	} else {
1718 		/* update with new HW VSI num */
1719 		tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1720 	}
1721 
1722 	return 0;
1723 }
1724 
1725 /**
1726  * ice_free_vsi- free VSI context from hardware and VSI handle list
1727  * @hw: pointer to the HW struct
1728  * @vsi_handle: unique VSI handle
1729  * @vsi_ctx: pointer to a VSI context struct
1730  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1731  * @cd: pointer to command details structure or NULL
1732  *
1733  * Free VSI context info from hardware as well as from VSI handle list
1734  */
1735 int
1736 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1737 	     bool keep_vsi_alloc, struct ice_sq_cd *cd)
1738 {
1739 	int status;
1740 
1741 	if (!ice_is_vsi_valid(hw, vsi_handle))
1742 		return -EINVAL;
1743 	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1744 	status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1745 	if (!status)
1746 		ice_clear_vsi_ctx(hw, vsi_handle);
1747 	return status;
1748 }
1749 
1750 /**
1751  * ice_update_vsi
1752  * @hw: pointer to the HW struct
1753  * @vsi_handle: unique VSI handle
1754  * @vsi_ctx: pointer to a VSI context struct
1755  * @cd: pointer to command details structure or NULL
1756  *
1757  * Update VSI context in the hardware
1758  */
1759 int
1760 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1761 	       struct ice_sq_cd *cd)
1762 {
1763 	if (!ice_is_vsi_valid(hw, vsi_handle))
1764 		return -EINVAL;
1765 	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1766 	return ice_aq_update_vsi(hw, vsi_ctx, cd);
1767 }
1768 
1769 /**
1770  * ice_cfg_rdma_fltr - enable/disable RDMA filtering on VSI
1771  * @hw: pointer to HW struct
1772  * @vsi_handle: VSI SW index
1773  * @enable: boolean for enable/disable
1774  */
1775 int
1776 ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
1777 {
1778 	struct ice_vsi_ctx *ctx, *cached_ctx;
1779 	int status;
1780 
1781 	cached_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1782 	if (!cached_ctx)
1783 		return -ENOENT;
1784 
1785 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1786 	if (!ctx)
1787 		return -ENOMEM;
1788 
1789 	ctx->info.q_opt_rss = cached_ctx->info.q_opt_rss;
1790 	ctx->info.q_opt_tc = cached_ctx->info.q_opt_tc;
1791 	ctx->info.q_opt_flags = cached_ctx->info.q_opt_flags;
1792 
1793 	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
1794 
1795 	if (enable)
1796 		ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
1797 	else
1798 		ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
1799 
1800 	status = ice_update_vsi(hw, vsi_handle, ctx, NULL);
1801 	if (!status) {
1802 		cached_ctx->info.q_opt_flags = ctx->info.q_opt_flags;
1803 		cached_ctx->info.valid_sections |= ctx->info.valid_sections;
1804 	}
1805 
1806 	kfree(ctx);
1807 	return status;
1808 }
1809 
1810 /**
1811  * ice_aq_alloc_free_vsi_list
1812  * @hw: pointer to the HW struct
1813  * @vsi_list_id: VSI list ID returned or used for lookup
1814  * @lkup_type: switch rule filter lookup type
1815  * @opc: switch rules population command type - pass in the command opcode
1816  *
1817  * allocates or free a VSI list resource
1818  */
1819 static int
1820 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1821 			   enum ice_sw_lkup_type lkup_type,
1822 			   enum ice_adminq_opc opc)
1823 {
1824 	struct ice_aqc_alloc_free_res_elem *sw_buf;
1825 	struct ice_aqc_res_elem *vsi_ele;
1826 	u16 buf_len;
1827 	int status;
1828 
1829 	buf_len = struct_size(sw_buf, elem, 1);
1830 	sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
1831 	if (!sw_buf)
1832 		return -ENOMEM;
1833 	sw_buf->num_elems = cpu_to_le16(1);
1834 
1835 	if (lkup_type == ICE_SW_LKUP_MAC ||
1836 	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1837 	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1838 	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1839 	    lkup_type == ICE_SW_LKUP_PROMISC ||
1840 	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1841 	    lkup_type == ICE_SW_LKUP_DFLT) {
1842 		sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1843 	} else if (lkup_type == ICE_SW_LKUP_VLAN) {
1844 		sw_buf->res_type =
1845 			cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1846 	} else {
1847 		status = -EINVAL;
1848 		goto ice_aq_alloc_free_vsi_list_exit;
1849 	}
1850 
1851 	if (opc == ice_aqc_opc_free_res)
1852 		sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);
1853 
1854 	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1855 	if (status)
1856 		goto ice_aq_alloc_free_vsi_list_exit;
1857 
1858 	if (opc == ice_aqc_opc_alloc_res) {
1859 		vsi_ele = &sw_buf->elem[0];
1860 		*vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
1861 	}
1862 
1863 ice_aq_alloc_free_vsi_list_exit:
1864 	devm_kfree(ice_hw_to_dev(hw), sw_buf);
1865 	return status;
1866 }
1867 
1868 /**
1869  * ice_aq_sw_rules - add/update/remove switch rules
1870  * @hw: pointer to the HW struct
1871  * @rule_list: pointer to switch rule population list
1872  * @rule_list_sz: total size of the rule list in bytes
1873  * @num_rules: number of switch rules in the rule_list
1874  * @opc: switch rules population command type - pass in the command opcode
1875  * @cd: pointer to command details structure or NULL
1876  *
1877  * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1878  */
1879 int
1880 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1881 		u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1882 {
1883 	struct ice_aq_desc desc;
1884 	int status;
1885 
1886 	if (opc != ice_aqc_opc_add_sw_rules &&
1887 	    opc != ice_aqc_opc_update_sw_rules &&
1888 	    opc != ice_aqc_opc_remove_sw_rules)
1889 		return -EINVAL;
1890 
1891 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
1892 
1893 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1894 	desc.params.sw_rules.num_rules_fltr_entry_index =
1895 		cpu_to_le16(num_rules);
1896 	status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1897 	if (opc != ice_aqc_opc_add_sw_rules &&
1898 	    hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
1899 		status = -ENOENT;
1900 
1901 	return status;
1902 }
1903 
1904 /**
1905  * ice_aq_add_recipe - add switch recipe
1906  * @hw: pointer to the HW struct
1907  * @s_recipe_list: pointer to switch rule population list
1908  * @num_recipes: number of switch recipes in the list
1909  * @cd: pointer to command details structure or NULL
1910  *
1911  * Add(0x0290)
1912  */
1913 static int
1914 ice_aq_add_recipe(struct ice_hw *hw,
1915 		  struct ice_aqc_recipe_data_elem *s_recipe_list,
1916 		  u16 num_recipes, struct ice_sq_cd *cd)
1917 {
1918 	struct ice_aqc_add_get_recipe *cmd;
1919 	struct ice_aq_desc desc;
1920 	u16 buf_size;
1921 
1922 	cmd = &desc.params.add_get_recipe;
1923 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1924 
1925 	cmd->num_sub_recipes = cpu_to_le16(num_recipes);
1926 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1927 
1928 	buf_size = num_recipes * sizeof(*s_recipe_list);
1929 
1930 	return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1931 }
1932 
1933 /**
1934  * ice_aq_get_recipe - get switch recipe
1935  * @hw: pointer to the HW struct
1936  * @s_recipe_list: pointer to switch rule population list
1937  * @num_recipes: pointer to the number of recipes (input and output)
1938  * @recipe_root: root recipe number of recipe(s) to retrieve
1939  * @cd: pointer to command details structure or NULL
1940  *
1941  * Get(0x0292)
1942  *
1943  * On input, *num_recipes should equal the number of entries in s_recipe_list.
1944  * On output, *num_recipes will equal the number of entries returned in
1945  * s_recipe_list.
1946  *
1947  * The caller must supply enough space in s_recipe_list to hold all possible
1948  * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1949  */
1950 static int
1951 ice_aq_get_recipe(struct ice_hw *hw,
1952 		  struct ice_aqc_recipe_data_elem *s_recipe_list,
1953 		  u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1954 {
1955 	struct ice_aqc_add_get_recipe *cmd;
1956 	struct ice_aq_desc desc;
1957 	u16 buf_size;
1958 	int status;
1959 
1960 	if (*num_recipes != ICE_MAX_NUM_RECIPES)
1961 		return -EINVAL;
1962 
1963 	cmd = &desc.params.add_get_recipe;
1964 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1965 
1966 	cmd->return_index = cpu_to_le16(recipe_root);
1967 	cmd->num_sub_recipes = 0;
1968 
1969 	buf_size = *num_recipes * sizeof(*s_recipe_list);
1970 
1971 	status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1972 	*num_recipes = le16_to_cpu(cmd->num_sub_recipes);
1973 
1974 	return status;
1975 }
1976 
1977 /**
1978  * ice_update_recipe_lkup_idx - update a default recipe based on the lkup_idx
1979  * @hw: pointer to the HW struct
1980  * @params: parameters used to update the default recipe
1981  *
1982  * This function only supports updating default recipes and it only supports
1983  * updating a single recipe based on the lkup_idx at a time.
1984  *
1985  * This is done as a read-modify-write operation. First, get the current recipe
1986  * contents based on the recipe's ID. Then modify the field vector index and
1987  * mask if it's valid at the lkup_idx. Finally, use the add recipe AQ to update
1988  * the pre-existing recipe with the modifications.
1989  */
1990 int
1991 ice_update_recipe_lkup_idx(struct ice_hw *hw,
1992 			   struct ice_update_recipe_lkup_idx_params *params)
1993 {
1994 	struct ice_aqc_recipe_data_elem *rcp_list;
1995 	u16 num_recps = ICE_MAX_NUM_RECIPES;
1996 	int status;
1997 
1998 	rcp_list = kcalloc(num_recps, sizeof(*rcp_list), GFP_KERNEL);
1999 	if (!rcp_list)
2000 		return -ENOMEM;
2001 
2002 	/* read current recipe list from firmware */
2003 	rcp_list->recipe_indx = params->rid;
2004 	status = ice_aq_get_recipe(hw, rcp_list, &num_recps, params->rid, NULL);
2005 	if (status) {
2006 		ice_debug(hw, ICE_DBG_SW, "Failed to get recipe %d, status %d\n",
2007 			  params->rid, status);
2008 		goto error_out;
2009 	}
2010 
2011 	/* only modify existing recipe's lkup_idx and mask if valid, while
2012 	 * leaving all other fields the same, then update the recipe firmware
2013 	 */
2014 	rcp_list->content.lkup_indx[params->lkup_idx] = params->fv_idx;
2015 	if (params->mask_valid)
2016 		rcp_list->content.mask[params->lkup_idx] =
2017 			cpu_to_le16(params->mask);
2018 
2019 	if (params->ignore_valid)
2020 		rcp_list->content.lkup_indx[params->lkup_idx] |=
2021 			ICE_AQ_RECIPE_LKUP_IGNORE;
2022 
2023 	status = ice_aq_add_recipe(hw, &rcp_list[0], 1, NULL);
2024 	if (status)
2025 		ice_debug(hw, ICE_DBG_SW, "Failed to update recipe %d lkup_idx %d fv_idx %d mask %d mask_valid %s, status %d\n",
2026 			  params->rid, params->lkup_idx, params->fv_idx,
2027 			  params->mask, params->mask_valid ? "true" : "false",
2028 			  status);
2029 
2030 error_out:
2031 	kfree(rcp_list);
2032 	return status;
2033 }
2034 
2035 /**
2036  * ice_aq_map_recipe_to_profile - Map recipe to packet profile
2037  * @hw: pointer to the HW struct
2038  * @profile_id: package profile ID to associate the recipe with
2039  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2040  * @cd: pointer to command details structure or NULL
2041  * Recipe to profile association (0x0291)
2042  */
2043 static int
2044 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2045 			     struct ice_sq_cd *cd)
2046 {
2047 	struct ice_aqc_recipe_to_profile *cmd;
2048 	struct ice_aq_desc desc;
2049 
2050 	cmd = &desc.params.recipe_to_profile;
2051 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
2052 	cmd->profile_id = cpu_to_le16(profile_id);
2053 	/* Set the recipe ID bit in the bitmask to let the device know which
2054 	 * profile we are associating the recipe to
2055 	 */
2056 	memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc));
2057 
2058 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2059 }
2060 
2061 /**
2062  * ice_aq_get_recipe_to_profile - Map recipe to packet profile
2063  * @hw: pointer to the HW struct
2064  * @profile_id: package profile ID to associate the recipe with
2065  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2066  * @cd: pointer to command details structure or NULL
2067  * Associate profile ID with given recipe (0x0293)
2068  */
2069 static int
2070 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2071 			     struct ice_sq_cd *cd)
2072 {
2073 	struct ice_aqc_recipe_to_profile *cmd;
2074 	struct ice_aq_desc desc;
2075 	int status;
2076 
2077 	cmd = &desc.params.recipe_to_profile;
2078 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
2079 	cmd->profile_id = cpu_to_le16(profile_id);
2080 
2081 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2082 	if (!status)
2083 		memcpy(r_bitmap, cmd->recipe_assoc, sizeof(cmd->recipe_assoc));
2084 
2085 	return status;
2086 }
2087 
2088 /**
2089  * ice_alloc_recipe - add recipe resource
2090  * @hw: pointer to the hardware structure
2091  * @rid: recipe ID returned as response to AQ call
2092  */
2093 static int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
2094 {
2095 	struct ice_aqc_alloc_free_res_elem *sw_buf;
2096 	u16 buf_len;
2097 	int status;
2098 
2099 	buf_len = struct_size(sw_buf, elem, 1);
2100 	sw_buf = kzalloc(buf_len, GFP_KERNEL);
2101 	if (!sw_buf)
2102 		return -ENOMEM;
2103 
2104 	sw_buf->num_elems = cpu_to_le16(1);
2105 	sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
2106 					ICE_AQC_RES_TYPE_S) |
2107 					ICE_AQC_RES_TYPE_FLAG_SHARED);
2108 	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2109 				       ice_aqc_opc_alloc_res, NULL);
2110 	if (!status)
2111 		*rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp);
2112 	kfree(sw_buf);
2113 
2114 	return status;
2115 }
2116 
2117 /**
2118  * ice_get_recp_to_prof_map - updates recipe to profile mapping
2119  * @hw: pointer to hardware structure
2120  *
2121  * This function is used to populate recipe_to_profile matrix where index to
2122  * this array is the recipe ID and the element is the mapping of which profiles
2123  * is this recipe mapped to.
2124  */
2125 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
2126 {
2127 	DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
2128 	u16 i;
2129 
2130 	for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
2131 		u16 j;
2132 
2133 		bitmap_zero(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
2134 		bitmap_zero(r_bitmap, ICE_MAX_NUM_RECIPES);
2135 		if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
2136 			continue;
2137 		bitmap_copy(profile_to_recipe[i], r_bitmap,
2138 			    ICE_MAX_NUM_RECIPES);
2139 		for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
2140 			set_bit(i, recipe_to_profile[j]);
2141 	}
2142 }
2143 
2144 /**
2145  * ice_collect_result_idx - copy result index values
2146  * @buf: buffer that contains the result index
2147  * @recp: the recipe struct to copy data into
2148  */
2149 static void
2150 ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
2151 		       struct ice_sw_recipe *recp)
2152 {
2153 	if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
2154 		set_bit(buf->content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
2155 			recp->res_idxs);
2156 }
2157 
2158 /**
2159  * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
2160  * @hw: pointer to hardware structure
2161  * @recps: struct that we need to populate
2162  * @rid: recipe ID that we are populating
2163  * @refresh_required: true if we should get recipe to profile mapping from FW
2164  *
2165  * This function is used to populate all the necessary entries into our
2166  * bookkeeping so that we have a current list of all the recipes that are
2167  * programmed in the firmware.
2168  */
static int
ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
		    bool *refresh_required)
{
	DECLARE_BITMAP(result_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_data_elem *tmp;
	u16 num_recps = ICE_MAX_NUM_RECIPES;
	struct ice_prot_lkup_ext *lkup_exts;
	u8 fv_word_idx = 0;
	u16 sub_recps;
	int status;

	bitmap_zero(result_bm, ICE_MAX_FV_WORDS);

	/* we need a buffer big enough to accommodate all the recipes */
	tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	/* seed the query with the requested recipe ID; FW fills in the rest
	 * of the chain and updates num_recps with the actual count
	 */
	tmp[0].recipe_indx = rid;
	status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
	/* non-zero status meaning recipe doesn't exist */
	if (status)
		goto err_unroll;

	/* Get recipe to profile map so that we can get the fv from lkups that
	 * we read for a recipe from FW. Since we want to minimize the number of
	 * times we make this FW call, just make one call and cache the copy
	 * until a new recipe is added. This operation is only required the
	 * first time to get the changes from FW. Then to search existing
	 * entries we don't need to update the cache again until another recipe
	 * gets added.
	 */
	if (*refresh_required) {
		ice_get_recp_to_prof_map(hw);
		*refresh_required = false;
	}

	/* Start populating all the entries for recps[rid] based on lkups from
	 * firmware. Note that we are only creating the root recipe in our
	 * database.
	 */
	lkup_exts = &recps[rid].lkup_exts;

	/* walk every sub-recipe FW returned for this (possibly chained)
	 * recipe and fold its lookups into our bookkeeping
	 */
	for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
		struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
		struct ice_recp_grp_entry *rg_entry;
		u8 i, prof, idx, prot = 0;
		bool is_root;
		u16 off = 0;

		rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry),
					GFP_KERNEL);
		if (!rg_entry) {
			status = -ENOMEM;
			goto err_unroll;
		}

		idx = root_bufs.recipe_indx;
		is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;

		/* Mark all result indices in this chain */
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
			set_bit(root_bufs.content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
				result_bm);

		/* get the first profile that is associated with rid */
		prof = find_first_bit(recipe_to_profile[idx],
				      ICE_MAX_NUM_PROFILES);
		/* lkup_indx[0]/mask[0] are skipped; the lookups of interest
		 * start at index 1
		 */
		for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
			u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];

			rg_entry->fv_idx[i] = lkup_indx;
			rg_entry->fv_mask[i] =
				le16_to_cpu(root_bufs.content.mask[i + 1]);

			/* If the recipe is a chained recipe then all its
			 * child recipe's result will have a result index.
			 * To fill fv_words we should not use those result
			 * index, we only need the protocol ids and offsets.
			 * We will skip all the fv_idx which stores result
			 * index in them. We also need to skip any fv_idx which
			 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
			 * valid offset value.
			 */
			if (test_bit(rg_entry->fv_idx[i], hw->switch_info->prof_res_bm[prof]) ||
			    rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
			    rg_entry->fv_idx[i] == 0)
				continue;

			/* translate the field-vector index into a protocol ID
			 * and offset for our lookup extraction table
			 */
			ice_find_prot_off(hw, ICE_BLK_SW, prof,
					  rg_entry->fv_idx[i], &prot, &off);
			lkup_exts->fv_words[fv_word_idx].prot_id = prot;
			lkup_exts->fv_words[fv_word_idx].off = off;
			lkup_exts->field_mask[fv_word_idx] =
				rg_entry->fv_mask[i];
			fv_word_idx++;
		}
		/* populate rg_list with the data from the child entry of this
		 * recipe
		 */
		list_add(&rg_entry->l_entry, &recps[rid].rg_list);

		/* Propagate some data to the recipe database */
		recps[idx].is_root = !!is_root;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
		bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
			recps[idx].chain_idx = root_bufs.content.result_indx &
				~ICE_AQ_RECIPE_RESULT_EN;
			set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
		} else {
			recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
		}

		if (!is_root)
			continue;

		/* Only do the following for root recipes entries */
		memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
		       sizeof(recps[idx].r_bitmap));
		recps[idx].root_rid = root_bufs.content.rid &
			~ICE_AQ_RECIPE_ID_IS_ROOT;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
	}

	/* Complete initialization of the root recipe entry */
	lkup_exts->n_val_words = fv_word_idx;
	recps[rid].big_recp = (num_recps > 1);
	recps[rid].n_grp_count = (u8)num_recps;
	/* keep a copy of the raw FW recipe data for later re-programming */
	recps[rid].root_buf = devm_kmemdup(ice_hw_to_dev(hw), tmp,
					   recps[rid].n_grp_count * sizeof(*recps[rid].root_buf),
					   GFP_KERNEL);
	if (!recps[rid].root_buf) {
		status = -ENOMEM;
		goto err_unroll;
	}

	/* Copy result indexes */
	bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
	recps[rid].recp_created = true;

err_unroll:
	kfree(tmp);
	return status;
}
2315 
2316 /* ice_init_port_info - Initialize port_info with switch configuration data
2317  * @pi: pointer to port_info
2318  * @vsi_port_num: VSI number or port number
2319  * @type: Type of switch element (port or VSI)
2320  * @swid: switch ID of the switch the element is attached to
2321  * @pf_vf_num: PF or VF number
2322  * @is_vf: true if the element is a VF, false otherwise
2323  */
2324 static void
2325 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
2326 		   u16 swid, u16 pf_vf_num, bool is_vf)
2327 {
2328 	switch (type) {
2329 	case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2330 		pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
2331 		pi->sw_id = swid;
2332 		pi->pf_vf_num = pf_vf_num;
2333 		pi->is_vf = is_vf;
2334 		break;
2335 	default:
2336 		ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
2337 		break;
2338 	}
2339 }
2340 
2341 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
2342  * @hw: pointer to the hardware structure
2343  */
2344 int ice_get_initial_sw_cfg(struct ice_hw *hw)
2345 {
2346 	struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
2347 	u16 req_desc = 0;
2348 	u16 num_elems;
2349 	int status;
2350 	u16 i;
2351 
2352 	rbuf = kzalloc(ICE_SW_CFG_MAX_BUF_LEN, GFP_KERNEL);
2353 	if (!rbuf)
2354 		return -ENOMEM;
2355 
2356 	/* Multiple calls to ice_aq_get_sw_cfg may be required
2357 	 * to get all the switch configuration information. The need
2358 	 * for additional calls is indicated by ice_aq_get_sw_cfg
2359 	 * writing a non-zero value in req_desc
2360 	 */
2361 	do {
2362 		struct ice_aqc_get_sw_cfg_resp_elem *ele;
2363 
2364 		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
2365 					   &req_desc, &num_elems, NULL);
2366 
2367 		if (status)
2368 			break;
2369 
2370 		for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
2371 			u16 pf_vf_num, swid, vsi_port_num;
2372 			bool is_vf = false;
2373 			u8 res_type;
2374 
2375 			vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
2376 				ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
2377 
2378 			pf_vf_num = le16_to_cpu(ele->pf_vf_num) &
2379 				ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
2380 
2381 			swid = le16_to_cpu(ele->swid);
2382 
2383 			if (le16_to_cpu(ele->pf_vf_num) &
2384 			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
2385 				is_vf = true;
2386 
2387 			res_type = (u8)(le16_to_cpu(ele->vsi_port_num) >>
2388 					ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
2389 
2390 			if (res_type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
2391 				/* FW VSI is not needed. Just continue. */
2392 				continue;
2393 			}
2394 
2395 			ice_init_port_info(hw->port_info, vsi_port_num,
2396 					   res_type, swid, pf_vf_num, is_vf);
2397 		}
2398 	} while (req_desc && !status);
2399 
2400 	kfree(rbuf);
2401 	return status;
2402 }
2403 
2404 /**
2405  * ice_fill_sw_info - Helper function to populate lb_en and lan_en
2406  * @hw: pointer to the hardware structure
2407  * @fi: filter info structure to fill/update
2408  *
2409  * This helper function populates the lb_en and lan_en elements of the provided
2410  * ice_fltr_info struct using the switch's type and characteristics of the
2411  * switch rule being configured.
2412  */
2413 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
2414 {
2415 	fi->lb_en = false;
2416 	fi->lan_en = false;
2417 	if ((fi->flag & ICE_FLTR_TX) &&
2418 	    (fi->fltr_act == ICE_FWD_TO_VSI ||
2419 	     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2420 	     fi->fltr_act == ICE_FWD_TO_Q ||
2421 	     fi->fltr_act == ICE_FWD_TO_QGRP)) {
2422 		/* Setting LB for prune actions will result in replicated
2423 		 * packets to the internal switch that will be dropped.
2424 		 */
2425 		if (fi->lkup_type != ICE_SW_LKUP_VLAN)
2426 			fi->lb_en = true;
2427 
2428 		/* Set lan_en to TRUE if
2429 		 * 1. The switch is a VEB AND
2430 		 * 2
2431 		 * 2.1 The lookup is a directional lookup like ethertype,
2432 		 * promiscuous, ethertype-MAC, promiscuous-VLAN
2433 		 * and default-port OR
2434 		 * 2.2 The lookup is VLAN, OR
2435 		 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
2436 		 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
2437 		 *
2438 		 * OR
2439 		 *
2440 		 * The switch is a VEPA.
2441 		 *
2442 		 * In all other cases, the LAN enable has to be set to false.
2443 		 */
2444 		if (hw->evb_veb) {
2445 			if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2446 			    fi->lkup_type == ICE_SW_LKUP_PROMISC ||
2447 			    fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2448 			    fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2449 			    fi->lkup_type == ICE_SW_LKUP_DFLT ||
2450 			    fi->lkup_type == ICE_SW_LKUP_VLAN ||
2451 			    (fi->lkup_type == ICE_SW_LKUP_MAC &&
2452 			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
2453 			    (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
2454 			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)))
2455 				fi->lan_en = true;
2456 		} else {
2457 			fi->lan_en = true;
2458 		}
2459 	}
2460 }
2461 
2462 /**
2463  * ice_fill_sw_rule - Helper function to fill switch rule structure
2464  * @hw: pointer to the hardware structure
2465  * @f_info: entry containing packet forwarding information
2466  * @s_rule: switch rule structure to be filled in based on mac_entry
2467  * @opc: switch rules population command type - pass in the command opcode
2468  */
static void
ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
		 struct ice_sw_rule_lkup_rx_tx *s_rule,
		 enum ice_adminq_opc opc)
{
	/* vlan_id above ICE_MAX_VLAN_ID acts as a "no VLAN" sentinel; it is
	 * only programmed into the header if a lookup below lowers it into
	 * the valid range
	 */
	u16 vlan_id = ICE_MAX_VLAN_ID + 1;
	u16 vlan_tpid = ETH_P_8021Q;
	void *daddr = NULL;
	u16 eth_hdr_sz;
	u8 *eth_hdr;
	u32 act = 0;
	__be16 *off;
	u8 q_rgn;

	/* For a remove, only the rule index matters; no header or action */
	if (opc == ice_aqc_opc_remove_sw_rules) {
		s_rule->act = 0;
		s_rule->index = cpu_to_le16(f_info->fltr_rule_id);
		s_rule->hdr_len = 0;
		return;
	}

	eth_hdr_sz = sizeof(dummy_eth_header);
	eth_hdr = s_rule->hdr_data;

	/* initialize the ether header with a dummy header */
	memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);
	ice_fill_sw_info(hw, f_info);

	/* Build the action word from the requested forwarding behavior */
	switch (f_info->fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
			ICE_SINGLE_ACT_VSI_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_VSI_LIST:
		act |= ICE_SINGLE_ACT_VSI_LIST;
		act |= (f_info->fwd_id.vsi_list_id <<
			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
			ICE_SINGLE_ACT_VSI_LIST_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
			ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_QGRP:
		/* queue group size is programmed as a power-of-two region */
		q_rgn = f_info->qgrp_size > 0 ?
			(u8)ilog2(f_info->qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
			ICE_SINGLE_ACT_Q_REGION_M;
		break;
	default:
		/* unrecognized action: leave the rule unconfigured */
		return;
	}

	if (f_info->lb_en)
		act |= ICE_SINGLE_ACT_LB_ENABLE;
	if (f_info->lan_en)
		act |= ICE_SINGLE_ACT_LAN_ENABLE;

	/* Collect the match data (DA / VLAN / ethertype) per lookup type */
	switch (f_info->lkup_type) {
	case ICE_SW_LKUP_MAC:
		daddr = f_info->l_data.mac.mac_addr;
		break;
	case ICE_SW_LKUP_VLAN:
		vlan_id = f_info->l_data.vlan.vlan_id;
		if (f_info->l_data.vlan.tpid_valid)
			vlan_tpid = f_info->l_data.vlan.tpid;
		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			act |= ICE_SINGLE_ACT_PRUNE;
			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
		}
		break;
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		daddr = f_info->l_data.ethertype_mac.mac_addr;
		fallthrough;
	case ICE_SW_LKUP_ETHERTYPE:
		/* ethertype is written where a non-VLAN header carries it */
		off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		break;
	case ICE_SW_LKUP_PROMISC_VLAN:
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		fallthrough;
	case ICE_SW_LKUP_PROMISC:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		break;
	default:
		break;
	}

	s_rule->hdr.type = (f_info->flag & ICE_FLTR_RX) ?
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);

	/* Recipe set depending on lookup type */
	s_rule->recipe_id = cpu_to_le16(f_info->lkup_type);
	s_rule->src = cpu_to_le16(f_info->src);
	s_rule->act = cpu_to_le32(act);

	if (daddr)
		ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);

	/* Program VLAN ID and TPID only when a valid VLAN ID was set above */
	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
		off = (__force __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
		*off = cpu_to_be16(vlan_id);
		off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = cpu_to_be16(vlan_tpid);
	}

	/* Create the switch rule with the final dummy Ethernet header */
	if (opc != ice_aqc_opc_update_sw_rules)
		s_rule->hdr_len = cpu_to_le16(eth_hdr_sz);
}
2599 
2600 /**
2601  * ice_add_marker_act
2602  * @hw: pointer to the hardware structure
2603  * @m_ent: the management entry for which sw marker needs to be added
2604  * @sw_marker: sw marker to tag the Rx descriptor with
2605  * @l_id: large action resource ID
2606  *
2607  * Create a large action to hold software marker and update the switch rule
2608  * entry pointed by m_ent with newly created large action
2609  */
static int
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		   u16 sw_marker, u16 l_id)
{
	struct ice_sw_rule_lkup_rx_tx *rx_tx;
	struct ice_sw_rule_lg_act *lg_act;
	/* For software marker we need 3 large actions
	 * 1. FWD action: FWD TO VSI or VSI LIST
	 * 2. GENERIC VALUE action to hold the profile ID
	 * 3. GENERIC VALUE action to hold the software marker ID
	 */
	const u16 num_lg_acts = 3;
	u16 lg_act_size;
	u16 rules_size;
	int status;
	u32 act;
	u16 id;

	/* SW markers are only supported on MAC filter rules */
	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return -EINVAL;

	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	 *    1. Large Action
	 *    2. Look up Tx Rx
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(lg_act, num_lg_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(rx_tx);
	lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
	if (!lg_act)
		return -ENOMEM;

	/* the lookup rule immediately follows the large action in the buffer */
	rx_tx = (typeof(rx_tx))((u8 *)lg_act + lg_act_size);

	/* Fill in the first switch rule i.e. large action */
	lg_act->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->index = cpu_to_le16(l_id);
	lg_act->size = cpu_to_le16(num_lg_acts);

	/* First action VSI forwarding or VSI list forwarding depending on how
	 * many VSIs
	 */
	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;

	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->act[0] = cpu_to_le32(act);

	/* Second action descriptor type: generic value holding the profile ID
	 * (value 1) per the 3-action layout described above
	 */
	act = ICE_LG_ACT_GENERIC;

	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->act[1] = cpu_to_le32(act);

	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;

	/* Third action Marker value */
	act |= ICE_LG_ACT_GENERIC;
	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
		ICE_LG_ACT_GENERIC_VALUE_M;

	lg_act->act[2] = cpu_to_le32(act);

	/* call the fill switch rule to fill the lookup Tx Rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);

	/* Update the action to point to the large action ID */
	rx_tx->act = cpu_to_le32(ICE_SINGLE_ACT_PTR |
				 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
				  ICE_SINGLE_ACT_PTR_VAL_M));

	/* Use the filter rule ID of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 * action
	 */
	rx_tx->index = cpu_to_le16(m_ent->fltr_info.fltr_rule_id);

	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	if (!status) {
		/* remember the marker association in the management entry */
		m_ent->lg_act_idx = l_id;
		m_ent->sw_marker_id = sw_marker;
	}

	devm_kfree(ice_hw_to_dev(hw), lg_act);
	return status;
}
2702 
2703 /**
2704  * ice_create_vsi_list_map
2705  * @hw: pointer to the hardware structure
2706  * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2707  * @num_vsi: number of VSI handles in the array
2708  * @vsi_list_id: VSI list ID generated as part of allocate resource
2709  *
2710  * Helper function to create a new entry of VSI list ID to VSI mapping
2711  * using the given VSI list ID
2712  */
2713 static struct ice_vsi_list_map_info *
2714 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2715 			u16 vsi_list_id)
2716 {
2717 	struct ice_switch_info *sw = hw->switch_info;
2718 	struct ice_vsi_list_map_info *v_map;
2719 	int i;
2720 
2721 	v_map = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*v_map), GFP_KERNEL);
2722 	if (!v_map)
2723 		return NULL;
2724 
2725 	v_map->vsi_list_id = vsi_list_id;
2726 	v_map->ref_cnt = 1;
2727 	for (i = 0; i < num_vsi; i++)
2728 		set_bit(vsi_handle_arr[i], v_map->vsi_map);
2729 
2730 	list_add(&v_map->list_entry, &sw->vsi_list_map_head);
2731 	return v_map;
2732 }
2733 
2734 /**
2735  * ice_update_vsi_list_rule
2736  * @hw: pointer to the hardware structure
2737  * @vsi_handle_arr: array of VSI handles to form a VSI list
2738  * @num_vsi: number of VSI handles in the array
2739  * @vsi_list_id: VSI list ID generated as part of allocate resource
2740  * @remove: Boolean value to indicate if this is a remove action
2741  * @opc: switch rules population command type - pass in the command opcode
2742  * @lkup_type: lookup type of the filter
2743  *
2744  * Call AQ command to add a new switch rule or update existing switch rule
2745  * using the given VSI list ID
2746  */
2747 static int
2748 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2749 			 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2750 			 enum ice_sw_lkup_type lkup_type)
2751 {
2752 	struct ice_sw_rule_vsi_list *s_rule;
2753 	u16 s_rule_size;
2754 	u16 rule_type;
2755 	int status;
2756 	int i;
2757 
2758 	if (!num_vsi)
2759 		return -EINVAL;
2760 
2761 	if (lkup_type == ICE_SW_LKUP_MAC ||
2762 	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2763 	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2764 	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2765 	    lkup_type == ICE_SW_LKUP_PROMISC ||
2766 	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2767 	    lkup_type == ICE_SW_LKUP_DFLT)
2768 		rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2769 			ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2770 	else if (lkup_type == ICE_SW_LKUP_VLAN)
2771 		rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2772 			ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2773 	else
2774 		return -EINVAL;
2775 
2776 	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, num_vsi);
2777 	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
2778 	if (!s_rule)
2779 		return -ENOMEM;
2780 	for (i = 0; i < num_vsi; i++) {
2781 		if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2782 			status = -EINVAL;
2783 			goto exit;
2784 		}
2785 		/* AQ call requires hw_vsi_id(s) */
2786 		s_rule->vsi[i] =
2787 			cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2788 	}
2789 
2790 	s_rule->hdr.type = cpu_to_le16(rule_type);
2791 	s_rule->number_vsi = cpu_to_le16(num_vsi);
2792 	s_rule->index = cpu_to_le16(vsi_list_id);
2793 
2794 	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2795 
2796 exit:
2797 	devm_kfree(ice_hw_to_dev(hw), s_rule);
2798 	return status;
2799 }
2800 
2801 /**
2802  * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2803  * @hw: pointer to the HW struct
2804  * @vsi_handle_arr: array of VSI handles to form a VSI list
2805  * @num_vsi: number of VSI handles in the array
2806  * @vsi_list_id: stores the ID of the VSI list to be created
2807  * @lkup_type: switch rule filter's lookup type
2808  */
2809 static int
2810 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2811 			 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2812 {
2813 	int status;
2814 
2815 	status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2816 					    ice_aqc_opc_alloc_res);
2817 	if (status)
2818 		return status;
2819 
2820 	/* Update the newly created VSI list to include the specified VSIs */
2821 	return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2822 					*vsi_list_id, false,
2823 					ice_aqc_opc_add_sw_rules, lkup_type);
2824 }
2825 
2826 /**
2827  * ice_create_pkt_fwd_rule
2828  * @hw: pointer to the hardware structure
2829  * @f_entry: entry containing packet forwarding information
2830  *
2831  * Create switch rule with given filter information and add an entry
2832  * to the corresponding filter management list to track this switch rule
2833  * and VSI mapping
2834  */
2835 static int
2836 ice_create_pkt_fwd_rule(struct ice_hw *hw,
2837 			struct ice_fltr_list_entry *f_entry)
2838 {
2839 	struct ice_fltr_mgmt_list_entry *fm_entry;
2840 	struct ice_sw_rule_lkup_rx_tx *s_rule;
2841 	enum ice_sw_lkup_type l_type;
2842 	struct ice_sw_recipe *recp;
2843 	int status;
2844 
2845 	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
2846 			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule),
2847 			      GFP_KERNEL);
2848 	if (!s_rule)
2849 		return -ENOMEM;
2850 	fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
2851 				GFP_KERNEL);
2852 	if (!fm_entry) {
2853 		status = -ENOMEM;
2854 		goto ice_create_pkt_fwd_rule_exit;
2855 	}
2856 
2857 	fm_entry->fltr_info = f_entry->fltr_info;
2858 
2859 	/* Initialize all the fields for the management entry */
2860 	fm_entry->vsi_count = 1;
2861 	fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2862 	fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2863 	fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2864 
2865 	ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2866 			 ice_aqc_opc_add_sw_rules);
2867 
2868 	status = ice_aq_sw_rules(hw, s_rule,
2869 				 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), 1,
2870 				 ice_aqc_opc_add_sw_rules, NULL);
2871 	if (status) {
2872 		devm_kfree(ice_hw_to_dev(hw), fm_entry);
2873 		goto ice_create_pkt_fwd_rule_exit;
2874 	}
2875 
2876 	f_entry->fltr_info.fltr_rule_id = le16_to_cpu(s_rule->index);
2877 	fm_entry->fltr_info.fltr_rule_id = le16_to_cpu(s_rule->index);
2878 
2879 	/* The book keeping entries will get removed when base driver
2880 	 * calls remove filter AQ command
2881 	 */
2882 	l_type = fm_entry->fltr_info.lkup_type;
2883 	recp = &hw->switch_info->recp_list[l_type];
2884 	list_add(&fm_entry->list_entry, &recp->filt_rules);
2885 
2886 ice_create_pkt_fwd_rule_exit:
2887 	devm_kfree(ice_hw_to_dev(hw), s_rule);
2888 	return status;
2889 }
2890 
2891 /**
2892  * ice_update_pkt_fwd_rule
2893  * @hw: pointer to the hardware structure
2894  * @f_info: filter information for switch rule
2895  *
2896  * Call AQ command to update a previously created switch rule with a
2897  * VSI list ID
2898  */
2899 static int
2900 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2901 {
2902 	struct ice_sw_rule_lkup_rx_tx *s_rule;
2903 	int status;
2904 
2905 	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
2906 			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule),
2907 			      GFP_KERNEL);
2908 	if (!s_rule)
2909 		return -ENOMEM;
2910 
2911 	ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
2912 
2913 	s_rule->index = cpu_to_le16(f_info->fltr_rule_id);
2914 
2915 	/* Update switch rule with new rule set to forward VSI list */
2916 	status = ice_aq_sw_rules(hw, s_rule,
2917 				 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), 1,
2918 				 ice_aqc_opc_update_sw_rules, NULL);
2919 
2920 	devm_kfree(ice_hw_to_dev(hw), s_rule);
2921 	return status;
2922 }
2923 
2924 /**
2925  * ice_update_sw_rule_bridge_mode
2926  * @hw: pointer to the HW struct
2927  *
2928  * Updates unicast switch filter rules based on VEB/VEPA mode
2929  */
2930 int ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2931 {
2932 	struct ice_switch_info *sw = hw->switch_info;
2933 	struct ice_fltr_mgmt_list_entry *fm_entry;
2934 	struct list_head *rule_head;
2935 	struct mutex *rule_lock; /* Lock to protect filter rule list */
2936 	int status = 0;
2937 
2938 	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2939 	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2940 
2941 	mutex_lock(rule_lock);
2942 	list_for_each_entry(fm_entry, rule_head, list_entry) {
2943 		struct ice_fltr_info *fi = &fm_entry->fltr_info;
2944 		u8 *addr = fi->l_data.mac.mac_addr;
2945 
2946 		/* Update unicast Tx rules to reflect the selected
2947 		 * VEB/VEPA mode
2948 		 */
2949 		if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) &&
2950 		    (fi->fltr_act == ICE_FWD_TO_VSI ||
2951 		     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2952 		     fi->fltr_act == ICE_FWD_TO_Q ||
2953 		     fi->fltr_act == ICE_FWD_TO_QGRP)) {
2954 			status = ice_update_pkt_fwd_rule(hw, fi);
2955 			if (status)
2956 				break;
2957 		}
2958 	}
2959 
2960 	mutex_unlock(rule_lock);
2961 
2962 	return status;
2963 }
2964 
2965 /**
2966  * ice_add_update_vsi_list
2967  * @hw: pointer to the hardware structure
2968  * @m_entry: pointer to current filter management list entry
2969  * @cur_fltr: filter information from the book keeping entry
2970  * @new_fltr: filter information with the new VSI to be added
2971  *
2972  * Call AQ command to add or update previously created VSI list with new VSI.
2973  *
2974  * Helper function to do book keeping associated with adding filter information
2975  * The algorithm to do the book keeping is described below :
2976  * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2977  *	if only one VSI has been added till now
2978  *		Allocate a new VSI list and add two VSIs
2979  *		to this list using switch rule command
2980  *		Update the previously created switch rule with the
2981  *		newly created VSI list ID
2982  *	if a VSI list was previously created
2983  *		Add the new VSI to the previously created VSI list set
2984  *		using the update switch rule command
2985  */
static int
ice_add_update_vsi_list(struct ice_hw *hw,
			struct ice_fltr_mgmt_list_entry *m_entry,
			struct ice_fltr_info *cur_fltr,
			struct ice_fltr_info *new_fltr)
{
	u16 vsi_list_id = 0;
	int status = 0;

	/* A rule already forwarding to a queue/queue-group cannot gain more
	 * subscribers via a VSI list.
	 */
	if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
	     cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
		return -EOPNOTSUPP;

	/* Conversely, a queue/queue-group action cannot be merged into an
	 * existing VSI or VSI-list rule.
	 */
	if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
		return -EOPNOTSUPP;

	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		/* Only one entry existed in the mapping and it was not already
		 * a part of a VSI list. So, create a VSI list with the old and
		 * new VSIs.
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];

		/* A rule already exists with the new VSI being added */
		if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
			return -EEXIST;

		vsi_handle_arr[0] = cur_fltr->vsi_handle;
		vsi_handle_arr[1] = new_fltr->vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id,
						  new_fltr->lkup_type);
		if (status)
			return status;

		/* Reuse the existing rule ID so firmware updates the rule in
		 * place rather than adding a second one.
		 */
		tmp_fltr = *new_fltr;
		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		/* Update the previous switch rule of "MAC forward to VSI" to
		 * "MAC fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			return status;

		/* Mirror the new action into the book-keeping entry */
		cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);

		if (!m_entry->vsi_list_info)
			return -ENOMEM;

		/* If this entry was large action then the large action needs
		 * to be updated to point to FWD to VSI list
		 */
		if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
			status =
			    ice_add_marker_act(hw, m_entry,
					       m_entry->sw_marker_id,
					       m_entry->lg_act_idx);
	} else {
		/* A VSI list already exists; just add the new VSI to it */
		u16 vsi_handle = new_fltr->vsi_handle;
		enum ice_adminq_opc opcode;

		if (!m_entry->vsi_list_info)
			return -EIO;

		/* A rule already exists with the new VSI being added */
		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
			return 0;

		/* Update the previously created VSI list set with
		 * the new VSI ID passed in
		 */
		vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
		opcode = ice_aqc_opc_update_sw_rules;

		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
						  vsi_list_id, false, opcode,
						  new_fltr->lkup_type);
		/* update VSI list mapping info with new VSI ID */
		if (!status)
			set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
	}
	/* Count the subscriber only if the HW update succeeded */
	if (!status)
		m_entry->vsi_count++;
	return status;
}
3081 
3082 /**
3083  * ice_find_rule_entry - Search a rule entry
3084  * @hw: pointer to the hardware structure
3085  * @recp_id: lookup type for which the specified rule needs to be searched
3086  * @f_info: rule information
3087  *
3088  * Helper function to search for a given rule entry
3089  * Returns pointer to entry storing the rule if found
3090  */
3091 static struct ice_fltr_mgmt_list_entry *
3092 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
3093 {
3094 	struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
3095 	struct ice_switch_info *sw = hw->switch_info;
3096 	struct list_head *list_head;
3097 
3098 	list_head = &sw->recp_list[recp_id].filt_rules;
3099 	list_for_each_entry(list_itr, list_head, list_entry) {
3100 		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3101 			    sizeof(f_info->l_data)) &&
3102 		    f_info->flag == list_itr->fltr_info.flag) {
3103 			ret = list_itr;
3104 			break;
3105 		}
3106 	}
3107 	return ret;
3108 }
3109 
3110 /**
3111  * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
3112  * @hw: pointer to the hardware structure
3113  * @recp_id: lookup type for which VSI lists needs to be searched
3114  * @vsi_handle: VSI handle to be found in VSI list
3115  * @vsi_list_id: VSI list ID found containing vsi_handle
3116  *
3117  * Helper function to search a VSI list with single entry containing given VSI
3118  * handle element. This can be extended further to search VSI list with more
3119  * than 1 vsi_count. Returns pointer to VSI list entry if found.
3120  */
3121 static struct ice_vsi_list_map_info *
3122 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
3123 			u16 *vsi_list_id)
3124 {
3125 	struct ice_vsi_list_map_info *map_info = NULL;
3126 	struct ice_switch_info *sw = hw->switch_info;
3127 	struct ice_fltr_mgmt_list_entry *list_itr;
3128 	struct list_head *list_head;
3129 
3130 	list_head = &sw->recp_list[recp_id].filt_rules;
3131 	list_for_each_entry(list_itr, list_head, list_entry) {
3132 		if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
3133 			map_info = list_itr->vsi_list_info;
3134 			if (test_bit(vsi_handle, map_info->vsi_map)) {
3135 				*vsi_list_id = map_info->vsi_list_id;
3136 				return map_info;
3137 			}
3138 		}
3139 	}
3140 	return NULL;
3141 }
3142 
3143 /**
3144  * ice_add_rule_internal - add rule for a given lookup type
3145  * @hw: pointer to the hardware structure
3146  * @recp_id: lookup type (recipe ID) for which rule has to be added
3147  * @f_entry: structure containing MAC forwarding information
3148  *
3149  * Adds or updates the rule lists for a given recipe
3150  */
3151 static int
3152 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
3153 		      struct ice_fltr_list_entry *f_entry)
3154 {
3155 	struct ice_switch_info *sw = hw->switch_info;
3156 	struct ice_fltr_info *new_fltr, *cur_fltr;
3157 	struct ice_fltr_mgmt_list_entry *m_entry;
3158 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3159 	int status = 0;
3160 
3161 	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3162 		return -EINVAL;
3163 	f_entry->fltr_info.fwd_id.hw_vsi_id =
3164 		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3165 
3166 	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
3167 
3168 	mutex_lock(rule_lock);
3169 	new_fltr = &f_entry->fltr_info;
3170 	if (new_fltr->flag & ICE_FLTR_RX)
3171 		new_fltr->src = hw->port_info->lport;
3172 	else if (new_fltr->flag & ICE_FLTR_TX)
3173 		new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id;
3174 
3175 	m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
3176 	if (!m_entry) {
3177 		mutex_unlock(rule_lock);
3178 		return ice_create_pkt_fwd_rule(hw, f_entry);
3179 	}
3180 
3181 	cur_fltr = &m_entry->fltr_info;
3182 	status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
3183 	mutex_unlock(rule_lock);
3184 
3185 	return status;
3186 }
3187 
3188 /**
3189  * ice_remove_vsi_list_rule
3190  * @hw: pointer to the hardware structure
3191  * @vsi_list_id: VSI list ID generated as part of allocate resource
3192  * @lkup_type: switch rule filter lookup type
3193  *
3194  * The VSI list should be emptied before this function is called to remove the
3195  * VSI list.
3196  */
3197 static int
3198 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
3199 			 enum ice_sw_lkup_type lkup_type)
3200 {
3201 	struct ice_sw_rule_vsi_list *s_rule;
3202 	u16 s_rule_size;
3203 	int status;
3204 
3205 	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, 0);
3206 	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
3207 	if (!s_rule)
3208 		return -ENOMEM;
3209 
3210 	s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
3211 	s_rule->index = cpu_to_le16(vsi_list_id);
3212 
3213 	/* Free the vsi_list resource that we allocated. It is assumed that the
3214 	 * list is empty at this point.
3215 	 */
3216 	status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
3217 					    ice_aqc_opc_free_res);
3218 
3219 	devm_kfree(ice_hw_to_dev(hw), s_rule);
3220 	return status;
3221 }
3222 
3223 /**
3224  * ice_rem_update_vsi_list
3225  * @hw: pointer to the hardware structure
3226  * @vsi_handle: VSI handle of the VSI to remove
3227  * @fm_list: filter management entry for which the VSI list management needs to
3228  *           be done
3229  */
3230 static int
3231 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
3232 			struct ice_fltr_mgmt_list_entry *fm_list)
3233 {
3234 	enum ice_sw_lkup_type lkup_type;
3235 	u16 vsi_list_id;
3236 	int status = 0;
3237 
3238 	if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
3239 	    fm_list->vsi_count == 0)
3240 		return -EINVAL;
3241 
3242 	/* A rule with the VSI being removed does not exist */
3243 	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
3244 		return -ENOENT;
3245 
3246 	lkup_type = fm_list->fltr_info.lkup_type;
3247 	vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
3248 	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
3249 					  ice_aqc_opc_update_sw_rules,
3250 					  lkup_type);
3251 	if (status)
3252 		return status;
3253 
3254 	fm_list->vsi_count--;
3255 	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
3256 
3257 	if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
3258 		struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
3259 		struct ice_vsi_list_map_info *vsi_list_info =
3260 			fm_list->vsi_list_info;
3261 		u16 rem_vsi_handle;
3262 
3263 		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
3264 						ICE_MAX_VSI);
3265 		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
3266 			return -EIO;
3267 
3268 		/* Make sure VSI list is empty before removing it below */
3269 		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
3270 						  vsi_list_id, true,
3271 						  ice_aqc_opc_update_sw_rules,
3272 						  lkup_type);
3273 		if (status)
3274 			return status;
3275 
3276 		tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
3277 		tmp_fltr_info.fwd_id.hw_vsi_id =
3278 			ice_get_hw_vsi_num(hw, rem_vsi_handle);
3279 		tmp_fltr_info.vsi_handle = rem_vsi_handle;
3280 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
3281 		if (status) {
3282 			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
3283 				  tmp_fltr_info.fwd_id.hw_vsi_id, status);
3284 			return status;
3285 		}
3286 
3287 		fm_list->fltr_info = tmp_fltr_info;
3288 	}
3289 
3290 	if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
3291 	    (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
3292 		struct ice_vsi_list_map_info *vsi_list_info =
3293 			fm_list->vsi_list_info;
3294 
3295 		/* Remove the VSI list since it is no longer used */
3296 		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
3297 		if (status) {
3298 			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
3299 				  vsi_list_id, status);
3300 			return status;
3301 		}
3302 
3303 		list_del(&vsi_list_info->list_entry);
3304 		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
3305 		fm_list->vsi_list_info = NULL;
3306 	}
3307 
3308 	return status;
3309 }
3310 
3311 /**
3312  * ice_remove_rule_internal - Remove a filter rule of a given type
3313  * @hw: pointer to the hardware structure
3314  * @recp_id: recipe ID for which the rule needs to removed
3315  * @f_entry: rule entry containing filter information
3316  */
static int
ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
			 struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *list_elem;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	bool remove_rule = false;
	u16 vsi_handle;
	int status = 0;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return -EINVAL;
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
	mutex_lock(rule_lock);
	list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
	if (!list_elem) {
		status = -ENOENT;
		goto exit;
	}

	if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
		/* Direct-forward rule: no VSI list involved, just remove it */
		remove_rule = true;
	} else if (!list_elem->vsi_list_info) {
		/* Inconsistent book keeping: a VSI-list rule must have a map */
		status = -ENOENT;
		goto exit;
	} else if (list_elem->vsi_list_info->ref_cnt > 1) {
		/* a ref_cnt > 1 indicates that the vsi_list is being
		 * shared by multiple rules. Decrement the ref_cnt and
		 * remove this rule, but do not modify the list, as it
		 * is in-use by other rules.
		 */
		list_elem->vsi_list_info->ref_cnt--;
		remove_rule = true;
	} else {
		/* a ref_cnt of 1 indicates the vsi_list is only used
		 * by one rule. However, the original removal request is only
		 * for a single VSI. Update the vsi_list first, and only
		 * remove the rule if there are no further VSIs in this list.
		 */
		vsi_handle = f_entry->fltr_info.vsi_handle;
		status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
		if (status)
			goto exit;
		/* if VSI count goes to zero after updating the VSI list */
		if (list_elem->vsi_count == 0)
			remove_rule = true;
	}

	if (remove_rule) {
		/* Remove the lookup rule */
		struct ice_sw_rule_lkup_rx_tx *s_rule;

		s_rule = devm_kzalloc(ice_hw_to_dev(hw),
				      ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule),
				      GFP_KERNEL);
		if (!s_rule) {
			status = -ENOMEM;
			goto exit;
		}

		ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
				 ice_aqc_opc_remove_sw_rules);

		status = ice_aq_sw_rules(hw, s_rule,
					 ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule),
					 1, ice_aqc_opc_remove_sw_rules, NULL);

		/* Remove a book keeping from the list */
		devm_kfree(ice_hw_to_dev(hw), s_rule);

		if (status)
			goto exit;

		/* Firmware accepted the removal; drop the book-keeping entry */
		list_del(&list_elem->list_entry);
		devm_kfree(ice_hw_to_dev(hw), list_elem);
	}
exit:
	mutex_unlock(rule_lock);
	return status;
}
3401 
3402 /**
3403  * ice_mac_fltr_exist - does this MAC filter exist for given VSI
3404  * @hw: pointer to the hardware structure
3405  * @mac: MAC address to be checked (for MAC filter)
3406  * @vsi_handle: check MAC filter for this VSI
3407  */
3408 bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle)
3409 {
3410 	struct ice_fltr_mgmt_list_entry *entry;
3411 	struct list_head *rule_head;
3412 	struct ice_switch_info *sw;
3413 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3414 	u16 hw_vsi_id;
3415 
3416 	if (!ice_is_vsi_valid(hw, vsi_handle))
3417 		return false;
3418 
3419 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3420 	sw = hw->switch_info;
3421 	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3422 	if (!rule_head)
3423 		return false;
3424 
3425 	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3426 	mutex_lock(rule_lock);
3427 	list_for_each_entry(entry, rule_head, list_entry) {
3428 		struct ice_fltr_info *f_info = &entry->fltr_info;
3429 		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3430 
3431 		if (is_zero_ether_addr(mac_addr))
3432 			continue;
3433 
3434 		if (f_info->flag != ICE_FLTR_TX ||
3435 		    f_info->src_id != ICE_SRC_ID_VSI ||
3436 		    f_info->lkup_type != ICE_SW_LKUP_MAC ||
3437 		    f_info->fltr_act != ICE_FWD_TO_VSI ||
3438 		    hw_vsi_id != f_info->fwd_id.hw_vsi_id)
3439 			continue;
3440 
3441 		if (ether_addr_equal(mac, mac_addr)) {
3442 			mutex_unlock(rule_lock);
3443 			return true;
3444 		}
3445 	}
3446 	mutex_unlock(rule_lock);
3447 	return false;
3448 }
3449 
3450 /**
3451  * ice_vlan_fltr_exist - does this VLAN filter exist for given VSI
3452  * @hw: pointer to the hardware structure
3453  * @vlan_id: VLAN ID
3454  * @vsi_handle: check MAC filter for this VSI
3455  */
3456 bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle)
3457 {
3458 	struct ice_fltr_mgmt_list_entry *entry;
3459 	struct list_head *rule_head;
3460 	struct ice_switch_info *sw;
3461 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3462 	u16 hw_vsi_id;
3463 
3464 	if (vlan_id > ICE_MAX_VLAN_ID)
3465 		return false;
3466 
3467 	if (!ice_is_vsi_valid(hw, vsi_handle))
3468 		return false;
3469 
3470 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3471 	sw = hw->switch_info;
3472 	rule_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
3473 	if (!rule_head)
3474 		return false;
3475 
3476 	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3477 	mutex_lock(rule_lock);
3478 	list_for_each_entry(entry, rule_head, list_entry) {
3479 		struct ice_fltr_info *f_info = &entry->fltr_info;
3480 		u16 entry_vlan_id = f_info->l_data.vlan.vlan_id;
3481 		struct ice_vsi_list_map_info *map_info;
3482 
3483 		if (entry_vlan_id > ICE_MAX_VLAN_ID)
3484 			continue;
3485 
3486 		if (f_info->flag != ICE_FLTR_TX ||
3487 		    f_info->src_id != ICE_SRC_ID_VSI ||
3488 		    f_info->lkup_type != ICE_SW_LKUP_VLAN)
3489 			continue;
3490 
3491 		/* Only allowed filter action are FWD_TO_VSI/_VSI_LIST */
3492 		if (f_info->fltr_act != ICE_FWD_TO_VSI &&
3493 		    f_info->fltr_act != ICE_FWD_TO_VSI_LIST)
3494 			continue;
3495 
3496 		if (f_info->fltr_act == ICE_FWD_TO_VSI) {
3497 			if (hw_vsi_id != f_info->fwd_id.hw_vsi_id)
3498 				continue;
3499 		} else if (f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
3500 			/* If filter_action is FWD_TO_VSI_LIST, make sure
3501 			 * that VSI being checked is part of VSI list
3502 			 */
3503 			if (entry->vsi_count == 1 &&
3504 			    entry->vsi_list_info) {
3505 				map_info = entry->vsi_list_info;
3506 				if (!test_bit(vsi_handle, map_info->vsi_map))
3507 					continue;
3508 			}
3509 		}
3510 
3511 		if (vlan_id == entry_vlan_id) {
3512 			mutex_unlock(rule_lock);
3513 			return true;
3514 		}
3515 	}
3516 	mutex_unlock(rule_lock);
3517 
3518 	return false;
3519 }
3520 
3521 /**
3522  * ice_add_mac - Add a MAC address based filter rule
3523  * @hw: pointer to the hardware structure
3524  * @m_list: list of MAC addresses and forwarding information
3525  */
3526 int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
3527 {
3528 	struct ice_fltr_list_entry *m_list_itr;
3529 	int status = 0;
3530 
3531 	if (!m_list || !hw)
3532 		return -EINVAL;
3533 
3534 	list_for_each_entry(m_list_itr, m_list, list_entry) {
3535 		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3536 		u16 vsi_handle;
3537 		u16 hw_vsi_id;
3538 
3539 		m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3540 		vsi_handle = m_list_itr->fltr_info.vsi_handle;
3541 		if (!ice_is_vsi_valid(hw, vsi_handle))
3542 			return -EINVAL;
3543 		hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3544 		m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3545 		/* update the src in case it is VSI num */
3546 		if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3547 			return -EINVAL;
3548 		m_list_itr->fltr_info.src = hw_vsi_id;
3549 		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3550 		    is_zero_ether_addr(add))
3551 			return -EINVAL;
3552 
3553 		m_list_itr->status = ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
3554 							   m_list_itr);
3555 		if (m_list_itr->status)
3556 			return m_list_itr->status;
3557 	}
3558 
3559 	return status;
3560 }
3561 
3562 /**
3563  * ice_add_vlan_internal - Add one VLAN based filter rule
3564  * @hw: pointer to the hardware structure
3565  * @f_entry: filter entry containing one VLAN information
3566  */
static int
ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *v_list_itr;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	enum ice_sw_lkup_type lkup_type;
	u16 vsi_list_id = 0, vsi_handle;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	int status = 0;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return -EINVAL;

	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
	new_fltr = &f_entry->fltr_info;

	/* VLAN ID should only be 12 bits */
	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
		return -EINVAL;

	if (new_fltr->src_id != ICE_SRC_ID_VSI)
		return -EINVAL;

	new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
	lkup_type = new_fltr->lkup_type;
	vsi_handle = new_fltr->vsi_handle;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	mutex_lock(rule_lock);
	v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
	if (!v_list_itr) {
		/* No rule for this VLAN yet: create one, always backed by a
		 * VSI list when the action is FWD_TO_VSI.
		 */
		struct ice_vsi_list_map_info *map_info = NULL;

		if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
			/* All VLAN pruning rules use a VSI list. Check if
			 * there is already a VSI list containing VSI that we
			 * want to add. If found, use the same vsi_list_id for
			 * this new VLAN rule or else create a new list.
			 */
			map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
							   vsi_handle,
							   &vsi_list_id);
			if (!map_info) {
				status = ice_create_vsi_list_rule(hw,
								  &vsi_handle,
								  1,
								  &vsi_list_id,
								  lkup_type);
				if (status)
					goto exit;
			}
			/* Convert the action to forwarding to a VSI list. */
			new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
			new_fltr->fwd_id.vsi_list_id = vsi_list_id;
		}

		status = ice_create_pkt_fwd_rule(hw, f_entry);
		if (!status) {
			/* Re-find the entry ice_create_pkt_fwd_rule added so
			 * its VSI list map can be attached.
			 */
			v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
							 new_fltr);
			if (!v_list_itr) {
				status = -ENOENT;
				goto exit;
			}
			/* reuse VSI list for new rule and increment ref_cnt */
			if (map_info) {
				v_list_itr->vsi_list_info = map_info;
				map_info->ref_cnt++;
			} else {
				/* NOTE(review): result is not NULL-checked
				 * here, unlike ice_add_update_vsi_list() —
				 * confirm allocation failure is tolerable.
				 */
				v_list_itr->vsi_list_info =
					ice_create_vsi_list_map(hw, &vsi_handle,
								1, vsi_list_id);
			}
		}
	} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
		/* Update existing VSI list to add new VSI ID only if it used
		 * by one VLAN rule.
		 *
		 * NOTE(review): vsi_list_info is dereferenced without a NULL
		 * check; presumably VLAN rules always carry a VSI list (see
		 * the FWD_TO_VSI conversion above) — confirm for non-VSI
		 * actions.
		 */
		cur_fltr = &v_list_itr->fltr_info;
		status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
						 new_fltr);
	} else {
		/* If VLAN rule exists and VSI list being used by this rule is
		 * referenced by more than 1 VLAN rule. Then create a new VSI
		 * list appending previous VSI with new VSI and update existing
		 * VLAN rule to point to new VSI list ID
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];
		u16 cur_handle;

		/* Current implementation only supports reusing VSI list with
		 * one VSI count. We should never hit below condition
		 */
		if (v_list_itr->vsi_count > 1 &&
		    v_list_itr->vsi_list_info->ref_cnt > 1) {
			ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
			status = -EIO;
			goto exit;
		}

		cur_handle =
			find_first_bit(v_list_itr->vsi_list_info->vsi_map,
				       ICE_MAX_VSI);

		/* A rule already exists with the new VSI being added */
		if (cur_handle == vsi_handle) {
			status = -EEXIST;
			goto exit;
		}

		vsi_handle_arr[0] = cur_handle;
		vsi_handle_arr[1] = vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id, lkup_type);
		if (status)
			goto exit;

		/* NOTE(review): the fltr_rule_id assignment below is redundant
		 * — the struct copy on the previous line already carried it.
		 */
		tmp_fltr = v_list_itr->fltr_info;
		tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		/* Update the previous switch rule to a new VSI list which
		 * includes current VSI that is requested
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			goto exit;

		/* before overriding VSI list map info. decrement ref_cnt of
		 * previous VSI list
		 */
		v_list_itr->vsi_list_info->ref_cnt--;

		/* now update to newly created list */
		v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
		v_list_itr->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);
		v_list_itr->vsi_count++;
	}

exit:
	mutex_unlock(rule_lock);
	return status;
}
3714 
3715 /**
3716  * ice_add_vlan - Add VLAN based filter rule
3717  * @hw: pointer to the hardware structure
3718  * @v_list: list of VLAN entries and forwarding information
3719  */
3720 int ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
3721 {
3722 	struct ice_fltr_list_entry *v_list_itr;
3723 
3724 	if (!v_list || !hw)
3725 		return -EINVAL;
3726 
3727 	list_for_each_entry(v_list_itr, v_list, list_entry) {
3728 		if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3729 			return -EINVAL;
3730 		v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3731 		v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
3732 		if (v_list_itr->status)
3733 			return v_list_itr->status;
3734 	}
3735 	return 0;
3736 }
3737 
3738 /**
3739  * ice_add_eth_mac - Add ethertype and MAC based filter rule
3740  * @hw: pointer to the hardware structure
3741  * @em_list: list of ether type MAC filter, MAC is optional
3742  *
3743  * This function requires the caller to populate the entries in
3744  * the filter list with the necessary fields (including flags to
3745  * indicate Tx or Rx rules).
3746  */
3747 int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
3748 {
3749 	struct ice_fltr_list_entry *em_list_itr;
3750 
3751 	if (!em_list || !hw)
3752 		return -EINVAL;
3753 
3754 	list_for_each_entry(em_list_itr, em_list, list_entry) {
3755 		enum ice_sw_lkup_type l_type =
3756 			em_list_itr->fltr_info.lkup_type;
3757 
3758 		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3759 		    l_type != ICE_SW_LKUP_ETHERTYPE)
3760 			return -EINVAL;
3761 
3762 		em_list_itr->status = ice_add_rule_internal(hw, l_type,
3763 							    em_list_itr);
3764 		if (em_list_itr->status)
3765 			return em_list_itr->status;
3766 	}
3767 	return 0;
3768 }
3769 
3770 /**
3771  * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
3772  * @hw: pointer to the hardware structure
3773  * @em_list: list of ethertype or ethertype MAC entries
3774  */
3775 int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
3776 {
3777 	struct ice_fltr_list_entry *em_list_itr, *tmp;
3778 
3779 	if (!em_list || !hw)
3780 		return -EINVAL;
3781 
3782 	list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) {
3783 		enum ice_sw_lkup_type l_type =
3784 			em_list_itr->fltr_info.lkup_type;
3785 
3786 		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3787 		    l_type != ICE_SW_LKUP_ETHERTYPE)
3788 			return -EINVAL;
3789 
3790 		em_list_itr->status = ice_remove_rule_internal(hw, l_type,
3791 							       em_list_itr);
3792 		if (em_list_itr->status)
3793 			return em_list_itr->status;
3794 	}
3795 	return 0;
3796 }
3797 
3798 /**
3799  * ice_rem_sw_rule_info
3800  * @hw: pointer to the hardware structure
3801  * @rule_head: pointer to the switch list structure that we want to delete
3802  */
3803 static void
3804 ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
3805 {
3806 	if (!list_empty(rule_head)) {
3807 		struct ice_fltr_mgmt_list_entry *entry;
3808 		struct ice_fltr_mgmt_list_entry *tmp;
3809 
3810 		list_for_each_entry_safe(entry, tmp, rule_head, list_entry) {
3811 			list_del(&entry->list_entry);
3812 			devm_kfree(ice_hw_to_dev(hw), entry);
3813 		}
3814 	}
3815 }
3816 
3817 /**
3818  * ice_rem_adv_rule_info
3819  * @hw: pointer to the hardware structure
3820  * @rule_head: pointer to the switch list structure that we want to delete
3821  */
3822 static void
3823 ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
3824 {
3825 	struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3826 	struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3827 
3828 	if (list_empty(rule_head))
3829 		return;
3830 
3831 	list_for_each_entry_safe(lst_itr, tmp_entry, rule_head, list_entry) {
3832 		list_del(&lst_itr->list_entry);
3833 		devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
3834 		devm_kfree(ice_hw_to_dev(hw), lst_itr);
3835 	}
3836 }
3837 
3838 /**
3839  * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3840  * @pi: pointer to the port_info structure
3841  * @vsi_handle: VSI handle to set as default
3842  * @set: true to add the above mentioned switch rule, false to remove it
3843  * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3844  *
3845  * add filter rule to set/unset given VSI as default VSI for the switch
3846  * (represented by swid)
3847  */
3848 int
3849 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3850 		 u8 direction)
3851 {
3852 	struct ice_fltr_list_entry f_list_entry;
3853 	struct ice_fltr_info f_info;
3854 	struct ice_hw *hw = pi->hw;
3855 	u16 hw_vsi_id;
3856 	int status;
3857 
3858 	if (!ice_is_vsi_valid(hw, vsi_handle))
3859 		return -EINVAL;
3860 
3861 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3862 
3863 	memset(&f_info, 0, sizeof(f_info));
3864 
3865 	f_info.lkup_type = ICE_SW_LKUP_DFLT;
3866 	f_info.flag = direction;
3867 	f_info.fltr_act = ICE_FWD_TO_VSI;
3868 	f_info.fwd_id.hw_vsi_id = hw_vsi_id;
3869 	f_info.vsi_handle = vsi_handle;
3870 
3871 	if (f_info.flag & ICE_FLTR_RX) {
3872 		f_info.src = hw->port_info->lport;
3873 		f_info.src_id = ICE_SRC_ID_LPORT;
3874 	} else if (f_info.flag & ICE_FLTR_TX) {
3875 		f_info.src_id = ICE_SRC_ID_VSI;
3876 		f_info.src = hw_vsi_id;
3877 	}
3878 	f_list_entry.fltr_info = f_info;
3879 
3880 	if (set)
3881 		status = ice_add_rule_internal(hw, ICE_SW_LKUP_DFLT,
3882 					       &f_list_entry);
3883 	else
3884 		status = ice_remove_rule_internal(hw, ICE_SW_LKUP_DFLT,
3885 						  &f_list_entry);
3886 
3887 	return status;
3888 }
3889 
3890 /**
3891  * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3892  * @fm_entry: filter entry to inspect
3893  * @vsi_handle: VSI handle to compare with filter info
3894  */
3895 static bool
3896 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3897 {
3898 	return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3899 		 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3900 		(fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3901 		 fm_entry->vsi_list_info &&
3902 		 (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
3903 }
3904 
3905 /**
3906  * ice_check_if_dflt_vsi - check if VSI is default VSI
3907  * @pi: pointer to the port_info structure
3908  * @vsi_handle: vsi handle to check for in filter list
3909  * @rule_exists: indicates if there are any VSI's in the rule list
3910  *
3911  * checks if the VSI is in a default VSI list, and also indicates
3912  * if the default VSI list is empty
3913  */
3914 bool
3915 ice_check_if_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle,
3916 		      bool *rule_exists)
3917 {
3918 	struct ice_fltr_mgmt_list_entry *fm_entry;
3919 	struct ice_sw_recipe *recp_list;
3920 	struct list_head *rule_head;
3921 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3922 	bool ret = false;
3923 
3924 	recp_list = &pi->hw->switch_info->recp_list[ICE_SW_LKUP_DFLT];
3925 	rule_lock = &recp_list->filt_rule_lock;
3926 	rule_head = &recp_list->filt_rules;
3927 
3928 	mutex_lock(rule_lock);
3929 
3930 	if (rule_exists && !list_empty(rule_head))
3931 		*rule_exists = true;
3932 
3933 	list_for_each_entry(fm_entry, rule_head, list_entry) {
3934 		if (ice_vsi_uses_fltr(fm_entry, vsi_handle)) {
3935 			ret = true;
3936 			break;
3937 		}
3938 	}
3939 
3940 	mutex_unlock(rule_lock);
3941 
3942 	return ret;
3943 }
3944 
3945 /**
3946  * ice_remove_mac - remove a MAC address based filter rule
3947  * @hw: pointer to the hardware structure
3948  * @m_list: list of MAC addresses and forwarding information
3949  *
3950  * This function removes either a MAC filter rule or a specific VSI from a
3951  * VSI list for a multicast MAC address.
3952  *
3953  * Returns -ENOENT if a given entry was not added by ice_add_mac. Caller should
3954  * be aware that this call will only work if all the entries passed into m_list
3955  * were added previously. It will not attempt to do a partial remove of entries
3956  * that were found.
3957  */
3958 int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
3959 {
3960 	struct ice_fltr_list_entry *list_itr, *tmp;
3961 
3962 	if (!m_list)
3963 		return -EINVAL;
3964 
3965 	list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
3966 		enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3967 		u16 vsi_handle;
3968 
3969 		if (l_type != ICE_SW_LKUP_MAC)
3970 			return -EINVAL;
3971 
3972 		vsi_handle = list_itr->fltr_info.vsi_handle;
3973 		if (!ice_is_vsi_valid(hw, vsi_handle))
3974 			return -EINVAL;
3975 
3976 		list_itr->fltr_info.fwd_id.hw_vsi_id =
3977 					ice_get_hw_vsi_num(hw, vsi_handle);
3978 
3979 		list_itr->status = ice_remove_rule_internal(hw,
3980 							    ICE_SW_LKUP_MAC,
3981 							    list_itr);
3982 		if (list_itr->status)
3983 			return list_itr->status;
3984 	}
3985 	return 0;
3986 }
3987 
3988 /**
3989  * ice_remove_vlan - Remove VLAN based filter rule
3990  * @hw: pointer to the hardware structure
3991  * @v_list: list of VLAN entries and forwarding information
3992  */
3993 int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
3994 {
3995 	struct ice_fltr_list_entry *v_list_itr, *tmp;
3996 
3997 	if (!v_list || !hw)
3998 		return -EINVAL;
3999 
4000 	list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
4001 		enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4002 
4003 		if (l_type != ICE_SW_LKUP_VLAN)
4004 			return -EINVAL;
4005 		v_list_itr->status = ice_remove_rule_internal(hw,
4006 							      ICE_SW_LKUP_VLAN,
4007 							      v_list_itr);
4008 		if (v_list_itr->status)
4009 			return v_list_itr->status;
4010 	}
4011 	return 0;
4012 }
4013 
4014 /**
4015  * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
4016  * @hw: pointer to the hardware structure
4017  * @vsi_handle: VSI handle to remove filters from
4018  * @vsi_list_head: pointer to the list to add entry to
4019  * @fi: pointer to fltr_info of filter entry to copy & add
4020  *
4021  * Helper function, used when creating a list of filters to remove from
4022  * a specific VSI. The entry added to vsi_list_head is a COPY of the
4023  * original filter entry, with the exception of fltr_info.fltr_act and
4024  * fltr_info.fwd_id fields. These are set such that later logic can
4025  * extract which VSI to remove the fltr from, and pass on that information.
4026  */
4027 static int
4028 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4029 			       struct list_head *vsi_list_head,
4030 			       struct ice_fltr_info *fi)
4031 {
4032 	struct ice_fltr_list_entry *tmp;
4033 
4034 	/* this memory is freed up in the caller function
4035 	 * once filters for this VSI are removed
4036 	 */
4037 	tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL);
4038 	if (!tmp)
4039 		return -ENOMEM;
4040 
4041 	tmp->fltr_info = *fi;
4042 
4043 	/* Overwrite these fields to indicate which VSI to remove filter from,
4044 	 * so find and remove logic can extract the information from the
4045 	 * list entries. Note that original entries will still have proper
4046 	 * values.
4047 	 */
4048 	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
4049 	tmp->fltr_info.vsi_handle = vsi_handle;
4050 	tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4051 
4052 	list_add(&tmp->list_entry, vsi_list_head);
4053 
4054 	return 0;
4055 }
4056 
4057 /**
4058  * ice_add_to_vsi_fltr_list - Add VSI filters to the list
4059  * @hw: pointer to the hardware structure
4060  * @vsi_handle: VSI handle to remove filters from
4061  * @lkup_list_head: pointer to the list that has certain lookup type filters
4062  * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
4063  *
4064  * Locates all filters in lkup_list_head that are used by the given VSI,
4065  * and adds COPIES of those entries to vsi_list_head (intended to be used
4066  * to remove the listed filters).
4067  * Note that this means all entries in vsi_list_head must be explicitly
4068  * deallocated by the caller when done with list.
4069  */
4070 static int
4071 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4072 			 struct list_head *lkup_list_head,
4073 			 struct list_head *vsi_list_head)
4074 {
4075 	struct ice_fltr_mgmt_list_entry *fm_entry;
4076 	int status = 0;
4077 
4078 	/* check to make sure VSI ID is valid and within boundary */
4079 	if (!ice_is_vsi_valid(hw, vsi_handle))
4080 		return -EINVAL;
4081 
4082 	list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
4083 		if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
4084 			continue;
4085 
4086 		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4087 							vsi_list_head,
4088 							&fm_entry->fltr_info);
4089 		if (status)
4090 			return status;
4091 	}
4092 	return status;
4093 }
4094 
4095 /**
4096  * ice_determine_promisc_mask
4097  * @fi: filter info to parse
4098  *
4099  * Helper function to determine which ICE_PROMISC_ mask corresponds
4100  * to given filter into.
4101  */
4102 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
4103 {
4104 	u16 vid = fi->l_data.mac_vlan.vlan_id;
4105 	u8 *macaddr = fi->l_data.mac.mac_addr;
4106 	bool is_tx_fltr = false;
4107 	u8 promisc_mask = 0;
4108 
4109 	if (fi->flag == ICE_FLTR_TX)
4110 		is_tx_fltr = true;
4111 
4112 	if (is_broadcast_ether_addr(macaddr))
4113 		promisc_mask |= is_tx_fltr ?
4114 			ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
4115 	else if (is_multicast_ether_addr(macaddr))
4116 		promisc_mask |= is_tx_fltr ?
4117 			ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
4118 	else if (is_unicast_ether_addr(macaddr))
4119 		promisc_mask |= is_tx_fltr ?
4120 			ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
4121 	if (vid)
4122 		promisc_mask |= is_tx_fltr ?
4123 			ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
4124 
4125 	return promisc_mask;
4126 }
4127 
4128 /**
4129  * ice_remove_promisc - Remove promisc based filter rules
4130  * @hw: pointer to the hardware structure
4131  * @recp_id: recipe ID for which the rule needs to removed
4132  * @v_list: list of promisc entries
4133  */
4134 static int
4135 ice_remove_promisc(struct ice_hw *hw, u8 recp_id, struct list_head *v_list)
4136 {
4137 	struct ice_fltr_list_entry *v_list_itr, *tmp;
4138 
4139 	list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
4140 		v_list_itr->status =
4141 			ice_remove_rule_internal(hw, recp_id, v_list_itr);
4142 		if (v_list_itr->status)
4143 			return v_list_itr->status;
4144 	}
4145 	return 0;
4146 }
4147 
4148 /**
4149  * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
4150  * @hw: pointer to the hardware structure
4151  * @vsi_handle: VSI handle to clear mode
4152  * @promisc_mask: mask of promiscuous config bits to clear
4153  * @vid: VLAN ID to clear VLAN promiscuous
4154  */
4155 int
4156 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4157 		      u16 vid)
4158 {
4159 	struct ice_switch_info *sw = hw->switch_info;
4160 	struct ice_fltr_list_entry *fm_entry, *tmp;
4161 	struct list_head remove_list_head;
4162 	struct ice_fltr_mgmt_list_entry *itr;
4163 	struct list_head *rule_head;
4164 	struct mutex *rule_lock;	/* Lock to protect filter rule list */
4165 	int status = 0;
4166 	u8 recipe_id;
4167 
4168 	if (!ice_is_vsi_valid(hw, vsi_handle))
4169 		return -EINVAL;
4170 
4171 	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
4172 		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4173 	else
4174 		recipe_id = ICE_SW_LKUP_PROMISC;
4175 
4176 	rule_head = &sw->recp_list[recipe_id].filt_rules;
4177 	rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
4178 
4179 	INIT_LIST_HEAD(&remove_list_head);
4180 
4181 	mutex_lock(rule_lock);
4182 	list_for_each_entry(itr, rule_head, list_entry) {
4183 		struct ice_fltr_info *fltr_info;
4184 		u8 fltr_promisc_mask = 0;
4185 
4186 		if (!ice_vsi_uses_fltr(itr, vsi_handle))
4187 			continue;
4188 		fltr_info = &itr->fltr_info;
4189 
4190 		if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4191 		    vid != fltr_info->l_data.mac_vlan.vlan_id)
4192 			continue;
4193 
4194 		fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4195 
4196 		/* Skip if filter is not completely specified by given mask */
4197 		if (fltr_promisc_mask & ~promisc_mask)
4198 			continue;
4199 
4200 		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4201 							&remove_list_head,
4202 							fltr_info);
4203 		if (status) {
4204 			mutex_unlock(rule_lock);
4205 			goto free_fltr_list;
4206 		}
4207 	}
4208 	mutex_unlock(rule_lock);
4209 
4210 	status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
4211 
4212 free_fltr_list:
4213 	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
4214 		list_del(&fm_entry->list_entry);
4215 		devm_kfree(ice_hw_to_dev(hw), fm_entry);
4216 	}
4217 
4218 	return status;
4219 }
4220 
4221 /**
4222  * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4223  * @hw: pointer to the hardware structure
4224  * @vsi_handle: VSI handle to configure
4225  * @promisc_mask: mask of promiscuous config bits
4226  * @vid: VLAN ID to set VLAN promiscuous
4227  */
4228 int
4229 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
4230 {
4231 	enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4232 	struct ice_fltr_list_entry f_list_entry;
4233 	struct ice_fltr_info new_fltr;
4234 	bool is_tx_fltr;
4235 	int status = 0;
4236 	u16 hw_vsi_id;
4237 	int pkt_type;
4238 	u8 recipe_id;
4239 
4240 	if (!ice_is_vsi_valid(hw, vsi_handle))
4241 		return -EINVAL;
4242 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4243 
4244 	memset(&new_fltr, 0, sizeof(new_fltr));
4245 
4246 	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4247 		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4248 		new_fltr.l_data.mac_vlan.vlan_id = vid;
4249 		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4250 	} else {
4251 		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4252 		recipe_id = ICE_SW_LKUP_PROMISC;
4253 	}
4254 
4255 	/* Separate filters must be set for each direction/packet type
4256 	 * combination, so we will loop over the mask value, store the
4257 	 * individual type, and clear it out in the input mask as it
4258 	 * is found.
4259 	 */
4260 	while (promisc_mask) {
4261 		u8 *mac_addr;
4262 
4263 		pkt_type = 0;
4264 		is_tx_fltr = false;
4265 
4266 		if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4267 			promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4268 			pkt_type = UCAST_FLTR;
4269 		} else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4270 			promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4271 			pkt_type = UCAST_FLTR;
4272 			is_tx_fltr = true;
4273 		} else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4274 			promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4275 			pkt_type = MCAST_FLTR;
4276 		} else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4277 			promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4278 			pkt_type = MCAST_FLTR;
4279 			is_tx_fltr = true;
4280 		} else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4281 			promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4282 			pkt_type = BCAST_FLTR;
4283 		} else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4284 			promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4285 			pkt_type = BCAST_FLTR;
4286 			is_tx_fltr = true;
4287 		}
4288 
4289 		/* Check for VLAN promiscuous flag */
4290 		if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4291 			promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4292 		} else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4293 			promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4294 			is_tx_fltr = true;
4295 		}
4296 
4297 		/* Set filter DA based on packet type */
4298 		mac_addr = new_fltr.l_data.mac.mac_addr;
4299 		if (pkt_type == BCAST_FLTR) {
4300 			eth_broadcast_addr(mac_addr);
4301 		} else if (pkt_type == MCAST_FLTR ||
4302 			   pkt_type == UCAST_FLTR) {
4303 			/* Use the dummy ether header DA */
4304 			ether_addr_copy(mac_addr, dummy_eth_header);
4305 			if (pkt_type == MCAST_FLTR)
4306 				mac_addr[0] |= 0x1;	/* Set multicast bit */
4307 		}
4308 
4309 		/* Need to reset this to zero for all iterations */
4310 		new_fltr.flag = 0;
4311 		if (is_tx_fltr) {
4312 			new_fltr.flag |= ICE_FLTR_TX;
4313 			new_fltr.src = hw_vsi_id;
4314 		} else {
4315 			new_fltr.flag |= ICE_FLTR_RX;
4316 			new_fltr.src = hw->port_info->lport;
4317 		}
4318 
4319 		new_fltr.fltr_act = ICE_FWD_TO_VSI;
4320 		new_fltr.vsi_handle = vsi_handle;
4321 		new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4322 		f_list_entry.fltr_info = new_fltr;
4323 
4324 		status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
4325 		if (status)
4326 			goto set_promisc_exit;
4327 	}
4328 
4329 set_promisc_exit:
4330 	return status;
4331 }
4332 
4333 /**
4334  * ice_set_vlan_vsi_promisc
4335  * @hw: pointer to the hardware structure
4336  * @vsi_handle: VSI handle to configure
4337  * @promisc_mask: mask of promiscuous config bits
4338  * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4339  *
4340  * Configure VSI with all associated VLANs to given promiscuous mode(s)
4341  */
4342 int
4343 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4344 			 bool rm_vlan_promisc)
4345 {
4346 	struct ice_switch_info *sw = hw->switch_info;
4347 	struct ice_fltr_list_entry *list_itr, *tmp;
4348 	struct list_head vsi_list_head;
4349 	struct list_head *vlan_head;
4350 	struct mutex *vlan_lock; /* Lock to protect filter rule list */
4351 	u16 vlan_id;
4352 	int status;
4353 
4354 	INIT_LIST_HEAD(&vsi_list_head);
4355 	vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4356 	vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
4357 	mutex_lock(vlan_lock);
4358 	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4359 					  &vsi_list_head);
4360 	mutex_unlock(vlan_lock);
4361 	if (status)
4362 		goto free_fltr_list;
4363 
4364 	list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
4365 		/* Avoid enabling or disabling VLAN zero twice when in double
4366 		 * VLAN mode
4367 		 */
4368 		if (ice_is_dvm_ena(hw) &&
4369 		    list_itr->fltr_info.l_data.vlan.tpid == 0)
4370 			continue;
4371 
4372 		vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4373 		if (rm_vlan_promisc)
4374 			status = ice_clear_vsi_promisc(hw, vsi_handle,
4375 						       promisc_mask, vlan_id);
4376 		else
4377 			status = ice_set_vsi_promisc(hw, vsi_handle,
4378 						     promisc_mask, vlan_id);
4379 		if (status && status != -EEXIST)
4380 			break;
4381 	}
4382 
4383 free_fltr_list:
4384 	list_for_each_entry_safe(list_itr, tmp, &vsi_list_head, list_entry) {
4385 		list_del(&list_itr->list_entry);
4386 		devm_kfree(ice_hw_to_dev(hw), list_itr);
4387 	}
4388 	return status;
4389 }
4390 
4391 /**
4392  * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4393  * @hw: pointer to the hardware structure
4394  * @vsi_handle: VSI handle to remove filters from
4395  * @lkup: switch rule filter lookup type
4396  */
4397 static void
4398 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4399 			 enum ice_sw_lkup_type lkup)
4400 {
4401 	struct ice_switch_info *sw = hw->switch_info;
4402 	struct ice_fltr_list_entry *fm_entry;
4403 	struct list_head remove_list_head;
4404 	struct list_head *rule_head;
4405 	struct ice_fltr_list_entry *tmp;
4406 	struct mutex *rule_lock;	/* Lock to protect filter rule list */
4407 	int status;
4408 
4409 	INIT_LIST_HEAD(&remove_list_head);
4410 	rule_lock = &sw->recp_list[lkup].filt_rule_lock;
4411 	rule_head = &sw->recp_list[lkup].filt_rules;
4412 	mutex_lock(rule_lock);
4413 	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4414 					  &remove_list_head);
4415 	mutex_unlock(rule_lock);
4416 	if (status)
4417 		goto free_fltr_list;
4418 
4419 	switch (lkup) {
4420 	case ICE_SW_LKUP_MAC:
4421 		ice_remove_mac(hw, &remove_list_head);
4422 		break;
4423 	case ICE_SW_LKUP_VLAN:
4424 		ice_remove_vlan(hw, &remove_list_head);
4425 		break;
4426 	case ICE_SW_LKUP_PROMISC:
4427 	case ICE_SW_LKUP_PROMISC_VLAN:
4428 		ice_remove_promisc(hw, lkup, &remove_list_head);
4429 		break;
4430 	case ICE_SW_LKUP_MAC_VLAN:
4431 	case ICE_SW_LKUP_ETHERTYPE:
4432 	case ICE_SW_LKUP_ETHERTYPE_MAC:
4433 	case ICE_SW_LKUP_DFLT:
4434 	case ICE_SW_LKUP_LAST:
4435 	default:
4436 		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
4437 		break;
4438 	}
4439 
4440 free_fltr_list:
4441 	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
4442 		list_del(&fm_entry->list_entry);
4443 		devm_kfree(ice_hw_to_dev(hw), fm_entry);
4444 	}
4445 }
4446 
4447 /**
4448  * ice_remove_vsi_fltr - Remove all filters for a VSI
4449  * @hw: pointer to the hardware structure
4450  * @vsi_handle: VSI handle to remove filters from
4451  */
4452 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4453 {
4454 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
4455 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
4456 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
4457 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
4458 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
4459 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
4460 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
4461 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
4462 }
4463 
4464 /**
4465  * ice_alloc_res_cntr - allocating resource counter
4466  * @hw: pointer to the hardware structure
4467  * @type: type of resource
4468  * @alloc_shared: if set it is shared else dedicated
4469  * @num_items: number of entries requested for FD resource type
4470  * @counter_id: counter index returned by AQ call
4471  */
4472 int
4473 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4474 		   u16 *counter_id)
4475 {
4476 	struct ice_aqc_alloc_free_res_elem *buf;
4477 	u16 buf_len;
4478 	int status;
4479 
4480 	/* Allocate resource */
4481 	buf_len = struct_size(buf, elem, 1);
4482 	buf = kzalloc(buf_len, GFP_KERNEL);
4483 	if (!buf)
4484 		return -ENOMEM;
4485 
4486 	buf->num_elems = cpu_to_le16(num_items);
4487 	buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
4488 				      ICE_AQC_RES_TYPE_M) | alloc_shared);
4489 
4490 	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4491 				       ice_aqc_opc_alloc_res, NULL);
4492 	if (status)
4493 		goto exit;
4494 
4495 	*counter_id = le16_to_cpu(buf->elem[0].e.sw_resp);
4496 
4497 exit:
4498 	kfree(buf);
4499 	return status;
4500 }
4501 
4502 /**
4503  * ice_free_res_cntr - free resource counter
4504  * @hw: pointer to the hardware structure
4505  * @type: type of resource
4506  * @alloc_shared: if set it is shared else dedicated
4507  * @num_items: number of entries to be freed for FD resource type
4508  * @counter_id: counter ID resource which needs to be freed
4509  */
4510 int
4511 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4512 		  u16 counter_id)
4513 {
4514 	struct ice_aqc_alloc_free_res_elem *buf;
4515 	u16 buf_len;
4516 	int status;
4517 
4518 	/* Free resource */
4519 	buf_len = struct_size(buf, elem, 1);
4520 	buf = kzalloc(buf_len, GFP_KERNEL);
4521 	if (!buf)
4522 		return -ENOMEM;
4523 
4524 	buf->num_elems = cpu_to_le16(num_items);
4525 	buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
4526 				      ICE_AQC_RES_TYPE_M) | alloc_shared);
4527 	buf->elem[0].e.sw_resp = cpu_to_le16(counter_id);
4528 
4529 	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4530 				       ice_aqc_opc_free_res, NULL);
4531 	if (status)
4532 		ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
4533 
4534 	kfree(buf);
4535 	return status;
4536 }
4537 
/* Initializer for one ice_prot_ext_tbl_entry: protocol type plus the list of
 * per-word byte offsets within that protocol's header.
 */
#define ICE_PROTOCOL_ENTRY(id, ...) {		\
	.prot_type	= id,			\
	.offs		= {__VA_ARGS__},	\
}

/* This is mapping table entry that maps every word within a given protocol
 * structure to the real byte offset as per the specification of that
 * protocol header.
 * for example dst address is 3 words in ethertype header and corresponding
 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
 * matching entry describing its field. This needs to be updated if new
 * structure is added to that union.
 */
static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
	ICE_PROTOCOL_ENTRY(ICE_MAC_OFOS, 0, 2, 4, 6, 8, 10, 12),
	ICE_PROTOCOL_ENTRY(ICE_MAC_IL, 0, 2, 4, 6, 8, 10, 12),
	ICE_PROTOCOL_ENTRY(ICE_ETYPE_OL, 0),
	ICE_PROTOCOL_ENTRY(ICE_ETYPE_IL, 0),
	ICE_PROTOCOL_ENTRY(ICE_VLAN_OFOS, 2, 0),
	ICE_PROTOCOL_ENTRY(ICE_IPV4_OFOS, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18),
	ICE_PROTOCOL_ENTRY(ICE_IPV4_IL,	0, 2, 4, 6, 8, 10, 12, 14, 16, 18),
	ICE_PROTOCOL_ENTRY(ICE_IPV6_OFOS, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18,
			   20, 22, 24, 26, 28, 30, 32, 34, 36, 38),
	ICE_PROTOCOL_ENTRY(ICE_IPV6_IL, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20,
			   22, 24, 26, 28, 30, 32, 34, 36, 38),
	ICE_PROTOCOL_ENTRY(ICE_TCP_IL, 0, 2),
	ICE_PROTOCOL_ENTRY(ICE_UDP_OF, 0, 2),
	ICE_PROTOCOL_ENTRY(ICE_UDP_ILOS, 0, 2),
	ICE_PROTOCOL_ENTRY(ICE_VXLAN, 8, 10, 12, 14),
	ICE_PROTOCOL_ENTRY(ICE_GENEVE, 8, 10, 12, 14),
	ICE_PROTOCOL_ENTRY(ICE_NVGRE, 0, 2, 4, 6),
	ICE_PROTOCOL_ENTRY(ICE_GTP, 8, 10, 12, 14, 16, 18, 20, 22),
	ICE_PROTOCOL_ENTRY(ICE_GTP_NO_PAY, 8, 10, 12, 14),
	ICE_PROTOCOL_ENTRY(ICE_PPPOE, 0, 2, 4, 6),
	ICE_PROTOCOL_ENTRY(ICE_L2TPV3, 0, 2, 4, 6, 8, 10),
	ICE_PROTOCOL_ENTRY(ICE_VLAN_EX, 2, 0),
	ICE_PROTOCOL_ENTRY(ICE_VLAN_IN, 2, 0),
	ICE_PROTOCOL_ENTRY(ICE_HW_METADATA,
			   ICE_SOURCE_PORT_MDID_OFFSET,
			   ICE_PTYPE_MDID_OFFSET,
			   ICE_PACKET_LENGTH_MDID_OFFSET,
			   ICE_SOURCE_VSI_MDID_OFFSET,
			   ICE_PKT_VLAN_MDID_OFFSET,
			   ICE_PKT_TUNNEL_MDID_OFFSET,
			   ICE_PKT_TCP_MDID_OFFSET,
			   ICE_PKT_ERROR_MDID_OFFSET),
};
4586 
/* Maps each software protocol type to its hardware protocol ID. Not const:
 * the ICE_VLAN_OFOS entry is rewritten to ICE_VLAN_OF_HW by
 * ice_change_proto_id_to_dvm() when double VLAN mode is supported.
 */
static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		ICE_MAC_OFOS_HW },
	{ ICE_MAC_IL,		ICE_MAC_IL_HW },
	{ ICE_ETYPE_OL,		ICE_ETYPE_OL_HW },
	{ ICE_ETYPE_IL,		ICE_ETYPE_IL_HW },
	{ ICE_VLAN_OFOS,	ICE_VLAN_OL_HW },
	{ ICE_IPV4_OFOS,	ICE_IPV4_OFOS_HW },
	{ ICE_IPV4_IL,		ICE_IPV4_IL_HW },
	{ ICE_IPV6_OFOS,	ICE_IPV6_OFOS_HW },
	{ ICE_IPV6_IL,		ICE_IPV6_IL_HW },
	{ ICE_TCP_IL,		ICE_TCP_IL_HW },
	{ ICE_UDP_OF,		ICE_UDP_OF_HW },
	{ ICE_UDP_ILOS,		ICE_UDP_ILOS_HW },
	{ ICE_VXLAN,		ICE_UDP_OF_HW },
	{ ICE_GENEVE,		ICE_UDP_OF_HW },
	{ ICE_NVGRE,		ICE_GRE_OF_HW },
	{ ICE_GTP,		ICE_UDP_OF_HW },
	{ ICE_GTP_NO_PAY,	ICE_UDP_ILOS_HW },
	{ ICE_PPPOE,		ICE_PPPOE_HW },
	{ ICE_L2TPV3,		ICE_L2TPV3_HW },
	{ ICE_VLAN_EX,          ICE_VLAN_OF_HW },
	{ ICE_VLAN_IN,          ICE_VLAN_OL_HW },
	{ ICE_HW_METADATA,      ICE_META_DATA_ID_HW },
};
4611 
4612 /**
4613  * ice_find_recp - find a recipe
4614  * @hw: pointer to the hardware structure
4615  * @lkup_exts: extension sequence to match
4616  * @tun_type: type of recipe tunnel
4617  *
4618  * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
4619  */
4620 static u16
4621 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
4622 	      enum ice_sw_tunnel_type tun_type)
4623 {
4624 	bool refresh_required = true;
4625 	struct ice_sw_recipe *recp;
4626 	u8 i;
4627 
4628 	/* Walk through existing recipes to find a match */
4629 	recp = hw->switch_info->recp_list;
4630 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4631 		/* If recipe was not created for this ID, in SW bookkeeping,
4632 		 * check if FW has an entry for this recipe. If the FW has an
4633 		 * entry update it in our SW bookkeeping and continue with the
4634 		 * matching.
4635 		 */
4636 		if (!recp[i].recp_created)
4637 			if (ice_get_recp_frm_fw(hw,
4638 						hw->switch_info->recp_list, i,
4639 						&refresh_required))
4640 				continue;
4641 
4642 		/* Skip inverse action recipes */
4643 		if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
4644 		    ICE_AQ_RECIPE_ACT_INV_ACT)
4645 			continue;
4646 
4647 		/* if number of words we are looking for match */
4648 		if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
4649 			struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
4650 			struct ice_fv_word *be = lkup_exts->fv_words;
4651 			u16 *cr = recp[i].lkup_exts.field_mask;
4652 			u16 *de = lkup_exts->field_mask;
4653 			bool found = true;
4654 			u8 pe, qr;
4655 
4656 			/* ar, cr, and qr are related to the recipe words, while
4657 			 * be, de, and pe are related to the lookup words
4658 			 */
4659 			for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
4660 				for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
4661 				     qr++) {
4662 					if (ar[qr].off == be[pe].off &&
4663 					    ar[qr].prot_id == be[pe].prot_id &&
4664 					    cr[qr] == de[pe])
4665 						/* Found the "pe"th word in the
4666 						 * given recipe
4667 						 */
4668 						break;
4669 				}
4670 				/* After walking through all the words in the
4671 				 * "i"th recipe if "p"th word was not found then
4672 				 * this recipe is not what we are looking for.
4673 				 * So break out from this loop and try the next
4674 				 * recipe
4675 				 */
4676 				if (qr >= recp[i].lkup_exts.n_val_words) {
4677 					found = false;
4678 					break;
4679 				}
4680 			}
4681 			/* If for "i"th recipe the found was never set to false
4682 			 * then it means we found our match
4683 			 * Also tun type of recipe needs to be checked
4684 			 */
4685 			if (found && recp[i].tun_type == tun_type)
4686 				return i; /* Return the recipe ID */
4687 		}
4688 	}
4689 	return ICE_MAX_NUM_RECIPES;
4690 }
4691 
4692 /**
4693  * ice_change_proto_id_to_dvm - change proto id in prot_id_tbl
4694  *
4695  * As protocol id for outer vlan is different in dvm and svm, if dvm is
4696  * supported protocol array record for outer vlan has to be modified to
4697  * reflect the value proper for DVM.
4698  */
4699 void ice_change_proto_id_to_dvm(void)
4700 {
4701 	u8 i;
4702 
4703 	for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
4704 		if (ice_prot_id_tbl[i].type == ICE_VLAN_OFOS &&
4705 		    ice_prot_id_tbl[i].protocol_id != ICE_VLAN_OF_HW)
4706 			ice_prot_id_tbl[i].protocol_id = ICE_VLAN_OF_HW;
4707 }
4708 
4709 /**
4710  * ice_prot_type_to_id - get protocol ID from protocol type
4711  * @type: protocol type
4712  * @id: pointer to variable that will receive the ID
4713  *
4714  * Returns true if found, false otherwise
4715  */
4716 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
4717 {
4718 	u8 i;
4719 
4720 	for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
4721 		if (ice_prot_id_tbl[i].type == type) {
4722 			*id = ice_prot_id_tbl[i].protocol_id;
4723 			return true;
4724 		}
4725 	return false;
4726 }
4727 
4728 /**
4729  * ice_fill_valid_words - count valid words
4730  * @rule: advanced rule with lookup information
4731  * @lkup_exts: byte offset extractions of the words that are valid
4732  *
4733  * calculate valid words in a lookup rule using mask value
4734  */
4735 static u8
4736 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
4737 		     struct ice_prot_lkup_ext *lkup_exts)
4738 {
4739 	u8 j, word, prot_id, ret_val;
4740 
4741 	if (!ice_prot_type_to_id(rule->type, &prot_id))
4742 		return 0;
4743 
4744 	word = lkup_exts->n_val_words;
4745 
4746 	for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
4747 		if (((u16 *)&rule->m_u)[j] &&
4748 		    rule->type < ARRAY_SIZE(ice_prot_ext)) {
4749 			/* No more space to accommodate */
4750 			if (word >= ICE_MAX_CHAIN_WORDS)
4751 				return 0;
4752 			lkup_exts->fv_words[word].off =
4753 				ice_prot_ext[rule->type].offs[j];
4754 			lkup_exts->fv_words[word].prot_id =
4755 				ice_prot_id_tbl[rule->type].protocol_id;
4756 			lkup_exts->field_mask[word] =
4757 				be16_to_cpu(((__force __be16 *)&rule->m_u)[j]);
4758 			word++;
4759 		}
4760 
4761 	ret_val = word - lkup_exts->n_val_words;
4762 	lkup_exts->n_val_words = word;
4763 
4764 	return ret_val;
4765 }
4766 
4767 /**
4768  * ice_create_first_fit_recp_def - Create a recipe grouping
4769  * @hw: pointer to the hardware structure
4770  * @lkup_exts: an array of protocol header extractions
4771  * @rg_list: pointer to a list that stores new recipe groups
4772  * @recp_cnt: pointer to a variable that stores returned number of recipe groups
4773  *
4774  * Using first fit algorithm, take all the words that are still not done
4775  * and start grouping them in 4-word groups. Each group makes up one
4776  * recipe.
4777  */
4778 static int
4779 ice_create_first_fit_recp_def(struct ice_hw *hw,
4780 			      struct ice_prot_lkup_ext *lkup_exts,
4781 			      struct list_head *rg_list,
4782 			      u8 *recp_cnt)
4783 {
4784 	struct ice_pref_recipe_group *grp = NULL;
4785 	u8 j;
4786 
4787 	*recp_cnt = 0;
4788 
4789 	/* Walk through every word in the rule to check if it is not done. If so
4790 	 * then this word needs to be part of a new recipe.
4791 	 */
4792 	for (j = 0; j < lkup_exts->n_val_words; j++)
4793 		if (!test_bit(j, lkup_exts->done)) {
4794 			if (!grp ||
4795 			    grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
4796 				struct ice_recp_grp_entry *entry;
4797 
4798 				entry = devm_kzalloc(ice_hw_to_dev(hw),
4799 						     sizeof(*entry),
4800 						     GFP_KERNEL);
4801 				if (!entry)
4802 					return -ENOMEM;
4803 				list_add(&entry->l_entry, rg_list);
4804 				grp = &entry->r_group;
4805 				(*recp_cnt)++;
4806 			}
4807 
4808 			grp->pairs[grp->n_val_pairs].prot_id =
4809 				lkup_exts->fv_words[j].prot_id;
4810 			grp->pairs[grp->n_val_pairs].off =
4811 				lkup_exts->fv_words[j].off;
4812 			grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
4813 			grp->n_val_pairs++;
4814 		}
4815 
4816 	return 0;
4817 }
4818 
4819 /**
4820  * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
4821  * @hw: pointer to the hardware structure
4822  * @fv_list: field vector with the extraction sequence information
4823  * @rg_list: recipe groupings with protocol-offset pairs
4824  *
4825  * Helper function to fill in the field vector indices for protocol-offset
4826  * pairs. These indexes are then ultimately programmed into a recipe.
4827  */
4828 static int
4829 ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list,
4830 		       struct list_head *rg_list)
4831 {
4832 	struct ice_sw_fv_list_entry *fv;
4833 	struct ice_recp_grp_entry *rg;
4834 	struct ice_fv_word *fv_ext;
4835 
4836 	if (list_empty(fv_list))
4837 		return 0;
4838 
4839 	fv = list_first_entry(fv_list, struct ice_sw_fv_list_entry,
4840 			      list_entry);
4841 	fv_ext = fv->fv_ptr->ew;
4842 
4843 	list_for_each_entry(rg, rg_list, l_entry) {
4844 		u8 i;
4845 
4846 		for (i = 0; i < rg->r_group.n_val_pairs; i++) {
4847 			struct ice_fv_word *pr;
4848 			bool found = false;
4849 			u16 mask;
4850 			u8 j;
4851 
4852 			pr = &rg->r_group.pairs[i];
4853 			mask = rg->r_group.mask[i];
4854 
4855 			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
4856 				if (fv_ext[j].prot_id == pr->prot_id &&
4857 				    fv_ext[j].off == pr->off) {
4858 					found = true;
4859 
4860 					/* Store index of field vector */
4861 					rg->fv_idx[i] = j;
4862 					rg->fv_mask[i] = mask;
4863 					break;
4864 				}
4865 
4866 			/* Protocol/offset could not be found, caller gave an
4867 			 * invalid pair
4868 			 */
4869 			if (!found)
4870 				return -EINVAL;
4871 		}
4872 	}
4873 
4874 	return 0;
4875 }
4876 
4877 /**
4878  * ice_find_free_recp_res_idx - find free result indexes for recipe
4879  * @hw: pointer to hardware structure
4880  * @profiles: bitmap of profiles that will be associated with the new recipe
4881  * @free_idx: pointer to variable to receive the free index bitmap
4882  *
4883  * The algorithm used here is:
4884  *	1. When creating a new recipe, create a set P which contains all
4885  *	   Profiles that will be associated with our new recipe
4886  *
4887  *	2. For each Profile p in set P:
4888  *	    a. Add all recipes associated with Profile p into set R
4889  *	    b. Optional : PossibleIndexes &= profile[p].possibleIndexes
4890  *		[initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
4891  *		i. Or just assume they all have the same possible indexes:
4892  *			44, 45, 46, 47
4893  *			i.e., PossibleIndexes = 0x0000F00000000000
4894  *
4895  *	3. For each Recipe r in set R:
4896  *	    a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
4897  *	    b. FreeIndexes = UsedIndexes ^ PossibleIndexes
4898  *
4899  *	FreeIndexes will contain the bits indicating the indexes free for use,
4900  *      then the code needs to update the recipe[r].used_result_idx_bits to
4901  *      indicate which indexes were selected for use by this recipe.
4902  */
4903 static u16
4904 ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
4905 			   unsigned long *free_idx)
4906 {
4907 	DECLARE_BITMAP(possible_idx, ICE_MAX_FV_WORDS);
4908 	DECLARE_BITMAP(recipes, ICE_MAX_NUM_RECIPES);
4909 	DECLARE_BITMAP(used_idx, ICE_MAX_FV_WORDS);
4910 	u16 bit;
4911 
4912 	bitmap_zero(recipes, ICE_MAX_NUM_RECIPES);
4913 	bitmap_zero(used_idx, ICE_MAX_FV_WORDS);
4914 
4915 	bitmap_fill(possible_idx, ICE_MAX_FV_WORDS);
4916 
4917 	/* For each profile we are going to associate the recipe with, add the
4918 	 * recipes that are associated with that profile. This will give us
4919 	 * the set of recipes that our recipe may collide with. Also, determine
4920 	 * what possible result indexes are usable given this set of profiles.
4921 	 */
4922 	for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
4923 		bitmap_or(recipes, recipes, profile_to_recipe[bit],
4924 			  ICE_MAX_NUM_RECIPES);
4925 		bitmap_and(possible_idx, possible_idx,
4926 			   hw->switch_info->prof_res_bm[bit],
4927 			   ICE_MAX_FV_WORDS);
4928 	}
4929 
4930 	/* For each recipe that our new recipe may collide with, determine
4931 	 * which indexes have been used.
4932 	 */
4933 	for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
4934 		bitmap_or(used_idx, used_idx,
4935 			  hw->switch_info->recp_list[bit].res_idxs,
4936 			  ICE_MAX_FV_WORDS);
4937 
4938 	bitmap_xor(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
4939 
4940 	/* return number of free indexes */
4941 	return (u16)bitmap_weight(free_idx, ICE_MAX_FV_WORDS);
4942 }
4943 
4944 /**
4945  * ice_add_sw_recipe - function to call AQ calls to create switch recipe
4946  * @hw: pointer to hardware structure
4947  * @rm: recipe management list entry
4948  * @profiles: bitmap of profiles that will be associated.
4949  */
4950 static int
4951 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
4952 		  unsigned long *profiles)
4953 {
4954 	DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS);
4955 	struct ice_aqc_recipe_data_elem *tmp;
4956 	struct ice_aqc_recipe_data_elem *buf;
4957 	struct ice_recp_grp_entry *entry;
4958 	u16 free_res_idx;
4959 	u16 recipe_count;
4960 	u8 chain_idx;
4961 	u8 recps = 0;
4962 	int status;
4963 
4964 	/* When more than one recipe are required, another recipe is needed to
4965 	 * chain them together. Matching a tunnel metadata ID takes up one of
4966 	 * the match fields in the chaining recipe reducing the number of
4967 	 * chained recipes by one.
4968 	 */
4969 	 /* check number of free result indices */
4970 	bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS);
4971 	free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
4972 
4973 	ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
4974 		  free_res_idx, rm->n_grp_count);
4975 
4976 	if (rm->n_grp_count > 1) {
4977 		if (rm->n_grp_count > free_res_idx)
4978 			return -ENOSPC;
4979 
4980 		rm->n_grp_count++;
4981 	}
4982 
4983 	if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
4984 		return -ENOSPC;
4985 
4986 	tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
4987 	if (!tmp)
4988 		return -ENOMEM;
4989 
4990 	buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf),
4991 			   GFP_KERNEL);
4992 	if (!buf) {
4993 		status = -ENOMEM;
4994 		goto err_mem;
4995 	}
4996 
4997 	bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
4998 	recipe_count = ICE_MAX_NUM_RECIPES;
4999 	status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
5000 				   NULL);
5001 	if (status || recipe_count == 0)
5002 		goto err_unroll;
5003 
5004 	/* Allocate the recipe resources, and configure them according to the
5005 	 * match fields from protocol headers and extracted field vectors.
5006 	 */
5007 	chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
5008 	list_for_each_entry(entry, &rm->rg_list, l_entry) {
5009 		u8 i;
5010 
5011 		status = ice_alloc_recipe(hw, &entry->rid);
5012 		if (status)
5013 			goto err_unroll;
5014 
5015 		/* Clear the result index of the located recipe, as this will be
5016 		 * updated, if needed, later in the recipe creation process.
5017 		 */
5018 		tmp[0].content.result_indx = 0;
5019 
5020 		buf[recps] = tmp[0];
5021 		buf[recps].recipe_indx = (u8)entry->rid;
5022 		/* if the recipe is a non-root recipe RID should be programmed
5023 		 * as 0 for the rules to be applied correctly.
5024 		 */
5025 		buf[recps].content.rid = 0;
5026 		memset(&buf[recps].content.lkup_indx, 0,
5027 		       sizeof(buf[recps].content.lkup_indx));
5028 
5029 		/* All recipes use look-up index 0 to match switch ID. */
5030 		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5031 		buf[recps].content.mask[0] =
5032 			cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
5033 		/* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
5034 		 * to be 0
5035 		 */
5036 		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5037 			buf[recps].content.lkup_indx[i] = 0x80;
5038 			buf[recps].content.mask[i] = 0;
5039 		}
5040 
5041 		for (i = 0; i < entry->r_group.n_val_pairs; i++) {
5042 			buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
5043 			buf[recps].content.mask[i + 1] =
5044 				cpu_to_le16(entry->fv_mask[i]);
5045 		}
5046 
5047 		if (rm->n_grp_count > 1) {
5048 			/* Checks to see if there really is a valid result index
5049 			 * that can be used.
5050 			 */
5051 			if (chain_idx >= ICE_MAX_FV_WORDS) {
5052 				ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
5053 				status = -ENOSPC;
5054 				goto err_unroll;
5055 			}
5056 
5057 			entry->chain_idx = chain_idx;
5058 			buf[recps].content.result_indx =
5059 				ICE_AQ_RECIPE_RESULT_EN |
5060 				((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
5061 				 ICE_AQ_RECIPE_RESULT_DATA_M);
5062 			clear_bit(chain_idx, result_idx_bm);
5063 			chain_idx = find_first_bit(result_idx_bm,
5064 						   ICE_MAX_FV_WORDS);
5065 		}
5066 
5067 		/* fill recipe dependencies */
5068 		bitmap_zero((unsigned long *)buf[recps].recipe_bitmap,
5069 			    ICE_MAX_NUM_RECIPES);
5070 		set_bit(buf[recps].recipe_indx,
5071 			(unsigned long *)buf[recps].recipe_bitmap);
5072 		buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5073 		recps++;
5074 	}
5075 
5076 	if (rm->n_grp_count == 1) {
5077 		rm->root_rid = buf[0].recipe_indx;
5078 		set_bit(buf[0].recipe_indx, rm->r_bitmap);
5079 		buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
5080 		if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
5081 			memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
5082 			       sizeof(buf[0].recipe_bitmap));
5083 		} else {
5084 			status = -EINVAL;
5085 			goto err_unroll;
5086 		}
5087 		/* Applicable only for ROOT_RECIPE, set the fwd_priority for
5088 		 * the recipe which is getting created if specified
5089 		 * by user. Usually any advanced switch filter, which results
5090 		 * into new extraction sequence, ended up creating a new recipe
5091 		 * of type ROOT and usually recipes are associated with profiles
5092 		 * Switch rule referreing newly created recipe, needs to have
5093 		 * either/or 'fwd' or 'join' priority, otherwise switch rule
5094 		 * evaluation will not happen correctly. In other words, if
5095 		 * switch rule to be evaluated on priority basis, then recipe
5096 		 * needs to have priority, otherwise it will be evaluated last.
5097 		 */
5098 		buf[0].content.act_ctrl_fwd_priority = rm->priority;
5099 	} else {
5100 		struct ice_recp_grp_entry *last_chain_entry;
5101 		u16 rid, i;
5102 
5103 		/* Allocate the last recipe that will chain the outcomes of the
5104 		 * other recipes together
5105 		 */
5106 		status = ice_alloc_recipe(hw, &rid);
5107 		if (status)
5108 			goto err_unroll;
5109 
5110 		buf[recps].recipe_indx = (u8)rid;
5111 		buf[recps].content.rid = (u8)rid;
5112 		buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
5113 		/* the new entry created should also be part of rg_list to
5114 		 * make sure we have complete recipe
5115 		 */
5116 		last_chain_entry = devm_kzalloc(ice_hw_to_dev(hw),
5117 						sizeof(*last_chain_entry),
5118 						GFP_KERNEL);
5119 		if (!last_chain_entry) {
5120 			status = -ENOMEM;
5121 			goto err_unroll;
5122 		}
5123 		last_chain_entry->rid = rid;
5124 		memset(&buf[recps].content.lkup_indx, 0,
5125 		       sizeof(buf[recps].content.lkup_indx));
5126 		/* All recipes use look-up index 0 to match switch ID. */
5127 		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5128 		buf[recps].content.mask[0] =
5129 			cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
5130 		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5131 			buf[recps].content.lkup_indx[i] =
5132 				ICE_AQ_RECIPE_LKUP_IGNORE;
5133 			buf[recps].content.mask[i] = 0;
5134 		}
5135 
5136 		i = 1;
5137 		/* update r_bitmap with the recp that is used for chaining */
5138 		set_bit(rid, rm->r_bitmap);
5139 		/* this is the recipe that chains all the other recipes so it
5140 		 * should not have a chaining ID to indicate the same
5141 		 */
5142 		last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
5143 		list_for_each_entry(entry, &rm->rg_list, l_entry) {
5144 			last_chain_entry->fv_idx[i] = entry->chain_idx;
5145 			buf[recps].content.lkup_indx[i] = entry->chain_idx;
5146 			buf[recps].content.mask[i++] = cpu_to_le16(0xFFFF);
5147 			set_bit(entry->rid, rm->r_bitmap);
5148 		}
5149 		list_add(&last_chain_entry->l_entry, &rm->rg_list);
5150 		if (sizeof(buf[recps].recipe_bitmap) >=
5151 		    sizeof(rm->r_bitmap)) {
5152 			memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
5153 			       sizeof(buf[recps].recipe_bitmap));
5154 		} else {
5155 			status = -EINVAL;
5156 			goto err_unroll;
5157 		}
5158 		buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5159 
5160 		recps++;
5161 		rm->root_rid = (u8)rid;
5162 	}
5163 	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5164 	if (status)
5165 		goto err_unroll;
5166 
5167 	status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
5168 	ice_release_change_lock(hw);
5169 	if (status)
5170 		goto err_unroll;
5171 
5172 	/* Every recipe that just got created add it to the recipe
5173 	 * book keeping list
5174 	 */
5175 	list_for_each_entry(entry, &rm->rg_list, l_entry) {
5176 		struct ice_switch_info *sw = hw->switch_info;
5177 		bool is_root, idx_found = false;
5178 		struct ice_sw_recipe *recp;
5179 		u16 idx, buf_idx = 0;
5180 
5181 		/* find buffer index for copying some data */
5182 		for (idx = 0; idx < rm->n_grp_count; idx++)
5183 			if (buf[idx].recipe_indx == entry->rid) {
5184 				buf_idx = idx;
5185 				idx_found = true;
5186 			}
5187 
5188 		if (!idx_found) {
5189 			status = -EIO;
5190 			goto err_unroll;
5191 		}
5192 
5193 		recp = &sw->recp_list[entry->rid];
5194 		is_root = (rm->root_rid == entry->rid);
5195 		recp->is_root = is_root;
5196 
5197 		recp->root_rid = entry->rid;
5198 		recp->big_recp = (is_root && rm->n_grp_count > 1);
5199 
5200 		memcpy(&recp->ext_words, entry->r_group.pairs,
5201 		       entry->r_group.n_val_pairs * sizeof(struct ice_fv_word));
5202 
5203 		memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
5204 		       sizeof(recp->r_bitmap));
5205 
5206 		/* Copy non-result fv index values and masks to recipe. This
5207 		 * call will also update the result recipe bitmask.
5208 		 */
5209 		ice_collect_result_idx(&buf[buf_idx], recp);
5210 
5211 		/* for non-root recipes, also copy to the root, this allows
5212 		 * easier matching of a complete chained recipe
5213 		 */
5214 		if (!is_root)
5215 			ice_collect_result_idx(&buf[buf_idx],
5216 					       &sw->recp_list[rm->root_rid]);
5217 
5218 		recp->n_ext_words = entry->r_group.n_val_pairs;
5219 		recp->chain_idx = entry->chain_idx;
5220 		recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
5221 		recp->n_grp_count = rm->n_grp_count;
5222 		recp->tun_type = rm->tun_type;
5223 		recp->recp_created = true;
5224 	}
5225 	rm->root_buf = buf;
5226 	kfree(tmp);
5227 	return status;
5228 
5229 err_unroll:
5230 err_mem:
5231 	kfree(tmp);
5232 	devm_kfree(ice_hw_to_dev(hw), buf);
5233 	return status;
5234 }
5235 
5236 /**
5237  * ice_create_recipe_group - creates recipe group
5238  * @hw: pointer to hardware structure
5239  * @rm: recipe management list entry
5240  * @lkup_exts: lookup elements
5241  */
5242 static int
5243 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5244 			struct ice_prot_lkup_ext *lkup_exts)
5245 {
5246 	u8 recp_count = 0;
5247 	int status;
5248 
5249 	rm->n_grp_count = 0;
5250 
5251 	/* Create recipes for words that are marked not done by packing them
5252 	 * as best fit.
5253 	 */
5254 	status = ice_create_first_fit_recp_def(hw, lkup_exts,
5255 					       &rm->rg_list, &recp_count);
5256 	if (!status) {
5257 		rm->n_grp_count += recp_count;
5258 		rm->n_ext_words = lkup_exts->n_val_words;
5259 		memcpy(&rm->ext_words, lkup_exts->fv_words,
5260 		       sizeof(rm->ext_words));
5261 		memcpy(rm->word_masks, lkup_exts->field_mask,
5262 		       sizeof(rm->word_masks));
5263 	}
5264 
5265 	return status;
5266 }
5267 
5268 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
5269  * @hw: pointer to hardware structure
5270  * @rinfo: other information regarding the rule e.g. priority and action info
5271  * @bm: pointer to memory for returning the bitmap of field vectors
5272  */
5273 static void
5274 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
5275 			 unsigned long *bm)
5276 {
5277 	enum ice_prof_type prof_type;
5278 
5279 	bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
5280 
5281 	switch (rinfo->tun_type) {
5282 	case ICE_NON_TUN:
5283 		prof_type = ICE_PROF_NON_TUN;
5284 		break;
5285 	case ICE_ALL_TUNNELS:
5286 		prof_type = ICE_PROF_TUN_ALL;
5287 		break;
5288 	case ICE_SW_TUN_GENEVE:
5289 	case ICE_SW_TUN_VXLAN:
5290 		prof_type = ICE_PROF_TUN_UDP;
5291 		break;
5292 	case ICE_SW_TUN_NVGRE:
5293 		prof_type = ICE_PROF_TUN_GRE;
5294 		break;
5295 	case ICE_SW_TUN_GTPU:
5296 		prof_type = ICE_PROF_TUN_GTPU;
5297 		break;
5298 	case ICE_SW_TUN_GTPC:
5299 		prof_type = ICE_PROF_TUN_GTPC;
5300 		break;
5301 	case ICE_SW_TUN_AND_NON_TUN:
5302 	default:
5303 		prof_type = ICE_PROF_ALL;
5304 		break;
5305 	}
5306 
5307 	ice_get_sw_fv_bitmap(hw, prof_type, bm);
5308 }
5309 
5310 /**
5311  * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
5312  * @hw: pointer to hardware structure
5313  * @lkups: lookup elements or match criteria for the advanced recipe, one
5314  *  structure per protocol header
5315  * @lkups_cnt: number of protocols
5316  * @rinfo: other information regarding the rule e.g. priority and action info
5317  * @rid: return the recipe ID of the recipe created
5318  */
5319 static int
5320 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5321 		   u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
5322 {
5323 	DECLARE_BITMAP(fv_bitmap, ICE_MAX_NUM_PROFILES);
5324 	DECLARE_BITMAP(profiles, ICE_MAX_NUM_PROFILES);
5325 	struct ice_prot_lkup_ext *lkup_exts;
5326 	struct ice_recp_grp_entry *r_entry;
5327 	struct ice_sw_fv_list_entry *fvit;
5328 	struct ice_recp_grp_entry *r_tmp;
5329 	struct ice_sw_fv_list_entry *tmp;
5330 	struct ice_sw_recipe *rm;
5331 	int status = 0;
5332 	u8 i;
5333 
5334 	if (!lkups_cnt)
5335 		return -EINVAL;
5336 
5337 	lkup_exts = kzalloc(sizeof(*lkup_exts), GFP_KERNEL);
5338 	if (!lkup_exts)
5339 		return -ENOMEM;
5340 
5341 	/* Determine the number of words to be matched and if it exceeds a
5342 	 * recipe's restrictions
5343 	 */
5344 	for (i = 0; i < lkups_cnt; i++) {
5345 		u16 count;
5346 
5347 		if (lkups[i].type >= ICE_PROTOCOL_LAST) {
5348 			status = -EIO;
5349 			goto err_free_lkup_exts;
5350 		}
5351 
5352 		count = ice_fill_valid_words(&lkups[i], lkup_exts);
5353 		if (!count) {
5354 			status = -EIO;
5355 			goto err_free_lkup_exts;
5356 		}
5357 	}
5358 
5359 	rm = kzalloc(sizeof(*rm), GFP_KERNEL);
5360 	if (!rm) {
5361 		status = -ENOMEM;
5362 		goto err_free_lkup_exts;
5363 	}
5364 
5365 	/* Get field vectors that contain fields extracted from all the protocol
5366 	 * headers being programmed.
5367 	 */
5368 	INIT_LIST_HEAD(&rm->fv_list);
5369 	INIT_LIST_HEAD(&rm->rg_list);
5370 
5371 	/* Get bitmap of field vectors (profiles) that are compatible with the
5372 	 * rule request; only these will be searched in the subsequent call to
5373 	 * ice_get_sw_fv_list.
5374 	 */
5375 	ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
5376 
5377 	status = ice_get_sw_fv_list(hw, lkup_exts, fv_bitmap, &rm->fv_list);
5378 	if (status)
5379 		goto err_unroll;
5380 
5381 	/* Group match words into recipes using preferred recipe grouping
5382 	 * criteria.
5383 	 */
5384 	status = ice_create_recipe_group(hw, rm, lkup_exts);
5385 	if (status)
5386 		goto err_unroll;
5387 
5388 	/* set the recipe priority if specified */
5389 	rm->priority = (u8)rinfo->priority;
5390 
5391 	/* Find offsets from the field vector. Pick the first one for all the
5392 	 * recipes.
5393 	 */
5394 	status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
5395 	if (status)
5396 		goto err_unroll;
5397 
5398 	/* get bitmap of all profiles the recipe will be associated with */
5399 	bitmap_zero(profiles, ICE_MAX_NUM_PROFILES);
5400 	list_for_each_entry(fvit, &rm->fv_list, list_entry) {
5401 		ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
5402 		set_bit((u16)fvit->profile_id, profiles);
5403 	}
5404 
5405 	/* Look for a recipe which matches our requested fv / mask list */
5406 	*rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
5407 	if (*rid < ICE_MAX_NUM_RECIPES)
5408 		/* Success if found a recipe that match the existing criteria */
5409 		goto err_unroll;
5410 
5411 	rm->tun_type = rinfo->tun_type;
5412 	/* Recipe we need does not exist, add a recipe */
5413 	status = ice_add_sw_recipe(hw, rm, profiles);
5414 	if (status)
5415 		goto err_unroll;
5416 
5417 	/* Associate all the recipes created with all the profiles in the
5418 	 * common field vector.
5419 	 */
5420 	list_for_each_entry(fvit, &rm->fv_list, list_entry) {
5421 		DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
5422 		u16 j;
5423 
5424 		status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
5425 						      (u8 *)r_bitmap, NULL);
5426 		if (status)
5427 			goto err_unroll;
5428 
5429 		bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap,
5430 			  ICE_MAX_NUM_RECIPES);
5431 		status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5432 		if (status)
5433 			goto err_unroll;
5434 
5435 		status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5436 						      (u8 *)r_bitmap,
5437 						      NULL);
5438 		ice_release_change_lock(hw);
5439 
5440 		if (status)
5441 			goto err_unroll;
5442 
5443 		/* Update profile to recipe bitmap array */
5444 		bitmap_copy(profile_to_recipe[fvit->profile_id], r_bitmap,
5445 			    ICE_MAX_NUM_RECIPES);
5446 
5447 		/* Update recipe to profile bitmap array */
5448 		for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
5449 			set_bit((u16)fvit->profile_id, recipe_to_profile[j]);
5450 	}
5451 
5452 	*rid = rm->root_rid;
5453 	memcpy(&hw->switch_info->recp_list[*rid].lkup_exts, lkup_exts,
5454 	       sizeof(*lkup_exts));
5455 err_unroll:
5456 	list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) {
5457 		list_del(&r_entry->l_entry);
5458 		devm_kfree(ice_hw_to_dev(hw), r_entry);
5459 	}
5460 
5461 	list_for_each_entry_safe(fvit, tmp, &rm->fv_list, list_entry) {
5462 		list_del(&fvit->list_entry);
5463 		devm_kfree(ice_hw_to_dev(hw), fvit);
5464 	}
5465 
5466 	devm_kfree(ice_hw_to_dev(hw), rm->root_buf);
5467 	kfree(rm);
5468 
5469 err_free_lkup_exts:
5470 	kfree(lkup_exts);
5471 
5472 	return status;
5473 }
5474 
5475 /**
5476  * ice_dummy_packet_add_vlan - insert VLAN header to dummy pkt
5477  *
5478  * @dummy_pkt: dummy packet profile pattern to which VLAN tag(s) will be added
5479  * @num_vlan: number of VLAN tags
5480  */
5481 static struct ice_dummy_pkt_profile *
5482 ice_dummy_packet_add_vlan(const struct ice_dummy_pkt_profile *dummy_pkt,
5483 			  u32 num_vlan)
5484 {
5485 	struct ice_dummy_pkt_profile *profile;
5486 	struct ice_dummy_pkt_offsets *offsets;
5487 	u32 buf_len, off, etype_off, i;
5488 	u8 *pkt;
5489 
5490 	if (num_vlan < 1 || num_vlan > 2)
5491 		return ERR_PTR(-EINVAL);
5492 
5493 	off = num_vlan * VLAN_HLEN;
5494 
5495 	buf_len = array_size(num_vlan, sizeof(ice_dummy_vlan_packet_offsets)) +
5496 		  dummy_pkt->offsets_len;
5497 	offsets = kzalloc(buf_len, GFP_KERNEL);
5498 	if (!offsets)
5499 		return ERR_PTR(-ENOMEM);
5500 
5501 	offsets[0] = dummy_pkt->offsets[0];
5502 	if (num_vlan == 2) {
5503 		offsets[1] = ice_dummy_qinq_packet_offsets[0];
5504 		offsets[2] = ice_dummy_qinq_packet_offsets[1];
5505 	} else if (num_vlan == 1) {
5506 		offsets[1] = ice_dummy_vlan_packet_offsets[0];
5507 	}
5508 
5509 	for (i = 1; dummy_pkt->offsets[i].type != ICE_PROTOCOL_LAST; i++) {
5510 		offsets[i + num_vlan].type = dummy_pkt->offsets[i].type;
5511 		offsets[i + num_vlan].offset =
5512 			dummy_pkt->offsets[i].offset + off;
5513 	}
5514 	offsets[i + num_vlan] = dummy_pkt->offsets[i];
5515 
5516 	etype_off = dummy_pkt->offsets[1].offset;
5517 
5518 	buf_len = array_size(num_vlan, sizeof(ice_dummy_vlan_packet)) +
5519 		  dummy_pkt->pkt_len;
5520 	pkt = kzalloc(buf_len, GFP_KERNEL);
5521 	if (!pkt) {
5522 		kfree(offsets);
5523 		return ERR_PTR(-ENOMEM);
5524 	}
5525 
5526 	memcpy(pkt, dummy_pkt->pkt, etype_off);
5527 	memcpy(pkt + etype_off,
5528 	       num_vlan == 2 ? ice_dummy_qinq_packet : ice_dummy_vlan_packet,
5529 	       off);
5530 	memcpy(pkt + etype_off + off, dummy_pkt->pkt + etype_off,
5531 	       dummy_pkt->pkt_len - etype_off);
5532 
5533 	profile = kzalloc(sizeof(*profile), GFP_KERNEL);
5534 	if (!profile) {
5535 		kfree(offsets);
5536 		kfree(pkt);
5537 		return ERR_PTR(-ENOMEM);
5538 	}
5539 
5540 	profile->offsets = offsets;
5541 	profile->pkt = pkt;
5542 	profile->pkt_len = buf_len;
5543 	profile->match |= ICE_PKT_KMALLOC;
5544 
5545 	return profile;
5546 }
5547 
5548 /**
5549  * ice_find_dummy_packet - find dummy packet
5550  *
5551  * @lkups: lookup elements or match criteria for the advanced recipe, one
5552  *	   structure per protocol header
5553  * @lkups_cnt: number of protocols
5554  * @tun_type: tunnel type
5555  *
5556  * Returns the &ice_dummy_pkt_profile corresponding to these lookup params.
5557  */
5558 static const struct ice_dummy_pkt_profile *
5559 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5560 		      enum ice_sw_tunnel_type tun_type)
5561 {
5562 	const struct ice_dummy_pkt_profile *ret = ice_dummy_pkt_profiles;
5563 	u32 match = 0, vlan_count = 0;
5564 	u16 i;
5565 
5566 	switch (tun_type) {
5567 	case ICE_SW_TUN_GTPC:
5568 		match |= ICE_PKT_TUN_GTPC;
5569 		break;
5570 	case ICE_SW_TUN_GTPU:
5571 		match |= ICE_PKT_TUN_GTPU;
5572 		break;
5573 	case ICE_SW_TUN_NVGRE:
5574 		match |= ICE_PKT_TUN_NVGRE;
5575 		break;
5576 	case ICE_SW_TUN_GENEVE:
5577 	case ICE_SW_TUN_VXLAN:
5578 		match |= ICE_PKT_TUN_UDP;
5579 		break;
5580 	default:
5581 		break;
5582 	}
5583 
5584 	for (i = 0; i < lkups_cnt; i++) {
5585 		if (lkups[i].type == ICE_UDP_ILOS)
5586 			match |= ICE_PKT_INNER_UDP;
5587 		else if (lkups[i].type == ICE_TCP_IL)
5588 			match |= ICE_PKT_INNER_TCP;
5589 		else if (lkups[i].type == ICE_IPV6_OFOS)
5590 			match |= ICE_PKT_OUTER_IPV6;
5591 		else if (lkups[i].type == ICE_VLAN_OFOS ||
5592 			 lkups[i].type == ICE_VLAN_EX)
5593 			vlan_count++;
5594 		else if (lkups[i].type == ICE_VLAN_IN)
5595 			vlan_count++;
5596 		else if (lkups[i].type == ICE_ETYPE_OL &&
5597 			 lkups[i].h_u.ethertype.ethtype_id ==
5598 				cpu_to_be16(ICE_IPV6_ETHER_ID) &&
5599 			 lkups[i].m_u.ethertype.ethtype_id ==
5600 				cpu_to_be16(0xFFFF))
5601 			match |= ICE_PKT_OUTER_IPV6;
5602 		else if (lkups[i].type == ICE_ETYPE_IL &&
5603 			 lkups[i].h_u.ethertype.ethtype_id ==
5604 				cpu_to_be16(ICE_IPV6_ETHER_ID) &&
5605 			 lkups[i].m_u.ethertype.ethtype_id ==
5606 				cpu_to_be16(0xFFFF))
5607 			match |= ICE_PKT_INNER_IPV6;
5608 		else if (lkups[i].type == ICE_IPV6_IL)
5609 			match |= ICE_PKT_INNER_IPV6;
5610 		else if (lkups[i].type == ICE_GTP_NO_PAY)
5611 			match |= ICE_PKT_GTP_NOPAY;
5612 		else if (lkups[i].type == ICE_PPPOE) {
5613 			match |= ICE_PKT_PPPOE;
5614 			if (lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
5615 			    htons(PPP_IPV6))
5616 				match |= ICE_PKT_OUTER_IPV6;
5617 		} else if (lkups[i].type == ICE_L2TPV3)
5618 			match |= ICE_PKT_L2TPV3;
5619 	}
5620 
5621 	while (ret->match && (match & ret->match) != ret->match)
5622 		ret++;
5623 
5624 	if (vlan_count != 0)
5625 		ret = ice_dummy_packet_add_vlan(ret, vlan_count);
5626 
5627 	return ret;
5628 }
5629 
5630 /**
5631  * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
5632  *
5633  * @lkups: lookup elements or match criteria for the advanced recipe, one
5634  *	   structure per protocol header
5635  * @lkups_cnt: number of protocols
5636  * @s_rule: stores rule information from the match criteria
5637  * @profile: dummy packet profile (the template, its size and header offsets)
5638  */
5639 static int
5640 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5641 			  struct ice_sw_rule_lkup_rx_tx *s_rule,
5642 			  const struct ice_dummy_pkt_profile *profile)
5643 {
5644 	u8 *pkt;
5645 	u16 i;
5646 
5647 	/* Start with a packet with a pre-defined/dummy content. Then, fill
5648 	 * in the header values to be looked up or matched.
5649 	 */
5650 	pkt = s_rule->hdr_data;
5651 
5652 	memcpy(pkt, profile->pkt, profile->pkt_len);
5653 
5654 	for (i = 0; i < lkups_cnt; i++) {
5655 		const struct ice_dummy_pkt_offsets *offsets = profile->offsets;
5656 		enum ice_protocol_type type;
5657 		u16 offset = 0, len = 0, j;
5658 		bool found = false;
5659 
5660 		/* find the start of this layer; it should be found since this
5661 		 * was already checked when search for the dummy packet
5662 		 */
5663 		type = lkups[i].type;
5664 		/* metadata isn't present in the packet */
5665 		if (type == ICE_HW_METADATA)
5666 			continue;
5667 
5668 		for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
5669 			if (type == offsets[j].type) {
5670 				offset = offsets[j].offset;
5671 				found = true;
5672 				break;
5673 			}
5674 		}
5675 		/* this should never happen in a correct calling sequence */
5676 		if (!found)
5677 			return -EINVAL;
5678 
5679 		switch (lkups[i].type) {
5680 		case ICE_MAC_OFOS:
5681 		case ICE_MAC_IL:
5682 			len = sizeof(struct ice_ether_hdr);
5683 			break;
5684 		case ICE_ETYPE_OL:
5685 		case ICE_ETYPE_IL:
5686 			len = sizeof(struct ice_ethtype_hdr);
5687 			break;
5688 		case ICE_VLAN_OFOS:
5689 		case ICE_VLAN_EX:
5690 		case ICE_VLAN_IN:
5691 			len = sizeof(struct ice_vlan_hdr);
5692 			break;
5693 		case ICE_IPV4_OFOS:
5694 		case ICE_IPV4_IL:
5695 			len = sizeof(struct ice_ipv4_hdr);
5696 			break;
5697 		case ICE_IPV6_OFOS:
5698 		case ICE_IPV6_IL:
5699 			len = sizeof(struct ice_ipv6_hdr);
5700 			break;
5701 		case ICE_TCP_IL:
5702 		case ICE_UDP_OF:
5703 		case ICE_UDP_ILOS:
5704 			len = sizeof(struct ice_l4_hdr);
5705 			break;
5706 		case ICE_SCTP_IL:
5707 			len = sizeof(struct ice_sctp_hdr);
5708 			break;
5709 		case ICE_NVGRE:
5710 			len = sizeof(struct ice_nvgre_hdr);
5711 			break;
5712 		case ICE_VXLAN:
5713 		case ICE_GENEVE:
5714 			len = sizeof(struct ice_udp_tnl_hdr);
5715 			break;
5716 		case ICE_GTP_NO_PAY:
5717 		case ICE_GTP:
5718 			len = sizeof(struct ice_udp_gtp_hdr);
5719 			break;
5720 		case ICE_PPPOE:
5721 			len = sizeof(struct ice_pppoe_hdr);
5722 			break;
5723 		case ICE_L2TPV3:
5724 			len = sizeof(struct ice_l2tpv3_sess_hdr);
5725 			break;
5726 		default:
5727 			return -EINVAL;
5728 		}
5729 
5730 		/* the length should be a word multiple */
5731 		if (len % ICE_BYTES_PER_WORD)
5732 			return -EIO;
5733 
5734 		/* We have the offset to the header start, the length, the
5735 		 * caller's header values and mask. Use this information to
5736 		 * copy the data into the dummy packet appropriately based on
5737 		 * the mask. Note that we need to only write the bits as
5738 		 * indicated by the mask to make sure we don't improperly write
5739 		 * over any significant packet data.
5740 		 */
5741 		for (j = 0; j < len / sizeof(u16); j++) {
5742 			u16 *ptr = (u16 *)(pkt + offset);
5743 			u16 mask = lkups[i].m_raw[j];
5744 
5745 			if (!mask)
5746 				continue;
5747 
5748 			ptr[j] = (ptr[j] & ~mask) | (lkups[i].h_raw[j] & mask);
5749 		}
5750 	}
5751 
5752 	s_rule->hdr_len = cpu_to_le16(profile->pkt_len);
5753 
5754 	return 0;
5755 }
5756 
5757 /**
5758  * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
5759  * @hw: pointer to the hardware structure
5760  * @tun_type: tunnel type
5761  * @pkt: dummy packet to fill in
5762  * @offsets: offset info for the dummy packet
5763  */
5764 static int
5765 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
5766 			u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
5767 {
5768 	u16 open_port, i;
5769 
5770 	switch (tun_type) {
5771 	case ICE_SW_TUN_VXLAN:
5772 		if (!ice_get_open_tunnel_port(hw, &open_port, TNL_VXLAN))
5773 			return -EIO;
5774 		break;
5775 	case ICE_SW_TUN_GENEVE:
5776 		if (!ice_get_open_tunnel_port(hw, &open_port, TNL_GENEVE))
5777 			return -EIO;
5778 		break;
5779 	default:
5780 		/* Nothing needs to be done for this tunnel type */
5781 		return 0;
5782 	}
5783 
5784 	/* Find the outer UDP protocol header and insert the port number */
5785 	for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
5786 		if (offsets[i].type == ICE_UDP_OF) {
5787 			struct ice_l4_hdr *hdr;
5788 			u16 offset;
5789 
5790 			offset = offsets[i].offset;
5791 			hdr = (struct ice_l4_hdr *)&pkt[offset];
5792 			hdr->dst_port = cpu_to_be16(open_port);
5793 
5794 			return 0;
5795 		}
5796 	}
5797 
5798 	return -EIO;
5799 }
5800 
5801 /**
5802  * ice_fill_adv_packet_vlan - fill dummy packet with VLAN tag type
5803  * @hw: pointer to hw structure
5804  * @vlan_type: VLAN tag type
5805  * @pkt: dummy packet to fill in
5806  * @offsets: offset info for the dummy packet
5807  */
5808 static int
5809 ice_fill_adv_packet_vlan(struct ice_hw *hw, u16 vlan_type, u8 *pkt,
5810 			 const struct ice_dummy_pkt_offsets *offsets)
5811 {
5812 	u16 i;
5813 
5814 	/* Check if there is something to do */
5815 	if (!vlan_type || !ice_is_dvm_ena(hw))
5816 		return 0;
5817 
5818 	/* Find VLAN header and insert VLAN TPID */
5819 	for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
5820 		if (offsets[i].type == ICE_VLAN_OFOS ||
5821 		    offsets[i].type == ICE_VLAN_EX) {
5822 			struct ice_vlan_hdr *hdr;
5823 			u16 offset;
5824 
5825 			offset = offsets[i].offset;
5826 			hdr = (struct ice_vlan_hdr *)&pkt[offset];
5827 			hdr->type = cpu_to_be16(vlan_type);
5828 
5829 			return 0;
5830 		}
5831 	}
5832 
5833 	return -EIO;
5834 }
5835 
5836 static bool ice_rules_equal(const struct ice_adv_rule_info *first,
5837 			    const struct ice_adv_rule_info *second)
5838 {
5839 	return first->sw_act.flag == second->sw_act.flag &&
5840 	       first->tun_type == second->tun_type &&
5841 	       first->vlan_type == second->vlan_type &&
5842 	       first->src_vsi == second->src_vsi;
5843 }
5844 
5845 /**
5846  * ice_find_adv_rule_entry - Search a rule entry
5847  * @hw: pointer to the hardware structure
5848  * @lkups: lookup elements or match criteria for the advanced recipe, one
5849  *	   structure per protocol header
5850  * @lkups_cnt: number of protocols
5851  * @recp_id: recipe ID for which we are finding the rule
5852  * @rinfo: other information regarding the rule e.g. priority and action info
5853  *
5854  * Helper function to search for a given advance rule entry
5855  * Returns pointer to entry storing the rule if found
5856  */
5857 static struct ice_adv_fltr_mgmt_list_entry *
5858 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5859 			u16 lkups_cnt, u16 recp_id,
5860 			struct ice_adv_rule_info *rinfo)
5861 {
5862 	struct ice_adv_fltr_mgmt_list_entry *list_itr;
5863 	struct ice_switch_info *sw = hw->switch_info;
5864 	int i;
5865 
5866 	list_for_each_entry(list_itr, &sw->recp_list[recp_id].filt_rules,
5867 			    list_entry) {
5868 		bool lkups_matched = true;
5869 
5870 		if (lkups_cnt != list_itr->lkups_cnt)
5871 			continue;
5872 		for (i = 0; i < list_itr->lkups_cnt; i++)
5873 			if (memcmp(&list_itr->lkups[i], &lkups[i],
5874 				   sizeof(*lkups))) {
5875 				lkups_matched = false;
5876 				break;
5877 			}
5878 		if (ice_rules_equal(rinfo, &list_itr->rule_info) &&
5879 		    lkups_matched)
5880 			return list_itr;
5881 	}
5882 	return NULL;
5883 }
5884 
/**
 * ice_adv_add_update_vsi_list
 * @hw: pointer to the hardware structure
 * @m_entry: pointer to current adv filter management list entry
 * @cur_fltr: filter information from the book keeping entry
 * @new_fltr: filter information with the new VSI to be added
 *
 * Call AQ command to add or update previously created VSI list with new VSI.
 *
 * Helper function to do book keeping associated with adding filter information
 * The algorithm to do the booking keeping is described below :
 * When a VSI needs to subscribe to a given advanced filter
 *	if only one VSI has been added till now
 *		Allocate a new VSI list and add two VSIs
 *		to this list using switch rule command
 *		Update the previously created switch rule with the
 *		newly created VSI list ID
 *	if a VSI list was previously created
 *		Add the new VSI to the previously created VSI list set
 *		using the update switch rule command
 *
 * Return: 0 on success; -EOPNOTSUPP for action combinations that cannot
 * share a VSI list; -EEXIST if the VSI is already subscribed; -EIO if the
 * entry claims to use a VSI list but has no list info; otherwise the status
 * of the underlying switch-rule AQ calls.
 */
static int
ice_adv_add_update_vsi_list(struct ice_hw *hw,
			    struct ice_adv_fltr_mgmt_list_entry *m_entry,
			    struct ice_adv_rule_info *cur_fltr,
			    struct ice_adv_rule_info *new_fltr)
{
	u16 vsi_list_id = 0;
	int status;

	/* Queue/queue-group/drop actions target a single destination and
	 * cannot be converted into a VSI-list rule.
	 */
	if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
	    cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
	    cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
		return -EOPNOTSUPP;

	/* Mixing a queue-directed new filter with an existing VSI-directed
	 * one is likewise unsupported.
	 */
	if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
		return -EOPNOTSUPP;

	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		 /* Only one entry existed in the mapping and it was not already
		  * a part of a VSI list. So, create a VSI list with the old and
		  * new VSIs.
		  */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];

		/* A rule already exists with the new VSI being added */
		if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
		    new_fltr->sw_act.fwd_id.hw_vsi_id)
			return -EEXIST;

		vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
		vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id,
						  ICE_SW_LKUP_LAST);
		if (status)
			return status;

		memset(&tmp_fltr, 0, sizeof(tmp_fltr));
		tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;

		/* Update the previous switch rule of "forward to VSI" to
		 * "fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			return status;

		cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
		/* NOTE(review): ice_create_vsi_list_map() may return NULL on
		 * allocation failure and the result is not checked here —
		 * confirm whether callers tolerate a NULL vsi_list_info.
		 */
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);
	} else {
		u16 vsi_handle = new_fltr->sw_act.vsi_handle;

		if (!m_entry->vsi_list_info)
			return -EIO;

		/* A rule already exists with the new VSI being added */
		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
			return 0;

		/* Update the previously created VSI list set with
		 * the new VSI ID passed in
		 */
		vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;

		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
						  vsi_list_id, false,
						  ice_aqc_opc_update_sw_rules,
						  ICE_SW_LKUP_LAST);
		/* update VSI list mapping info with new VSI ID */
		if (!status)
			set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
	}
	if (!status)
		m_entry->vsi_count++;
	return status;
}
5993 
/**
 * ice_rule_add_tunnel_metadata - add tunnel-flag metadata match to a lookup
 * @lkup: lookup element to fill in
 *
 * Marks @lkup as a HW-metadata lookup and sets the tunnel packet-flags mask.
 */
void ice_rule_add_tunnel_metadata(struct ice_adv_lkup_elem *lkup)
{
	lkup->type = ICE_HW_METADATA;
	lkup->m_u.metadata.flags[ICE_PKT_FLAGS_TUNNEL] =
		cpu_to_be16(ICE_PKT_TUNNEL_MASK);
}
6000 
/**
 * ice_rule_add_vlan_metadata - add VLAN-flag metadata match to a lookup
 * @lkup: lookup element to fill in
 *
 * Marks @lkup as a HW-metadata lookup and sets the VLAN packet-flags mask.
 */
void ice_rule_add_vlan_metadata(struct ice_adv_lkup_elem *lkup)
{
	lkup->type = ICE_HW_METADATA;
	lkup->m_u.metadata.flags[ICE_PKT_FLAGS_VLAN] =
		cpu_to_be16(ICE_PKT_VLAN_MASK);
}
6007 
/**
 * ice_rule_add_src_vsi_metadata - add source-VSI metadata match to a lookup
 * @lkup: lookup element to fill in
 *
 * Marks @lkup as a HW-metadata lookup and sets the source-VSI MDID mask.
 */
void ice_rule_add_src_vsi_metadata(struct ice_adv_lkup_elem *lkup)
{
	lkup->type = ICE_HW_METADATA;
	lkup->m_u.metadata.source_vsi = cpu_to_be16(ICE_MDID_SOURCE_VSI_MASK);
}
6013 
/**
 * ice_add_adv_rule - helper function to create an advanced switch rule
 * @hw: pointer to the hardware structure
 * @lkups: information on the words that needs to be looked up. All words
 * together makes one recipe
 * @lkups_cnt: num of entries in the lkups array
 * @rinfo: other information related to the rule that needs to be programmed
 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
 *               ignored in case of error.
 *
 * This function can program only 1 rule at a time. The lkups is used to
 * describe the all the words that forms the "lookup" portion of the recipe.
 * These words can span multiple protocols. Callers to this function need to
 * pass in a list of protocol headers with lookup information along and mask
 * that determines which words are valid from the given protocol header.
 * rinfo describes other information related to this rule such as forwarding
 * IDs, priority of this rule, etc.
 *
 * Return: 0 on success; -EINVAL for missing/empty lookups or an invalid VSI
 * handle; -ENOSPC when the match exceeds ICE_MAX_CHAIN_WORDS; -ENOMEM on
 * allocation failure; -EIO for unsupported actions; otherwise the status of
 * recipe creation or the add-switch-rules AQ call.
 */
int
ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
		 struct ice_rule_query_data *added_entry)
{
	struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
	struct ice_sw_rule_lkup_rx_tx *s_rule = NULL;
	const struct ice_dummy_pkt_profile *profile;
	u16 rid = 0, i, rule_buf_sz, vsi_handle;
	struct list_head *rule_head;
	struct ice_switch_info *sw;
	u16 word_cnt;
	u32 act = 0;
	int status;
	u8 q_rgn;

	/* Initialize profile to result index bitmap */
	if (!hw->switch_info->prof_res_bm_init) {
		hw->switch_info->prof_res_bm_init = 1;
		ice_init_prof_result_bm(hw);
	}

	if (!lkups_cnt)
		return -EINVAL;

	/* get # of words we need to match */
	word_cnt = 0;
	for (i = 0; i < lkups_cnt; i++) {
		u16 j;

		/* count every 16-bit word with a non-zero mask */
		for (j = 0; j < ARRAY_SIZE(lkups->m_raw); j++)
			if (lkups[i].m_raw[j])
				word_cnt++;
	}

	if (!word_cnt)
		return -EINVAL;

	if (word_cnt > ICE_MAX_CHAIN_WORDS)
		return -ENOSPC;

	/* locate a dummy packet */
	profile = ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type);
	if (IS_ERR(profile))
		return PTR_ERR(profile);

	/* only forward-to-VSI/queue/queue-group and drop are programmable */
	if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
	      rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
	      rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
	      rinfo->sw_act.fltr_act == ICE_DROP_PACKET)) {
		status = -EIO;
		goto free_pkt_profile;
	}

	vsi_handle = rinfo->sw_act.vsi_handle;
	if (!ice_is_vsi_valid(hw, vsi_handle)) {
		status =  -EINVAL;
		goto free_pkt_profile;
	}

	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
		rinfo->sw_act.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, vsi_handle);

	/* default the rule's source to the destination VSI when the caller
	 * did not supply an explicit source VSI
	 */
	if (rinfo->src_vsi)
		rinfo->sw_act.src = ice_get_hw_vsi_num(hw, rinfo->src_vsi);
	else
		rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);

	status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
	if (status)
		goto free_pkt_profile;
	m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
	if (m_entry) {
		/* we have to add VSI to VSI_LIST and increment vsi_count.
		 * Also Update VSI list so that we can change forwarding rule
		 * if the rule already exists, we will check if it exists with
		 * same vsi_id, if not then add it to the VSI list if it already
		 * exists if not then create a VSI list and add the existing VSI
		 * ID and the new VSI ID to the list
		 * We will add that VSI to the list
		 */
		status = ice_adv_add_update_vsi_list(hw, m_entry,
						     &m_entry->rule_info,
						     rinfo);
		if (added_entry) {
			added_entry->rid = rid;
			added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
			added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
		}
		goto free_pkt_profile;
	}
	rule_buf_sz = ICE_SW_RULE_RX_TX_HDR_SIZE(s_rule, profile->pkt_len);
	s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
	if (!s_rule) {
		status = -ENOMEM;
		goto free_pkt_profile;
	}
	/* default to LAN + loopback enabled unless the caller provided
	 * explicit action flags
	 */
	if (!rinfo->flags_info.act_valid) {
		act |= ICE_SINGLE_ACT_LAN_ENABLE;
		act |= ICE_SINGLE_ACT_LB_ENABLE;
	} else {
		act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE |
						ICE_SINGLE_ACT_LB_ENABLE);
	}

	switch (rinfo->sw_act.fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
			ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
		       ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_FWD_TO_QGRP:
		/* queue region size is encoded as log2 of the group size */
		q_rgn = rinfo->sw_act.qgrp_size > 0 ?
			(u8)ilog2(rinfo->sw_act.qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
		       ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
		       ICE_SINGLE_ACT_Q_REGION_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
		       ICE_SINGLE_ACT_VALID_BIT;
		break;
	default:
		status = -EIO;
		goto err_ice_add_adv_rule;
	}

	/* If there is no matching criteria for direction there
	 * is only one difference between Rx and Tx:
	 * - get switch id base on VSI number from source field (Tx)
	 * - get switch id base on port number (Rx)
	 *
	 * If matching on direction metadata is chose rule direction is
	 * extracted from type value set here.
	 */
	if (rinfo->sw_act.flag & ICE_FLTR_TX) {
		s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
		s_rule->src = cpu_to_le16(rinfo->sw_act.src);
	} else {
		s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
		s_rule->src = cpu_to_le16(hw->port_info->lport);
	}

	s_rule->recipe_id = cpu_to_le16(rid);
	s_rule->act = cpu_to_le32(act);

	status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, profile);
	if (status)
		goto err_ice_add_adv_rule;

	status = ice_fill_adv_packet_tun(hw, rinfo->tun_type, s_rule->hdr_data,
					 profile->offsets);
	if (status)
		goto err_ice_add_adv_rule;

	status = ice_fill_adv_packet_vlan(hw, rinfo->vlan_type,
					  s_rule->hdr_data,
					  profile->offsets);
	if (status)
		goto err_ice_add_adv_rule;

	status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
				 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
				 NULL);
	if (status)
		goto err_ice_add_adv_rule;
	adv_fltr = devm_kzalloc(ice_hw_to_dev(hw),
				sizeof(struct ice_adv_fltr_mgmt_list_entry),
				GFP_KERNEL);
	if (!adv_fltr) {
		status = -ENOMEM;
		goto err_ice_add_adv_rule;
	}

	adv_fltr->lkups = devm_kmemdup(ice_hw_to_dev(hw), lkups,
				       lkups_cnt * sizeof(*lkups), GFP_KERNEL);
	if (!adv_fltr->lkups) {
		status = -ENOMEM;
		goto err_ice_add_adv_rule;
	}

	adv_fltr->lkups_cnt = lkups_cnt;
	adv_fltr->rule_info = *rinfo;
	adv_fltr->rule_info.fltr_rule_id = le16_to_cpu(s_rule->index);
	sw = hw->switch_info;
	sw->recp_list[rid].adv_rule = true;
	rule_head = &sw->recp_list[rid].filt_rules;

	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
		adv_fltr->vsi_count = 1;

	/* Add rule entry to book keeping list */
	list_add(&adv_fltr->list_entry, rule_head);
	if (added_entry) {
		added_entry->rid = rid;
		added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
		added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
	}
err_ice_add_adv_rule:
	/* on failure, unwind the partially-built management entry */
	if (status && adv_fltr) {
		devm_kfree(ice_hw_to_dev(hw), adv_fltr->lkups);
		devm_kfree(ice_hw_to_dev(hw), adv_fltr);
	}

	kfree(s_rule);

free_pkt_profile:
	/* only dynamically-built profiles are owned by us */
	if (profile->match & ICE_PKT_KMALLOC) {
		kfree(profile->offsets);
		kfree(profile->pkt);
		kfree(profile);
	}

	return status;
}
6255 
/**
 * ice_replay_vsi_fltr - Replay filters for requested VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: driver VSI handle
 * @recp_id: Recipe ID for which rules need to be replayed
 * @list_head: list for which filters need to be replayed
 *
 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
 * It is required to pass valid VSI handle.
 *
 * Return: 0 on success (or when the list is empty), otherwise the first
 * non-zero status from re-adding a rule.
 */
static int
ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
		    struct list_head *list_head)
{
	struct ice_fltr_mgmt_list_entry *itr;
	int status = 0;
	u16 hw_vsi_id;

	if (list_empty(list_head))
		return status;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	list_for_each_entry(itr, list_head, list_entry) {
		struct ice_fltr_list_entry f_entry;

		f_entry.fltr_info = itr->fltr_info;
		/* Single-VSI, non-VLAN rules that target this VSI are
		 * replayed directly from the stored filter info.
		 */
		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
		    itr->fltr_info.vsi_handle == vsi_handle) {
			/* update the src in case it is VSI num */
			if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
				f_entry.fltr_info.src = hw_vsi_id;
			status = ice_add_rule_internal(hw, recp_id, &f_entry);
			if (status)
				goto end;
			continue;
		}
		/* skip VSI-list rules this VSI is not subscribed to */
		if (!itr->vsi_list_info ||
		    !test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
			continue;
		/* Clearing it so that the logic can add it back */
		clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
		f_entry.fltr_info.vsi_handle = vsi_handle;
		f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
		/* update the src in case it is VSI num */
		if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
			f_entry.fltr_info.src = hw_vsi_id;
		if (recp_id == ICE_SW_LKUP_VLAN)
			status = ice_add_vlan_internal(hw, &f_entry);
		else
			status = ice_add_rule_internal(hw, recp_id, &f_entry);
		if (status)
			goto end;
	}
end:
	return status;
}
6312 
/**
 * ice_adv_rem_update_vsi_list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry for which the VSI list management needs to
 *	     be done
 *
 * Removes @vsi_handle from the VSI list behind an advanced rule. When only
 * one VSI remains afterwards, the rule is converted back to a plain
 * forward-to-VSI rule and the now-unneeded VSI list is freed.
 *
 * Return: 0 on success; -EINVAL for a non-VSI-list rule or empty entry;
 * -ENOENT when the VSI is not on the list; -EIO when the remaining VSI
 * handle is invalid; otherwise the status of the switch-rule AQ calls.
 */
static int
ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			    struct ice_adv_fltr_mgmt_list_entry *fm_list)
{
	struct ice_vsi_list_map_info *vsi_list_info;
	enum ice_sw_lkup_type lkup_type;
	u16 vsi_list_id;
	int status;

	if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return -EINVAL;

	/* A rule with the VSI being removed does not exist.
	 * NOTE(review): fm_list->vsi_list_info is dereferenced here without a
	 * NULL check — confirm callers guarantee it is set for VSI-list rules.
	 */
	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
		return -ENOENT;

	lkup_type = ICE_SW_LKUP_LAST;
	vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	fm_list->vsi_count--;
	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
	vsi_list_info = fm_list->vsi_list_info;
	if (fm_list->vsi_count == 1) {
		struct ice_fltr_info tmp_fltr;
		u16 rem_vsi_handle;

		/* exactly one subscriber left: fold the list back into a
		 * direct forward-to-VSI rule
		 */
		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
						ICE_MAX_VSI);
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return -EIO;

		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		memset(&tmp_fltr, 0, sizeof(tmp_fltr));
		tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
		tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
		fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;

		/* Update the previous switch rule of "fwd to VSI list" to
		 * "forward to VSI" for the single remaining subscriber
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr.fwd_id.hw_vsi_id, status);
			return status;
		}
		fm_list->vsi_list_info->ref_cnt--;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
			return status;
		}

		list_del(&vsi_list_info->list_entry);
		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}
6402 
/**
 * ice_rem_adv_rule - removes existing advanced switch rule
 * @hw: pointer to the hardware structure
 * @lkups: information on the words that needs to be looked up. All words
 *         together makes one recipe
 * @lkups_cnt: num of entries in the lkups array
 * @rinfo: Its the pointer to the rule information for the rule
 *
 * This function can be used to remove 1 rule at a time. The lkups is
 * used to describe all the words that forms the "lookup" portion of the
 * rule. These words can span multiple protocols. Callers to this function
 * need to pass in a list of protocol headers with lookup information along
 * and mask that determines which words are valid from the given protocol
 * header. rinfo describes other information related to this rule such as
 * forwarding IDs, priority of this rule, etc.
 *
 * Return: 0 on success (including an already-removed rule); -EIO for bad
 * lookups; -EINVAL when no matching recipe exists; -ENOMEM on allocation
 * failure; otherwise the status of the VSI-list update or remove-rules
 * AQ call.
 */
static int
ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
{
	struct ice_adv_fltr_mgmt_list_entry *list_elem;
	struct ice_prot_lkup_ext lkup_exts;
	bool remove_rule = false;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	u16 i, rid, vsi_handle;
	int status = 0;

	memset(&lkup_exts, 0, sizeof(lkup_exts));
	for (i = 0; i < lkups_cnt; i++) {
		u16 count;

		if (lkups[i].type >= ICE_PROTOCOL_LAST)
			return -EIO;

		count = ice_fill_valid_words(&lkups[i], &lkup_exts);
		if (!count)
			return -EIO;
	}

	rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
	/* If did not find a recipe that match the existing criteria */
	if (rid == ICE_MAX_NUM_RECIPES)
		return -EINVAL;

	rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
	/* NOTE(review): the list search below runs before rule_lock is
	 * taken — confirm callers serialize add/remove on this recipe.
	 */
	list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
	/* the rule is already removed */
	if (!list_elem)
		return 0;
	mutex_lock(rule_lock);
	if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
		remove_rule = true;
	} else if (list_elem->vsi_count > 1) {
		/* other VSIs still subscribed: only drop this VSI from
		 * the list, keep the rule
		 */
		remove_rule = false;
		vsi_handle = rinfo->sw_act.vsi_handle;
		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
	} else {
		vsi_handle = rinfo->sw_act.vsi_handle;
		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
		if (status) {
			mutex_unlock(rule_lock);
			return status;
		}
		if (list_elem->vsi_count == 0)
			remove_rule = true;
	}
	mutex_unlock(rule_lock);
	if (remove_rule) {
		struct ice_sw_rule_lkup_rx_tx *s_rule;
		u16 rule_buf_sz;

		rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule);
		s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
		if (!s_rule)
			return -ENOMEM;
		s_rule->act = 0;
		s_rule->index = cpu_to_le16(list_elem->rule_info.fltr_rule_id);
		s_rule->hdr_len = 0;
		status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
					 rule_buf_sz, 1,
					 ice_aqc_opc_remove_sw_rules, NULL);
		/* -ENOENT means firmware already lost the rule; still purge
		 * our book keeping for it
		 */
		if (!status || status == -ENOENT) {
			struct ice_switch_info *sw = hw->switch_info;

			mutex_lock(rule_lock);
			list_del(&list_elem->list_entry);
			devm_kfree(ice_hw_to_dev(hw), list_elem->lkups);
			devm_kfree(ice_hw_to_dev(hw), list_elem);
			mutex_unlock(rule_lock);
			if (list_empty(&sw->recp_list[rid].filt_rules))
				sw->recp_list[rid].adv_rule = false;
		}
		kfree(s_rule);
	}
	return status;
}
6499 
6500 /**
6501  * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
6502  * @hw: pointer to the hardware structure
6503  * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
6504  *
6505  * This function is used to remove 1 rule at a time. The removal is based on
6506  * the remove_entry parameter. This function will remove rule for a given
6507  * vsi_handle with a given rule_id which is passed as parameter in remove_entry
6508  */
6509 int
6510 ice_rem_adv_rule_by_id(struct ice_hw *hw,
6511 		       struct ice_rule_query_data *remove_entry)
6512 {
6513 	struct ice_adv_fltr_mgmt_list_entry *list_itr;
6514 	struct list_head *list_head;
6515 	struct ice_adv_rule_info rinfo;
6516 	struct ice_switch_info *sw;
6517 
6518 	sw = hw->switch_info;
6519 	if (!sw->recp_list[remove_entry->rid].recp_created)
6520 		return -EINVAL;
6521 	list_head = &sw->recp_list[remove_entry->rid].filt_rules;
6522 	list_for_each_entry(list_itr, list_head, list_entry) {
6523 		if (list_itr->rule_info.fltr_rule_id ==
6524 		    remove_entry->rule_id) {
6525 			rinfo = list_itr->rule_info;
6526 			rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
6527 			return ice_rem_adv_rule(hw, list_itr->lkups,
6528 						list_itr->lkups_cnt, &rinfo);
6529 		}
6530 	}
6531 	/* either list is empty or unable to find rule */
6532 	return -ENOENT;
6533 }
6534 
6535 /**
6536  * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
6537  *                            given VSI handle
6538  * @hw: pointer to the hardware structure
6539  * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
6540  *
6541  * This function is used to remove all the rules for a given VSI and as soon
6542  * as removing a rule fails, it will return immediately with the error code,
6543  * else it will return success.
6544  */
6545 int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
6546 {
6547 	struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
6548 	struct ice_vsi_list_map_info *map_info;
6549 	struct ice_adv_rule_info rinfo;
6550 	struct list_head *list_head;
6551 	struct ice_switch_info *sw;
6552 	int status;
6553 	u8 rid;
6554 
6555 	sw = hw->switch_info;
6556 	for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
6557 		if (!sw->recp_list[rid].recp_created)
6558 			continue;
6559 		if (!sw->recp_list[rid].adv_rule)
6560 			continue;
6561 
6562 		list_head = &sw->recp_list[rid].filt_rules;
6563 		list_for_each_entry_safe(list_itr, tmp_entry, list_head,
6564 					 list_entry) {
6565 			rinfo = list_itr->rule_info;
6566 
6567 			if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
6568 				map_info = list_itr->vsi_list_info;
6569 				if (!map_info)
6570 					continue;
6571 
6572 				if (!test_bit(vsi_handle, map_info->vsi_map))
6573 					continue;
6574 			} else if (rinfo.sw_act.vsi_handle != vsi_handle) {
6575 				continue;
6576 			}
6577 
6578 			rinfo.sw_act.vsi_handle = vsi_handle;
6579 			status = ice_rem_adv_rule(hw, list_itr->lkups,
6580 						  list_itr->lkups_cnt, &rinfo);
6581 			if (status)
6582 				return status;
6583 		}
6584 	}
6585 	return 0;
6586 }
6587 
6588 /**
6589  * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
6590  * @hw: pointer to the hardware structure
6591  * @vsi_handle: driver VSI handle
6592  * @list_head: list for which filters need to be replayed
6593  *
6594  * Replay the advanced rule for the given VSI.
6595  */
6596 static int
6597 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
6598 			struct list_head *list_head)
6599 {
6600 	struct ice_rule_query_data added_entry = { 0 };
6601 	struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
6602 	int status = 0;
6603 
6604 	if (list_empty(list_head))
6605 		return status;
6606 	list_for_each_entry(adv_fltr, list_head, list_entry) {
6607 		struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
6608 		u16 lk_cnt = adv_fltr->lkups_cnt;
6609 
6610 		if (vsi_handle != rinfo->sw_act.vsi_handle)
6611 			continue;
6612 		status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
6613 					  &added_entry);
6614 		if (status)
6615 			break;
6616 	}
6617 	return status;
6618 }
6619 
6620 /**
6621  * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
6622  * @hw: pointer to the hardware structure
6623  * @vsi_handle: driver VSI handle
6624  *
6625  * Replays filters for requested VSI via vsi_handle.
6626  */
6627 int ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
6628 {
6629 	struct ice_switch_info *sw = hw->switch_info;
6630 	int status;
6631 	u8 i;
6632 
6633 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6634 		struct list_head *head;
6635 
6636 		head = &sw->recp_list[i].filt_replay_rules;
6637 		if (!sw->recp_list[i].adv_rule)
6638 			status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
6639 		else
6640 			status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
6641 		if (status)
6642 			return status;
6643 	}
6644 	return status;
6645 }
6646 
6647 /**
6648  * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
6649  * @hw: pointer to the HW struct
6650  *
6651  * Deletes the filter replay rules.
6652  */
6653 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
6654 {
6655 	struct ice_switch_info *sw = hw->switch_info;
6656 	u8 i;
6657 
6658 	if (!sw)
6659 		return;
6660 
6661 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6662 		if (!list_empty(&sw->recp_list[i].filt_replay_rules)) {
6663 			struct list_head *l_head;
6664 
6665 			l_head = &sw->recp_list[i].filt_replay_rules;
6666 			if (!sw->recp_list[i].adv_rule)
6667 				ice_rem_sw_rule_info(hw, l_head);
6668 			else
6669 				ice_rem_adv_rule_info(hw, l_head);
6670 		}
6671 	}
6672 }
6673