1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 #include "ice_lib.h"
5 #include "ice_switch.h"
6 
#define ICE_ETH_DA_OFFSET		0	/* dest MAC offset in dummy Ethernet header */
#define ICE_ETH_ETHTYPE_OFFSET		12	/* EtherType offset (header without VLAN tag) */
#define ICE_ETH_VLAN_TCI_OFFSET		14	/* VLAN TCI offset (header with VLAN tag) */
#define ICE_MAX_VLAN_ID			0xFFF	/* VLAN ID is a 12-bit field */
#define ICE_IPV6_ETHER_ID		0x86DD	/* EtherType value for IPv6 */
12 
13 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
14  * struct to configure any switch filter rules.
15  * {DA (6 bytes), SA(6 bytes),
16  * Ether type (2 bytes for header without VLAN tag) OR
17  * VLAN tag (4 bytes for header with VLAN tag) }
18  *
19  * Word on Hardcoded values
20  * byte 0 = 0x2: to identify it as locally administered DA MAC
21  * byte 6 = 0x2: to identify it as locally administered SA MAC
22  * byte 12 = 0x81 & byte 13 = 0x00:
23  *	In case of VLAN filter first two bytes defines ether type (0x8100)
24  *	and remaining two bytes are placeholder for programming a given VLAN ID
25  *	In case of Ether type filter it is treated as header without VLAN tag
26  *	and byte 12 and 13 is used to program a given Ether type instead
27  */
/* DA (6) + SA (6) + EtherType/VLAN word (4) = 16 bytes, see layout above */
#define DUMMY_ETH_HDR_LEN		16
static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
							0x2, 0, 0, 0, 0, 0,
							0x81, 0, 0, 0};
32 
/* Match flags describing which dummy packet profile fits a filter rule;
 * combined into the 'match' field of struct ice_dummy_pkt_profile.
 */
enum {
	ICE_PKT_OUTER_IPV6	= BIT(0),	/* outer IP header is IPv6 */
	ICE_PKT_TUN_GTPC	= BIT(1),	/* GTP-C tunnel */
	ICE_PKT_TUN_GTPU	= BIT(2),	/* GTP-U tunnel */
	ICE_PKT_TUN_NVGRE	= BIT(3),	/* NVGRE tunnel */
	ICE_PKT_TUN_UDP		= BIT(4),	/* UDP tunnel (VXLAN/GENEVE/VXLAN-GPE) */
	ICE_PKT_INNER_IPV6	= BIT(5),	/* inner IP header is IPv6 */
	ICE_PKT_INNER_TCP	= BIT(6),	/* inner transport is TCP */
	ICE_PKT_INNER_UDP	= BIT(7),	/* inner transport is UDP */
	ICE_PKT_GTP_NOPAY	= BIT(8),	/* GTP header with no payload */
	ICE_PKT_KMALLOC		= BIT(9),	/* presumably: profile was dynamically
						 * allocated — confirm at usage sites
						 */
};
45 
/* One protocol header and its byte offset within a dummy packet template */
struct ice_dummy_pkt_offsets {
	enum ice_protocol_type type;
	u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
};

/* A dummy packet template together with the offset table that describes
 * where each protocol header lives inside it.
 */
struct ice_dummy_pkt_profile {
	const struct ice_dummy_pkt_offsets *offsets;	/* header offset table */
	const u8 *pkt;					/* raw template bytes */
	u32 match;					/* ICE_PKT_* flags this profile satisfies */
	u16 pkt_len;					/* sizeof the template, in bytes */
	u16 offsets_len;				/* sizeof the offset table, in bytes */
};
58 
/* Declare the header offset table for dummy packet <type> */
#define ICE_DECLARE_PKT_OFFSETS(type)					\
	static const struct ice_dummy_pkt_offsets			\
	ice_dummy_##type##_packet_offsets[]

/* Declare the raw byte template for dummy packet <type> */
#define ICE_DECLARE_PKT_TEMPLATE(type)					\
	static const u8 ice_dummy_##type##_packet[]

/* Build an ice_dummy_pkt_profile initializer tying together the template,
 * its offset table and the ICE_PKT_* match flags (m).
 */
#define ICE_PKT_PROFILE(type, m) {					\
	.match		= (m),						\
	.pkt		= ice_dummy_##type##_packet,			\
	.pkt_len	= sizeof(ice_dummy_##type##_packet),		\
	.offsets	= ice_dummy_##type##_packet_offsets,		\
	.offsets_len	= sizeof(ice_dummy_##type##_packet_offsets),	\
}
73 
/* Single VLAN tag fragment (no ICE_PROTOCOL_LAST terminator — this is a
 * partial offset list, not a complete packet).
 */
ICE_DECLARE_PKT_OFFSETS(vlan) = {
	{ ICE_VLAN_OFOS,        12 },
};

/* 802.1Q tag: TPID 0x8100, TCI left zero for the rule to fill in */
ICE_DECLARE_PKT_TEMPLATE(vlan) = {
	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
};

/* Double (QinQ) VLAN tag fragment: outer + inner tag */
ICE_DECLARE_PKT_OFFSETS(qinq) = {
	{ ICE_VLAN_EX,          12 },
	{ ICE_VLAN_IN,          16 },
};

/* Legacy QinQ outer TPID 0x9100 followed by 802.1Q inner TPID 0x8100 */
ICE_DECLARE_PKT_TEMPLATE(qinq) = {
	0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
};
91 
/* Outer MAC + IPv4 + NVGRE + inner MAC + inner IPv4 + TCP */
ICE_DECLARE_PKT_OFFSETS(gre_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_ETYPE_IL,		54 },
	{ ICE_IPV4_IL,		56 },
	{ ICE_TCP_IL,		76 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for the gre_tcp offsets above; zeroed fields are
 * placeholders filled in when the rule is programmed.
 */
ICE_DECLARE_PKT_TEMPLATE(gre_tcp) = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_IL 54 */

	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 76 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};
138 
/* Outer MAC + IPv4 + NVGRE + inner MAC + inner IPv4 + UDP */
ICE_DECLARE_PKT_OFFSETS(gre_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_ETYPE_IL,		54 },
	{ ICE_IPV4_IL,		56 },
	{ ICE_UDP_ILOS,		76 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for the gre_udp offsets above */
ICE_DECLARE_PKT_TEMPLATE(gre_udp) = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_IL 54 */

	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 76 */
	0x00, 0x08, 0x00, 0x00,
};
182 
/* Outer MAC + IPv4 + UDP + tunnel (VXLAN/GENEVE/VXLAN-GPE share offset 42)
 * + inner MAC + inner IPv4 + TCP
 */
ICE_DECLARE_PKT_OFFSETS(udp_tun_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_ETYPE_IL,		62 },
	{ ICE_IPV4_IL,		64 },
	{ ICE_TCP_IL,		84 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for the udp_tun_tcp offsets above */
ICE_DECLARE_PKT_TEMPLATE(udp_tun_tcp) = {
	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
	0x00, 0x46, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_IL 62 */

	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};
235 
/* Outer MAC + IPv4 + UDP + tunnel (VXLAN/GENEVE/VXLAN-GPE share offset 42)
 * + inner MAC + inner IPv4 + UDP
 */
ICE_DECLARE_PKT_OFFSETS(udp_tun_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_ETYPE_IL,		62 },
	{ ICE_IPV4_IL,		64 },
	{ ICE_UDP_ILOS,		84 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for the udp_tun_udp offsets above */
ICE_DECLARE_PKT_TEMPLATE(udp_tun_udp) = {
	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
	0x00, 0x3a, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_IL 62 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
	0x00, 0x08, 0x00, 0x00,
};
285 
/* Outer MAC + IPv4 + NVGRE + inner MAC + inner IPv6 + TCP */
ICE_DECLARE_PKT_OFFSETS(gre_ipv6_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_ETYPE_IL,		54 },
	{ ICE_IPV6_IL,		56 },
	{ ICE_TCP_IL,		96 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for the gre_ipv6_tcp offsets above */
ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x66, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xdd,		/* ICE_ETYPE_IL 54 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */
	0x00, 0x08, 0x06, 0x40,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 96 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};
337 
/* Outer MAC + IPv4 + NVGRE + inner MAC + inner IPv6 + UDP */
ICE_DECLARE_PKT_OFFSETS(gre_ipv6_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_ETYPE_IL,		54 },
	{ ICE_IPV6_IL,		56 },
	{ ICE_UDP_ILOS,		96 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for the gre_ipv6_udp offsets above */
ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_udp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xdd,		/* ICE_ETYPE_IL 54 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */
	0x00, 0x08, 0x11, 0x40,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 96 */
	0x00, 0x08, 0x00, 0x00,
};
386 
/* Outer MAC + IPv4 + UDP + tunnel (VXLAN/GENEVE/VXLAN-GPE share offset 42)
 * + inner MAC + inner IPv6 + TCP
 */
ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_ETYPE_IL,		62 },
	{ ICE_IPV6_IL,		64 },
	{ ICE_TCP_IL,		104 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for the udp_tun_ipv6_tcp offsets above */
ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_tcp) = {
	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x6e, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
	0x00, 0x5a, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xdd,		/* ICE_ETYPE_IL 62 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */
	0x00, 0x08, 0x06, 0x40,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 104 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};
444 
/* Outer MAC + IPv4 + UDP + tunnel (VXLAN/GENEVE/VXLAN-GPE share offset 42)
 * + inner MAC + inner IPv6 + UDP
 */
ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_ETYPE_IL,		62 },
	{ ICE_IPV6_IL,		64 },
	{ ICE_UDP_ILOS,		104 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for the udp_tun_ipv6_udp offsets above */
ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_udp) = {
	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x62, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
	0x00, 0x4e, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xdd,		/* ICE_ETYPE_IL 62 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */
	0x00, 0x08, 0x11, 0x40,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 104 */
	0x00, 0x08, 0x00, 0x00,
};
499 
/* offset info for MAC + IPv4 + UDP dummy packet */
ICE_DECLARE_PKT_OFFSETS(udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_ILOS,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + UDP */
ICE_DECLARE_PKT_TEMPLATE(udp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
528 
/* offset info for MAC + IPv4 + TCP dummy packet */
ICE_DECLARE_PKT_OFFSETS(tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_TCP_IL,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + TCP */
ICE_DECLARE_PKT_TEMPLATE(tcp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
560 
/* offset info for MAC + IPv6 + TCP dummy packet */
ICE_DECLARE_PKT_OFFSETS(tcp_ipv6) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_TCP_IL,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv6 + TCP */
ICE_DECLARE_PKT_TEMPLATE(tcp_ipv6) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
595 
/* IPv6 + UDP */
ICE_DECLARE_PKT_OFFSETS(udp_ipv6) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_ILOS,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* IPv6 + UDP dummy packet */
ICE_DECLARE_PKT_TEMPLATE(udp_ipv6) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x10, 0x11, 0x00, /* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
	0x00, 0x10, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
632 
/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV4_IL,		62 },
	{ ICE_TCP_IL,		82 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for the ipv4_gtpu_ipv4_tcp offsets above */
ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x58, /* IP 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 34 */
	0x00, 0x44, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x34, /* ICE_GTP Header 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x28, /* IP 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* TCP 82 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
680 
/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV4_IL,		62 },
	{ ICE_UDP_ILOS,		82 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for the ipv4_gtpu_ipv4_udp offsets above */
ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_udp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x4c, /* IP 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 34 */
	0x00, 0x38, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28, /* ICE_GTP Header 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x1c, /* IP 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* UDP 82 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
725 
/* Outer IPv4 + Outer UDP + GTP + Inner IPv6 + Inner TCP
 * (offset table shows outer ICE_IPV4_OFOS and inner ICE_IPV6_IL)
 */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV6_IL,		62 },
	{ ICE_TCP_IL,		102 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for the ipv4_gtpu_ipv6_tcp offsets above */
ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x6c, /* IP 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 34 */
	0x00, 0x58, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x48, /* ICE_GTP Header 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
	0x00, 0x14, 0x06, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* TCP 102 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
778 
/* Outer IPv4 + Outer UDP + GTP + Inner IPv6 + Inner UDP */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV6_IL,		62 },
	{ ICE_UDP_ILOS,		102 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for the ipv4_gtpu_ipv6_udp offsets above */
ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_udp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x60, /* IP 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 34 */
	0x00, 0x4c, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x3c, /* ICE_GTP Header 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
	0x00, 0x08, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* UDP 102 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
827 
/* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV4_IL,		82 },
	{ ICE_TCP_IL,		102 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for the ipv6_gtpu_ipv4_tcp offsets above */
ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
	0x00, 0x44, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 54 */
	0x00, 0x44, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x34, /* ICE_GTP Header 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x28, /* IP 82 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* TCP 102 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
879 
/* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV4_IL,		82 },
	{ ICE_UDP_ILOS,		102 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for the ipv6_gtpu_ipv4_udp offsets above */
ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_udp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
	0x00, 0x38, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 54 */
	0x00, 0x38, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28, /* ICE_GTP Header 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x1c, /* IP 82 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* UDP 102 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
928 
/* Outer IPv6 + Outer UDP + GTP + Inner IPv6 + Inner TCP */
ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV6_IL,		82 },
	{ ICE_TCP_IL,		122 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for the ipv6_gtpu_ipv6_tcp offsets above */
ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
	0x00, 0x58, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 54 */
	0x00, 0x58, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x48, /* ICE_GTP Header 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
	0x00, 0x14, 0x06, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* TCP 122 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
985 
/* Outer IPv6 + Outer UDP + GTP + Inner IPv6 + Inner UDP */
ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV6_IL,		82 },
	{ ICE_UDP_ILOS,		122 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for the ipv6_gtpu_ipv6_udp offsets above */
ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_udp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
	0x00, 0x4c, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 54 */
	0x00, 0x4c, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x3c, /* ICE_GTP Header 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
	0x00, 0x08, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* UDP 122 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
1039 
/* Outer IPv4 + Outer UDP + GTP (no payload match) — offset list ends at
 * ICE_GTP_NO_PAY; the template still carries an inner IPv4 header.
 */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP_NO_PAY,	42 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for the ipv4_gtpu_ipv4 offsets above */
ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
	0x00, 0x00, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28, /* ICE_GTP_NO_PAY 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,
};
1077 
/* Outer IPv6 + Outer UDP + GTP (no payload match) */
ICE_DECLARE_PKT_OFFSETS(ipv6_gtp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP_NO_PAY,	62 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for the ipv6_gtp offsets above.
 * NOTE(review): the IPv6 payload-length bytes (0x00, 0x6c) look larger than
 * the UDP + GTP headers that actually follow — presumably rewritten when the
 * rule is programmed; confirm against the fill-in code.
 */
ICE_DECLARE_PKT_TEMPLATE(ipv6_gtp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
	0x00, 0x00, 0x00, 0x00,

	0x30, 0x00, 0x00, 0x28, /* ICE_GTP_NO_PAY 62 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,
};
1111 
/* Table of all dummy packet profiles. Entries are ordered from the most
 * specific match mask to the least specific; plain TCP (match == 0) is
 * last. A lookup presumably returns the first entry whose flags cover the
 * requested match — NOTE(review): the lookup helper is outside this chunk,
 * confirm there before reordering.
 */
static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
	ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPU | ICE_PKT_OUTER_IPV6 |
				  ICE_PKT_GTP_NOPAY),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6 |
					    ICE_PKT_INNER_IPV6 |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6 |
					    ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6 |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPU | ICE_PKT_GTP_NOPAY),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_INNER_IPV6 |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU),
	ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPC | ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPC),
	ICE_PKT_PROFILE(gre_ipv6_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6 |
				      ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(gre_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(gre_ipv6_udp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(gre_udp, ICE_PKT_TUN_NVGRE),
	ICE_PKT_PROFILE(udp_tun_ipv6_tcp, ICE_PKT_TUN_UDP |
					  ICE_PKT_INNER_IPV6 |
					  ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(udp_tun_tcp, ICE_PKT_TUN_UDP | ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(udp_tun_ipv6_udp, ICE_PKT_TUN_UDP |
					  ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(udp_tun_udp, ICE_PKT_TUN_UDP),
	ICE_PKT_PROFILE(udp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(udp, ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(tcp_ipv6, ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(tcp, 0),
};
1155 
/* Byte sizes of variable-length switch AQ buffers, computed from the
 * trailing flexible-array member of each rule/list structure.
 */
#define ICE_SW_RULE_RX_TX_HDR_SIZE(s, l)	struct_size((s), hdr_data, (l))
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s)	\
	ICE_SW_RULE_RX_TX_HDR_SIZE((s), DUMMY_ETH_HDR_LEN)
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s)	\
	ICE_SW_RULE_RX_TX_HDR_SIZE((s), 0)
#define ICE_SW_RULE_LG_ACT_SIZE(s, n)		struct_size((s), act, (n))
#define ICE_SW_RULE_VSI_LIST_SIZE(s, n)		struct_size((s), vsi, (n))

/* this is a recipe to profile association bitmap */
static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES],
			  ICE_MAX_NUM_PROFILES);

/* this is a profile to recipe association bitmap */
static DECLARE_BITMAP(profile_to_recipe[ICE_MAX_NUM_PROFILES],
			  ICE_MAX_NUM_RECIPES);
1171 
1172 /**
1173  * ice_init_def_sw_recp - initialize the recipe book keeping tables
1174  * @hw: pointer to the HW struct
1175  *
1176  * Allocate memory for the entire recipe table and initialize the structures/
1177  * entries corresponding to basic recipes.
1178  */
1179 int ice_init_def_sw_recp(struct ice_hw *hw)
1180 {
1181 	struct ice_sw_recipe *recps;
1182 	u8 i;
1183 
1184 	recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
1185 			     sizeof(*recps), GFP_KERNEL);
1186 	if (!recps)
1187 		return -ENOMEM;
1188 
1189 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
1190 		recps[i].root_rid = i;
1191 		INIT_LIST_HEAD(&recps[i].filt_rules);
1192 		INIT_LIST_HEAD(&recps[i].filt_replay_rules);
1193 		INIT_LIST_HEAD(&recps[i].rg_list);
1194 		mutex_init(&recps[i].filt_rule_lock);
1195 	}
1196 
1197 	hw->switch_info->recp_list = recps;
1198 
1199 	return 0;
1200 }
1201 
1202 /**
1203  * ice_aq_get_sw_cfg - get switch configuration
1204  * @hw: pointer to the hardware structure
1205  * @buf: pointer to the result buffer
1206  * @buf_size: length of the buffer available for response
1207  * @req_desc: pointer to requested descriptor
1208  * @num_elems: pointer to number of elements
1209  * @cd: pointer to command details structure or NULL
1210  *
1211  * Get switch configuration (0x0200) to be placed in buf.
1212  * This admin command returns information such as initial VSI/port number
1213  * and switch ID it belongs to.
1214  *
1215  * NOTE: *req_desc is both an input/output parameter.
1216  * The caller of this function first calls this function with *request_desc set
1217  * to 0. If the response from f/w has *req_desc set to 0, all the switch
1218  * configuration information has been returned; if non-zero (meaning not all
1219  * the information was returned), the caller should call this function again
1220  * with *req_desc set to the previous value returned by f/w to get the
1221  * next block of switch configuration information.
1222  *
1223  * *num_elems is output only parameter. This reflects the number of elements
1224  * in response buffer. The caller of this function to use *num_elems while
1225  * parsing the response buffer.
1226  */
1227 static int
1228 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
1229 		  u16 buf_size, u16 *req_desc, u16 *num_elems,
1230 		  struct ice_sq_cd *cd)
1231 {
1232 	struct ice_aqc_get_sw_cfg *cmd;
1233 	struct ice_aq_desc desc;
1234 	int status;
1235 
1236 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
1237 	cmd = &desc.params.get_sw_conf;
1238 	cmd->element = cpu_to_le16(*req_desc);
1239 
1240 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1241 	if (!status) {
1242 		*req_desc = le16_to_cpu(cmd->element);
1243 		*num_elems = le16_to_cpu(cmd->num_elems);
1244 	}
1245 
1246 	return status;
1247 }
1248 
1249 /**
1250  * ice_aq_add_vsi
1251  * @hw: pointer to the HW struct
1252  * @vsi_ctx: pointer to a VSI context struct
1253  * @cd: pointer to command details structure or NULL
1254  *
1255  * Add a VSI context to the hardware (0x0210)
1256  */
1257 static int
1258 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1259 	       struct ice_sq_cd *cd)
1260 {
1261 	struct ice_aqc_add_update_free_vsi_resp *res;
1262 	struct ice_aqc_add_get_update_free_vsi *cmd;
1263 	struct ice_aq_desc desc;
1264 	int status;
1265 
1266 	cmd = &desc.params.vsi_cmd;
1267 	res = &desc.params.add_update_free_vsi_res;
1268 
1269 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
1270 
1271 	if (!vsi_ctx->alloc_from_pool)
1272 		cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
1273 					   ICE_AQ_VSI_IS_VALID);
1274 	cmd->vf_id = vsi_ctx->vf_num;
1275 
1276 	cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
1277 
1278 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1279 
1280 	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1281 				 sizeof(vsi_ctx->info), cd);
1282 
1283 	if (!status) {
1284 		vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M;
1285 		vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used);
1286 		vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free);
1287 	}
1288 
1289 	return status;
1290 }
1291 
1292 /**
1293  * ice_aq_free_vsi
1294  * @hw: pointer to the HW struct
1295  * @vsi_ctx: pointer to a VSI context struct
1296  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1297  * @cd: pointer to command details structure or NULL
1298  *
1299  * Free VSI context info from hardware (0x0213)
1300  */
1301 static int
1302 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1303 		bool keep_vsi_alloc, struct ice_sq_cd *cd)
1304 {
1305 	struct ice_aqc_add_update_free_vsi_resp *resp;
1306 	struct ice_aqc_add_get_update_free_vsi *cmd;
1307 	struct ice_aq_desc desc;
1308 	int status;
1309 
1310 	cmd = &desc.params.vsi_cmd;
1311 	resp = &desc.params.add_update_free_vsi_res;
1312 
1313 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
1314 
1315 	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1316 	if (keep_vsi_alloc)
1317 		cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);
1318 
1319 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1320 	if (!status) {
1321 		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
1322 		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
1323 	}
1324 
1325 	return status;
1326 }
1327 
1328 /**
1329  * ice_aq_update_vsi
1330  * @hw: pointer to the HW struct
1331  * @vsi_ctx: pointer to a VSI context struct
1332  * @cd: pointer to command details structure or NULL
1333  *
1334  * Update VSI context in the hardware (0x0211)
1335  */
1336 static int
1337 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1338 		  struct ice_sq_cd *cd)
1339 {
1340 	struct ice_aqc_add_update_free_vsi_resp *resp;
1341 	struct ice_aqc_add_get_update_free_vsi *cmd;
1342 	struct ice_aq_desc desc;
1343 	int status;
1344 
1345 	cmd = &desc.params.vsi_cmd;
1346 	resp = &desc.params.add_update_free_vsi_res;
1347 
1348 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
1349 
1350 	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1351 
1352 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1353 
1354 	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1355 				 sizeof(vsi_ctx->info), cd);
1356 
1357 	if (!status) {
1358 		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
1359 		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
1360 	}
1361 
1362 	return status;
1363 }
1364 
1365 /**
1366  * ice_is_vsi_valid - check whether the VSI is valid or not
1367  * @hw: pointer to the HW struct
1368  * @vsi_handle: VSI handle
1369  *
1370  * check whether the VSI is valid or not
1371  */
1372 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1373 {
1374 	return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1375 }
1376 
1377 /**
1378  * ice_get_hw_vsi_num - return the HW VSI number
1379  * @hw: pointer to the HW struct
1380  * @vsi_handle: VSI handle
1381  *
1382  * return the HW VSI number
1383  * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1384  */
1385 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1386 {
1387 	return hw->vsi_ctx[vsi_handle]->vsi_num;
1388 }
1389 
1390 /**
1391  * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1392  * @hw: pointer to the HW struct
1393  * @vsi_handle: VSI handle
1394  *
1395  * return the VSI context entry for a given VSI handle
1396  */
1397 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1398 {
1399 	return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1400 }
1401 
1402 /**
1403  * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1404  * @hw: pointer to the HW struct
1405  * @vsi_handle: VSI handle
1406  * @vsi: VSI context pointer
1407  *
1408  * save the VSI context entry for a given VSI handle
1409  */
1410 static void
1411 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1412 {
1413 	hw->vsi_ctx[vsi_handle] = vsi;
1414 }
1415 
1416 /**
1417  * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1418  * @hw: pointer to the HW struct
1419  * @vsi_handle: VSI handle
1420  */
1421 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1422 {
1423 	struct ice_vsi_ctx *vsi;
1424 	u8 i;
1425 
1426 	vsi = ice_get_vsi_ctx(hw, vsi_handle);
1427 	if (!vsi)
1428 		return;
1429 	ice_for_each_traffic_class(i) {
1430 		if (vsi->lan_q_ctx[i]) {
1431 			devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
1432 			vsi->lan_q_ctx[i] = NULL;
1433 		}
1434 		if (vsi->rdma_q_ctx[i]) {
1435 			devm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]);
1436 			vsi->rdma_q_ctx[i] = NULL;
1437 		}
1438 	}
1439 }
1440 
1441 /**
1442  * ice_clear_vsi_ctx - clear the VSI context entry
1443  * @hw: pointer to the HW struct
1444  * @vsi_handle: VSI handle
1445  *
1446  * clear the VSI context entry
1447  */
1448 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1449 {
1450 	struct ice_vsi_ctx *vsi;
1451 
1452 	vsi = ice_get_vsi_ctx(hw, vsi_handle);
1453 	if (vsi) {
1454 		ice_clear_vsi_q_ctx(hw, vsi_handle);
1455 		devm_kfree(ice_hw_to_dev(hw), vsi);
1456 		hw->vsi_ctx[vsi_handle] = NULL;
1457 	}
1458 }
1459 
1460 /**
1461  * ice_clear_all_vsi_ctx - clear all the VSI context entries
1462  * @hw: pointer to the HW struct
1463  */
1464 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1465 {
1466 	u16 i;
1467 
1468 	for (i = 0; i < ICE_MAX_VSI; i++)
1469 		ice_clear_vsi_ctx(hw, i);
1470 }
1471 
1472 /**
1473  * ice_add_vsi - add VSI context to the hardware and VSI handle list
1474  * @hw: pointer to the HW struct
1475  * @vsi_handle: unique VSI handle provided by drivers
1476  * @vsi_ctx: pointer to a VSI context struct
1477  * @cd: pointer to command details structure or NULL
1478  *
1479  * Add a VSI context to the hardware also add it into the VSI handle list.
1480  * If this function gets called after reset for existing VSIs then update
1481  * with the new HW VSI number in the corresponding VSI handle list entry.
1482  */
1483 int
1484 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1485 	    struct ice_sq_cd *cd)
1486 {
1487 	struct ice_vsi_ctx *tmp_vsi_ctx;
1488 	int status;
1489 
1490 	if (vsi_handle >= ICE_MAX_VSI)
1491 		return -EINVAL;
1492 	status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1493 	if (status)
1494 		return status;
1495 	tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1496 	if (!tmp_vsi_ctx) {
1497 		/* Create a new VSI context */
1498 		tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
1499 					   sizeof(*tmp_vsi_ctx), GFP_KERNEL);
1500 		if (!tmp_vsi_ctx) {
1501 			ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1502 			return -ENOMEM;
1503 		}
1504 		*tmp_vsi_ctx = *vsi_ctx;
1505 		ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1506 	} else {
1507 		/* update with new HW VSI num */
1508 		tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1509 	}
1510 
1511 	return 0;
1512 }
1513 
1514 /**
1515  * ice_free_vsi- free VSI context from hardware and VSI handle list
1516  * @hw: pointer to the HW struct
1517  * @vsi_handle: unique VSI handle
1518  * @vsi_ctx: pointer to a VSI context struct
1519  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1520  * @cd: pointer to command details structure or NULL
1521  *
1522  * Free VSI context info from hardware as well as from VSI handle list
1523  */
1524 int
1525 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1526 	     bool keep_vsi_alloc, struct ice_sq_cd *cd)
1527 {
1528 	int status;
1529 
1530 	if (!ice_is_vsi_valid(hw, vsi_handle))
1531 		return -EINVAL;
1532 	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1533 	status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1534 	if (!status)
1535 		ice_clear_vsi_ctx(hw, vsi_handle);
1536 	return status;
1537 }
1538 
1539 /**
1540  * ice_update_vsi
1541  * @hw: pointer to the HW struct
1542  * @vsi_handle: unique VSI handle
1543  * @vsi_ctx: pointer to a VSI context struct
1544  * @cd: pointer to command details structure or NULL
1545  *
1546  * Update VSI context in the hardware
1547  */
1548 int
1549 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1550 	       struct ice_sq_cd *cd)
1551 {
1552 	if (!ice_is_vsi_valid(hw, vsi_handle))
1553 		return -EINVAL;
1554 	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1555 	return ice_aq_update_vsi(hw, vsi_ctx, cd);
1556 }
1557 
1558 /**
1559  * ice_cfg_rdma_fltr - enable/disable RDMA filtering on VSI
1560  * @hw: pointer to HW struct
1561  * @vsi_handle: VSI SW index
1562  * @enable: boolean for enable/disable
1563  */
1564 int
1565 ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
1566 {
1567 	struct ice_vsi_ctx *ctx;
1568 
1569 	ctx = ice_get_vsi_ctx(hw, vsi_handle);
1570 	if (!ctx)
1571 		return -EIO;
1572 
1573 	if (enable)
1574 		ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
1575 	else
1576 		ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
1577 
1578 	return ice_update_vsi(hw, vsi_handle, ctx, NULL);
1579 }
1580 
1581 /**
1582  * ice_aq_alloc_free_vsi_list
1583  * @hw: pointer to the HW struct
1584  * @vsi_list_id: VSI list ID returned or used for lookup
1585  * @lkup_type: switch rule filter lookup type
1586  * @opc: switch rules population command type - pass in the command opcode
1587  *
1588  * allocates or free a VSI list resource
1589  */
1590 static int
1591 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1592 			   enum ice_sw_lkup_type lkup_type,
1593 			   enum ice_adminq_opc opc)
1594 {
1595 	struct ice_aqc_alloc_free_res_elem *sw_buf;
1596 	struct ice_aqc_res_elem *vsi_ele;
1597 	u16 buf_len;
1598 	int status;
1599 
1600 	buf_len = struct_size(sw_buf, elem, 1);
1601 	sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
1602 	if (!sw_buf)
1603 		return -ENOMEM;
1604 	sw_buf->num_elems = cpu_to_le16(1);
1605 
1606 	if (lkup_type == ICE_SW_LKUP_MAC ||
1607 	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1608 	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1609 	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1610 	    lkup_type == ICE_SW_LKUP_PROMISC ||
1611 	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN) {
1612 		sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1613 	} else if (lkup_type == ICE_SW_LKUP_VLAN) {
1614 		sw_buf->res_type =
1615 			cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1616 	} else {
1617 		status = -EINVAL;
1618 		goto ice_aq_alloc_free_vsi_list_exit;
1619 	}
1620 
1621 	if (opc == ice_aqc_opc_free_res)
1622 		sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);
1623 
1624 	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1625 	if (status)
1626 		goto ice_aq_alloc_free_vsi_list_exit;
1627 
1628 	if (opc == ice_aqc_opc_alloc_res) {
1629 		vsi_ele = &sw_buf->elem[0];
1630 		*vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
1631 	}
1632 
1633 ice_aq_alloc_free_vsi_list_exit:
1634 	devm_kfree(ice_hw_to_dev(hw), sw_buf);
1635 	return status;
1636 }
1637 
1638 /**
1639  * ice_aq_sw_rules - add/update/remove switch rules
1640  * @hw: pointer to the HW struct
1641  * @rule_list: pointer to switch rule population list
1642  * @rule_list_sz: total size of the rule list in bytes
1643  * @num_rules: number of switch rules in the rule_list
1644  * @opc: switch rules population command type - pass in the command opcode
1645  * @cd: pointer to command details structure or NULL
1646  *
1647  * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1648  */
1649 int
1650 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1651 		u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1652 {
1653 	struct ice_aq_desc desc;
1654 	int status;
1655 
1656 	if (opc != ice_aqc_opc_add_sw_rules &&
1657 	    opc != ice_aqc_opc_update_sw_rules &&
1658 	    opc != ice_aqc_opc_remove_sw_rules)
1659 		return -EINVAL;
1660 
1661 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
1662 
1663 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1664 	desc.params.sw_rules.num_rules_fltr_entry_index =
1665 		cpu_to_le16(num_rules);
1666 	status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1667 	if (opc != ice_aqc_opc_add_sw_rules &&
1668 	    hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
1669 		status = -ENOENT;
1670 
1671 	return status;
1672 }
1673 
1674 /**
1675  * ice_aq_add_recipe - add switch recipe
1676  * @hw: pointer to the HW struct
1677  * @s_recipe_list: pointer to switch rule population list
1678  * @num_recipes: number of switch recipes in the list
1679  * @cd: pointer to command details structure or NULL
1680  *
1681  * Add(0x0290)
1682  */
1683 static int
1684 ice_aq_add_recipe(struct ice_hw *hw,
1685 		  struct ice_aqc_recipe_data_elem *s_recipe_list,
1686 		  u16 num_recipes, struct ice_sq_cd *cd)
1687 {
1688 	struct ice_aqc_add_get_recipe *cmd;
1689 	struct ice_aq_desc desc;
1690 	u16 buf_size;
1691 
1692 	cmd = &desc.params.add_get_recipe;
1693 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1694 
1695 	cmd->num_sub_recipes = cpu_to_le16(num_recipes);
1696 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1697 
1698 	buf_size = num_recipes * sizeof(*s_recipe_list);
1699 
1700 	return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1701 }
1702 
1703 /**
1704  * ice_aq_get_recipe - get switch recipe
1705  * @hw: pointer to the HW struct
1706  * @s_recipe_list: pointer to switch rule population list
1707  * @num_recipes: pointer to the number of recipes (input and output)
1708  * @recipe_root: root recipe number of recipe(s) to retrieve
1709  * @cd: pointer to command details structure or NULL
1710  *
1711  * Get(0x0292)
1712  *
1713  * On input, *num_recipes should equal the number of entries in s_recipe_list.
1714  * On output, *num_recipes will equal the number of entries returned in
1715  * s_recipe_list.
1716  *
1717  * The caller must supply enough space in s_recipe_list to hold all possible
1718  * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1719  */
1720 static int
1721 ice_aq_get_recipe(struct ice_hw *hw,
1722 		  struct ice_aqc_recipe_data_elem *s_recipe_list,
1723 		  u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1724 {
1725 	struct ice_aqc_add_get_recipe *cmd;
1726 	struct ice_aq_desc desc;
1727 	u16 buf_size;
1728 	int status;
1729 
1730 	if (*num_recipes != ICE_MAX_NUM_RECIPES)
1731 		return -EINVAL;
1732 
1733 	cmd = &desc.params.add_get_recipe;
1734 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1735 
1736 	cmd->return_index = cpu_to_le16(recipe_root);
1737 	cmd->num_sub_recipes = 0;
1738 
1739 	buf_size = *num_recipes * sizeof(*s_recipe_list);
1740 
1741 	status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1742 	*num_recipes = le16_to_cpu(cmd->num_sub_recipes);
1743 
1744 	return status;
1745 }
1746 
1747 /**
1748  * ice_update_recipe_lkup_idx - update a default recipe based on the lkup_idx
1749  * @hw: pointer to the HW struct
1750  * @params: parameters used to update the default recipe
1751  *
1752  * This function only supports updating default recipes and it only supports
1753  * updating a single recipe based on the lkup_idx at a time.
1754  *
1755  * This is done as a read-modify-write operation. First, get the current recipe
1756  * contents based on the recipe's ID. Then modify the field vector index and
1757  * mask if it's valid at the lkup_idx. Finally, use the add recipe AQ to update
1758  * the pre-existing recipe with the modifications.
1759  */
1760 int
1761 ice_update_recipe_lkup_idx(struct ice_hw *hw,
1762 			   struct ice_update_recipe_lkup_idx_params *params)
1763 {
1764 	struct ice_aqc_recipe_data_elem *rcp_list;
1765 	u16 num_recps = ICE_MAX_NUM_RECIPES;
1766 	int status;
1767 
1768 	rcp_list = kcalloc(num_recps, sizeof(*rcp_list), GFP_KERNEL);
1769 	if (!rcp_list)
1770 		return -ENOMEM;
1771 
1772 	/* read current recipe list from firmware */
1773 	rcp_list->recipe_indx = params->rid;
1774 	status = ice_aq_get_recipe(hw, rcp_list, &num_recps, params->rid, NULL);
1775 	if (status) {
1776 		ice_debug(hw, ICE_DBG_SW, "Failed to get recipe %d, status %d\n",
1777 			  params->rid, status);
1778 		goto error_out;
1779 	}
1780 
1781 	/* only modify existing recipe's lkup_idx and mask if valid, while
1782 	 * leaving all other fields the same, then update the recipe firmware
1783 	 */
1784 	rcp_list->content.lkup_indx[params->lkup_idx] = params->fv_idx;
1785 	if (params->mask_valid)
1786 		rcp_list->content.mask[params->lkup_idx] =
1787 			cpu_to_le16(params->mask);
1788 
1789 	if (params->ignore_valid)
1790 		rcp_list->content.lkup_indx[params->lkup_idx] |=
1791 			ICE_AQ_RECIPE_LKUP_IGNORE;
1792 
1793 	status = ice_aq_add_recipe(hw, &rcp_list[0], 1, NULL);
1794 	if (status)
1795 		ice_debug(hw, ICE_DBG_SW, "Failed to update recipe %d lkup_idx %d fv_idx %d mask %d mask_valid %s, status %d\n",
1796 			  params->rid, params->lkup_idx, params->fv_idx,
1797 			  params->mask, params->mask_valid ? "true" : "false",
1798 			  status);
1799 
1800 error_out:
1801 	kfree(rcp_list);
1802 	return status;
1803 }
1804 
1805 /**
1806  * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1807  * @hw: pointer to the HW struct
1808  * @profile_id: package profile ID to associate the recipe with
1809  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1810  * @cd: pointer to command details structure or NULL
1811  * Recipe to profile association (0x0291)
1812  */
1813 static int
1814 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1815 			     struct ice_sq_cd *cd)
1816 {
1817 	struct ice_aqc_recipe_to_profile *cmd;
1818 	struct ice_aq_desc desc;
1819 
1820 	cmd = &desc.params.recipe_to_profile;
1821 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
1822 	cmd->profile_id = cpu_to_le16(profile_id);
1823 	/* Set the recipe ID bit in the bitmask to let the device know which
1824 	 * profile we are associating the recipe to
1825 	 */
1826 	memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc));
1827 
1828 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1829 }
1830 
1831 /**
1832  * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1833  * @hw: pointer to the HW struct
1834  * @profile_id: package profile ID to associate the recipe with
1835  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1836  * @cd: pointer to command details structure or NULL
1837  * Associate profile ID with given recipe (0x0293)
1838  */
1839 static int
1840 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1841 			     struct ice_sq_cd *cd)
1842 {
1843 	struct ice_aqc_recipe_to_profile *cmd;
1844 	struct ice_aq_desc desc;
1845 	int status;
1846 
1847 	cmd = &desc.params.recipe_to_profile;
1848 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
1849 	cmd->profile_id = cpu_to_le16(profile_id);
1850 
1851 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1852 	if (!status)
1853 		memcpy(r_bitmap, cmd->recipe_assoc, sizeof(cmd->recipe_assoc));
1854 
1855 	return status;
1856 }
1857 
1858 /**
1859  * ice_alloc_recipe - add recipe resource
1860  * @hw: pointer to the hardware structure
1861  * @rid: recipe ID returned as response to AQ call
1862  */
1863 static int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
1864 {
1865 	struct ice_aqc_alloc_free_res_elem *sw_buf;
1866 	u16 buf_len;
1867 	int status;
1868 
1869 	buf_len = struct_size(sw_buf, elem, 1);
1870 	sw_buf = kzalloc(buf_len, GFP_KERNEL);
1871 	if (!sw_buf)
1872 		return -ENOMEM;
1873 
1874 	sw_buf->num_elems = cpu_to_le16(1);
1875 	sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
1876 					ICE_AQC_RES_TYPE_S) |
1877 					ICE_AQC_RES_TYPE_FLAG_SHARED);
1878 	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1879 				       ice_aqc_opc_alloc_res, NULL);
1880 	if (!status)
1881 		*rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp);
1882 	kfree(sw_buf);
1883 
1884 	return status;
1885 }
1886 
1887 /**
1888  * ice_get_recp_to_prof_map - updates recipe to profile mapping
1889  * @hw: pointer to hardware structure
1890  *
1891  * This function is used to populate recipe_to_profile matrix where index to
1892  * this array is the recipe ID and the element is the mapping of which profiles
1893  * is this recipe mapped to.
1894  */
1895 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
1896 {
1897 	DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
1898 	u16 i;
1899 
1900 	for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
1901 		u16 j;
1902 
1903 		bitmap_zero(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
1904 		bitmap_zero(r_bitmap, ICE_MAX_NUM_RECIPES);
1905 		if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
1906 			continue;
1907 		bitmap_copy(profile_to_recipe[i], r_bitmap,
1908 			    ICE_MAX_NUM_RECIPES);
1909 		for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
1910 			set_bit(i, recipe_to_profile[j]);
1911 	}
1912 }
1913 
1914 /**
1915  * ice_collect_result_idx - copy result index values
1916  * @buf: buffer that contains the result index
1917  * @recp: the recipe struct to copy data into
1918  */
1919 static void
1920 ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
1921 		       struct ice_sw_recipe *recp)
1922 {
1923 	if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1924 		set_bit(buf->content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
1925 			recp->res_idxs);
1926 }
1927 
1928 /**
1929  * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
1930  * @hw: pointer to hardware structure
1931  * @recps: struct that we need to populate
1932  * @rid: recipe ID that we are populating
1933  * @refresh_required: true if we should get recipe to profile mapping from FW
1934  *
1935  * This function is used to populate all the necessary entries into our
1936  * bookkeeping so that we have a current list of all the recipes that are
1937  * programmed in the firmware.
1938  */
1939 static int
1940 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
1941 		    bool *refresh_required)
1942 {
1943 	DECLARE_BITMAP(result_bm, ICE_MAX_FV_WORDS);
1944 	struct ice_aqc_recipe_data_elem *tmp;
1945 	u16 num_recps = ICE_MAX_NUM_RECIPES;
1946 	struct ice_prot_lkup_ext *lkup_exts;
1947 	u8 fv_word_idx = 0;
1948 	u16 sub_recps;
1949 	int status;
1950 
1951 	bitmap_zero(result_bm, ICE_MAX_FV_WORDS);
1952 
1953 	/* we need a buffer big enough to accommodate all the recipes */
1954 	tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
1955 	if (!tmp)
1956 		return -ENOMEM;
1957 
1958 	tmp[0].recipe_indx = rid;
1959 	status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
1960 	/* non-zero status meaning recipe doesn't exist */
1961 	if (status)
1962 		goto err_unroll;
1963 
1964 	/* Get recipe to profile map so that we can get the fv from lkups that
1965 	 * we read for a recipe from FW. Since we want to minimize the number of
1966 	 * times we make this FW call, just make one call and cache the copy
1967 	 * until a new recipe is added. This operation is only required the
1968 	 * first time to get the changes from FW. Then to search existing
1969 	 * entries we don't need to update the cache again until another recipe
1970 	 * gets added.
1971 	 */
1972 	if (*refresh_required) {
1973 		ice_get_recp_to_prof_map(hw);
1974 		*refresh_required = false;
1975 	}
1976 
1977 	/* Start populating all the entries for recps[rid] based on lkups from
1978 	 * firmware. Note that we are only creating the root recipe in our
1979 	 * database.
1980 	 */
1981 	lkup_exts = &recps[rid].lkup_exts;
1982 
1983 	for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
1984 		struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
1985 		struct ice_recp_grp_entry *rg_entry;
1986 		u8 i, prof, idx, prot = 0;
1987 		bool is_root;
1988 		u16 off = 0;
1989 
1990 		rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry),
1991 					GFP_KERNEL);
1992 		if (!rg_entry) {
1993 			status = -ENOMEM;
1994 			goto err_unroll;
1995 		}
1996 
1997 		idx = root_bufs.recipe_indx;
1998 		is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
1999 
2000 		/* Mark all result indices in this chain */
2001 		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
2002 			set_bit(root_bufs.content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
2003 				result_bm);
2004 
2005 		/* get the first profile that is associated with rid */
2006 		prof = find_first_bit(recipe_to_profile[idx],
2007 				      ICE_MAX_NUM_PROFILES);
2008 		for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
2009 			u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
2010 
2011 			rg_entry->fv_idx[i] = lkup_indx;
2012 			rg_entry->fv_mask[i] =
2013 				le16_to_cpu(root_bufs.content.mask[i + 1]);
2014 
2015 			/* If the recipe is a chained recipe then all its
2016 			 * child recipe's result will have a result index.
2017 			 * To fill fv_words we should not use those result
2018 			 * index, we only need the protocol ids and offsets.
2019 			 * We will skip all the fv_idx which stores result
2020 			 * index in them. We also need to skip any fv_idx which
2021 			 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
2022 			 * valid offset value.
2023 			 */
2024 			if (test_bit(rg_entry->fv_idx[i], hw->switch_info->prof_res_bm[prof]) ||
2025 			    rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
2026 			    rg_entry->fv_idx[i] == 0)
2027 				continue;
2028 
2029 			ice_find_prot_off(hw, ICE_BLK_SW, prof,
2030 					  rg_entry->fv_idx[i], &prot, &off);
2031 			lkup_exts->fv_words[fv_word_idx].prot_id = prot;
2032 			lkup_exts->fv_words[fv_word_idx].off = off;
2033 			lkup_exts->field_mask[fv_word_idx] =
2034 				rg_entry->fv_mask[i];
2035 			fv_word_idx++;
2036 		}
2037 		/* populate rg_list with the data from the child entry of this
2038 		 * recipe
2039 		 */
2040 		list_add(&rg_entry->l_entry, &recps[rid].rg_list);
2041 
2042 		/* Propagate some data to the recipe database */
2043 		recps[idx].is_root = !!is_root;
2044 		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2045 		bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
2046 		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
2047 			recps[idx].chain_idx = root_bufs.content.result_indx &
2048 				~ICE_AQ_RECIPE_RESULT_EN;
2049 			set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
2050 		} else {
2051 			recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
2052 		}
2053 
2054 		if (!is_root)
2055 			continue;
2056 
2057 		/* Only do the following for root recipes entries */
2058 		memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
2059 		       sizeof(recps[idx].r_bitmap));
2060 		recps[idx].root_rid = root_bufs.content.rid &
2061 			~ICE_AQ_RECIPE_ID_IS_ROOT;
2062 		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2063 	}
2064 
2065 	/* Complete initialization of the root recipe entry */
2066 	lkup_exts->n_val_words = fv_word_idx;
2067 	recps[rid].big_recp = (num_recps > 1);
2068 	recps[rid].n_grp_count = (u8)num_recps;
2069 	recps[rid].root_buf = devm_kmemdup(ice_hw_to_dev(hw), tmp,
2070 					   recps[rid].n_grp_count * sizeof(*recps[rid].root_buf),
2071 					   GFP_KERNEL);
2072 	if (!recps[rid].root_buf) {
2073 		status = -ENOMEM;
2074 		goto err_unroll;
2075 	}
2076 
2077 	/* Copy result indexes */
2078 	bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
2079 	recps[rid].recp_created = true;
2080 
2081 err_unroll:
2082 	kfree(tmp);
2083 	return status;
2084 }
2085 
2086 /* ice_init_port_info - Initialize port_info with switch configuration data
2087  * @pi: pointer to port_info
2088  * @vsi_port_num: VSI number or port number
2089  * @type: Type of switch element (port or VSI)
2090  * @swid: switch ID of the switch the element is attached to
2091  * @pf_vf_num: PF or VF number
2092  * @is_vf: true if the element is a VF, false otherwise
2093  */
2094 static void
2095 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
2096 		   u16 swid, u16 pf_vf_num, bool is_vf)
2097 {
2098 	switch (type) {
2099 	case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2100 		pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
2101 		pi->sw_id = swid;
2102 		pi->pf_vf_num = pf_vf_num;
2103 		pi->is_vf = is_vf;
2104 		pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
2105 		pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
2106 		break;
2107 	default:
2108 		ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
2109 		break;
2110 	}
2111 }
2112 
2113 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
2114  * @hw: pointer to the hardware structure
2115  */
2116 int ice_get_initial_sw_cfg(struct ice_hw *hw)
2117 {
2118 	struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
2119 	u16 req_desc = 0;
2120 	u16 num_elems;
2121 	int status;
2122 	u16 i;
2123 
2124 	rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN,
2125 			    GFP_KERNEL);
2126 
2127 	if (!rbuf)
2128 		return -ENOMEM;
2129 
2130 	/* Multiple calls to ice_aq_get_sw_cfg may be required
2131 	 * to get all the switch configuration information. The need
2132 	 * for additional calls is indicated by ice_aq_get_sw_cfg
2133 	 * writing a non-zero value in req_desc
2134 	 */
2135 	do {
2136 		struct ice_aqc_get_sw_cfg_resp_elem *ele;
2137 
2138 		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
2139 					   &req_desc, &num_elems, NULL);
2140 
2141 		if (status)
2142 			break;
2143 
2144 		for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
2145 			u16 pf_vf_num, swid, vsi_port_num;
2146 			bool is_vf = false;
2147 			u8 res_type;
2148 
2149 			vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
2150 				ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
2151 
2152 			pf_vf_num = le16_to_cpu(ele->pf_vf_num) &
2153 				ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
2154 
2155 			swid = le16_to_cpu(ele->swid);
2156 
2157 			if (le16_to_cpu(ele->pf_vf_num) &
2158 			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
2159 				is_vf = true;
2160 
2161 			res_type = (u8)(le16_to_cpu(ele->vsi_port_num) >>
2162 					ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
2163 
2164 			if (res_type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
2165 				/* FW VSI is not needed. Just continue. */
2166 				continue;
2167 			}
2168 
2169 			ice_init_port_info(hw->port_info, vsi_port_num,
2170 					   res_type, swid, pf_vf_num, is_vf);
2171 		}
2172 	} while (req_desc && !status);
2173 
2174 	devm_kfree(ice_hw_to_dev(hw), rbuf);
2175 	return status;
2176 }
2177 
2178 /**
2179  * ice_fill_sw_info - Helper function to populate lb_en and lan_en
2180  * @hw: pointer to the hardware structure
2181  * @fi: filter info structure to fill/update
2182  *
2183  * This helper function populates the lb_en and lan_en elements of the provided
2184  * ice_fltr_info struct using the switch's type and characteristics of the
2185  * switch rule being configured.
2186  */
2187 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
2188 {
2189 	fi->lb_en = false;
2190 	fi->lan_en = false;
2191 	if ((fi->flag & ICE_FLTR_TX) &&
2192 	    (fi->fltr_act == ICE_FWD_TO_VSI ||
2193 	     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2194 	     fi->fltr_act == ICE_FWD_TO_Q ||
2195 	     fi->fltr_act == ICE_FWD_TO_QGRP)) {
2196 		/* Setting LB for prune actions will result in replicated
2197 		 * packets to the internal switch that will be dropped.
2198 		 */
2199 		if (fi->lkup_type != ICE_SW_LKUP_VLAN)
2200 			fi->lb_en = true;
2201 
2202 		/* Set lan_en to TRUE if
2203 		 * 1. The switch is a VEB AND
2204 		 * 2
2205 		 * 2.1 The lookup is a directional lookup like ethertype,
2206 		 * promiscuous, ethertype-MAC, promiscuous-VLAN
2207 		 * and default-port OR
2208 		 * 2.2 The lookup is VLAN, OR
2209 		 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
2210 		 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
2211 		 *
2212 		 * OR
2213 		 *
2214 		 * The switch is a VEPA.
2215 		 *
2216 		 * In all other cases, the LAN enable has to be set to false.
2217 		 */
2218 		if (hw->evb_veb) {
2219 			if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2220 			    fi->lkup_type == ICE_SW_LKUP_PROMISC ||
2221 			    fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2222 			    fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2223 			    fi->lkup_type == ICE_SW_LKUP_DFLT ||
2224 			    fi->lkup_type == ICE_SW_LKUP_VLAN ||
2225 			    (fi->lkup_type == ICE_SW_LKUP_MAC &&
2226 			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
2227 			    (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
2228 			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)))
2229 				fi->lan_en = true;
2230 		} else {
2231 			fi->lan_en = true;
2232 		}
2233 	}
2234 }
2235 
2236 /**
2237  * ice_fill_sw_rule - Helper function to fill switch rule structure
2238  * @hw: pointer to the hardware structure
2239  * @f_info: entry containing packet forwarding information
2240  * @s_rule: switch rule structure to be filled in based on mac_entry
2241  * @opc: switch rules population command type - pass in the command opcode
2242  */
2243 static void
2244 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
2245 		 struct ice_sw_rule_lkup_rx_tx *s_rule,
2246 		 enum ice_adminq_opc opc)
2247 {
2248 	u16 vlan_id = ICE_MAX_VLAN_ID + 1;
2249 	u16 vlan_tpid = ETH_P_8021Q;
2250 	void *daddr = NULL;
2251 	u16 eth_hdr_sz;
2252 	u8 *eth_hdr;
2253 	u32 act = 0;
2254 	__be16 *off;
2255 	u8 q_rgn;
2256 
2257 	if (opc == ice_aqc_opc_remove_sw_rules) {
2258 		s_rule->act = 0;
2259 		s_rule->index = cpu_to_le16(f_info->fltr_rule_id);
2260 		s_rule->hdr_len = 0;
2261 		return;
2262 	}
2263 
2264 	eth_hdr_sz = sizeof(dummy_eth_header);
2265 	eth_hdr = s_rule->hdr_data;
2266 
2267 	/* initialize the ether header with a dummy header */
2268 	memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);
2269 	ice_fill_sw_info(hw, f_info);
2270 
2271 	switch (f_info->fltr_act) {
2272 	case ICE_FWD_TO_VSI:
2273 		act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
2274 			ICE_SINGLE_ACT_VSI_ID_M;
2275 		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2276 			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2277 				ICE_SINGLE_ACT_VALID_BIT;
2278 		break;
2279 	case ICE_FWD_TO_VSI_LIST:
2280 		act |= ICE_SINGLE_ACT_VSI_LIST;
2281 		act |= (f_info->fwd_id.vsi_list_id <<
2282 			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
2283 			ICE_SINGLE_ACT_VSI_LIST_ID_M;
2284 		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2285 			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2286 				ICE_SINGLE_ACT_VALID_BIT;
2287 		break;
2288 	case ICE_FWD_TO_Q:
2289 		act |= ICE_SINGLE_ACT_TO_Q;
2290 		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2291 			ICE_SINGLE_ACT_Q_INDEX_M;
2292 		break;
2293 	case ICE_DROP_PACKET:
2294 		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
2295 			ICE_SINGLE_ACT_VALID_BIT;
2296 		break;
2297 	case ICE_FWD_TO_QGRP:
2298 		q_rgn = f_info->qgrp_size > 0 ?
2299 			(u8)ilog2(f_info->qgrp_size) : 0;
2300 		act |= ICE_SINGLE_ACT_TO_Q;
2301 		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2302 			ICE_SINGLE_ACT_Q_INDEX_M;
2303 		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
2304 			ICE_SINGLE_ACT_Q_REGION_M;
2305 		break;
2306 	default:
2307 		return;
2308 	}
2309 
2310 	if (f_info->lb_en)
2311 		act |= ICE_SINGLE_ACT_LB_ENABLE;
2312 	if (f_info->lan_en)
2313 		act |= ICE_SINGLE_ACT_LAN_ENABLE;
2314 
2315 	switch (f_info->lkup_type) {
2316 	case ICE_SW_LKUP_MAC:
2317 		daddr = f_info->l_data.mac.mac_addr;
2318 		break;
2319 	case ICE_SW_LKUP_VLAN:
2320 		vlan_id = f_info->l_data.vlan.vlan_id;
2321 		if (f_info->l_data.vlan.tpid_valid)
2322 			vlan_tpid = f_info->l_data.vlan.tpid;
2323 		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
2324 		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
2325 			act |= ICE_SINGLE_ACT_PRUNE;
2326 			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
2327 		}
2328 		break;
2329 	case ICE_SW_LKUP_ETHERTYPE_MAC:
2330 		daddr = f_info->l_data.ethertype_mac.mac_addr;
2331 		fallthrough;
2332 	case ICE_SW_LKUP_ETHERTYPE:
2333 		off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
2334 		*off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
2335 		break;
2336 	case ICE_SW_LKUP_MAC_VLAN:
2337 		daddr = f_info->l_data.mac_vlan.mac_addr;
2338 		vlan_id = f_info->l_data.mac_vlan.vlan_id;
2339 		break;
2340 	case ICE_SW_LKUP_PROMISC_VLAN:
2341 		vlan_id = f_info->l_data.mac_vlan.vlan_id;
2342 		fallthrough;
2343 	case ICE_SW_LKUP_PROMISC:
2344 		daddr = f_info->l_data.mac_vlan.mac_addr;
2345 		break;
2346 	default:
2347 		break;
2348 	}
2349 
2350 	s_rule->hdr.type = (f_info->flag & ICE_FLTR_RX) ?
2351 		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
2352 		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
2353 
2354 	/* Recipe set depending on lookup type */
2355 	s_rule->recipe_id = cpu_to_le16(f_info->lkup_type);
2356 	s_rule->src = cpu_to_le16(f_info->src);
2357 	s_rule->act = cpu_to_le32(act);
2358 
2359 	if (daddr)
2360 		ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);
2361 
2362 	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
2363 		off = (__force __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
2364 		*off = cpu_to_be16(vlan_id);
2365 		off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
2366 		*off = cpu_to_be16(vlan_tpid);
2367 	}
2368 
2369 	/* Create the switch rule with the final dummy Ethernet header */
2370 	if (opc != ice_aqc_opc_update_sw_rules)
2371 		s_rule->hdr_len = cpu_to_le16(eth_hdr_sz);
2372 }
2373 
2374 /**
2375  * ice_add_marker_act
2376  * @hw: pointer to the hardware structure
2377  * @m_ent: the management entry for which sw marker needs to be added
2378  * @sw_marker: sw marker to tag the Rx descriptor with
2379  * @l_id: large action resource ID
2380  *
2381  * Create a large action to hold software marker and update the switch rule
2382  * entry pointed by m_ent with newly created large action
2383  */
2384 static int
2385 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2386 		   u16 sw_marker, u16 l_id)
2387 {
2388 	struct ice_sw_rule_lkup_rx_tx *rx_tx;
2389 	struct ice_sw_rule_lg_act *lg_act;
2390 	/* For software marker we need 3 large actions
2391 	 * 1. FWD action: FWD TO VSI or VSI LIST
2392 	 * 2. GENERIC VALUE action to hold the profile ID
2393 	 * 3. GENERIC VALUE action to hold the software marker ID
2394 	 */
2395 	const u16 num_lg_acts = 3;
2396 	u16 lg_act_size;
2397 	u16 rules_size;
2398 	int status;
2399 	u32 act;
2400 	u16 id;
2401 
2402 	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2403 		return -EINVAL;
2404 
2405 	/* Create two back-to-back switch rules and submit them to the HW using
2406 	 * one memory buffer:
2407 	 *    1. Large Action
2408 	 *    2. Look up Tx Rx
2409 	 */
2410 	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(lg_act, num_lg_acts);
2411 	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(rx_tx);
2412 	lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
2413 	if (!lg_act)
2414 		return -ENOMEM;
2415 
2416 	rx_tx = (typeof(rx_tx))((u8 *)lg_act + lg_act_size);
2417 
2418 	/* Fill in the first switch rule i.e. large action */
2419 	lg_act->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
2420 	lg_act->index = cpu_to_le16(l_id);
2421 	lg_act->size = cpu_to_le16(num_lg_acts);
2422 
2423 	/* First action VSI forwarding or VSI list forwarding depending on how
2424 	 * many VSIs
2425 	 */
2426 	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2427 		m_ent->fltr_info.fwd_id.hw_vsi_id;
2428 
2429 	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2430 	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
2431 	if (m_ent->vsi_count > 1)
2432 		act |= ICE_LG_ACT_VSI_LIST;
2433 	lg_act->act[0] = cpu_to_le32(act);
2434 
2435 	/* Second action descriptor type */
2436 	act = ICE_LG_ACT_GENERIC;
2437 
2438 	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2439 	lg_act->act[1] = cpu_to_le32(act);
2440 
2441 	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2442 	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2443 
2444 	/* Third action Marker value */
2445 	act |= ICE_LG_ACT_GENERIC;
2446 	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2447 		ICE_LG_ACT_GENERIC_VALUE_M;
2448 
2449 	lg_act->act[2] = cpu_to_le32(act);
2450 
2451 	/* call the fill switch rule to fill the lookup Tx Rx structure */
2452 	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2453 			 ice_aqc_opc_update_sw_rules);
2454 
2455 	/* Update the action to point to the large action ID */
2456 	rx_tx->act = cpu_to_le32(ICE_SINGLE_ACT_PTR |
2457 				 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2458 				  ICE_SINGLE_ACT_PTR_VAL_M));
2459 
2460 	/* Use the filter rule ID of the previously created rule with single
2461 	 * act. Once the update happens, hardware will treat this as large
2462 	 * action
2463 	 */
2464 	rx_tx->index = cpu_to_le16(m_ent->fltr_info.fltr_rule_id);
2465 
2466 	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2467 				 ice_aqc_opc_update_sw_rules, NULL);
2468 	if (!status) {
2469 		m_ent->lg_act_idx = l_id;
2470 		m_ent->sw_marker_id = sw_marker;
2471 	}
2472 
2473 	devm_kfree(ice_hw_to_dev(hw), lg_act);
2474 	return status;
2475 }
2476 
2477 /**
2478  * ice_create_vsi_list_map
2479  * @hw: pointer to the hardware structure
2480  * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2481  * @num_vsi: number of VSI handles in the array
2482  * @vsi_list_id: VSI list ID generated as part of allocate resource
2483  *
2484  * Helper function to create a new entry of VSI list ID to VSI mapping
2485  * using the given VSI list ID
2486  */
2487 static struct ice_vsi_list_map_info *
2488 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2489 			u16 vsi_list_id)
2490 {
2491 	struct ice_switch_info *sw = hw->switch_info;
2492 	struct ice_vsi_list_map_info *v_map;
2493 	int i;
2494 
2495 	v_map = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*v_map), GFP_KERNEL);
2496 	if (!v_map)
2497 		return NULL;
2498 
2499 	v_map->vsi_list_id = vsi_list_id;
2500 	v_map->ref_cnt = 1;
2501 	for (i = 0; i < num_vsi; i++)
2502 		set_bit(vsi_handle_arr[i], v_map->vsi_map);
2503 
2504 	list_add(&v_map->list_entry, &sw->vsi_list_map_head);
2505 	return v_map;
2506 }
2507 
2508 /**
2509  * ice_update_vsi_list_rule
2510  * @hw: pointer to the hardware structure
2511  * @vsi_handle_arr: array of VSI handles to form a VSI list
2512  * @num_vsi: number of VSI handles in the array
2513  * @vsi_list_id: VSI list ID generated as part of allocate resource
2514  * @remove: Boolean value to indicate if this is a remove action
2515  * @opc: switch rules population command type - pass in the command opcode
2516  * @lkup_type: lookup type of the filter
2517  *
2518  * Call AQ command to add a new switch rule or update existing switch rule
2519  * using the given VSI list ID
2520  */
2521 static int
2522 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2523 			 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2524 			 enum ice_sw_lkup_type lkup_type)
2525 {
2526 	struct ice_sw_rule_vsi_list *s_rule;
2527 	u16 s_rule_size;
2528 	u16 rule_type;
2529 	int status;
2530 	int i;
2531 
2532 	if (!num_vsi)
2533 		return -EINVAL;
2534 
2535 	if (lkup_type == ICE_SW_LKUP_MAC ||
2536 	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2537 	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2538 	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2539 	    lkup_type == ICE_SW_LKUP_PROMISC ||
2540 	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN)
2541 		rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2542 			ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2543 	else if (lkup_type == ICE_SW_LKUP_VLAN)
2544 		rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2545 			ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2546 	else
2547 		return -EINVAL;
2548 
2549 	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, num_vsi);
2550 	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
2551 	if (!s_rule)
2552 		return -ENOMEM;
2553 	for (i = 0; i < num_vsi; i++) {
2554 		if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2555 			status = -EINVAL;
2556 			goto exit;
2557 		}
2558 		/* AQ call requires hw_vsi_id(s) */
2559 		s_rule->vsi[i] =
2560 			cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2561 	}
2562 
2563 	s_rule->hdr.type = cpu_to_le16(rule_type);
2564 	s_rule->number_vsi = cpu_to_le16(num_vsi);
2565 	s_rule->index = cpu_to_le16(vsi_list_id);
2566 
2567 	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2568 
2569 exit:
2570 	devm_kfree(ice_hw_to_dev(hw), s_rule);
2571 	return status;
2572 }
2573 
2574 /**
2575  * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2576  * @hw: pointer to the HW struct
2577  * @vsi_handle_arr: array of VSI handles to form a VSI list
2578  * @num_vsi: number of VSI handles in the array
2579  * @vsi_list_id: stores the ID of the VSI list to be created
2580  * @lkup_type: switch rule filter's lookup type
2581  */
2582 static int
2583 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2584 			 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2585 {
2586 	int status;
2587 
2588 	status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2589 					    ice_aqc_opc_alloc_res);
2590 	if (status)
2591 		return status;
2592 
2593 	/* Update the newly created VSI list to include the specified VSIs */
2594 	return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2595 					*vsi_list_id, false,
2596 					ice_aqc_opc_add_sw_rules, lkup_type);
2597 }
2598 
2599 /**
2600  * ice_create_pkt_fwd_rule
2601  * @hw: pointer to the hardware structure
2602  * @f_entry: entry containing packet forwarding information
2603  *
2604  * Create switch rule with given filter information and add an entry
2605  * to the corresponding filter management list to track this switch rule
2606  * and VSI mapping
2607  */
2608 static int
2609 ice_create_pkt_fwd_rule(struct ice_hw *hw,
2610 			struct ice_fltr_list_entry *f_entry)
2611 {
2612 	struct ice_fltr_mgmt_list_entry *fm_entry;
2613 	struct ice_sw_rule_lkup_rx_tx *s_rule;
2614 	enum ice_sw_lkup_type l_type;
2615 	struct ice_sw_recipe *recp;
2616 	int status;
2617 
2618 	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
2619 			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule),
2620 			      GFP_KERNEL);
2621 	if (!s_rule)
2622 		return -ENOMEM;
2623 	fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
2624 				GFP_KERNEL);
2625 	if (!fm_entry) {
2626 		status = -ENOMEM;
2627 		goto ice_create_pkt_fwd_rule_exit;
2628 	}
2629 
2630 	fm_entry->fltr_info = f_entry->fltr_info;
2631 
2632 	/* Initialize all the fields for the management entry */
2633 	fm_entry->vsi_count = 1;
2634 	fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2635 	fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2636 	fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2637 
2638 	ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2639 			 ice_aqc_opc_add_sw_rules);
2640 
2641 	status = ice_aq_sw_rules(hw, s_rule,
2642 				 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), 1,
2643 				 ice_aqc_opc_add_sw_rules, NULL);
2644 	if (status) {
2645 		devm_kfree(ice_hw_to_dev(hw), fm_entry);
2646 		goto ice_create_pkt_fwd_rule_exit;
2647 	}
2648 
2649 	f_entry->fltr_info.fltr_rule_id = le16_to_cpu(s_rule->index);
2650 	fm_entry->fltr_info.fltr_rule_id = le16_to_cpu(s_rule->index);
2651 
2652 	/* The book keeping entries will get removed when base driver
2653 	 * calls remove filter AQ command
2654 	 */
2655 	l_type = fm_entry->fltr_info.lkup_type;
2656 	recp = &hw->switch_info->recp_list[l_type];
2657 	list_add(&fm_entry->list_entry, &recp->filt_rules);
2658 
2659 ice_create_pkt_fwd_rule_exit:
2660 	devm_kfree(ice_hw_to_dev(hw), s_rule);
2661 	return status;
2662 }
2663 
2664 /**
2665  * ice_update_pkt_fwd_rule
2666  * @hw: pointer to the hardware structure
2667  * @f_info: filter information for switch rule
2668  *
2669  * Call AQ command to update a previously created switch rule with a
2670  * VSI list ID
2671  */
2672 static int
2673 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2674 {
2675 	struct ice_sw_rule_lkup_rx_tx *s_rule;
2676 	int status;
2677 
2678 	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
2679 			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule),
2680 			      GFP_KERNEL);
2681 	if (!s_rule)
2682 		return -ENOMEM;
2683 
2684 	ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
2685 
2686 	s_rule->index = cpu_to_le16(f_info->fltr_rule_id);
2687 
2688 	/* Update switch rule with new rule set to forward VSI list */
2689 	status = ice_aq_sw_rules(hw, s_rule,
2690 				 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), 1,
2691 				 ice_aqc_opc_update_sw_rules, NULL);
2692 
2693 	devm_kfree(ice_hw_to_dev(hw), s_rule);
2694 	return status;
2695 }
2696 
2697 /**
2698  * ice_update_sw_rule_bridge_mode
2699  * @hw: pointer to the HW struct
2700  *
2701  * Updates unicast switch filter rules based on VEB/VEPA mode
2702  */
2703 int ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2704 {
2705 	struct ice_switch_info *sw = hw->switch_info;
2706 	struct ice_fltr_mgmt_list_entry *fm_entry;
2707 	struct list_head *rule_head;
2708 	struct mutex *rule_lock; /* Lock to protect filter rule list */
2709 	int status = 0;
2710 
2711 	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2712 	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2713 
2714 	mutex_lock(rule_lock);
2715 	list_for_each_entry(fm_entry, rule_head, list_entry) {
2716 		struct ice_fltr_info *fi = &fm_entry->fltr_info;
2717 		u8 *addr = fi->l_data.mac.mac_addr;
2718 
2719 		/* Update unicast Tx rules to reflect the selected
2720 		 * VEB/VEPA mode
2721 		 */
2722 		if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) &&
2723 		    (fi->fltr_act == ICE_FWD_TO_VSI ||
2724 		     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2725 		     fi->fltr_act == ICE_FWD_TO_Q ||
2726 		     fi->fltr_act == ICE_FWD_TO_QGRP)) {
2727 			status = ice_update_pkt_fwd_rule(hw, fi);
2728 			if (status)
2729 				break;
2730 		}
2731 	}
2732 
2733 	mutex_unlock(rule_lock);
2734 
2735 	return status;
2736 }
2737 
2738 /**
2739  * ice_add_update_vsi_list
2740  * @hw: pointer to the hardware structure
2741  * @m_entry: pointer to current filter management list entry
2742  * @cur_fltr: filter information from the book keeping entry
2743  * @new_fltr: filter information with the new VSI to be added
2744  *
2745  * Call AQ command to add or update previously created VSI list with new VSI.
2746  *
2747  * Helper function to do book keeping associated with adding filter information
2748  * The algorithm to do the book keeping is described below :
2749  * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2750  *	if only one VSI has been added till now
2751  *		Allocate a new VSI list and add two VSIs
2752  *		to this list using switch rule command
2753  *		Update the previously created switch rule with the
2754  *		newly created VSI list ID
2755  *	if a VSI list was previously created
2756  *		Add the new VSI to the previously created VSI list set
2757  *		using the update switch rule command
2758  */
2759 static int
2760 ice_add_update_vsi_list(struct ice_hw *hw,
2761 			struct ice_fltr_mgmt_list_entry *m_entry,
2762 			struct ice_fltr_info *cur_fltr,
2763 			struct ice_fltr_info *new_fltr)
2764 {
2765 	u16 vsi_list_id = 0;
2766 	int status = 0;
2767 
2768 	if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2769 	     cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2770 		return -EOPNOTSUPP;
2771 
2772 	if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2773 	     new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2774 	    (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2775 	     cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2776 		return -EOPNOTSUPP;
2777 
2778 	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2779 		/* Only one entry existed in the mapping and it was not already
2780 		 * a part of a VSI list. So, create a VSI list with the old and
2781 		 * new VSIs.
2782 		 */
2783 		struct ice_fltr_info tmp_fltr;
2784 		u16 vsi_handle_arr[2];
2785 
2786 		/* A rule already exists with the new VSI being added */
2787 		if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2788 			return -EEXIST;
2789 
2790 		vsi_handle_arr[0] = cur_fltr->vsi_handle;
2791 		vsi_handle_arr[1] = new_fltr->vsi_handle;
2792 		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2793 						  &vsi_list_id,
2794 						  new_fltr->lkup_type);
2795 		if (status)
2796 			return status;
2797 
2798 		tmp_fltr = *new_fltr;
2799 		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2800 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2801 		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2802 		/* Update the previous switch rule of "MAC forward to VSI" to
2803 		 * "MAC fwd to VSI list"
2804 		 */
2805 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
2806 		if (status)
2807 			return status;
2808 
2809 		cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2810 		cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2811 		m_entry->vsi_list_info =
2812 			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2813 						vsi_list_id);
2814 
2815 		if (!m_entry->vsi_list_info)
2816 			return -ENOMEM;
2817 
2818 		/* If this entry was large action then the large action needs
2819 		 * to be updated to point to FWD to VSI list
2820 		 */
2821 		if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2822 			status =
2823 			    ice_add_marker_act(hw, m_entry,
2824 					       m_entry->sw_marker_id,
2825 					       m_entry->lg_act_idx);
2826 	} else {
2827 		u16 vsi_handle = new_fltr->vsi_handle;
2828 		enum ice_adminq_opc opcode;
2829 
2830 		if (!m_entry->vsi_list_info)
2831 			return -EIO;
2832 
2833 		/* A rule already exists with the new VSI being added */
2834 		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
2835 			return 0;
2836 
2837 		/* Update the previously created VSI list set with
2838 		 * the new VSI ID passed in
2839 		 */
2840 		vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2841 		opcode = ice_aqc_opc_update_sw_rules;
2842 
2843 		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2844 						  vsi_list_id, false, opcode,
2845 						  new_fltr->lkup_type);
2846 		/* update VSI list mapping info with new VSI ID */
2847 		if (!status)
2848 			set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
2849 	}
2850 	if (!status)
2851 		m_entry->vsi_count++;
2852 	return status;
2853 }
2854 
2855 /**
2856  * ice_find_rule_entry - Search a rule entry
2857  * @hw: pointer to the hardware structure
2858  * @recp_id: lookup type for which the specified rule needs to be searched
2859  * @f_info: rule information
2860  *
2861  * Helper function to search for a given rule entry
2862  * Returns pointer to entry storing the rule if found
2863  */
2864 static struct ice_fltr_mgmt_list_entry *
2865 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
2866 {
2867 	struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2868 	struct ice_switch_info *sw = hw->switch_info;
2869 	struct list_head *list_head;
2870 
2871 	list_head = &sw->recp_list[recp_id].filt_rules;
2872 	list_for_each_entry(list_itr, list_head, list_entry) {
2873 		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2874 			    sizeof(f_info->l_data)) &&
2875 		    f_info->flag == list_itr->fltr_info.flag) {
2876 			ret = list_itr;
2877 			break;
2878 		}
2879 	}
2880 	return ret;
2881 }
2882 
2883 /**
2884  * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2885  * @hw: pointer to the hardware structure
2886  * @recp_id: lookup type for which VSI lists needs to be searched
2887  * @vsi_handle: VSI handle to be found in VSI list
2888  * @vsi_list_id: VSI list ID found containing vsi_handle
2889  *
2890  * Helper function to search a VSI list with single entry containing given VSI
2891  * handle element. This can be extended further to search VSI list with more
2892  * than 1 vsi_count. Returns pointer to VSI list entry if found.
2893  */
2894 static struct ice_vsi_list_map_info *
2895 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
2896 			u16 *vsi_list_id)
2897 {
2898 	struct ice_vsi_list_map_info *map_info = NULL;
2899 	struct ice_switch_info *sw = hw->switch_info;
2900 	struct ice_fltr_mgmt_list_entry *list_itr;
2901 	struct list_head *list_head;
2902 
2903 	list_head = &sw->recp_list[recp_id].filt_rules;
2904 	list_for_each_entry(list_itr, list_head, list_entry) {
2905 		if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
2906 			map_info = list_itr->vsi_list_info;
2907 			if (test_bit(vsi_handle, map_info->vsi_map)) {
2908 				*vsi_list_id = map_info->vsi_list_id;
2909 				return map_info;
2910 			}
2911 		}
2912 	}
2913 	return NULL;
2914 }
2915 
2916 /**
2917  * ice_add_rule_internal - add rule for a given lookup type
2918  * @hw: pointer to the hardware structure
2919  * @recp_id: lookup type (recipe ID) for which rule has to be added
2920  * @f_entry: structure containing MAC forwarding information
2921  *
2922  * Adds or updates the rule lists for a given recipe
2923  */
2924 static int
2925 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
2926 		      struct ice_fltr_list_entry *f_entry)
2927 {
2928 	struct ice_switch_info *sw = hw->switch_info;
2929 	struct ice_fltr_info *new_fltr, *cur_fltr;
2930 	struct ice_fltr_mgmt_list_entry *m_entry;
2931 	struct mutex *rule_lock; /* Lock to protect filter rule list */
2932 	int status = 0;
2933 
2934 	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2935 		return -EINVAL;
2936 	f_entry->fltr_info.fwd_id.hw_vsi_id =
2937 		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2938 
2939 	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2940 
2941 	mutex_lock(rule_lock);
2942 	new_fltr = &f_entry->fltr_info;
2943 	if (new_fltr->flag & ICE_FLTR_RX)
2944 		new_fltr->src = hw->port_info->lport;
2945 	else if (new_fltr->flag & ICE_FLTR_TX)
2946 		new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id;
2947 
2948 	m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
2949 	if (!m_entry) {
2950 		mutex_unlock(rule_lock);
2951 		return ice_create_pkt_fwd_rule(hw, f_entry);
2952 	}
2953 
2954 	cur_fltr = &m_entry->fltr_info;
2955 	status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
2956 	mutex_unlock(rule_lock);
2957 
2958 	return status;
2959 }
2960 
2961 /**
2962  * ice_remove_vsi_list_rule
2963  * @hw: pointer to the hardware structure
2964  * @vsi_list_id: VSI list ID generated as part of allocate resource
2965  * @lkup_type: switch rule filter lookup type
2966  *
2967  * The VSI list should be emptied before this function is called to remove the
2968  * VSI list.
2969  */
2970 static int
2971 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
2972 			 enum ice_sw_lkup_type lkup_type)
2973 {
2974 	struct ice_sw_rule_vsi_list *s_rule;
2975 	u16 s_rule_size;
2976 	int status;
2977 
2978 	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, 0);
2979 	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
2980 	if (!s_rule)
2981 		return -ENOMEM;
2982 
2983 	s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
2984 	s_rule->index = cpu_to_le16(vsi_list_id);
2985 
2986 	/* Free the vsi_list resource that we allocated. It is assumed that the
2987 	 * list is empty at this point.
2988 	 */
2989 	status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
2990 					    ice_aqc_opc_free_res);
2991 
2992 	devm_kfree(ice_hw_to_dev(hw), s_rule);
2993 	return status;
2994 }
2995 
2996 /**
2997  * ice_rem_update_vsi_list
2998  * @hw: pointer to the hardware structure
2999  * @vsi_handle: VSI handle of the VSI to remove
3000  * @fm_list: filter management entry for which the VSI list management needs to
3001  *           be done
3002  */
3003 static int
3004 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
3005 			struct ice_fltr_mgmt_list_entry *fm_list)
3006 {
3007 	enum ice_sw_lkup_type lkup_type;
3008 	u16 vsi_list_id;
3009 	int status = 0;
3010 
3011 	if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
3012 	    fm_list->vsi_count == 0)
3013 		return -EINVAL;
3014 
3015 	/* A rule with the VSI being removed does not exist */
3016 	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
3017 		return -ENOENT;
3018 
3019 	lkup_type = fm_list->fltr_info.lkup_type;
3020 	vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
3021 	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
3022 					  ice_aqc_opc_update_sw_rules,
3023 					  lkup_type);
3024 	if (status)
3025 		return status;
3026 
3027 	fm_list->vsi_count--;
3028 	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
3029 
3030 	if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
3031 		struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
3032 		struct ice_vsi_list_map_info *vsi_list_info =
3033 			fm_list->vsi_list_info;
3034 		u16 rem_vsi_handle;
3035 
3036 		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
3037 						ICE_MAX_VSI);
3038 		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
3039 			return -EIO;
3040 
3041 		/* Make sure VSI list is empty before removing it below */
3042 		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
3043 						  vsi_list_id, true,
3044 						  ice_aqc_opc_update_sw_rules,
3045 						  lkup_type);
3046 		if (status)
3047 			return status;
3048 
3049 		tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
3050 		tmp_fltr_info.fwd_id.hw_vsi_id =
3051 			ice_get_hw_vsi_num(hw, rem_vsi_handle);
3052 		tmp_fltr_info.vsi_handle = rem_vsi_handle;
3053 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
3054 		if (status) {
3055 			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
3056 				  tmp_fltr_info.fwd_id.hw_vsi_id, status);
3057 			return status;
3058 		}
3059 
3060 		fm_list->fltr_info = tmp_fltr_info;
3061 	}
3062 
3063 	if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
3064 	    (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
3065 		struct ice_vsi_list_map_info *vsi_list_info =
3066 			fm_list->vsi_list_info;
3067 
3068 		/* Remove the VSI list since it is no longer used */
3069 		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
3070 		if (status) {
3071 			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
3072 				  vsi_list_id, status);
3073 			return status;
3074 		}
3075 
3076 		list_del(&vsi_list_info->list_entry);
3077 		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
3078 		fm_list->vsi_list_info = NULL;
3079 	}
3080 
3081 	return status;
3082 }
3083 
3084 /**
3085  * ice_remove_rule_internal - Remove a filter rule of a given type
3086  * @hw: pointer to the hardware structure
3087  * @recp_id: recipe ID for which the rule needs to removed
3088  * @f_entry: rule entry containing filter information
3089  */
3090 static int
3091 ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
3092 			 struct ice_fltr_list_entry *f_entry)
3093 {
3094 	struct ice_switch_info *sw = hw->switch_info;
3095 	struct ice_fltr_mgmt_list_entry *list_elem;
3096 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3097 	bool remove_rule = false;
3098 	u16 vsi_handle;
3099 	int status = 0;
3100 
3101 	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3102 		return -EINVAL;
3103 	f_entry->fltr_info.fwd_id.hw_vsi_id =
3104 		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3105 
3106 	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
3107 	mutex_lock(rule_lock);
3108 	list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
3109 	if (!list_elem) {
3110 		status = -ENOENT;
3111 		goto exit;
3112 	}
3113 
3114 	if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
3115 		remove_rule = true;
3116 	} else if (!list_elem->vsi_list_info) {
3117 		status = -ENOENT;
3118 		goto exit;
3119 	} else if (list_elem->vsi_list_info->ref_cnt > 1) {
3120 		/* a ref_cnt > 1 indicates that the vsi_list is being
3121 		 * shared by multiple rules. Decrement the ref_cnt and
3122 		 * remove this rule, but do not modify the list, as it
3123 		 * is in-use by other rules.
3124 		 */
3125 		list_elem->vsi_list_info->ref_cnt--;
3126 		remove_rule = true;
3127 	} else {
3128 		/* a ref_cnt of 1 indicates the vsi_list is only used
3129 		 * by one rule. However, the original removal request is only
3130 		 * for a single VSI. Update the vsi_list first, and only
3131 		 * remove the rule if there are no further VSIs in this list.
3132 		 */
3133 		vsi_handle = f_entry->fltr_info.vsi_handle;
3134 		status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
3135 		if (status)
3136 			goto exit;
3137 		/* if VSI count goes to zero after updating the VSI list */
3138 		if (list_elem->vsi_count == 0)
3139 			remove_rule = true;
3140 	}
3141 
3142 	if (remove_rule) {
3143 		/* Remove the lookup rule */
3144 		struct ice_sw_rule_lkup_rx_tx *s_rule;
3145 
3146 		s_rule = devm_kzalloc(ice_hw_to_dev(hw),
3147 				      ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule),
3148 				      GFP_KERNEL);
3149 		if (!s_rule) {
3150 			status = -ENOMEM;
3151 			goto exit;
3152 		}
3153 
3154 		ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
3155 				 ice_aqc_opc_remove_sw_rules);
3156 
3157 		status = ice_aq_sw_rules(hw, s_rule,
3158 					 ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule),
3159 					 1, ice_aqc_opc_remove_sw_rules, NULL);
3160 
3161 		/* Remove a book keeping from the list */
3162 		devm_kfree(ice_hw_to_dev(hw), s_rule);
3163 
3164 		if (status)
3165 			goto exit;
3166 
3167 		list_del(&list_elem->list_entry);
3168 		devm_kfree(ice_hw_to_dev(hw), list_elem);
3169 	}
3170 exit:
3171 	mutex_unlock(rule_lock);
3172 	return status;
3173 }
3174 
3175 /**
3176  * ice_mac_fltr_exist - does this MAC filter exist for given VSI
3177  * @hw: pointer to the hardware structure
3178  * @mac: MAC address to be checked (for MAC filter)
3179  * @vsi_handle: check MAC filter for this VSI
3180  */
3181 bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle)
3182 {
3183 	struct ice_fltr_mgmt_list_entry *entry;
3184 	struct list_head *rule_head;
3185 	struct ice_switch_info *sw;
3186 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3187 	u16 hw_vsi_id;
3188 
3189 	if (!ice_is_vsi_valid(hw, vsi_handle))
3190 		return false;
3191 
3192 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3193 	sw = hw->switch_info;
3194 	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3195 	if (!rule_head)
3196 		return false;
3197 
3198 	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3199 	mutex_lock(rule_lock);
3200 	list_for_each_entry(entry, rule_head, list_entry) {
3201 		struct ice_fltr_info *f_info = &entry->fltr_info;
3202 		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3203 
3204 		if (is_zero_ether_addr(mac_addr))
3205 			continue;
3206 
3207 		if (f_info->flag != ICE_FLTR_TX ||
3208 		    f_info->src_id != ICE_SRC_ID_VSI ||
3209 		    f_info->lkup_type != ICE_SW_LKUP_MAC ||
3210 		    f_info->fltr_act != ICE_FWD_TO_VSI ||
3211 		    hw_vsi_id != f_info->fwd_id.hw_vsi_id)
3212 			continue;
3213 
3214 		if (ether_addr_equal(mac, mac_addr)) {
3215 			mutex_unlock(rule_lock);
3216 			return true;
3217 		}
3218 	}
3219 	mutex_unlock(rule_lock);
3220 	return false;
3221 }
3222 
3223 /**
3224  * ice_vlan_fltr_exist - does this VLAN filter exist for given VSI
3225  * @hw: pointer to the hardware structure
3226  * @vlan_id: VLAN ID
3227  * @vsi_handle: check MAC filter for this VSI
3228  */
3229 bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle)
3230 {
3231 	struct ice_fltr_mgmt_list_entry *entry;
3232 	struct list_head *rule_head;
3233 	struct ice_switch_info *sw;
3234 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3235 	u16 hw_vsi_id;
3236 
3237 	if (vlan_id > ICE_MAX_VLAN_ID)
3238 		return false;
3239 
3240 	if (!ice_is_vsi_valid(hw, vsi_handle))
3241 		return false;
3242 
3243 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3244 	sw = hw->switch_info;
3245 	rule_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
3246 	if (!rule_head)
3247 		return false;
3248 
3249 	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3250 	mutex_lock(rule_lock);
3251 	list_for_each_entry(entry, rule_head, list_entry) {
3252 		struct ice_fltr_info *f_info = &entry->fltr_info;
3253 		u16 entry_vlan_id = f_info->l_data.vlan.vlan_id;
3254 		struct ice_vsi_list_map_info *map_info;
3255 
3256 		if (entry_vlan_id > ICE_MAX_VLAN_ID)
3257 			continue;
3258 
3259 		if (f_info->flag != ICE_FLTR_TX ||
3260 		    f_info->src_id != ICE_SRC_ID_VSI ||
3261 		    f_info->lkup_type != ICE_SW_LKUP_VLAN)
3262 			continue;
3263 
3264 		/* Only allowed filter action are FWD_TO_VSI/_VSI_LIST */
3265 		if (f_info->fltr_act != ICE_FWD_TO_VSI &&
3266 		    f_info->fltr_act != ICE_FWD_TO_VSI_LIST)
3267 			continue;
3268 
3269 		if (f_info->fltr_act == ICE_FWD_TO_VSI) {
3270 			if (hw_vsi_id != f_info->fwd_id.hw_vsi_id)
3271 				continue;
3272 		} else if (f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
3273 			/* If filter_action is FWD_TO_VSI_LIST, make sure
3274 			 * that VSI being checked is part of VSI list
3275 			 */
3276 			if (entry->vsi_count == 1 &&
3277 			    entry->vsi_list_info) {
3278 				map_info = entry->vsi_list_info;
3279 				if (!test_bit(vsi_handle, map_info->vsi_map))
3280 					continue;
3281 			}
3282 		}
3283 
3284 		if (vlan_id == entry_vlan_id) {
3285 			mutex_unlock(rule_lock);
3286 			return true;
3287 		}
3288 	}
3289 	mutex_unlock(rule_lock);
3290 
3291 	return false;
3292 }
3293 
3294 /**
3295  * ice_add_mac - Add a MAC address based filter rule
3296  * @hw: pointer to the hardware structure
3297  * @m_list: list of MAC addresses and forwarding information
3298  *
3299  * IMPORTANT: When the ucast_shared flag is set to false and m_list has
3300  * multiple unicast addresses, the function assumes that all the
3301  * addresses are unique in a given add_mac call. It doesn't
3302  * check for duplicates in this case, removing duplicates from a given
3303  * list should be taken care of in the caller of this function.
3304  */
3305 int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
3306 {
3307 	struct ice_sw_rule_lkup_rx_tx *s_rule, *r_iter;
3308 	struct ice_fltr_list_entry *m_list_itr;
3309 	struct list_head *rule_head;
3310 	u16 total_elem_left, s_rule_size;
3311 	struct ice_switch_info *sw;
3312 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3313 	u16 num_unicast = 0;
3314 	int status = 0;
3315 	u8 elem_sent;
3316 
3317 	if (!m_list || !hw)
3318 		return -EINVAL;
3319 
3320 	s_rule = NULL;
3321 	sw = hw->switch_info;
3322 	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3323 	list_for_each_entry(m_list_itr, m_list, list_entry) {
3324 		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3325 		u16 vsi_handle;
3326 		u16 hw_vsi_id;
3327 
3328 		m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3329 		vsi_handle = m_list_itr->fltr_info.vsi_handle;
3330 		if (!ice_is_vsi_valid(hw, vsi_handle))
3331 			return -EINVAL;
3332 		hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3333 		m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3334 		/* update the src in case it is VSI num */
3335 		if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3336 			return -EINVAL;
3337 		m_list_itr->fltr_info.src = hw_vsi_id;
3338 		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3339 		    is_zero_ether_addr(add))
3340 			return -EINVAL;
3341 		if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
3342 			/* Don't overwrite the unicast address */
3343 			mutex_lock(rule_lock);
3344 			if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
3345 						&m_list_itr->fltr_info)) {
3346 				mutex_unlock(rule_lock);
3347 				return -EEXIST;
3348 			}
3349 			mutex_unlock(rule_lock);
3350 			num_unicast++;
3351 		} else if (is_multicast_ether_addr(add) ||
3352 			   (is_unicast_ether_addr(add) && hw->ucast_shared)) {
3353 			m_list_itr->status =
3354 				ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
3355 						      m_list_itr);
3356 			if (m_list_itr->status)
3357 				return m_list_itr->status;
3358 		}
3359 	}
3360 
3361 	mutex_lock(rule_lock);
3362 	/* Exit if no suitable entries were found for adding bulk switch rule */
3363 	if (!num_unicast) {
3364 		status = 0;
3365 		goto ice_add_mac_exit;
3366 	}
3367 
3368 	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3369 
3370 	/* Allocate switch rule buffer for the bulk update for unicast */
3371 	s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule);
3372 	s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
3373 			      GFP_KERNEL);
3374 	if (!s_rule) {
3375 		status = -ENOMEM;
3376 		goto ice_add_mac_exit;
3377 	}
3378 
3379 	r_iter = s_rule;
3380 	list_for_each_entry(m_list_itr, m_list, list_entry) {
3381 		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3382 		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3383 
3384 		if (is_unicast_ether_addr(mac_addr)) {
3385 			ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3386 					 ice_aqc_opc_add_sw_rules);
3387 			r_iter = (typeof(s_rule))((u8 *)r_iter + s_rule_size);
3388 		}
3389 	}
3390 
3391 	/* Call AQ bulk switch rule update for all unicast addresses */
3392 	r_iter = s_rule;
3393 	/* Call AQ switch rule in AQ_MAX chunk */
3394 	for (total_elem_left = num_unicast; total_elem_left > 0;
3395 	     total_elem_left -= elem_sent) {
3396 		struct ice_sw_rule_lkup_rx_tx *entry = r_iter;
3397 
3398 		elem_sent = min_t(u8, total_elem_left,
3399 				  (ICE_AQ_MAX_BUF_LEN / s_rule_size));
3400 		status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3401 					 elem_sent, ice_aqc_opc_add_sw_rules,
3402 					 NULL);
3403 		if (status)
3404 			goto ice_add_mac_exit;
3405 		r_iter = (typeof(s_rule))
3406 			((u8 *)r_iter + (elem_sent * s_rule_size));
3407 	}
3408 
3409 	/* Fill up rule ID based on the value returned from FW */
3410 	r_iter = s_rule;
3411 	list_for_each_entry(m_list_itr, m_list, list_entry) {
3412 		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3413 		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3414 		struct ice_fltr_mgmt_list_entry *fm_entry;
3415 
3416 		if (is_unicast_ether_addr(mac_addr)) {
3417 			f_info->fltr_rule_id = le16_to_cpu(r_iter->index);
3418 			f_info->fltr_act = ICE_FWD_TO_VSI;
3419 			/* Create an entry to track this MAC address */
3420 			fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
3421 						sizeof(*fm_entry), GFP_KERNEL);
3422 			if (!fm_entry) {
3423 				status = -ENOMEM;
3424 				goto ice_add_mac_exit;
3425 			}
3426 			fm_entry->fltr_info = *f_info;
3427 			fm_entry->vsi_count = 1;
3428 			/* The book keeping entries will get removed when
3429 			 * base driver calls remove filter AQ command
3430 			 */
3431 
3432 			list_add(&fm_entry->list_entry, rule_head);
3433 			r_iter = (typeof(s_rule))((u8 *)r_iter + s_rule_size);
3434 		}
3435 	}
3436 
3437 ice_add_mac_exit:
3438 	mutex_unlock(rule_lock);
3439 	if (s_rule)
3440 		devm_kfree(ice_hw_to_dev(hw), s_rule);
3441 	return status;
3442 }
3443 
3444 /**
3445  * ice_add_vlan_internal - Add one VLAN based filter rule
3446  * @hw: pointer to the hardware structure
3447  * @f_entry: filter entry containing one VLAN information
3448  */
3449 static int
3450 ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
3451 {
3452 	struct ice_switch_info *sw = hw->switch_info;
3453 	struct ice_fltr_mgmt_list_entry *v_list_itr;
3454 	struct ice_fltr_info *new_fltr, *cur_fltr;
3455 	enum ice_sw_lkup_type lkup_type;
3456 	u16 vsi_list_id = 0, vsi_handle;
3457 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3458 	int status = 0;
3459 
3460 	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3461 		return -EINVAL;
3462 
3463 	f_entry->fltr_info.fwd_id.hw_vsi_id =
3464 		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3465 	new_fltr = &f_entry->fltr_info;
3466 
3467 	/* VLAN ID should only be 12 bits */
3468 	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3469 		return -EINVAL;
3470 
3471 	if (new_fltr->src_id != ICE_SRC_ID_VSI)
3472 		return -EINVAL;
3473 
3474 	new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3475 	lkup_type = new_fltr->lkup_type;
3476 	vsi_handle = new_fltr->vsi_handle;
3477 	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3478 	mutex_lock(rule_lock);
3479 	v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
3480 	if (!v_list_itr) {
3481 		struct ice_vsi_list_map_info *map_info = NULL;
3482 
3483 		if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3484 			/* All VLAN pruning rules use a VSI list. Check if
3485 			 * there is already a VSI list containing VSI that we
3486 			 * want to add. If found, use the same vsi_list_id for
3487 			 * this new VLAN rule or else create a new list.
3488 			 */
3489 			map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
3490 							   vsi_handle,
3491 							   &vsi_list_id);
3492 			if (!map_info) {
3493 				status = ice_create_vsi_list_rule(hw,
3494 								  &vsi_handle,
3495 								  1,
3496 								  &vsi_list_id,
3497 								  lkup_type);
3498 				if (status)
3499 					goto exit;
3500 			}
3501 			/* Convert the action to forwarding to a VSI list. */
3502 			new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3503 			new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3504 		}
3505 
3506 		status = ice_create_pkt_fwd_rule(hw, f_entry);
3507 		if (!status) {
3508 			v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
3509 							 new_fltr);
3510 			if (!v_list_itr) {
3511 				status = -ENOENT;
3512 				goto exit;
3513 			}
3514 			/* reuse VSI list for new rule and increment ref_cnt */
3515 			if (map_info) {
3516 				v_list_itr->vsi_list_info = map_info;
3517 				map_info->ref_cnt++;
3518 			} else {
3519 				v_list_itr->vsi_list_info =
3520 					ice_create_vsi_list_map(hw, &vsi_handle,
3521 								1, vsi_list_id);
3522 			}
3523 		}
3524 	} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3525 		/* Update existing VSI list to add new VSI ID only if it used
3526 		 * by one VLAN rule.
3527 		 */
3528 		cur_fltr = &v_list_itr->fltr_info;
3529 		status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3530 						 new_fltr);
3531 	} else {
3532 		/* If VLAN rule exists and VSI list being used by this rule is
3533 		 * referenced by more than 1 VLAN rule. Then create a new VSI
3534 		 * list appending previous VSI with new VSI and update existing
3535 		 * VLAN rule to point to new VSI list ID
3536 		 */
3537 		struct ice_fltr_info tmp_fltr;
3538 		u16 vsi_handle_arr[2];
3539 		u16 cur_handle;
3540 
3541 		/* Current implementation only supports reusing VSI list with
3542 		 * one VSI count. We should never hit below condition
3543 		 */
3544 		if (v_list_itr->vsi_count > 1 &&
3545 		    v_list_itr->vsi_list_info->ref_cnt > 1) {
3546 			ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3547 			status = -EIO;
3548 			goto exit;
3549 		}
3550 
3551 		cur_handle =
3552 			find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3553 				       ICE_MAX_VSI);
3554 
3555 		/* A rule already exists with the new VSI being added */
3556 		if (cur_handle == vsi_handle) {
3557 			status = -EEXIST;
3558 			goto exit;
3559 		}
3560 
3561 		vsi_handle_arr[0] = cur_handle;
3562 		vsi_handle_arr[1] = vsi_handle;
3563 		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3564 						  &vsi_list_id, lkup_type);
3565 		if (status)
3566 			goto exit;
3567 
3568 		tmp_fltr = v_list_itr->fltr_info;
3569 		tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3570 		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3571 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3572 		/* Update the previous switch rule to a new VSI list which
3573 		 * includes current VSI that is requested
3574 		 */
3575 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3576 		if (status)
3577 			goto exit;
3578 
3579 		/* before overriding VSI list map info. decrement ref_cnt of
3580 		 * previous VSI list
3581 		 */
3582 		v_list_itr->vsi_list_info->ref_cnt--;
3583 
3584 		/* now update to newly created list */
3585 		v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3586 		v_list_itr->vsi_list_info =
3587 			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3588 						vsi_list_id);
3589 		v_list_itr->vsi_count++;
3590 	}
3591 
3592 exit:
3593 	mutex_unlock(rule_lock);
3594 	return status;
3595 }
3596 
3597 /**
3598  * ice_add_vlan - Add VLAN based filter rule
3599  * @hw: pointer to the hardware structure
3600  * @v_list: list of VLAN entries and forwarding information
3601  */
3602 int ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
3603 {
3604 	struct ice_fltr_list_entry *v_list_itr;
3605 
3606 	if (!v_list || !hw)
3607 		return -EINVAL;
3608 
3609 	list_for_each_entry(v_list_itr, v_list, list_entry) {
3610 		if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3611 			return -EINVAL;
3612 		v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3613 		v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
3614 		if (v_list_itr->status)
3615 			return v_list_itr->status;
3616 	}
3617 	return 0;
3618 }
3619 
3620 /**
3621  * ice_add_eth_mac - Add ethertype and MAC based filter rule
3622  * @hw: pointer to the hardware structure
3623  * @em_list: list of ether type MAC filter, MAC is optional
3624  *
3625  * This function requires the caller to populate the entries in
3626  * the filter list with the necessary fields (including flags to
3627  * indicate Tx or Rx rules).
3628  */
3629 int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
3630 {
3631 	struct ice_fltr_list_entry *em_list_itr;
3632 
3633 	if (!em_list || !hw)
3634 		return -EINVAL;
3635 
3636 	list_for_each_entry(em_list_itr, em_list, list_entry) {
3637 		enum ice_sw_lkup_type l_type =
3638 			em_list_itr->fltr_info.lkup_type;
3639 
3640 		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3641 		    l_type != ICE_SW_LKUP_ETHERTYPE)
3642 			return -EINVAL;
3643 
3644 		em_list_itr->status = ice_add_rule_internal(hw, l_type,
3645 							    em_list_itr);
3646 		if (em_list_itr->status)
3647 			return em_list_itr->status;
3648 	}
3649 	return 0;
3650 }
3651 
3652 /**
3653  * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
3654  * @hw: pointer to the hardware structure
3655  * @em_list: list of ethertype or ethertype MAC entries
3656  */
3657 int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
3658 {
3659 	struct ice_fltr_list_entry *em_list_itr, *tmp;
3660 
3661 	if (!em_list || !hw)
3662 		return -EINVAL;
3663 
3664 	list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) {
3665 		enum ice_sw_lkup_type l_type =
3666 			em_list_itr->fltr_info.lkup_type;
3667 
3668 		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3669 		    l_type != ICE_SW_LKUP_ETHERTYPE)
3670 			return -EINVAL;
3671 
3672 		em_list_itr->status = ice_remove_rule_internal(hw, l_type,
3673 							       em_list_itr);
3674 		if (em_list_itr->status)
3675 			return em_list_itr->status;
3676 	}
3677 	return 0;
3678 }
3679 
3680 /**
3681  * ice_rem_sw_rule_info
3682  * @hw: pointer to the hardware structure
3683  * @rule_head: pointer to the switch list structure that we want to delete
3684  */
3685 static void
3686 ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
3687 {
3688 	if (!list_empty(rule_head)) {
3689 		struct ice_fltr_mgmt_list_entry *entry;
3690 		struct ice_fltr_mgmt_list_entry *tmp;
3691 
3692 		list_for_each_entry_safe(entry, tmp, rule_head, list_entry) {
3693 			list_del(&entry->list_entry);
3694 			devm_kfree(ice_hw_to_dev(hw), entry);
3695 		}
3696 	}
3697 }
3698 
3699 /**
3700  * ice_rem_adv_rule_info
3701  * @hw: pointer to the hardware structure
3702  * @rule_head: pointer to the switch list structure that we want to delete
3703  */
3704 static void
3705 ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
3706 {
3707 	struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3708 	struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3709 
3710 	if (list_empty(rule_head))
3711 		return;
3712 
3713 	list_for_each_entry_safe(lst_itr, tmp_entry, rule_head, list_entry) {
3714 		list_del(&lst_itr->list_entry);
3715 		devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
3716 		devm_kfree(ice_hw_to_dev(hw), lst_itr);
3717 	}
3718 }
3719 
3720 /**
3721  * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3722  * @hw: pointer to the hardware structure
3723  * @vsi_handle: VSI handle to set as default
3724  * @set: true to add the above mentioned switch rule, false to remove it
3725  * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3726  *
3727  * add filter rule to set/unset given VSI as default VSI for the switch
3728  * (represented by swid)
3729  */
3730 int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
3731 {
3732 	struct ice_sw_rule_lkup_rx_tx *s_rule;
3733 	struct ice_fltr_info f_info;
3734 	enum ice_adminq_opc opcode;
3735 	u16 s_rule_size;
3736 	u16 hw_vsi_id;
3737 	int status;
3738 
3739 	if (!ice_is_vsi_valid(hw, vsi_handle))
3740 		return -EINVAL;
3741 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3742 
3743 	s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule) :
3744 			    ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule);
3745 
3746 	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
3747 	if (!s_rule)
3748 		return -ENOMEM;
3749 
3750 	memset(&f_info, 0, sizeof(f_info));
3751 
3752 	f_info.lkup_type = ICE_SW_LKUP_DFLT;
3753 	f_info.flag = direction;
3754 	f_info.fltr_act = ICE_FWD_TO_VSI;
3755 	f_info.fwd_id.hw_vsi_id = hw_vsi_id;
3756 
3757 	if (f_info.flag & ICE_FLTR_RX) {
3758 		f_info.src = hw->port_info->lport;
3759 		f_info.src_id = ICE_SRC_ID_LPORT;
3760 		if (!set)
3761 			f_info.fltr_rule_id =
3762 				hw->port_info->dflt_rx_vsi_rule_id;
3763 	} else if (f_info.flag & ICE_FLTR_TX) {
3764 		f_info.src_id = ICE_SRC_ID_VSI;
3765 		f_info.src = hw_vsi_id;
3766 		if (!set)
3767 			f_info.fltr_rule_id =
3768 				hw->port_info->dflt_tx_vsi_rule_id;
3769 	}
3770 
3771 	if (set)
3772 		opcode = ice_aqc_opc_add_sw_rules;
3773 	else
3774 		opcode = ice_aqc_opc_remove_sw_rules;
3775 
3776 	ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3777 
3778 	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
3779 	if (status || !(f_info.flag & ICE_FLTR_TX_RX))
3780 		goto out;
3781 	if (set) {
3782 		u16 index = le16_to_cpu(s_rule->index);
3783 
3784 		if (f_info.flag & ICE_FLTR_TX) {
3785 			hw->port_info->dflt_tx_vsi_num = hw_vsi_id;
3786 			hw->port_info->dflt_tx_vsi_rule_id = index;
3787 		} else if (f_info.flag & ICE_FLTR_RX) {
3788 			hw->port_info->dflt_rx_vsi_num = hw_vsi_id;
3789 			hw->port_info->dflt_rx_vsi_rule_id = index;
3790 		}
3791 	} else {
3792 		if (f_info.flag & ICE_FLTR_TX) {
3793 			hw->port_info->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3794 			hw->port_info->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
3795 		} else if (f_info.flag & ICE_FLTR_RX) {
3796 			hw->port_info->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3797 			hw->port_info->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
3798 		}
3799 	}
3800 
3801 out:
3802 	devm_kfree(ice_hw_to_dev(hw), s_rule);
3803 	return status;
3804 }
3805 
3806 /**
3807  * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3808  * @hw: pointer to the hardware structure
3809  * @recp_id: lookup type for which the specified rule needs to be searched
3810  * @f_info: rule information
3811  *
3812  * Helper function to search for a unicast rule entry - this is to be used
3813  * to remove unicast MAC filter that is not shared with other VSIs on the
3814  * PF switch.
3815  *
3816  * Returns pointer to entry storing the rule if found
3817  */
3818 static struct ice_fltr_mgmt_list_entry *
3819 ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
3820 			  struct ice_fltr_info *f_info)
3821 {
3822 	struct ice_switch_info *sw = hw->switch_info;
3823 	struct ice_fltr_mgmt_list_entry *list_itr;
3824 	struct list_head *list_head;
3825 
3826 	list_head = &sw->recp_list[recp_id].filt_rules;
3827 	list_for_each_entry(list_itr, list_head, list_entry) {
3828 		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3829 			    sizeof(f_info->l_data)) &&
3830 		    f_info->fwd_id.hw_vsi_id ==
3831 		    list_itr->fltr_info.fwd_id.hw_vsi_id &&
3832 		    f_info->flag == list_itr->fltr_info.flag)
3833 			return list_itr;
3834 	}
3835 	return NULL;
3836 }
3837 
3838 /**
3839  * ice_remove_mac - remove a MAC address based filter rule
3840  * @hw: pointer to the hardware structure
3841  * @m_list: list of MAC addresses and forwarding information
3842  *
3843  * This function removes either a MAC filter rule or a specific VSI from a
3844  * VSI list for a multicast MAC address.
3845  *
3846  * Returns -ENOENT if a given entry was not added by ice_add_mac. Caller should
3847  * be aware that this call will only work if all the entries passed into m_list
3848  * were added previously. It will not attempt to do a partial remove of entries
3849  * that were found.
3850  */
3851 int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
3852 {
3853 	struct ice_fltr_list_entry *list_itr, *tmp;
3854 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3855 
3856 	if (!m_list)
3857 		return -EINVAL;
3858 
3859 	rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3860 	list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
3861 		enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3862 		u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3863 		u16 vsi_handle;
3864 
3865 		if (l_type != ICE_SW_LKUP_MAC)
3866 			return -EINVAL;
3867 
3868 		vsi_handle = list_itr->fltr_info.vsi_handle;
3869 		if (!ice_is_vsi_valid(hw, vsi_handle))
3870 			return -EINVAL;
3871 
3872 		list_itr->fltr_info.fwd_id.hw_vsi_id =
3873 					ice_get_hw_vsi_num(hw, vsi_handle);
3874 		if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
3875 			/* Don't remove the unicast address that belongs to
3876 			 * another VSI on the switch, since it is not being
3877 			 * shared...
3878 			 */
3879 			mutex_lock(rule_lock);
3880 			if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
3881 						       &list_itr->fltr_info)) {
3882 				mutex_unlock(rule_lock);
3883 				return -ENOENT;
3884 			}
3885 			mutex_unlock(rule_lock);
3886 		}
3887 		list_itr->status = ice_remove_rule_internal(hw,
3888 							    ICE_SW_LKUP_MAC,
3889 							    list_itr);
3890 		if (list_itr->status)
3891 			return list_itr->status;
3892 	}
3893 	return 0;
3894 }
3895 
3896 /**
3897  * ice_remove_vlan - Remove VLAN based filter rule
3898  * @hw: pointer to the hardware structure
3899  * @v_list: list of VLAN entries and forwarding information
3900  */
3901 int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
3902 {
3903 	struct ice_fltr_list_entry *v_list_itr, *tmp;
3904 
3905 	if (!v_list || !hw)
3906 		return -EINVAL;
3907 
3908 	list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
3909 		enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3910 
3911 		if (l_type != ICE_SW_LKUP_VLAN)
3912 			return -EINVAL;
3913 		v_list_itr->status = ice_remove_rule_internal(hw,
3914 							      ICE_SW_LKUP_VLAN,
3915 							      v_list_itr);
3916 		if (v_list_itr->status)
3917 			return v_list_itr->status;
3918 	}
3919 	return 0;
3920 }
3921 
3922 /**
3923  * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3924  * @fm_entry: filter entry to inspect
3925  * @vsi_handle: VSI handle to compare with filter info
3926  */
3927 static bool
3928 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3929 {
3930 	return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3931 		 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3932 		(fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3933 		 fm_entry->vsi_list_info &&
3934 		 (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
3935 }
3936 
3937 /**
3938  * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
3939  * @hw: pointer to the hardware structure
3940  * @vsi_handle: VSI handle to remove filters from
3941  * @vsi_list_head: pointer to the list to add entry to
3942  * @fi: pointer to fltr_info of filter entry to copy & add
3943  *
3944  * Helper function, used when creating a list of filters to remove from
3945  * a specific VSI. The entry added to vsi_list_head is a COPY of the
3946  * original filter entry, with the exception of fltr_info.fltr_act and
3947  * fltr_info.fwd_id fields. These are set such that later logic can
3948  * extract which VSI to remove the fltr from, and pass on that information.
3949  */
3950 static int
3951 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3952 			       struct list_head *vsi_list_head,
3953 			       struct ice_fltr_info *fi)
3954 {
3955 	struct ice_fltr_list_entry *tmp;
3956 
3957 	/* this memory is freed up in the caller function
3958 	 * once filters for this VSI are removed
3959 	 */
3960 	tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL);
3961 	if (!tmp)
3962 		return -ENOMEM;
3963 
3964 	tmp->fltr_info = *fi;
3965 
3966 	/* Overwrite these fields to indicate which VSI to remove filter from,
3967 	 * so find and remove logic can extract the information from the
3968 	 * list entries. Note that original entries will still have proper
3969 	 * values.
3970 	 */
3971 	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
3972 	tmp->fltr_info.vsi_handle = vsi_handle;
3973 	tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3974 
3975 	list_add(&tmp->list_entry, vsi_list_head);
3976 
3977 	return 0;
3978 }
3979 
3980 /**
3981  * ice_add_to_vsi_fltr_list - Add VSI filters to the list
3982  * @hw: pointer to the hardware structure
3983  * @vsi_handle: VSI handle to remove filters from
3984  * @lkup_list_head: pointer to the list that has certain lookup type filters
3985  * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
3986  *
3987  * Locates all filters in lkup_list_head that are used by the given VSI,
3988  * and adds COPIES of those entries to vsi_list_head (intended to be used
3989  * to remove the listed filters).
3990  * Note that this means all entries in vsi_list_head must be explicitly
3991  * deallocated by the caller when done with list.
3992  */
3993 static int
3994 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3995 			 struct list_head *lkup_list_head,
3996 			 struct list_head *vsi_list_head)
3997 {
3998 	struct ice_fltr_mgmt_list_entry *fm_entry;
3999 	int status = 0;
4000 
4001 	/* check to make sure VSI ID is valid and within boundary */
4002 	if (!ice_is_vsi_valid(hw, vsi_handle))
4003 		return -EINVAL;
4004 
4005 	list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
4006 		if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
4007 			continue;
4008 
4009 		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4010 							vsi_list_head,
4011 							&fm_entry->fltr_info);
4012 		if (status)
4013 			return status;
4014 	}
4015 	return status;
4016 }
4017 
4018 /**
4019  * ice_determine_promisc_mask
4020  * @fi: filter info to parse
4021  *
4022  * Helper function to determine which ICE_PROMISC_ mask corresponds
4023  * to given filter into.
4024  */
4025 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
4026 {
4027 	u16 vid = fi->l_data.mac_vlan.vlan_id;
4028 	u8 *macaddr = fi->l_data.mac.mac_addr;
4029 	bool is_tx_fltr = false;
4030 	u8 promisc_mask = 0;
4031 
4032 	if (fi->flag == ICE_FLTR_TX)
4033 		is_tx_fltr = true;
4034 
4035 	if (is_broadcast_ether_addr(macaddr))
4036 		promisc_mask |= is_tx_fltr ?
4037 			ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
4038 	else if (is_multicast_ether_addr(macaddr))
4039 		promisc_mask |= is_tx_fltr ?
4040 			ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
4041 	else if (is_unicast_ether_addr(macaddr))
4042 		promisc_mask |= is_tx_fltr ?
4043 			ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
4044 	if (vid)
4045 		promisc_mask |= is_tx_fltr ?
4046 			ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
4047 
4048 	return promisc_mask;
4049 }
4050 
4051 /**
4052  * ice_remove_promisc - Remove promisc based filter rules
4053  * @hw: pointer to the hardware structure
4054  * @recp_id: recipe ID for which the rule needs to removed
4055  * @v_list: list of promisc entries
4056  */
4057 static int
4058 ice_remove_promisc(struct ice_hw *hw, u8 recp_id, struct list_head *v_list)
4059 {
4060 	struct ice_fltr_list_entry *v_list_itr, *tmp;
4061 
4062 	list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
4063 		v_list_itr->status =
4064 			ice_remove_rule_internal(hw, recp_id, v_list_itr);
4065 		if (v_list_itr->status)
4066 			return v_list_itr->status;
4067 	}
4068 	return 0;
4069 }
4070 
4071 /**
4072  * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
4073  * @hw: pointer to the hardware structure
4074  * @vsi_handle: VSI handle to clear mode
4075  * @promisc_mask: mask of promiscuous config bits to clear
4076  * @vid: VLAN ID to clear VLAN promiscuous
4077  */
4078 int
4079 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4080 		      u16 vid)
4081 {
4082 	struct ice_switch_info *sw = hw->switch_info;
4083 	struct ice_fltr_list_entry *fm_entry, *tmp;
4084 	struct list_head remove_list_head;
4085 	struct ice_fltr_mgmt_list_entry *itr;
4086 	struct list_head *rule_head;
4087 	struct mutex *rule_lock;	/* Lock to protect filter rule list */
4088 	int status = 0;
4089 	u8 recipe_id;
4090 
4091 	if (!ice_is_vsi_valid(hw, vsi_handle))
4092 		return -EINVAL;
4093 
4094 	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
4095 		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4096 	else
4097 		recipe_id = ICE_SW_LKUP_PROMISC;
4098 
4099 	rule_head = &sw->recp_list[recipe_id].filt_rules;
4100 	rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
4101 
4102 	INIT_LIST_HEAD(&remove_list_head);
4103 
4104 	mutex_lock(rule_lock);
4105 	list_for_each_entry(itr, rule_head, list_entry) {
4106 		struct ice_fltr_info *fltr_info;
4107 		u8 fltr_promisc_mask = 0;
4108 
4109 		if (!ice_vsi_uses_fltr(itr, vsi_handle))
4110 			continue;
4111 		fltr_info = &itr->fltr_info;
4112 
4113 		if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4114 		    vid != fltr_info->l_data.mac_vlan.vlan_id)
4115 			continue;
4116 
4117 		fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4118 
4119 		/* Skip if filter is not completely specified by given mask */
4120 		if (fltr_promisc_mask & ~promisc_mask)
4121 			continue;
4122 
4123 		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4124 							&remove_list_head,
4125 							fltr_info);
4126 		if (status) {
4127 			mutex_unlock(rule_lock);
4128 			goto free_fltr_list;
4129 		}
4130 	}
4131 	mutex_unlock(rule_lock);
4132 
4133 	status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
4134 
4135 free_fltr_list:
4136 	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
4137 		list_del(&fm_entry->list_entry);
4138 		devm_kfree(ice_hw_to_dev(hw), fm_entry);
4139 	}
4140 
4141 	return status;
4142 }
4143 
4144 /**
4145  * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4146  * @hw: pointer to the hardware structure
4147  * @vsi_handle: VSI handle to configure
4148  * @promisc_mask: mask of promiscuous config bits
4149  * @vid: VLAN ID to set VLAN promiscuous
4150  */
4151 int
4152 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
4153 {
4154 	enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4155 	struct ice_fltr_list_entry f_list_entry;
4156 	struct ice_fltr_info new_fltr;
4157 	bool is_tx_fltr;
4158 	int status = 0;
4159 	u16 hw_vsi_id;
4160 	int pkt_type;
4161 	u8 recipe_id;
4162 
4163 	if (!ice_is_vsi_valid(hw, vsi_handle))
4164 		return -EINVAL;
4165 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4166 
4167 	memset(&new_fltr, 0, sizeof(new_fltr));
4168 
4169 	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4170 		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4171 		new_fltr.l_data.mac_vlan.vlan_id = vid;
4172 		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4173 	} else {
4174 		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4175 		recipe_id = ICE_SW_LKUP_PROMISC;
4176 	}
4177 
4178 	/* Separate filters must be set for each direction/packet type
4179 	 * combination, so we will loop over the mask value, store the
4180 	 * individual type, and clear it out in the input mask as it
4181 	 * is found.
4182 	 */
4183 	while (promisc_mask) {
4184 		u8 *mac_addr;
4185 
4186 		pkt_type = 0;
4187 		is_tx_fltr = false;
4188 
4189 		if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4190 			promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4191 			pkt_type = UCAST_FLTR;
4192 		} else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4193 			promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4194 			pkt_type = UCAST_FLTR;
4195 			is_tx_fltr = true;
4196 		} else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4197 			promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4198 			pkt_type = MCAST_FLTR;
4199 		} else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4200 			promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4201 			pkt_type = MCAST_FLTR;
4202 			is_tx_fltr = true;
4203 		} else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4204 			promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4205 			pkt_type = BCAST_FLTR;
4206 		} else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4207 			promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4208 			pkt_type = BCAST_FLTR;
4209 			is_tx_fltr = true;
4210 		}
4211 
4212 		/* Check for VLAN promiscuous flag */
4213 		if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4214 			promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4215 		} else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4216 			promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4217 			is_tx_fltr = true;
4218 		}
4219 
4220 		/* Set filter DA based on packet type */
4221 		mac_addr = new_fltr.l_data.mac.mac_addr;
4222 		if (pkt_type == BCAST_FLTR) {
4223 			eth_broadcast_addr(mac_addr);
4224 		} else if (pkt_type == MCAST_FLTR ||
4225 			   pkt_type == UCAST_FLTR) {
4226 			/* Use the dummy ether header DA */
4227 			ether_addr_copy(mac_addr, dummy_eth_header);
4228 			if (pkt_type == MCAST_FLTR)
4229 				mac_addr[0] |= 0x1;	/* Set multicast bit */
4230 		}
4231 
4232 		/* Need to reset this to zero for all iterations */
4233 		new_fltr.flag = 0;
4234 		if (is_tx_fltr) {
4235 			new_fltr.flag |= ICE_FLTR_TX;
4236 			new_fltr.src = hw_vsi_id;
4237 		} else {
4238 			new_fltr.flag |= ICE_FLTR_RX;
4239 			new_fltr.src = hw->port_info->lport;
4240 		}
4241 
4242 		new_fltr.fltr_act = ICE_FWD_TO_VSI;
4243 		new_fltr.vsi_handle = vsi_handle;
4244 		new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4245 		f_list_entry.fltr_info = new_fltr;
4246 
4247 		status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
4248 		if (status)
4249 			goto set_promisc_exit;
4250 	}
4251 
4252 set_promisc_exit:
4253 	return status;
4254 }
4255 
4256 /**
4257  * ice_set_vlan_vsi_promisc
4258  * @hw: pointer to the hardware structure
4259  * @vsi_handle: VSI handle to configure
4260  * @promisc_mask: mask of promiscuous config bits
4261  * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4262  *
4263  * Configure VSI with all associated VLANs to given promiscuous mode(s)
4264  */
4265 int
4266 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4267 			 bool rm_vlan_promisc)
4268 {
4269 	struct ice_switch_info *sw = hw->switch_info;
4270 	struct ice_fltr_list_entry *list_itr, *tmp;
4271 	struct list_head vsi_list_head;
4272 	struct list_head *vlan_head;
4273 	struct mutex *vlan_lock; /* Lock to protect filter rule list */
4274 	u16 vlan_id;
4275 	int status;
4276 
4277 	INIT_LIST_HEAD(&vsi_list_head);
4278 	vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4279 	vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
4280 	mutex_lock(vlan_lock);
4281 	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4282 					  &vsi_list_head);
4283 	mutex_unlock(vlan_lock);
4284 	if (status)
4285 		goto free_fltr_list;
4286 
4287 	list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
4288 		vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4289 		if (rm_vlan_promisc)
4290 			status = ice_clear_vsi_promisc(hw, vsi_handle,
4291 						       promisc_mask, vlan_id);
4292 		else
4293 			status = ice_set_vsi_promisc(hw, vsi_handle,
4294 						     promisc_mask, vlan_id);
4295 		if (status)
4296 			break;
4297 	}
4298 
4299 free_fltr_list:
4300 	list_for_each_entry_safe(list_itr, tmp, &vsi_list_head, list_entry) {
4301 		list_del(&list_itr->list_entry);
4302 		devm_kfree(ice_hw_to_dev(hw), list_itr);
4303 	}
4304 	return status;
4305 }
4306 
4307 /**
4308  * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4309  * @hw: pointer to the hardware structure
4310  * @vsi_handle: VSI handle to remove filters from
4311  * @lkup: switch rule filter lookup type
4312  */
4313 static void
4314 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4315 			 enum ice_sw_lkup_type lkup)
4316 {
4317 	struct ice_switch_info *sw = hw->switch_info;
4318 	struct ice_fltr_list_entry *fm_entry;
4319 	struct list_head remove_list_head;
4320 	struct list_head *rule_head;
4321 	struct ice_fltr_list_entry *tmp;
4322 	struct mutex *rule_lock;	/* Lock to protect filter rule list */
4323 	int status;
4324 
4325 	INIT_LIST_HEAD(&remove_list_head);
4326 	rule_lock = &sw->recp_list[lkup].filt_rule_lock;
4327 	rule_head = &sw->recp_list[lkup].filt_rules;
4328 	mutex_lock(rule_lock);
4329 	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4330 					  &remove_list_head);
4331 	mutex_unlock(rule_lock);
4332 	if (status)
4333 		goto free_fltr_list;
4334 
4335 	switch (lkup) {
4336 	case ICE_SW_LKUP_MAC:
4337 		ice_remove_mac(hw, &remove_list_head);
4338 		break;
4339 	case ICE_SW_LKUP_VLAN:
4340 		ice_remove_vlan(hw, &remove_list_head);
4341 		break;
4342 	case ICE_SW_LKUP_PROMISC:
4343 	case ICE_SW_LKUP_PROMISC_VLAN:
4344 		ice_remove_promisc(hw, lkup, &remove_list_head);
4345 		break;
4346 	case ICE_SW_LKUP_MAC_VLAN:
4347 	case ICE_SW_LKUP_ETHERTYPE:
4348 	case ICE_SW_LKUP_ETHERTYPE_MAC:
4349 	case ICE_SW_LKUP_DFLT:
4350 	case ICE_SW_LKUP_LAST:
4351 	default:
4352 		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
4353 		break;
4354 	}
4355 
4356 free_fltr_list:
4357 	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
4358 		list_del(&fm_entry->list_entry);
4359 		devm_kfree(ice_hw_to_dev(hw), fm_entry);
4360 	}
4361 }
4362 
4363 /**
4364  * ice_remove_vsi_fltr - Remove all filters for a VSI
4365  * @hw: pointer to the hardware structure
4366  * @vsi_handle: VSI handle to remove filters from
4367  */
4368 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4369 {
4370 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
4371 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
4372 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
4373 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
4374 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
4375 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
4376 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
4377 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
4378 }
4379 
4380 /**
4381  * ice_alloc_res_cntr - allocating resource counter
4382  * @hw: pointer to the hardware structure
4383  * @type: type of resource
4384  * @alloc_shared: if set it is shared else dedicated
4385  * @num_items: number of entries requested for FD resource type
4386  * @counter_id: counter index returned by AQ call
4387  */
4388 int
4389 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4390 		   u16 *counter_id)
4391 {
4392 	struct ice_aqc_alloc_free_res_elem *buf;
4393 	u16 buf_len;
4394 	int status;
4395 
4396 	/* Allocate resource */
4397 	buf_len = struct_size(buf, elem, 1);
4398 	buf = kzalloc(buf_len, GFP_KERNEL);
4399 	if (!buf)
4400 		return -ENOMEM;
4401 
4402 	buf->num_elems = cpu_to_le16(num_items);
4403 	buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
4404 				      ICE_AQC_RES_TYPE_M) | alloc_shared);
4405 
4406 	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4407 				       ice_aqc_opc_alloc_res, NULL);
4408 	if (status)
4409 		goto exit;
4410 
4411 	*counter_id = le16_to_cpu(buf->elem[0].e.sw_resp);
4412 
4413 exit:
4414 	kfree(buf);
4415 	return status;
4416 }
4417 
4418 /**
4419  * ice_free_res_cntr - free resource counter
4420  * @hw: pointer to the hardware structure
4421  * @type: type of resource
4422  * @alloc_shared: if set it is shared else dedicated
4423  * @num_items: number of entries to be freed for FD resource type
4424  * @counter_id: counter ID resource which needs to be freed
4425  */
4426 int
4427 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4428 		  u16 counter_id)
4429 {
4430 	struct ice_aqc_alloc_free_res_elem *buf;
4431 	u16 buf_len;
4432 	int status;
4433 
4434 	/* Free resource */
4435 	buf_len = struct_size(buf, elem, 1);
4436 	buf = kzalloc(buf_len, GFP_KERNEL);
4437 	if (!buf)
4438 		return -ENOMEM;
4439 
4440 	buf->num_elems = cpu_to_le16(num_items);
4441 	buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
4442 				      ICE_AQC_RES_TYPE_M) | alloc_shared);
4443 	buf->elem[0].e.sw_resp = cpu_to_le16(counter_id);
4444 
4445 	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4446 				       ice_aqc_opc_free_res, NULL);
4447 	if (status)
4448 		ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
4449 
4450 	kfree(buf);
4451 	return status;
4452 }
4453 
/* This is mapping table entry that maps every word within a given protocol
 * structure to the real byte offset as per the specification of that
 * protocol header.
 * for example dst address is 3 words in ethertype header and corresponding
 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
 * matching entry describing its field. This needs to be updated if new
 * structure is added to that union.
 *
 * NOTE: entries are indexed directly by enum ice_protocol_type (see
 * ice_fill_valid_words), so row order must match the enum's value order.
 */
static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_MAC_IL,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_ETYPE_OL,		{ 0 } },
	{ ICE_ETYPE_IL,		{ 0 } },
	{ ICE_VLAN_OFOS,	{ 2, 0 } },
	{ ICE_IPV4_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV4_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV6_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_IPV6_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_TCP_IL,		{ 0, 2 } },
	{ ICE_UDP_OF,		{ 0, 2 } },
	{ ICE_UDP_ILOS,		{ 0, 2 } },
	{ ICE_VXLAN,		{ 8, 10, 12, 14 } },
	{ ICE_GENEVE,		{ 8, 10, 12, 14 } },
	{ ICE_NVGRE,		{ 0, 2, 4, 6 } },
	{ ICE_GTP,		{ 8, 10, 12, 14, 16, 18, 20, 22 } },
	{ ICE_GTP_NO_PAY,	{ 8, 10, 12, 14 } },
	{ ICE_VLAN_EX,          { 2, 0 } },
	{ ICE_VLAN_IN,          { 2, 0 } },
};
4486 
/* Mapping of software protocol types to hardware protocol IDs, searched by
 * ice_prot_type_to_id(). Not const: the ICE_VLAN_OFOS entry may be rewritten
 * to ICE_VLAN_OF_HW at runtime by ice_change_proto_id_to_dvm().
 */
static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		ICE_MAC_OFOS_HW },
	{ ICE_MAC_IL,		ICE_MAC_IL_HW },
	{ ICE_ETYPE_OL,		ICE_ETYPE_OL_HW },
	{ ICE_ETYPE_IL,		ICE_ETYPE_IL_HW },
	{ ICE_VLAN_OFOS,	ICE_VLAN_OL_HW },
	{ ICE_IPV4_OFOS,	ICE_IPV4_OFOS_HW },
	{ ICE_IPV4_IL,		ICE_IPV4_IL_HW },
	{ ICE_IPV6_OFOS,	ICE_IPV6_OFOS_HW },
	{ ICE_IPV6_IL,		ICE_IPV6_IL_HW },
	{ ICE_TCP_IL,		ICE_TCP_IL_HW },
	{ ICE_UDP_OF,		ICE_UDP_OF_HW },
	{ ICE_UDP_ILOS,		ICE_UDP_ILOS_HW },
	{ ICE_VXLAN,		ICE_UDP_OF_HW },
	{ ICE_GENEVE,		ICE_UDP_OF_HW },
	{ ICE_NVGRE,		ICE_GRE_OF_HW },
	{ ICE_GTP,		ICE_UDP_OF_HW },
	{ ICE_GTP_NO_PAY,	ICE_UDP_ILOS_HW },
	{ ICE_VLAN_EX,          ICE_VLAN_OF_HW },
	{ ICE_VLAN_IN,          ICE_VLAN_OL_HW },
};
4508 
4509 /**
4510  * ice_find_recp - find a recipe
4511  * @hw: pointer to the hardware structure
4512  * @lkup_exts: extension sequence to match
4513  * @tun_type: type of recipe tunnel
4514  *
4515  * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
4516  */
4517 static u16
4518 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
4519 	      enum ice_sw_tunnel_type tun_type)
4520 {
4521 	bool refresh_required = true;
4522 	struct ice_sw_recipe *recp;
4523 	u8 i;
4524 
4525 	/* Walk through existing recipes to find a match */
4526 	recp = hw->switch_info->recp_list;
4527 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4528 		/* If recipe was not created for this ID, in SW bookkeeping,
4529 		 * check if FW has an entry for this recipe. If the FW has an
4530 		 * entry update it in our SW bookkeeping and continue with the
4531 		 * matching.
4532 		 */
4533 		if (!recp[i].recp_created)
4534 			if (ice_get_recp_frm_fw(hw,
4535 						hw->switch_info->recp_list, i,
4536 						&refresh_required))
4537 				continue;
4538 
4539 		/* Skip inverse action recipes */
4540 		if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
4541 		    ICE_AQ_RECIPE_ACT_INV_ACT)
4542 			continue;
4543 
4544 		/* if number of words we are looking for match */
4545 		if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
4546 			struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
4547 			struct ice_fv_word *be = lkup_exts->fv_words;
4548 			u16 *cr = recp[i].lkup_exts.field_mask;
4549 			u16 *de = lkup_exts->field_mask;
4550 			bool found = true;
4551 			u8 pe, qr;
4552 
4553 			/* ar, cr, and qr are related to the recipe words, while
4554 			 * be, de, and pe are related to the lookup words
4555 			 */
4556 			for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
4557 				for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
4558 				     qr++) {
4559 					if (ar[qr].off == be[pe].off &&
4560 					    ar[qr].prot_id == be[pe].prot_id &&
4561 					    cr[qr] == de[pe])
4562 						/* Found the "pe"th word in the
4563 						 * given recipe
4564 						 */
4565 						break;
4566 				}
4567 				/* After walking through all the words in the
4568 				 * "i"th recipe if "p"th word was not found then
4569 				 * this recipe is not what we are looking for.
4570 				 * So break out from this loop and try the next
4571 				 * recipe
4572 				 */
4573 				if (qr >= recp[i].lkup_exts.n_val_words) {
4574 					found = false;
4575 					break;
4576 				}
4577 			}
4578 			/* If for "i"th recipe the found was never set to false
4579 			 * then it means we found our match
4580 			 * Also tun type of recipe needs to be checked
4581 			 */
4582 			if (found && recp[i].tun_type == tun_type)
4583 				return i; /* Return the recipe ID */
4584 		}
4585 	}
4586 	return ICE_MAX_NUM_RECIPES;
4587 }
4588 
4589 /**
4590  * ice_change_proto_id_to_dvm - change proto id in prot_id_tbl
4591  *
4592  * As protocol id for outer vlan is different in dvm and svm, if dvm is
4593  * supported protocol array record for outer vlan has to be modified to
4594  * reflect the value proper for DVM.
4595  */
4596 void ice_change_proto_id_to_dvm(void)
4597 {
4598 	u8 i;
4599 
4600 	for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
4601 		if (ice_prot_id_tbl[i].type == ICE_VLAN_OFOS &&
4602 		    ice_prot_id_tbl[i].protocol_id != ICE_VLAN_OF_HW)
4603 			ice_prot_id_tbl[i].protocol_id = ICE_VLAN_OF_HW;
4604 }
4605 
4606 /**
4607  * ice_prot_type_to_id - get protocol ID from protocol type
4608  * @type: protocol type
4609  * @id: pointer to variable that will receive the ID
4610  *
4611  * Returns true if found, false otherwise
4612  */
4613 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
4614 {
4615 	u8 i;
4616 
4617 	for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
4618 		if (ice_prot_id_tbl[i].type == type) {
4619 			*id = ice_prot_id_tbl[i].protocol_id;
4620 			return true;
4621 		}
4622 	return false;
4623 }
4624 
4625 /**
4626  * ice_fill_valid_words - count valid words
4627  * @rule: advanced rule with lookup information
4628  * @lkup_exts: byte offset extractions of the words that are valid
4629  *
4630  * calculate valid words in a lookup rule using mask value
4631  */
4632 static u8
4633 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
4634 		     struct ice_prot_lkup_ext *lkup_exts)
4635 {
4636 	u8 j, word, prot_id, ret_val;
4637 
4638 	if (!ice_prot_type_to_id(rule->type, &prot_id))
4639 		return 0;
4640 
4641 	word = lkup_exts->n_val_words;
4642 
4643 	for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
4644 		if (((u16 *)&rule->m_u)[j] &&
4645 		    rule->type < ARRAY_SIZE(ice_prot_ext)) {
4646 			/* No more space to accommodate */
4647 			if (word >= ICE_MAX_CHAIN_WORDS)
4648 				return 0;
4649 			lkup_exts->fv_words[word].off =
4650 				ice_prot_ext[rule->type].offs[j];
4651 			lkup_exts->fv_words[word].prot_id =
4652 				ice_prot_id_tbl[rule->type].protocol_id;
4653 			lkup_exts->field_mask[word] =
4654 				be16_to_cpu(((__force __be16 *)&rule->m_u)[j]);
4655 			word++;
4656 		}
4657 
4658 	ret_val = word - lkup_exts->n_val_words;
4659 	lkup_exts->n_val_words = word;
4660 
4661 	return ret_val;
4662 }
4663 
4664 /**
4665  * ice_create_first_fit_recp_def - Create a recipe grouping
4666  * @hw: pointer to the hardware structure
4667  * @lkup_exts: an array of protocol header extractions
4668  * @rg_list: pointer to a list that stores new recipe groups
4669  * @recp_cnt: pointer to a variable that stores returned number of recipe groups
4670  *
4671  * Using first fit algorithm, take all the words that are still not done
4672  * and start grouping them in 4-word groups. Each group makes up one
4673  * recipe.
4674  */
4675 static int
4676 ice_create_first_fit_recp_def(struct ice_hw *hw,
4677 			      struct ice_prot_lkup_ext *lkup_exts,
4678 			      struct list_head *rg_list,
4679 			      u8 *recp_cnt)
4680 {
4681 	struct ice_pref_recipe_group *grp = NULL;
4682 	u8 j;
4683 
4684 	*recp_cnt = 0;
4685 
4686 	/* Walk through every word in the rule to check if it is not done. If so
4687 	 * then this word needs to be part of a new recipe.
4688 	 */
4689 	for (j = 0; j < lkup_exts->n_val_words; j++)
4690 		if (!test_bit(j, lkup_exts->done)) {
4691 			if (!grp ||
4692 			    grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
4693 				struct ice_recp_grp_entry *entry;
4694 
4695 				entry = devm_kzalloc(ice_hw_to_dev(hw),
4696 						     sizeof(*entry),
4697 						     GFP_KERNEL);
4698 				if (!entry)
4699 					return -ENOMEM;
4700 				list_add(&entry->l_entry, rg_list);
4701 				grp = &entry->r_group;
4702 				(*recp_cnt)++;
4703 			}
4704 
4705 			grp->pairs[grp->n_val_pairs].prot_id =
4706 				lkup_exts->fv_words[j].prot_id;
4707 			grp->pairs[grp->n_val_pairs].off =
4708 				lkup_exts->fv_words[j].off;
4709 			grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
4710 			grp->n_val_pairs++;
4711 		}
4712 
4713 	return 0;
4714 }
4715 
4716 /**
4717  * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
4718  * @hw: pointer to the hardware structure
4719  * @fv_list: field vector with the extraction sequence information
4720  * @rg_list: recipe groupings with protocol-offset pairs
4721  *
4722  * Helper function to fill in the field vector indices for protocol-offset
4723  * pairs. These indexes are then ultimately programmed into a recipe.
4724  */
4725 static int
4726 ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list,
4727 		       struct list_head *rg_list)
4728 {
4729 	struct ice_sw_fv_list_entry *fv;
4730 	struct ice_recp_grp_entry *rg;
4731 	struct ice_fv_word *fv_ext;
4732 
4733 	if (list_empty(fv_list))
4734 		return 0;
4735 
4736 	fv = list_first_entry(fv_list, struct ice_sw_fv_list_entry,
4737 			      list_entry);
4738 	fv_ext = fv->fv_ptr->ew;
4739 
4740 	list_for_each_entry(rg, rg_list, l_entry) {
4741 		u8 i;
4742 
4743 		for (i = 0; i < rg->r_group.n_val_pairs; i++) {
4744 			struct ice_fv_word *pr;
4745 			bool found = false;
4746 			u16 mask;
4747 			u8 j;
4748 
4749 			pr = &rg->r_group.pairs[i];
4750 			mask = rg->r_group.mask[i];
4751 
4752 			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
4753 				if (fv_ext[j].prot_id == pr->prot_id &&
4754 				    fv_ext[j].off == pr->off) {
4755 					found = true;
4756 
4757 					/* Store index of field vector */
4758 					rg->fv_idx[i] = j;
4759 					rg->fv_mask[i] = mask;
4760 					break;
4761 				}
4762 
4763 			/* Protocol/offset could not be found, caller gave an
4764 			 * invalid pair
4765 			 */
4766 			if (!found)
4767 				return -EINVAL;
4768 		}
4769 	}
4770 
4771 	return 0;
4772 }
4773 
4774 /**
4775  * ice_find_free_recp_res_idx - find free result indexes for recipe
4776  * @hw: pointer to hardware structure
4777  * @profiles: bitmap of profiles that will be associated with the new recipe
4778  * @free_idx: pointer to variable to receive the free index bitmap
4779  *
4780  * The algorithm used here is:
4781  *	1. When creating a new recipe, create a set P which contains all
4782  *	   Profiles that will be associated with our new recipe
4783  *
4784  *	2. For each Profile p in set P:
4785  *	    a. Add all recipes associated with Profile p into set R
4786  *	    b. Optional : PossibleIndexes &= profile[p].possibleIndexes
4787  *		[initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
4788  *		i. Or just assume they all have the same possible indexes:
4789  *			44, 45, 46, 47
4790  *			i.e., PossibleIndexes = 0x0000F00000000000
4791  *
4792  *	3. For each Recipe r in set R:
4793  *	    a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
4794  *	    b. FreeIndexes = UsedIndexes ^ PossibleIndexes
4795  *
4796  *	FreeIndexes will contain the bits indicating the indexes free for use,
4797  *      then the code needs to update the recipe[r].used_result_idx_bits to
4798  *      indicate which indexes were selected for use by this recipe.
4799  */
4800 static u16
4801 ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
4802 			   unsigned long *free_idx)
4803 {
4804 	DECLARE_BITMAP(possible_idx, ICE_MAX_FV_WORDS);
4805 	DECLARE_BITMAP(recipes, ICE_MAX_NUM_RECIPES);
4806 	DECLARE_BITMAP(used_idx, ICE_MAX_FV_WORDS);
4807 	u16 bit;
4808 
4809 	bitmap_zero(recipes, ICE_MAX_NUM_RECIPES);
4810 	bitmap_zero(used_idx, ICE_MAX_FV_WORDS);
4811 
4812 	bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
4813 
4814 	/* For each profile we are going to associate the recipe with, add the
4815 	 * recipes that are associated with that profile. This will give us
4816 	 * the set of recipes that our recipe may collide with. Also, determine
4817 	 * what possible result indexes are usable given this set of profiles.
4818 	 */
4819 	for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
4820 		bitmap_or(recipes, recipes, profile_to_recipe[bit],
4821 			  ICE_MAX_NUM_RECIPES);
4822 		bitmap_and(possible_idx, possible_idx,
4823 			   hw->switch_info->prof_res_bm[bit],
4824 			   ICE_MAX_FV_WORDS);
4825 	}
4826 
4827 	/* For each recipe that our new recipe may collide with, determine
4828 	 * which indexes have been used.
4829 	 */
4830 	for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
4831 		bitmap_or(used_idx, used_idx,
4832 			  hw->switch_info->recp_list[bit].res_idxs,
4833 			  ICE_MAX_FV_WORDS);
4834 
4835 	bitmap_xor(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
4836 
4837 	/* return number of free indexes */
4838 	return (u16)bitmap_weight(free_idx, ICE_MAX_FV_WORDS);
4839 }
4840 
4841 /**
4842  * ice_add_sw_recipe - function to call AQ calls to create switch recipe
4843  * @hw: pointer to hardware structure
4844  * @rm: recipe management list entry
4845  * @profiles: bitmap of profiles that will be associated.
4846  */
4847 static int
4848 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
4849 		  unsigned long *profiles)
4850 {
4851 	DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS);
4852 	struct ice_aqc_recipe_data_elem *tmp;
4853 	struct ice_aqc_recipe_data_elem *buf;
4854 	struct ice_recp_grp_entry *entry;
4855 	u16 free_res_idx;
4856 	u16 recipe_count;
4857 	u8 chain_idx;
4858 	u8 recps = 0;
4859 	int status;
4860 
4861 	/* When more than one recipe are required, another recipe is needed to
4862 	 * chain them together. Matching a tunnel metadata ID takes up one of
4863 	 * the match fields in the chaining recipe reducing the number of
4864 	 * chained recipes by one.
4865 	 */
4866 	 /* check number of free result indices */
4867 	bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS);
4868 	free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
4869 
4870 	ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
4871 		  free_res_idx, rm->n_grp_count);
4872 
4873 	if (rm->n_grp_count > 1) {
4874 		if (rm->n_grp_count > free_res_idx)
4875 			return -ENOSPC;
4876 
4877 		rm->n_grp_count++;
4878 	}
4879 
4880 	if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
4881 		return -ENOSPC;
4882 
4883 	tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
4884 	if (!tmp)
4885 		return -ENOMEM;
4886 
4887 	buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf),
4888 			   GFP_KERNEL);
4889 	if (!buf) {
4890 		status = -ENOMEM;
4891 		goto err_mem;
4892 	}
4893 
4894 	bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
4895 	recipe_count = ICE_MAX_NUM_RECIPES;
4896 	status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
4897 				   NULL);
4898 	if (status || recipe_count == 0)
4899 		goto err_unroll;
4900 
4901 	/* Allocate the recipe resources, and configure them according to the
4902 	 * match fields from protocol headers and extracted field vectors.
4903 	 */
4904 	chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
4905 	list_for_each_entry(entry, &rm->rg_list, l_entry) {
4906 		u8 i;
4907 
4908 		status = ice_alloc_recipe(hw, &entry->rid);
4909 		if (status)
4910 			goto err_unroll;
4911 
4912 		/* Clear the result index of the located recipe, as this will be
4913 		 * updated, if needed, later in the recipe creation process.
4914 		 */
4915 		tmp[0].content.result_indx = 0;
4916 
4917 		buf[recps] = tmp[0];
4918 		buf[recps].recipe_indx = (u8)entry->rid;
4919 		/* if the recipe is a non-root recipe RID should be programmed
4920 		 * as 0 for the rules to be applied correctly.
4921 		 */
4922 		buf[recps].content.rid = 0;
4923 		memset(&buf[recps].content.lkup_indx, 0,
4924 		       sizeof(buf[recps].content.lkup_indx));
4925 
4926 		/* All recipes use look-up index 0 to match switch ID. */
4927 		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
4928 		buf[recps].content.mask[0] =
4929 			cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
4930 		/* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
4931 		 * to be 0
4932 		 */
4933 		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
4934 			buf[recps].content.lkup_indx[i] = 0x80;
4935 			buf[recps].content.mask[i] = 0;
4936 		}
4937 
4938 		for (i = 0; i < entry->r_group.n_val_pairs; i++) {
4939 			buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
4940 			buf[recps].content.mask[i + 1] =
4941 				cpu_to_le16(entry->fv_mask[i]);
4942 		}
4943 
4944 		if (rm->n_grp_count > 1) {
4945 			/* Checks to see if there really is a valid result index
4946 			 * that can be used.
4947 			 */
4948 			if (chain_idx >= ICE_MAX_FV_WORDS) {
4949 				ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
4950 				status = -ENOSPC;
4951 				goto err_unroll;
4952 			}
4953 
4954 			entry->chain_idx = chain_idx;
4955 			buf[recps].content.result_indx =
4956 				ICE_AQ_RECIPE_RESULT_EN |
4957 				((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
4958 				 ICE_AQ_RECIPE_RESULT_DATA_M);
4959 			clear_bit(chain_idx, result_idx_bm);
4960 			chain_idx = find_first_bit(result_idx_bm,
4961 						   ICE_MAX_FV_WORDS);
4962 		}
4963 
4964 		/* fill recipe dependencies */
4965 		bitmap_zero((unsigned long *)buf[recps].recipe_bitmap,
4966 			    ICE_MAX_NUM_RECIPES);
4967 		set_bit(buf[recps].recipe_indx,
4968 			(unsigned long *)buf[recps].recipe_bitmap);
4969 		buf[recps].content.act_ctrl_fwd_priority = rm->priority;
4970 		recps++;
4971 	}
4972 
4973 	if (rm->n_grp_count == 1) {
4974 		rm->root_rid = buf[0].recipe_indx;
4975 		set_bit(buf[0].recipe_indx, rm->r_bitmap);
4976 		buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
4977 		if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
4978 			memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
4979 			       sizeof(buf[0].recipe_bitmap));
4980 		} else {
4981 			status = -EINVAL;
4982 			goto err_unroll;
4983 		}
4984 		/* Applicable only for ROOT_RECIPE, set the fwd_priority for
4985 		 * the recipe which is getting created if specified
4986 		 * by user. Usually any advanced switch filter, which results
4987 		 * into new extraction sequence, ended up creating a new recipe
4988 		 * of type ROOT and usually recipes are associated with profiles
4989 		 * Switch rule referreing newly created recipe, needs to have
4990 		 * either/or 'fwd' or 'join' priority, otherwise switch rule
4991 		 * evaluation will not happen correctly. In other words, if
4992 		 * switch rule to be evaluated on priority basis, then recipe
4993 		 * needs to have priority, otherwise it will be evaluated last.
4994 		 */
4995 		buf[0].content.act_ctrl_fwd_priority = rm->priority;
4996 	} else {
4997 		struct ice_recp_grp_entry *last_chain_entry;
4998 		u16 rid, i;
4999 
5000 		/* Allocate the last recipe that will chain the outcomes of the
5001 		 * other recipes together
5002 		 */
5003 		status = ice_alloc_recipe(hw, &rid);
5004 		if (status)
5005 			goto err_unroll;
5006 
5007 		buf[recps].recipe_indx = (u8)rid;
5008 		buf[recps].content.rid = (u8)rid;
5009 		buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
5010 		/* the new entry created should also be part of rg_list to
5011 		 * make sure we have complete recipe
5012 		 */
5013 		last_chain_entry = devm_kzalloc(ice_hw_to_dev(hw),
5014 						sizeof(*last_chain_entry),
5015 						GFP_KERNEL);
5016 		if (!last_chain_entry) {
5017 			status = -ENOMEM;
5018 			goto err_unroll;
5019 		}
5020 		last_chain_entry->rid = rid;
5021 		memset(&buf[recps].content.lkup_indx, 0,
5022 		       sizeof(buf[recps].content.lkup_indx));
5023 		/* All recipes use look-up index 0 to match switch ID. */
5024 		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5025 		buf[recps].content.mask[0] =
5026 			cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
5027 		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5028 			buf[recps].content.lkup_indx[i] =
5029 				ICE_AQ_RECIPE_LKUP_IGNORE;
5030 			buf[recps].content.mask[i] = 0;
5031 		}
5032 
5033 		i = 1;
5034 		/* update r_bitmap with the recp that is used for chaining */
5035 		set_bit(rid, rm->r_bitmap);
5036 		/* this is the recipe that chains all the other recipes so it
5037 		 * should not have a chaining ID to indicate the same
5038 		 */
5039 		last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
5040 		list_for_each_entry(entry, &rm->rg_list, l_entry) {
5041 			last_chain_entry->fv_idx[i] = entry->chain_idx;
5042 			buf[recps].content.lkup_indx[i] = entry->chain_idx;
5043 			buf[recps].content.mask[i++] = cpu_to_le16(0xFFFF);
5044 			set_bit(entry->rid, rm->r_bitmap);
5045 		}
5046 		list_add(&last_chain_entry->l_entry, &rm->rg_list);
5047 		if (sizeof(buf[recps].recipe_bitmap) >=
5048 		    sizeof(rm->r_bitmap)) {
5049 			memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
5050 			       sizeof(buf[recps].recipe_bitmap));
5051 		} else {
5052 			status = -EINVAL;
5053 			goto err_unroll;
5054 		}
5055 		buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5056 
5057 		recps++;
5058 		rm->root_rid = (u8)rid;
5059 	}
5060 	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5061 	if (status)
5062 		goto err_unroll;
5063 
5064 	status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
5065 	ice_release_change_lock(hw);
5066 	if (status)
5067 		goto err_unroll;
5068 
5069 	/* Every recipe that just got created add it to the recipe
5070 	 * book keeping list
5071 	 */
5072 	list_for_each_entry(entry, &rm->rg_list, l_entry) {
5073 		struct ice_switch_info *sw = hw->switch_info;
5074 		bool is_root, idx_found = false;
5075 		struct ice_sw_recipe *recp;
5076 		u16 idx, buf_idx = 0;
5077 
5078 		/* find buffer index for copying some data */
5079 		for (idx = 0; idx < rm->n_grp_count; idx++)
5080 			if (buf[idx].recipe_indx == entry->rid) {
5081 				buf_idx = idx;
5082 				idx_found = true;
5083 			}
5084 
5085 		if (!idx_found) {
5086 			status = -EIO;
5087 			goto err_unroll;
5088 		}
5089 
5090 		recp = &sw->recp_list[entry->rid];
5091 		is_root = (rm->root_rid == entry->rid);
5092 		recp->is_root = is_root;
5093 
5094 		recp->root_rid = entry->rid;
5095 		recp->big_recp = (is_root && rm->n_grp_count > 1);
5096 
5097 		memcpy(&recp->ext_words, entry->r_group.pairs,
5098 		       entry->r_group.n_val_pairs * sizeof(struct ice_fv_word));
5099 
5100 		memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
5101 		       sizeof(recp->r_bitmap));
5102 
5103 		/* Copy non-result fv index values and masks to recipe. This
5104 		 * call will also update the result recipe bitmask.
5105 		 */
5106 		ice_collect_result_idx(&buf[buf_idx], recp);
5107 
5108 		/* for non-root recipes, also copy to the root, this allows
5109 		 * easier matching of a complete chained recipe
5110 		 */
5111 		if (!is_root)
5112 			ice_collect_result_idx(&buf[buf_idx],
5113 					       &sw->recp_list[rm->root_rid]);
5114 
5115 		recp->n_ext_words = entry->r_group.n_val_pairs;
5116 		recp->chain_idx = entry->chain_idx;
5117 		recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
5118 		recp->n_grp_count = rm->n_grp_count;
5119 		recp->tun_type = rm->tun_type;
5120 		recp->recp_created = true;
5121 	}
5122 	rm->root_buf = buf;
5123 	kfree(tmp);
5124 	return status;
5125 
5126 err_unroll:
5127 err_mem:
5128 	kfree(tmp);
5129 	devm_kfree(ice_hw_to_dev(hw), buf);
5130 	return status;
5131 }
5132 
5133 /**
5134  * ice_create_recipe_group - creates recipe group
5135  * @hw: pointer to hardware structure
5136  * @rm: recipe management list entry
5137  * @lkup_exts: lookup elements
5138  */
5139 static int
5140 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5141 			struct ice_prot_lkup_ext *lkup_exts)
5142 {
5143 	u8 recp_count = 0;
5144 	int status;
5145 
5146 	rm->n_grp_count = 0;
5147 
5148 	/* Create recipes for words that are marked not done by packing them
5149 	 * as best fit.
5150 	 */
5151 	status = ice_create_first_fit_recp_def(hw, lkup_exts,
5152 					       &rm->rg_list, &recp_count);
5153 	if (!status) {
5154 		rm->n_grp_count += recp_count;
5155 		rm->n_ext_words = lkup_exts->n_val_words;
5156 		memcpy(&rm->ext_words, lkup_exts->fv_words,
5157 		       sizeof(rm->ext_words));
5158 		memcpy(rm->word_masks, lkup_exts->field_mask,
5159 		       sizeof(rm->word_masks));
5160 	}
5161 
5162 	return status;
5163 }
5164 
5165 /**
5166  * ice_tun_type_match_word - determine if tun type needs a match mask
5167  * @tun_type: tunnel type
5168  * @mask: mask to be used for the tunnel
5169  */
5170 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
5171 {
5172 	switch (tun_type) {
5173 	case ICE_SW_TUN_GENEVE:
5174 	case ICE_SW_TUN_VXLAN:
5175 	case ICE_SW_TUN_NVGRE:
5176 	case ICE_SW_TUN_GTPU:
5177 	case ICE_SW_TUN_GTPC:
5178 		*mask = ICE_TUN_FLAG_MASK;
5179 		return true;
5180 
5181 	default:
5182 		*mask = 0;
5183 		return false;
5184 	}
5185 }
5186 
5187 /**
5188  * ice_add_special_words - Add words that are not protocols, such as metadata
5189  * @rinfo: other information regarding the rule e.g. priority and action info
5190  * @lkup_exts: lookup word structure
5191  * @dvm_ena: is double VLAN mode enabled
5192  */
5193 static int
5194 ice_add_special_words(struct ice_adv_rule_info *rinfo,
5195 		      struct ice_prot_lkup_ext *lkup_exts, bool dvm_ena)
5196 {
5197 	u16 mask;
5198 
5199 	/* If this is a tunneled packet, then add recipe index to match the
5200 	 * tunnel bit in the packet metadata flags.
5201 	 */
5202 	if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
5203 		if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
5204 			u8 word = lkup_exts->n_val_words++;
5205 
5206 			lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
5207 			lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
5208 			lkup_exts->field_mask[word] = mask;
5209 		} else {
5210 			return -ENOSPC;
5211 		}
5212 	}
5213 
5214 	if (rinfo->vlan_type != 0 && dvm_ena) {
5215 		if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
5216 			u8 word = lkup_exts->n_val_words++;
5217 
5218 			lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
5219 			lkup_exts->fv_words[word].off = ICE_VLAN_FLAG_MDID_OFF;
5220 			lkup_exts->field_mask[word] =
5221 					ICE_PKT_FLAGS_0_TO_15_VLAN_FLAGS_MASK;
5222 		} else {
5223 			return -ENOSPC;
5224 		}
5225 	}
5226 
5227 	return 0;
5228 }
5229 
5230 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
5231  * @hw: pointer to hardware structure
5232  * @rinfo: other information regarding the rule e.g. priority and action info
5233  * @bm: pointer to memory for returning the bitmap of field vectors
5234  */
5235 static void
5236 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
5237 			 unsigned long *bm)
5238 {
5239 	enum ice_prof_type prof_type;
5240 
5241 	bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
5242 
5243 	switch (rinfo->tun_type) {
5244 	case ICE_NON_TUN:
5245 		prof_type = ICE_PROF_NON_TUN;
5246 		break;
5247 	case ICE_ALL_TUNNELS:
5248 		prof_type = ICE_PROF_TUN_ALL;
5249 		break;
5250 	case ICE_SW_TUN_GENEVE:
5251 	case ICE_SW_TUN_VXLAN:
5252 		prof_type = ICE_PROF_TUN_UDP;
5253 		break;
5254 	case ICE_SW_TUN_NVGRE:
5255 		prof_type = ICE_PROF_TUN_GRE;
5256 		break;
5257 	case ICE_SW_TUN_GTPU:
5258 		prof_type = ICE_PROF_TUN_GTPU;
5259 		break;
5260 	case ICE_SW_TUN_GTPC:
5261 		prof_type = ICE_PROF_TUN_GTPC;
5262 		break;
5263 	case ICE_SW_TUN_AND_NON_TUN:
5264 	default:
5265 		prof_type = ICE_PROF_ALL;
5266 		break;
5267 	}
5268 
5269 	ice_get_sw_fv_bitmap(hw, prof_type, bm);
5270 }
5271 
5272 /**
5273  * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
5274  * @hw: pointer to hardware structure
5275  * @lkups: lookup elements or match criteria for the advanced recipe, one
5276  *  structure per protocol header
5277  * @lkups_cnt: number of protocols
5278  * @rinfo: other information regarding the rule e.g. priority and action info
5279  * @rid: return the recipe ID of the recipe created
5280  */
5281 static int
5282 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5283 		   u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
5284 {
5285 	DECLARE_BITMAP(fv_bitmap, ICE_MAX_NUM_PROFILES);
5286 	DECLARE_BITMAP(profiles, ICE_MAX_NUM_PROFILES);
5287 	struct ice_prot_lkup_ext *lkup_exts;
5288 	struct ice_recp_grp_entry *r_entry;
5289 	struct ice_sw_fv_list_entry *fvit;
5290 	struct ice_recp_grp_entry *r_tmp;
5291 	struct ice_sw_fv_list_entry *tmp;
5292 	struct ice_sw_recipe *rm;
5293 	int status = 0;
5294 	u8 i;
5295 
5296 	if (!lkups_cnt)
5297 		return -EINVAL;
5298 
5299 	lkup_exts = kzalloc(sizeof(*lkup_exts), GFP_KERNEL);
5300 	if (!lkup_exts)
5301 		return -ENOMEM;
5302 
5303 	/* Determine the number of words to be matched and if it exceeds a
5304 	 * recipe's restrictions
5305 	 */
5306 	for (i = 0; i < lkups_cnt; i++) {
5307 		u16 count;
5308 
5309 		if (lkups[i].type >= ICE_PROTOCOL_LAST) {
5310 			status = -EIO;
5311 			goto err_free_lkup_exts;
5312 		}
5313 
5314 		count = ice_fill_valid_words(&lkups[i], lkup_exts);
5315 		if (!count) {
5316 			status = -EIO;
5317 			goto err_free_lkup_exts;
5318 		}
5319 	}
5320 
5321 	rm = kzalloc(sizeof(*rm), GFP_KERNEL);
5322 	if (!rm) {
5323 		status = -ENOMEM;
5324 		goto err_free_lkup_exts;
5325 	}
5326 
5327 	/* Get field vectors that contain fields extracted from all the protocol
5328 	 * headers being programmed.
5329 	 */
5330 	INIT_LIST_HEAD(&rm->fv_list);
5331 	INIT_LIST_HEAD(&rm->rg_list);
5332 
5333 	/* Get bitmap of field vectors (profiles) that are compatible with the
5334 	 * rule request; only these will be searched in the subsequent call to
5335 	 * ice_get_sw_fv_list.
5336 	 */
5337 	ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
5338 
5339 	status = ice_get_sw_fv_list(hw, lkup_exts, fv_bitmap, &rm->fv_list);
5340 	if (status)
5341 		goto err_unroll;
5342 
5343 	/* Create any special protocol/offset pairs, such as looking at tunnel
5344 	 * bits by extracting metadata
5345 	 */
5346 	status = ice_add_special_words(rinfo, lkup_exts, ice_is_dvm_ena(hw));
5347 	if (status)
5348 		goto err_free_lkup_exts;
5349 
5350 	/* Group match words into recipes using preferred recipe grouping
5351 	 * criteria.
5352 	 */
5353 	status = ice_create_recipe_group(hw, rm, lkup_exts);
5354 	if (status)
5355 		goto err_unroll;
5356 
5357 	/* set the recipe priority if specified */
5358 	rm->priority = (u8)rinfo->priority;
5359 
5360 	/* Find offsets from the field vector. Pick the first one for all the
5361 	 * recipes.
5362 	 */
5363 	status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
5364 	if (status)
5365 		goto err_unroll;
5366 
5367 	/* get bitmap of all profiles the recipe will be associated with */
5368 	bitmap_zero(profiles, ICE_MAX_NUM_PROFILES);
5369 	list_for_each_entry(fvit, &rm->fv_list, list_entry) {
5370 		ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
5371 		set_bit((u16)fvit->profile_id, profiles);
5372 	}
5373 
5374 	/* Look for a recipe which matches our requested fv / mask list */
5375 	*rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
5376 	if (*rid < ICE_MAX_NUM_RECIPES)
5377 		/* Success if found a recipe that match the existing criteria */
5378 		goto err_unroll;
5379 
5380 	rm->tun_type = rinfo->tun_type;
5381 	/* Recipe we need does not exist, add a recipe */
5382 	status = ice_add_sw_recipe(hw, rm, profiles);
5383 	if (status)
5384 		goto err_unroll;
5385 
5386 	/* Associate all the recipes created with all the profiles in the
5387 	 * common field vector.
5388 	 */
5389 	list_for_each_entry(fvit, &rm->fv_list, list_entry) {
5390 		DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
5391 		u16 j;
5392 
5393 		status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
5394 						      (u8 *)r_bitmap, NULL);
5395 		if (status)
5396 			goto err_unroll;
5397 
5398 		bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap,
5399 			  ICE_MAX_NUM_RECIPES);
5400 		status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5401 		if (status)
5402 			goto err_unroll;
5403 
5404 		status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5405 						      (u8 *)r_bitmap,
5406 						      NULL);
5407 		ice_release_change_lock(hw);
5408 
5409 		if (status)
5410 			goto err_unroll;
5411 
5412 		/* Update profile to recipe bitmap array */
5413 		bitmap_copy(profile_to_recipe[fvit->profile_id], r_bitmap,
5414 			    ICE_MAX_NUM_RECIPES);
5415 
5416 		/* Update recipe to profile bitmap array */
5417 		for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
5418 			set_bit((u16)fvit->profile_id, recipe_to_profile[j]);
5419 	}
5420 
5421 	*rid = rm->root_rid;
5422 	memcpy(&hw->switch_info->recp_list[*rid].lkup_exts, lkup_exts,
5423 	       sizeof(*lkup_exts));
5424 err_unroll:
5425 	list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) {
5426 		list_del(&r_entry->l_entry);
5427 		devm_kfree(ice_hw_to_dev(hw), r_entry);
5428 	}
5429 
5430 	list_for_each_entry_safe(fvit, tmp, &rm->fv_list, list_entry) {
5431 		list_del(&fvit->list_entry);
5432 		devm_kfree(ice_hw_to_dev(hw), fvit);
5433 	}
5434 
5435 	if (rm->root_buf)
5436 		devm_kfree(ice_hw_to_dev(hw), rm->root_buf);
5437 
5438 	kfree(rm);
5439 
5440 err_free_lkup_exts:
5441 	kfree(lkup_exts);
5442 
5443 	return status;
5444 }
5445 
/**
 * ice_dummy_packet_add_vlan - insert VLAN header to dummy pkt
 *
 * @dummy_pkt: dummy packet profile pattern to which VLAN tag(s) will be added
 * @num_vlan: number of VLAN tags
 *
 * Build a heap-allocated copy of @dummy_pkt with @num_vlan (1 or 2) VLAN
 * header(s) spliced in after the first protocol entry and every subsequent
 * protocol offset shifted accordingly. The returned profile is flagged
 * ICE_PKT_KMALLOC so the consumer knows it must be freed.
 *
 * Return: new profile on success, ERR_PTR(-EINVAL) for a bad @num_vlan,
 * ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct ice_dummy_pkt_profile *
ice_dummy_packet_add_vlan(const struct ice_dummy_pkt_profile *dummy_pkt,
			  u32 num_vlan)
{
	struct ice_dummy_pkt_profile *profile;
	struct ice_dummy_pkt_offsets *offsets;
	u32 buf_len, off, etype_off, i;
	u8 *pkt;

	if (num_vlan < 1 || num_vlan > 2)
		return ERR_PTR(-EINVAL);

	/* total bytes the inserted VLAN tag(s) add to the packet */
	off = num_vlan * VLAN_HLEN;

	/* new offsets table = one extra entry per inserted VLAN header plus
	 * the original table
	 */
	buf_len = array_size(num_vlan, sizeof(ice_dummy_vlan_packet_offsets)) +
		  dummy_pkt->offsets_len;
	offsets = kzalloc(buf_len, GFP_KERNEL);
	if (!offsets)
		return ERR_PTR(-ENOMEM);

	/* keep entry 0 unchanged and splice the VLAN entry/entries in right
	 * after it (assumes offsets[0] is the outer MAC entry and offsets[1]
	 * the ethertype -- TODO confirm against the profile tables)
	 */
	offsets[0] = dummy_pkt->offsets[0];
	if (num_vlan == 2) {
		offsets[1] = ice_dummy_qinq_packet_offsets[0];
		offsets[2] = ice_dummy_qinq_packet_offsets[1];
	} else if (num_vlan == 1) {
		offsets[1] = ice_dummy_vlan_packet_offsets[0];
	}

	/* shift every remaining protocol entry by the inserted length */
	for (i = 1; dummy_pkt->offsets[i].type != ICE_PROTOCOL_LAST; i++) {
		offsets[i + num_vlan].type = dummy_pkt->offsets[i].type;
		offsets[i + num_vlan].offset =
			dummy_pkt->offsets[i].offset + off;
	}
	/* copy the ICE_PROTOCOL_LAST terminator entry as well */
	offsets[i + num_vlan] = dummy_pkt->offsets[i];

	/* split point in the packet bytes: where the second protocol entry
	 * (the ethertype) begins
	 */
	etype_off = dummy_pkt->offsets[1].offset;

	buf_len = array_size(num_vlan, sizeof(ice_dummy_vlan_packet)) +
		  dummy_pkt->pkt_len;
	pkt = kzalloc(buf_len, GFP_KERNEL);
	if (!pkt) {
		kfree(offsets);
		return ERR_PTR(-ENOMEM);
	}

	/* assemble: original head | VLAN tag bytes | original remainder */
	memcpy(pkt, dummy_pkt->pkt, etype_off);
	memcpy(pkt + etype_off,
	       num_vlan == 2 ? ice_dummy_qinq_packet : ice_dummy_vlan_packet,
	       off);
	memcpy(pkt + etype_off + off, dummy_pkt->pkt + etype_off,
	       dummy_pkt->pkt_len - etype_off);

	profile = kzalloc(sizeof(*profile), GFP_KERNEL);
	if (!profile) {
		kfree(offsets);
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}

	/* NOTE(review): profile->offsets_len is left at 0 by kzalloc here;
	 * verify that no consumer of a heap-built profile relies on it.
	 */
	profile->offsets = offsets;
	profile->pkt = pkt;
	profile->pkt_len = buf_len;
	/* mark as heap-allocated so the caller knows to free it */
	profile->match |= ICE_PKT_KMALLOC;

	return profile;
}
5518 
5519 /**
5520  * ice_find_dummy_packet - find dummy packet
5521  *
5522  * @lkups: lookup elements or match criteria for the advanced recipe, one
5523  *	   structure per protocol header
5524  * @lkups_cnt: number of protocols
5525  * @tun_type: tunnel type
5526  *
5527  * Returns the &ice_dummy_pkt_profile corresponding to these lookup params.
5528  */
5529 static const struct ice_dummy_pkt_profile *
5530 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5531 		      enum ice_sw_tunnel_type tun_type)
5532 {
5533 	const struct ice_dummy_pkt_profile *ret = ice_dummy_pkt_profiles;
5534 	u32 match = 0, vlan_count = 0;
5535 	u16 i;
5536 
5537 	switch (tun_type) {
5538 	case ICE_SW_TUN_GTPC:
5539 		match |= ICE_PKT_TUN_GTPC;
5540 		break;
5541 	case ICE_SW_TUN_GTPU:
5542 		match |= ICE_PKT_TUN_GTPU;
5543 		break;
5544 	case ICE_SW_TUN_NVGRE:
5545 		match |= ICE_PKT_TUN_NVGRE;
5546 		break;
5547 	case ICE_SW_TUN_GENEVE:
5548 	case ICE_SW_TUN_VXLAN:
5549 		match |= ICE_PKT_TUN_UDP;
5550 		break;
5551 	default:
5552 		break;
5553 	}
5554 
5555 	for (i = 0; i < lkups_cnt; i++) {
5556 		if (lkups[i].type == ICE_UDP_ILOS)
5557 			match |= ICE_PKT_INNER_UDP;
5558 		else if (lkups[i].type == ICE_TCP_IL)
5559 			match |= ICE_PKT_INNER_TCP;
5560 		else if (lkups[i].type == ICE_IPV6_OFOS)
5561 			match |= ICE_PKT_OUTER_IPV6;
5562 		else if (lkups[i].type == ICE_VLAN_OFOS ||
5563 			 lkups[i].type == ICE_VLAN_EX)
5564 			vlan_count++;
5565 		else if (lkups[i].type == ICE_VLAN_IN)
5566 			vlan_count++;
5567 		else if (lkups[i].type == ICE_ETYPE_OL &&
5568 			 lkups[i].h_u.ethertype.ethtype_id ==
5569 				cpu_to_be16(ICE_IPV6_ETHER_ID) &&
5570 			 lkups[i].m_u.ethertype.ethtype_id ==
5571 				cpu_to_be16(0xFFFF))
5572 			match |= ICE_PKT_OUTER_IPV6;
5573 		else if (lkups[i].type == ICE_ETYPE_IL &&
5574 			 lkups[i].h_u.ethertype.ethtype_id ==
5575 				cpu_to_be16(ICE_IPV6_ETHER_ID) &&
5576 			 lkups[i].m_u.ethertype.ethtype_id ==
5577 				cpu_to_be16(0xFFFF))
5578 			match |= ICE_PKT_INNER_IPV6;
5579 		else if (lkups[i].type == ICE_IPV6_IL)
5580 			match |= ICE_PKT_INNER_IPV6;
5581 		else if (lkups[i].type == ICE_GTP_NO_PAY)
5582 			match |= ICE_PKT_GTP_NOPAY;
5583 	}
5584 
5585 	while (ret->match && (match & ret->match) != ret->match)
5586 		ret++;
5587 
5588 	if (vlan_count != 0)
5589 		ret = ice_dummy_packet_add_vlan(ret, vlan_count);
5590 
5591 	return ret;
5592 }
5593 
5594 /**
5595  * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
5596  *
5597  * @lkups: lookup elements or match criteria for the advanced recipe, one
5598  *	   structure per protocol header
5599  * @lkups_cnt: number of protocols
5600  * @s_rule: stores rule information from the match criteria
5601  * @profile: dummy packet profile (the template, its size and header offsets)
5602  */
5603 static int
5604 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5605 			  struct ice_sw_rule_lkup_rx_tx *s_rule,
5606 			  const struct ice_dummy_pkt_profile *profile)
5607 {
5608 	u8 *pkt;
5609 	u16 i;
5610 
5611 	/* Start with a packet with a pre-defined/dummy content. Then, fill
5612 	 * in the header values to be looked up or matched.
5613 	 */
5614 	pkt = s_rule->hdr_data;
5615 
5616 	memcpy(pkt, profile->pkt, profile->pkt_len);
5617 
5618 	for (i = 0; i < lkups_cnt; i++) {
5619 		const struct ice_dummy_pkt_offsets *offsets = profile->offsets;
5620 		enum ice_protocol_type type;
5621 		u16 offset = 0, len = 0, j;
5622 		bool found = false;
5623 
5624 		/* find the start of this layer; it should be found since this
5625 		 * was already checked when search for the dummy packet
5626 		 */
5627 		type = lkups[i].type;
5628 		for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
5629 			if (type == offsets[j].type) {
5630 				offset = offsets[j].offset;
5631 				found = true;
5632 				break;
5633 			}
5634 		}
5635 		/* this should never happen in a correct calling sequence */
5636 		if (!found)
5637 			return -EINVAL;
5638 
5639 		switch (lkups[i].type) {
5640 		case ICE_MAC_OFOS:
5641 		case ICE_MAC_IL:
5642 			len = sizeof(struct ice_ether_hdr);
5643 			break;
5644 		case ICE_ETYPE_OL:
5645 		case ICE_ETYPE_IL:
5646 			len = sizeof(struct ice_ethtype_hdr);
5647 			break;
5648 		case ICE_VLAN_OFOS:
5649 		case ICE_VLAN_EX:
5650 		case ICE_VLAN_IN:
5651 			len = sizeof(struct ice_vlan_hdr);
5652 			break;
5653 		case ICE_IPV4_OFOS:
5654 		case ICE_IPV4_IL:
5655 			len = sizeof(struct ice_ipv4_hdr);
5656 			break;
5657 		case ICE_IPV6_OFOS:
5658 		case ICE_IPV6_IL:
5659 			len = sizeof(struct ice_ipv6_hdr);
5660 			break;
5661 		case ICE_TCP_IL:
5662 		case ICE_UDP_OF:
5663 		case ICE_UDP_ILOS:
5664 			len = sizeof(struct ice_l4_hdr);
5665 			break;
5666 		case ICE_SCTP_IL:
5667 			len = sizeof(struct ice_sctp_hdr);
5668 			break;
5669 		case ICE_NVGRE:
5670 			len = sizeof(struct ice_nvgre_hdr);
5671 			break;
5672 		case ICE_VXLAN:
5673 		case ICE_GENEVE:
5674 			len = sizeof(struct ice_udp_tnl_hdr);
5675 			break;
5676 		case ICE_GTP_NO_PAY:
5677 		case ICE_GTP:
5678 			len = sizeof(struct ice_udp_gtp_hdr);
5679 			break;
5680 		default:
5681 			return -EINVAL;
5682 		}
5683 
5684 		/* the length should be a word multiple */
5685 		if (len % ICE_BYTES_PER_WORD)
5686 			return -EIO;
5687 
5688 		/* We have the offset to the header start, the length, the
5689 		 * caller's header values and mask. Use this information to
5690 		 * copy the data into the dummy packet appropriately based on
5691 		 * the mask. Note that we need to only write the bits as
5692 		 * indicated by the mask to make sure we don't improperly write
5693 		 * over any significant packet data.
5694 		 */
5695 		for (j = 0; j < len / sizeof(u16); j++) {
5696 			u16 *ptr = (u16 *)(pkt + offset);
5697 			u16 mask = lkups[i].m_raw[j];
5698 
5699 			if (!mask)
5700 				continue;
5701 
5702 			ptr[j] = (ptr[j] & ~mask) | (lkups[i].h_raw[j] & mask);
5703 		}
5704 	}
5705 
5706 	s_rule->hdr_len = cpu_to_le16(profile->pkt_len);
5707 
5708 	return 0;
5709 }
5710 
5711 /**
5712  * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
5713  * @hw: pointer to the hardware structure
5714  * @tun_type: tunnel type
5715  * @pkt: dummy packet to fill in
5716  * @offsets: offset info for the dummy packet
5717  */
5718 static int
5719 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
5720 			u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
5721 {
5722 	u16 open_port, i;
5723 
5724 	switch (tun_type) {
5725 	case ICE_SW_TUN_VXLAN:
5726 		if (!ice_get_open_tunnel_port(hw, &open_port, TNL_VXLAN))
5727 			return -EIO;
5728 		break;
5729 	case ICE_SW_TUN_GENEVE:
5730 		if (!ice_get_open_tunnel_port(hw, &open_port, TNL_GENEVE))
5731 			return -EIO;
5732 		break;
5733 	default:
5734 		/* Nothing needs to be done for this tunnel type */
5735 		return 0;
5736 	}
5737 
5738 	/* Find the outer UDP protocol header and insert the port number */
5739 	for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
5740 		if (offsets[i].type == ICE_UDP_OF) {
5741 			struct ice_l4_hdr *hdr;
5742 			u16 offset;
5743 
5744 			offset = offsets[i].offset;
5745 			hdr = (struct ice_l4_hdr *)&pkt[offset];
5746 			hdr->dst_port = cpu_to_be16(open_port);
5747 
5748 			return 0;
5749 		}
5750 	}
5751 
5752 	return -EIO;
5753 }
5754 
5755 /**
5756  * ice_fill_adv_packet_vlan - fill dummy packet with VLAN tag type
5757  * @vlan_type: VLAN tag type
5758  * @pkt: dummy packet to fill in
5759  * @offsets: offset info for the dummy packet
5760  */
5761 static int
5762 ice_fill_adv_packet_vlan(u16 vlan_type, u8 *pkt,
5763 			 const struct ice_dummy_pkt_offsets *offsets)
5764 {
5765 	u16 i;
5766 
5767 	/* Find VLAN header and insert VLAN TPID */
5768 	for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
5769 		if (offsets[i].type == ICE_VLAN_OFOS ||
5770 		    offsets[i].type == ICE_VLAN_EX) {
5771 			struct ice_vlan_hdr *hdr;
5772 			u16 offset;
5773 
5774 			offset = offsets[i].offset;
5775 			hdr = (struct ice_vlan_hdr *)&pkt[offset];
5776 			hdr->type = cpu_to_be16(vlan_type);
5777 
5778 			return 0;
5779 		}
5780 	}
5781 
5782 	return -EIO;
5783 }
5784 
5785 /**
5786  * ice_find_adv_rule_entry - Search a rule entry
5787  * @hw: pointer to the hardware structure
5788  * @lkups: lookup elements or match criteria for the advanced recipe, one
5789  *	   structure per protocol header
5790  * @lkups_cnt: number of protocols
5791  * @recp_id: recipe ID for which we are finding the rule
5792  * @rinfo: other information regarding the rule e.g. priority and action info
5793  *
5794  * Helper function to search for a given advance rule entry
5795  * Returns pointer to entry storing the rule if found
5796  */
5797 static struct ice_adv_fltr_mgmt_list_entry *
5798 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5799 			u16 lkups_cnt, u16 recp_id,
5800 			struct ice_adv_rule_info *rinfo)
5801 {
5802 	struct ice_adv_fltr_mgmt_list_entry *list_itr;
5803 	struct ice_switch_info *sw = hw->switch_info;
5804 	int i;
5805 
5806 	list_for_each_entry(list_itr, &sw->recp_list[recp_id].filt_rules,
5807 			    list_entry) {
5808 		bool lkups_matched = true;
5809 
5810 		if (lkups_cnt != list_itr->lkups_cnt)
5811 			continue;
5812 		for (i = 0; i < list_itr->lkups_cnt; i++)
5813 			if (memcmp(&list_itr->lkups[i], &lkups[i],
5814 				   sizeof(*lkups))) {
5815 				lkups_matched = false;
5816 				break;
5817 			}
5818 		if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
5819 		    rinfo->tun_type == list_itr->rule_info.tun_type &&
5820 		    rinfo->vlan_type == list_itr->rule_info.vlan_type &&
5821 		    lkups_matched)
5822 			return list_itr;
5823 	}
5824 	return NULL;
5825 }
5826 
5827 /**
5828  * ice_adv_add_update_vsi_list
5829  * @hw: pointer to the hardware structure
5830  * @m_entry: pointer to current adv filter management list entry
5831  * @cur_fltr: filter information from the book keeping entry
5832  * @new_fltr: filter information with the new VSI to be added
5833  *
5834  * Call AQ command to add or update previously created VSI list with new VSI.
5835  *
5836  * Helper function to do book keeping associated with adding filter information
5837  * The algorithm to do the booking keeping is described below :
5838  * When a VSI needs to subscribe to a given advanced filter
5839  *	if only one VSI has been added till now
5840  *		Allocate a new VSI list and add two VSIs
5841  *		to this list using switch rule command
5842  *		Update the previously created switch rule with the
5843  *		newly created VSI list ID
5844  *	if a VSI list was previously created
5845  *		Add the new VSI to the previously created VSI list set
5846  *		using the update switch rule command
5847  */
5848 static int
5849 ice_adv_add_update_vsi_list(struct ice_hw *hw,
5850 			    struct ice_adv_fltr_mgmt_list_entry *m_entry,
5851 			    struct ice_adv_rule_info *cur_fltr,
5852 			    struct ice_adv_rule_info *new_fltr)
5853 {
5854 	u16 vsi_list_id = 0;
5855 	int status;
5856 
5857 	if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5858 	    cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
5859 	    cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
5860 		return -EOPNOTSUPP;
5861 
5862 	if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5863 	     new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
5864 	    (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5865 	     cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
5866 		return -EOPNOTSUPP;
5867 
5868 	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
5869 		 /* Only one entry existed in the mapping and it was not already
5870 		  * a part of a VSI list. So, create a VSI list with the old and
5871 		  * new VSIs.
5872 		  */
5873 		struct ice_fltr_info tmp_fltr;
5874 		u16 vsi_handle_arr[2];
5875 
5876 		/* A rule already exists with the new VSI being added */
5877 		if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
5878 		    new_fltr->sw_act.fwd_id.hw_vsi_id)
5879 			return -EEXIST;
5880 
5881 		vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
5882 		vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
5883 		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
5884 						  &vsi_list_id,
5885 						  ICE_SW_LKUP_LAST);
5886 		if (status)
5887 			return status;
5888 
5889 		memset(&tmp_fltr, 0, sizeof(tmp_fltr));
5890 		tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
5891 		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
5892 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
5893 		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
5894 		tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
5895 
5896 		/* Update the previous switch rule of "forward to VSI" to
5897 		 * "fwd to VSI list"
5898 		 */
5899 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
5900 		if (status)
5901 			return status;
5902 
5903 		cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
5904 		cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
5905 		m_entry->vsi_list_info =
5906 			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
5907 						vsi_list_id);
5908 	} else {
5909 		u16 vsi_handle = new_fltr->sw_act.vsi_handle;
5910 
5911 		if (!m_entry->vsi_list_info)
5912 			return -EIO;
5913 
5914 		/* A rule already exists with the new VSI being added */
5915 		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
5916 			return 0;
5917 
5918 		/* Update the previously created VSI list set with
5919 		 * the new VSI ID passed in
5920 		 */
5921 		vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
5922 
5923 		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
5924 						  vsi_list_id, false,
5925 						  ice_aqc_opc_update_sw_rules,
5926 						  ICE_SW_LKUP_LAST);
5927 		/* update VSI list mapping info with new VSI ID */
5928 		if (!status)
5929 			set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
5930 	}
5931 	if (!status)
5932 		m_entry->vsi_count++;
5933 	return status;
5934 }
5935 
5936 /**
5937  * ice_add_adv_rule - helper function to create an advanced switch rule
5938  * @hw: pointer to the hardware structure
5939  * @lkups: information on the words that needs to be looked up. All words
5940  * together makes one recipe
5941  * @lkups_cnt: num of entries in the lkups array
5942  * @rinfo: other information related to the rule that needs to be programmed
5943  * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
5944  *               ignored is case of error.
5945  *
5946  * This function can program only 1 rule at a time. The lkups is used to
5947  * describe the all the words that forms the "lookup" portion of the recipe.
5948  * These words can span multiple protocols. Callers to this function need to
5949  * pass in a list of protocol headers with lookup information along and mask
5950  * that determines which words are valid from the given protocol header.
5951  * rinfo describes other information related to this rule such as forwarding
5952  * IDs, priority of this rule, etc.
5953  */
5954 int
5955 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5956 		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
5957 		 struct ice_rule_query_data *added_entry)
5958 {
5959 	struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
5960 	struct ice_sw_rule_lkup_rx_tx *s_rule = NULL;
5961 	const struct ice_dummy_pkt_profile *profile;
5962 	u16 rid = 0, i, rule_buf_sz, vsi_handle;
5963 	struct list_head *rule_head;
5964 	struct ice_switch_info *sw;
5965 	u16 word_cnt;
5966 	u32 act = 0;
5967 	int status;
5968 	u8 q_rgn;
5969 
5970 	/* Initialize profile to result index bitmap */
5971 	if (!hw->switch_info->prof_res_bm_init) {
5972 		hw->switch_info->prof_res_bm_init = 1;
5973 		ice_init_prof_result_bm(hw);
5974 	}
5975 
5976 	if (!lkups_cnt)
5977 		return -EINVAL;
5978 
5979 	/* get # of words we need to match */
5980 	word_cnt = 0;
5981 	for (i = 0; i < lkups_cnt; i++) {
5982 		u16 j;
5983 
5984 		for (j = 0; j < ARRAY_SIZE(lkups->m_raw); j++)
5985 			if (lkups[i].m_raw[j])
5986 				word_cnt++;
5987 	}
5988 
5989 	if (!word_cnt)
5990 		return -EINVAL;
5991 
5992 	if (word_cnt > ICE_MAX_CHAIN_WORDS)
5993 		return -ENOSPC;
5994 
5995 	/* locate a dummy packet */
5996 	profile = ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type);
5997 	if (IS_ERR(profile))
5998 		return PTR_ERR(profile);
5999 
6000 	if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6001 	      rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
6002 	      rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6003 	      rinfo->sw_act.fltr_act == ICE_DROP_PACKET)) {
6004 		status = -EIO;
6005 		goto free_pkt_profile;
6006 	}
6007 
6008 	vsi_handle = rinfo->sw_act.vsi_handle;
6009 	if (!ice_is_vsi_valid(hw, vsi_handle)) {
6010 		status =  -EINVAL;
6011 		goto free_pkt_profile;
6012 	}
6013 
6014 	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6015 		rinfo->sw_act.fwd_id.hw_vsi_id =
6016 			ice_get_hw_vsi_num(hw, vsi_handle);
6017 	if (rinfo->sw_act.flag & ICE_FLTR_TX)
6018 		rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
6019 
6020 	status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
6021 	if (status)
6022 		goto free_pkt_profile;
6023 	m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6024 	if (m_entry) {
6025 		/* we have to add VSI to VSI_LIST and increment vsi_count.
6026 		 * Also Update VSI list so that we can change forwarding rule
6027 		 * if the rule already exists, we will check if it exists with
6028 		 * same vsi_id, if not then add it to the VSI list if it already
6029 		 * exists if not then create a VSI list and add the existing VSI
6030 		 * ID and the new VSI ID to the list
6031 		 * We will add that VSI to the list
6032 		 */
6033 		status = ice_adv_add_update_vsi_list(hw, m_entry,
6034 						     &m_entry->rule_info,
6035 						     rinfo);
6036 		if (added_entry) {
6037 			added_entry->rid = rid;
6038 			added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
6039 			added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
6040 		}
6041 		goto free_pkt_profile;
6042 	}
6043 	rule_buf_sz = ICE_SW_RULE_RX_TX_HDR_SIZE(s_rule, profile->pkt_len);
6044 	s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
6045 	if (!s_rule) {
6046 		status = -ENOMEM;
6047 		goto free_pkt_profile;
6048 	}
6049 	if (!rinfo->flags_info.act_valid) {
6050 		act |= ICE_SINGLE_ACT_LAN_ENABLE;
6051 		act |= ICE_SINGLE_ACT_LB_ENABLE;
6052 	} else {
6053 		act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE |
6054 						ICE_SINGLE_ACT_LB_ENABLE);
6055 	}
6056 
6057 	switch (rinfo->sw_act.fltr_act) {
6058 	case ICE_FWD_TO_VSI:
6059 		act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
6060 			ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
6061 		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
6062 		break;
6063 	case ICE_FWD_TO_Q:
6064 		act |= ICE_SINGLE_ACT_TO_Q;
6065 		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6066 		       ICE_SINGLE_ACT_Q_INDEX_M;
6067 		break;
6068 	case ICE_FWD_TO_QGRP:
6069 		q_rgn = rinfo->sw_act.qgrp_size > 0 ?
6070 			(u8)ilog2(rinfo->sw_act.qgrp_size) : 0;
6071 		act |= ICE_SINGLE_ACT_TO_Q;
6072 		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6073 		       ICE_SINGLE_ACT_Q_INDEX_M;
6074 		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
6075 		       ICE_SINGLE_ACT_Q_REGION_M;
6076 		break;
6077 	case ICE_DROP_PACKET:
6078 		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
6079 		       ICE_SINGLE_ACT_VALID_BIT;
6080 		break;
6081 	default:
6082 		status = -EIO;
6083 		goto err_ice_add_adv_rule;
6084 	}
6085 
6086 	/* set the rule LOOKUP type based on caller specified 'Rx'
6087 	 * instead of hardcoding it to be either LOOKUP_TX/RX
6088 	 *
6089 	 * for 'Rx' set the source to be the port number
6090 	 * for 'Tx' set the source to be the source HW VSI number (determined
6091 	 * by caller)
6092 	 */
6093 	if (rinfo->rx) {
6094 		s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
6095 		s_rule->src = cpu_to_le16(hw->port_info->lport);
6096 	} else {
6097 		s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
6098 		s_rule->src = cpu_to_le16(rinfo->sw_act.src);
6099 	}
6100 
6101 	s_rule->recipe_id = cpu_to_le16(rid);
6102 	s_rule->act = cpu_to_le32(act);
6103 
6104 	status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, profile);
6105 	if (status)
6106 		goto err_ice_add_adv_rule;
6107 
6108 	if (rinfo->tun_type != ICE_NON_TUN &&
6109 	    rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
6110 		status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
6111 						 s_rule->hdr_data,
6112 						 profile->offsets);
6113 		if (status)
6114 			goto err_ice_add_adv_rule;
6115 	}
6116 
6117 	if (rinfo->vlan_type != 0 && ice_is_dvm_ena(hw)) {
6118 		status = ice_fill_adv_packet_vlan(rinfo->vlan_type,
6119 						  s_rule->hdr_data,
6120 						  profile->offsets);
6121 		if (status)
6122 			goto err_ice_add_adv_rule;
6123 	}
6124 
6125 	status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6126 				 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
6127 				 NULL);
6128 	if (status)
6129 		goto err_ice_add_adv_rule;
6130 	adv_fltr = devm_kzalloc(ice_hw_to_dev(hw),
6131 				sizeof(struct ice_adv_fltr_mgmt_list_entry),
6132 				GFP_KERNEL);
6133 	if (!adv_fltr) {
6134 		status = -ENOMEM;
6135 		goto err_ice_add_adv_rule;
6136 	}
6137 
6138 	adv_fltr->lkups = devm_kmemdup(ice_hw_to_dev(hw), lkups,
6139 				       lkups_cnt * sizeof(*lkups), GFP_KERNEL);
6140 	if (!adv_fltr->lkups) {
6141 		status = -ENOMEM;
6142 		goto err_ice_add_adv_rule;
6143 	}
6144 
6145 	adv_fltr->lkups_cnt = lkups_cnt;
6146 	adv_fltr->rule_info = *rinfo;
6147 	adv_fltr->rule_info.fltr_rule_id = le16_to_cpu(s_rule->index);
6148 	sw = hw->switch_info;
6149 	sw->recp_list[rid].adv_rule = true;
6150 	rule_head = &sw->recp_list[rid].filt_rules;
6151 
6152 	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6153 		adv_fltr->vsi_count = 1;
6154 
6155 	/* Add rule entry to book keeping list */
6156 	list_add(&adv_fltr->list_entry, rule_head);
6157 	if (added_entry) {
6158 		added_entry->rid = rid;
6159 		added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
6160 		added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
6161 	}
6162 err_ice_add_adv_rule:
6163 	if (status && adv_fltr) {
6164 		devm_kfree(ice_hw_to_dev(hw), adv_fltr->lkups);
6165 		devm_kfree(ice_hw_to_dev(hw), adv_fltr);
6166 	}
6167 
6168 	kfree(s_rule);
6169 
6170 free_pkt_profile:
6171 	if (profile->match & ICE_PKT_KMALLOC) {
6172 		kfree(profile->offsets);
6173 		kfree(profile->pkt);
6174 		kfree(profile);
6175 	}
6176 
6177 	return status;
6178 }
6179 
6180 /**
6181  * ice_replay_vsi_fltr - Replay filters for requested VSI
6182  * @hw: pointer to the hardware structure
6183  * @vsi_handle: driver VSI handle
6184  * @recp_id: Recipe ID for which rules need to be replayed
6185  * @list_head: list for which filters need to be replayed
6186  *
6187  * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
6188  * It is required to pass valid VSI handle.
6189  */
6190 static int
6191 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
6192 		    struct list_head *list_head)
6193 {
6194 	struct ice_fltr_mgmt_list_entry *itr;
6195 	int status = 0;
6196 	u16 hw_vsi_id;
6197 
6198 	if (list_empty(list_head))
6199 		return status;
6200 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
6201 
6202 	list_for_each_entry(itr, list_head, list_entry) {
6203 		struct ice_fltr_list_entry f_entry;
6204 
6205 		f_entry.fltr_info = itr->fltr_info;
6206 		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
6207 		    itr->fltr_info.vsi_handle == vsi_handle) {
6208 			/* update the src in case it is VSI num */
6209 			if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6210 				f_entry.fltr_info.src = hw_vsi_id;
6211 			status = ice_add_rule_internal(hw, recp_id, &f_entry);
6212 			if (status)
6213 				goto end;
6214 			continue;
6215 		}
6216 		if (!itr->vsi_list_info ||
6217 		    !test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
6218 			continue;
6219 		/* Clearing it so that the logic can add it back */
6220 		clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6221 		f_entry.fltr_info.vsi_handle = vsi_handle;
6222 		f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6223 		/* update the src in case it is VSI num */
6224 		if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6225 			f_entry.fltr_info.src = hw_vsi_id;
6226 		if (recp_id == ICE_SW_LKUP_VLAN)
6227 			status = ice_add_vlan_internal(hw, &f_entry);
6228 		else
6229 			status = ice_add_rule_internal(hw, recp_id, &f_entry);
6230 		if (status)
6231 			goto end;
6232 	}
6233 end:
6234 	return status;
6235 }
6236 
6237 /**
6238  * ice_adv_rem_update_vsi_list
6239  * @hw: pointer to the hardware structure
6240  * @vsi_handle: VSI handle of the VSI to remove
6241  * @fm_list: filter management entry for which the VSI list management needs to
6242  *	     be done
6243  */
6244 static int
6245 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
6246 			    struct ice_adv_fltr_mgmt_list_entry *fm_list)
6247 {
6248 	struct ice_vsi_list_map_info *vsi_list_info;
6249 	enum ice_sw_lkup_type lkup_type;
6250 	u16 vsi_list_id;
6251 	int status;
6252 
6253 	if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
6254 	    fm_list->vsi_count == 0)
6255 		return -EINVAL;
6256 
6257 	/* A rule with the VSI being removed does not exist */
6258 	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
6259 		return -ENOENT;
6260 
6261 	lkup_type = ICE_SW_LKUP_LAST;
6262 	vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
6263 	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
6264 					  ice_aqc_opc_update_sw_rules,
6265 					  lkup_type);
6266 	if (status)
6267 		return status;
6268 
6269 	fm_list->vsi_count--;
6270 	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
6271 	vsi_list_info = fm_list->vsi_list_info;
6272 	if (fm_list->vsi_count == 1) {
6273 		struct ice_fltr_info tmp_fltr;
6274 		u16 rem_vsi_handle;
6275 
6276 		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
6277 						ICE_MAX_VSI);
6278 		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
6279 			return -EIO;
6280 
6281 		/* Make sure VSI list is empty before removing it below */
6282 		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
6283 						  vsi_list_id, true,
6284 						  ice_aqc_opc_update_sw_rules,
6285 						  lkup_type);
6286 		if (status)
6287 			return status;
6288 
6289 		memset(&tmp_fltr, 0, sizeof(tmp_fltr));
6290 		tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
6291 		tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
6292 		fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
6293 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6294 		tmp_fltr.fwd_id.hw_vsi_id =
6295 			ice_get_hw_vsi_num(hw, rem_vsi_handle);
6296 		fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
6297 			ice_get_hw_vsi_num(hw, rem_vsi_handle);
6298 		fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
6299 
6300 		/* Update the previous switch rule of "MAC forward to VSI" to
6301 		 * "MAC fwd to VSI list"
6302 		 */
6303 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6304 		if (status) {
6305 			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
6306 				  tmp_fltr.fwd_id.hw_vsi_id, status);
6307 			return status;
6308 		}
6309 		fm_list->vsi_list_info->ref_cnt--;
6310 
6311 		/* Remove the VSI list since it is no longer used */
6312 		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
6313 		if (status) {
6314 			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
6315 				  vsi_list_id, status);
6316 			return status;
6317 		}
6318 
6319 		list_del(&vsi_list_info->list_entry);
6320 		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
6321 		fm_list->vsi_list_info = NULL;
6322 	}
6323 
6324 	return status;
6325 }
6326 
6327 /**
6328  * ice_rem_adv_rule - removes existing advanced switch rule
6329  * @hw: pointer to the hardware structure
6330  * @lkups: information on the words that needs to be looked up. All words
6331  *         together makes one recipe
6332  * @lkups_cnt: num of entries in the lkups array
6333  * @rinfo: Its the pointer to the rule information for the rule
6334  *
6335  * This function can be used to remove 1 rule at a time. The lkups is
6336  * used to describe all the words that forms the "lookup" portion of the
6337  * rule. These words can span multiple protocols. Callers to this function
6338  * need to pass in a list of protocol headers with lookup information along
6339  * and mask that determines which words are valid from the given protocol
6340  * header. rinfo describes other information related to this rule such as
6341  * forwarding IDs, priority of this rule, etc.
6342  */
6343 static int
6344 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6345 		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
6346 {
6347 	struct ice_adv_fltr_mgmt_list_entry *list_elem;
6348 	struct ice_prot_lkup_ext lkup_exts;
6349 	bool remove_rule = false;
6350 	struct mutex *rule_lock; /* Lock to protect filter rule list */
6351 	u16 i, rid, vsi_handle;
6352 	int status = 0;
6353 
6354 	memset(&lkup_exts, 0, sizeof(lkup_exts));
6355 	for (i = 0; i < lkups_cnt; i++) {
6356 		u16 count;
6357 
6358 		if (lkups[i].type >= ICE_PROTOCOL_LAST)
6359 			return -EIO;
6360 
6361 		count = ice_fill_valid_words(&lkups[i], &lkup_exts);
6362 		if (!count)
6363 			return -EIO;
6364 	}
6365 
6366 	/* Create any special protocol/offset pairs, such as looking at tunnel
6367 	 * bits by extracting metadata
6368 	 */
6369 	status = ice_add_special_words(rinfo, &lkup_exts, ice_is_dvm_ena(hw));
6370 	if (status)
6371 		return status;
6372 
6373 	rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
6374 	/* If did not find a recipe that match the existing criteria */
6375 	if (rid == ICE_MAX_NUM_RECIPES)
6376 		return -EINVAL;
6377 
6378 	rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
6379 	list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6380 	/* the rule is already removed */
6381 	if (!list_elem)
6382 		return 0;
6383 	mutex_lock(rule_lock);
6384 	if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
6385 		remove_rule = true;
6386 	} else if (list_elem->vsi_count > 1) {
6387 		remove_rule = false;
6388 		vsi_handle = rinfo->sw_act.vsi_handle;
6389 		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6390 	} else {
6391 		vsi_handle = rinfo->sw_act.vsi_handle;
6392 		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6393 		if (status) {
6394 			mutex_unlock(rule_lock);
6395 			return status;
6396 		}
6397 		if (list_elem->vsi_count == 0)
6398 			remove_rule = true;
6399 	}
6400 	mutex_unlock(rule_lock);
6401 	if (remove_rule) {
6402 		struct ice_sw_rule_lkup_rx_tx *s_rule;
6403 		u16 rule_buf_sz;
6404 
6405 		rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule);
6406 		s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
6407 		if (!s_rule)
6408 			return -ENOMEM;
6409 		s_rule->act = 0;
6410 		s_rule->index = cpu_to_le16(list_elem->rule_info.fltr_rule_id);
6411 		s_rule->hdr_len = 0;
6412 		status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6413 					 rule_buf_sz, 1,
6414 					 ice_aqc_opc_remove_sw_rules, NULL);
6415 		if (!status || status == -ENOENT) {
6416 			struct ice_switch_info *sw = hw->switch_info;
6417 
6418 			mutex_lock(rule_lock);
6419 			list_del(&list_elem->list_entry);
6420 			devm_kfree(ice_hw_to_dev(hw), list_elem->lkups);
6421 			devm_kfree(ice_hw_to_dev(hw), list_elem);
6422 			mutex_unlock(rule_lock);
6423 			if (list_empty(&sw->recp_list[rid].filt_rules))
6424 				sw->recp_list[rid].adv_rule = false;
6425 		}
6426 		kfree(s_rule);
6427 	}
6428 	return status;
6429 }
6430 
6431 /**
6432  * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
6433  * @hw: pointer to the hardware structure
6434  * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
6435  *
6436  * This function is used to remove 1 rule at a time. The removal is based on
6437  * the remove_entry parameter. This function will remove rule for a given
6438  * vsi_handle with a given rule_id which is passed as parameter in remove_entry
6439  */
6440 int
6441 ice_rem_adv_rule_by_id(struct ice_hw *hw,
6442 		       struct ice_rule_query_data *remove_entry)
6443 {
6444 	struct ice_adv_fltr_mgmt_list_entry *list_itr;
6445 	struct list_head *list_head;
6446 	struct ice_adv_rule_info rinfo;
6447 	struct ice_switch_info *sw;
6448 
6449 	sw = hw->switch_info;
6450 	if (!sw->recp_list[remove_entry->rid].recp_created)
6451 		return -EINVAL;
6452 	list_head = &sw->recp_list[remove_entry->rid].filt_rules;
6453 	list_for_each_entry(list_itr, list_head, list_entry) {
6454 		if (list_itr->rule_info.fltr_rule_id ==
6455 		    remove_entry->rule_id) {
6456 			rinfo = list_itr->rule_info;
6457 			rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
6458 			return ice_rem_adv_rule(hw, list_itr->lkups,
6459 						list_itr->lkups_cnt, &rinfo);
6460 		}
6461 	}
6462 	/* either list is empty or unable to find rule */
6463 	return -ENOENT;
6464 }
6465 
6466 /**
6467  * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
6468  *                            given VSI handle
6469  * @hw: pointer to the hardware structure
6470  * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
6471  *
6472  * This function is used to remove all the rules for a given VSI and as soon
6473  * as removing a rule fails, it will return immediately with the error code,
6474  * else it will return success.
6475  */
6476 int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
6477 {
6478 	struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
6479 	struct ice_vsi_list_map_info *map_info;
6480 	struct ice_adv_rule_info rinfo;
6481 	struct list_head *list_head;
6482 	struct ice_switch_info *sw;
6483 	int status;
6484 	u8 rid;
6485 
6486 	sw = hw->switch_info;
6487 	for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
6488 		if (!sw->recp_list[rid].recp_created)
6489 			continue;
6490 		if (!sw->recp_list[rid].adv_rule)
6491 			continue;
6492 
6493 		list_head = &sw->recp_list[rid].filt_rules;
6494 		list_for_each_entry_safe(list_itr, tmp_entry, list_head,
6495 					 list_entry) {
6496 			rinfo = list_itr->rule_info;
6497 
6498 			if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
6499 				map_info = list_itr->vsi_list_info;
6500 				if (!map_info)
6501 					continue;
6502 
6503 				if (!test_bit(vsi_handle, map_info->vsi_map))
6504 					continue;
6505 			} else if (rinfo.sw_act.vsi_handle != vsi_handle) {
6506 				continue;
6507 			}
6508 
6509 			rinfo.sw_act.vsi_handle = vsi_handle;
6510 			status = ice_rem_adv_rule(hw, list_itr->lkups,
6511 						  list_itr->lkups_cnt, &rinfo);
6512 			if (status)
6513 				return status;
6514 		}
6515 	}
6516 	return 0;
6517 }
6518 
6519 /**
6520  * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
6521  * @hw: pointer to the hardware structure
6522  * @vsi_handle: driver VSI handle
6523  * @list_head: list for which filters need to be replayed
6524  *
6525  * Replay the advanced rule for the given VSI.
6526  */
6527 static int
6528 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
6529 			struct list_head *list_head)
6530 {
6531 	struct ice_rule_query_data added_entry = { 0 };
6532 	struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
6533 	int status = 0;
6534 
6535 	if (list_empty(list_head))
6536 		return status;
6537 	list_for_each_entry(adv_fltr, list_head, list_entry) {
6538 		struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
6539 		u16 lk_cnt = adv_fltr->lkups_cnt;
6540 
6541 		if (vsi_handle != rinfo->sw_act.vsi_handle)
6542 			continue;
6543 		status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
6544 					  &added_entry);
6545 		if (status)
6546 			break;
6547 	}
6548 	return status;
6549 }
6550 
6551 /**
6552  * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
6553  * @hw: pointer to the hardware structure
6554  * @vsi_handle: driver VSI handle
6555  *
6556  * Replays filters for requested VSI via vsi_handle.
6557  */
6558 int ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
6559 {
6560 	struct ice_switch_info *sw = hw->switch_info;
6561 	int status;
6562 	u8 i;
6563 
6564 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6565 		struct list_head *head;
6566 
6567 		head = &sw->recp_list[i].filt_replay_rules;
6568 		if (!sw->recp_list[i].adv_rule)
6569 			status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
6570 		else
6571 			status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
6572 		if (status)
6573 			return status;
6574 	}
6575 	return status;
6576 }
6577 
6578 /**
6579  * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
6580  * @hw: pointer to the HW struct
6581  *
6582  * Deletes the filter replay rules.
6583  */
6584 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
6585 {
6586 	struct ice_switch_info *sw = hw->switch_info;
6587 	u8 i;
6588 
6589 	if (!sw)
6590 		return;
6591 
6592 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6593 		if (!list_empty(&sw->recp_list[i].filt_replay_rules)) {
6594 			struct list_head *l_head;
6595 
6596 			l_head = &sw->recp_list[i].filt_replay_rules;
6597 			if (!sw->recp_list[i].adv_rule)
6598 				ice_rem_sw_rule_info(hw, l_head);
6599 			else
6600 				ice_rem_adv_rule_info(hw, l_head);
6601 		}
6602 	}
6603 }
6604