1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 #include "ice_lib.h"
5 #include "ice_switch.h"
6 
7 #define ICE_ETH_DA_OFFSET		0
8 #define ICE_ETH_ETHTYPE_OFFSET		12
9 #define ICE_ETH_VLAN_TCI_OFFSET		14
10 #define ICE_MAX_VLAN_ID			0xFFF
11 #define ICE_IPV6_ETHER_ID		0x86DD
12 
13 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
14  * struct to configure any switch filter rules.
15  * {DA (6 bytes), SA(6 bytes),
16  * Ether type (2 bytes for header without VLAN tag) OR
17  * VLAN tag (4 bytes for header with VLAN tag) }
18  *
19  * Word on Hardcoded values
20  * byte 0 = 0x2: to identify it as locally administered DA MAC
21  * byte 6 = 0x2: to identify it as locally administered SA MAC
22  * byte 12 = 0x81 & byte 13 = 0x00:
23  *	In case of VLAN filter first two bytes defines ether type (0x8100)
24  *	and remaining two bytes are placeholder for programming a given VLAN ID
25  *	In case of Ether type filter it is treated as header without VLAN tag
26  *	and byte 12 and 13 is used to program a given Ether type instead
27  */
28 #define DUMMY_ETH_HDR_LEN		16
29 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
30 							0x2, 0, 0, 0, 0, 0,
31 							0x81, 0, 0, 0};
32 
33 struct ice_dummy_pkt_offsets {
34 	enum ice_protocol_type type;
35 	u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
36 };
37 
38 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
39 	{ ICE_MAC_OFOS,		0 },
40 	{ ICE_ETYPE_OL,		12 },
41 	{ ICE_IPV4_OFOS,	14 },
42 	{ ICE_NVGRE,		34 },
43 	{ ICE_MAC_IL,		42 },
44 	{ ICE_IPV4_IL,		56 },
45 	{ ICE_TCP_IL,		76 },
46 	{ ICE_PROTOCOL_LAST,	0 },
47 };
48 
49 static const u8 dummy_gre_tcp_packet[] = {
50 	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
51 	0x00, 0x00, 0x00, 0x00,
52 	0x00, 0x00, 0x00, 0x00,
53 
54 	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
55 
56 	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
57 	0x00, 0x00, 0x00, 0x00,
58 	0x00, 0x2F, 0x00, 0x00,
59 	0x00, 0x00, 0x00, 0x00,
60 	0x00, 0x00, 0x00, 0x00,
61 
62 	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
63 	0x00, 0x00, 0x00, 0x00,
64 
65 	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
66 	0x00, 0x00, 0x00, 0x00,
67 	0x00, 0x00, 0x00, 0x00,
68 	0x08, 0x00,
69 
70 	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
71 	0x00, 0x00, 0x00, 0x00,
72 	0x00, 0x06, 0x00, 0x00,
73 	0x00, 0x00, 0x00, 0x00,
74 	0x00, 0x00, 0x00, 0x00,
75 
76 	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 76 */
77 	0x00, 0x00, 0x00, 0x00,
78 	0x00, 0x00, 0x00, 0x00,
79 	0x50, 0x02, 0x20, 0x00,
80 	0x00, 0x00, 0x00, 0x00
81 };
82 
83 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
84 	{ ICE_MAC_OFOS,		0 },
85 	{ ICE_ETYPE_OL,		12 },
86 	{ ICE_IPV4_OFOS,	14 },
87 	{ ICE_NVGRE,		34 },
88 	{ ICE_MAC_IL,		42 },
89 	{ ICE_IPV4_IL,		56 },
90 	{ ICE_UDP_ILOS,		76 },
91 	{ ICE_PROTOCOL_LAST,	0 },
92 };
93 
94 static const u8 dummy_gre_udp_packet[] = {
95 	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
96 	0x00, 0x00, 0x00, 0x00,
97 	0x00, 0x00, 0x00, 0x00,
98 
99 	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
100 
101 	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
102 	0x00, 0x00, 0x00, 0x00,
103 	0x00, 0x2F, 0x00, 0x00,
104 	0x00, 0x00, 0x00, 0x00,
105 	0x00, 0x00, 0x00, 0x00,
106 
107 	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
108 	0x00, 0x00, 0x00, 0x00,
109 
110 	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
111 	0x00, 0x00, 0x00, 0x00,
112 	0x00, 0x00, 0x00, 0x00,
113 	0x08, 0x00,
114 
115 	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
116 	0x00, 0x00, 0x00, 0x00,
117 	0x00, 0x11, 0x00, 0x00,
118 	0x00, 0x00, 0x00, 0x00,
119 	0x00, 0x00, 0x00, 0x00,
120 
121 	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 76 */
122 	0x00, 0x08, 0x00, 0x00,
123 };
124 
125 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
126 	{ ICE_MAC_OFOS,		0 },
127 	{ ICE_ETYPE_OL,		12 },
128 	{ ICE_IPV4_OFOS,	14 },
129 	{ ICE_UDP_OF,		34 },
130 	{ ICE_VXLAN,		42 },
131 	{ ICE_GENEVE,		42 },
132 	{ ICE_VXLAN_GPE,	42 },
133 	{ ICE_MAC_IL,		50 },
134 	{ ICE_IPV4_IL,		64 },
135 	{ ICE_TCP_IL,		84 },
136 	{ ICE_PROTOCOL_LAST,	0 },
137 };
138 
139 static const u8 dummy_udp_tun_tcp_packet[] = {
140 	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
141 	0x00, 0x00, 0x00, 0x00,
142 	0x00, 0x00, 0x00, 0x00,
143 
144 	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
145 
146 	0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
147 	0x00, 0x01, 0x00, 0x00,
148 	0x40, 0x11, 0x00, 0x00,
149 	0x00, 0x00, 0x00, 0x00,
150 	0x00, 0x00, 0x00, 0x00,
151 
152 	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
153 	0x00, 0x46, 0x00, 0x00,
154 
155 	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
156 	0x00, 0x00, 0x00, 0x00,
157 
158 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
159 	0x00, 0x00, 0x00, 0x00,
160 	0x00, 0x00, 0x00, 0x00,
161 	0x08, 0x00,
162 
163 	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
164 	0x00, 0x01, 0x00, 0x00,
165 	0x40, 0x06, 0x00, 0x00,
166 	0x00, 0x00, 0x00, 0x00,
167 	0x00, 0x00, 0x00, 0x00,
168 
169 	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
170 	0x00, 0x00, 0x00, 0x00,
171 	0x00, 0x00, 0x00, 0x00,
172 	0x50, 0x02, 0x20, 0x00,
173 	0x00, 0x00, 0x00, 0x00
174 };
175 
176 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
177 	{ ICE_MAC_OFOS,		0 },
178 	{ ICE_ETYPE_OL,		12 },
179 	{ ICE_IPV4_OFOS,	14 },
180 	{ ICE_UDP_OF,		34 },
181 	{ ICE_VXLAN,		42 },
182 	{ ICE_GENEVE,		42 },
183 	{ ICE_VXLAN_GPE,	42 },
184 	{ ICE_MAC_IL,		50 },
185 	{ ICE_IPV4_IL,		64 },
186 	{ ICE_UDP_ILOS,		84 },
187 	{ ICE_PROTOCOL_LAST,	0 },
188 };
189 
190 static const u8 dummy_udp_tun_udp_packet[] = {
191 	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
192 	0x00, 0x00, 0x00, 0x00,
193 	0x00, 0x00, 0x00, 0x00,
194 
195 	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
196 
197 	0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
198 	0x00, 0x01, 0x00, 0x00,
199 	0x00, 0x11, 0x00, 0x00,
200 	0x00, 0x00, 0x00, 0x00,
201 	0x00, 0x00, 0x00, 0x00,
202 
203 	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
204 	0x00, 0x3a, 0x00, 0x00,
205 
206 	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
207 	0x00, 0x00, 0x00, 0x00,
208 
209 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
210 	0x00, 0x00, 0x00, 0x00,
211 	0x00, 0x00, 0x00, 0x00,
212 	0x08, 0x00,
213 
214 	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
215 	0x00, 0x01, 0x00, 0x00,
216 	0x00, 0x11, 0x00, 0x00,
217 	0x00, 0x00, 0x00, 0x00,
218 	0x00, 0x00, 0x00, 0x00,
219 
220 	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
221 	0x00, 0x08, 0x00, 0x00,
222 };
223 
224 /* offset info for MAC + IPv4 + UDP dummy packet */
225 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
226 	{ ICE_MAC_OFOS,		0 },
227 	{ ICE_ETYPE_OL,		12 },
228 	{ ICE_IPV4_OFOS,	14 },
229 	{ ICE_UDP_ILOS,		34 },
230 	{ ICE_PROTOCOL_LAST,	0 },
231 };
232 
233 /* Dummy packet for MAC + IPv4 + UDP */
234 static const u8 dummy_udp_packet[] = {
235 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
236 	0x00, 0x00, 0x00, 0x00,
237 	0x00, 0x00, 0x00, 0x00,
238 
239 	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
240 
241 	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
242 	0x00, 0x01, 0x00, 0x00,
243 	0x00, 0x11, 0x00, 0x00,
244 	0x00, 0x00, 0x00, 0x00,
245 	0x00, 0x00, 0x00, 0x00,
246 
247 	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
248 	0x00, 0x08, 0x00, 0x00,
249 
250 	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
251 };
252 
253 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
254 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
255 	{ ICE_MAC_OFOS,		0 },
256 	{ ICE_VLAN_OFOS,	12 },
257 	{ ICE_ETYPE_OL,		16 },
258 	{ ICE_IPV4_OFOS,	18 },
259 	{ ICE_UDP_ILOS,		38 },
260 	{ ICE_PROTOCOL_LAST,	0 },
261 };
262 
263 /* C-tag (801.1Q), IPv4:UDP dummy packet */
264 static const u8 dummy_vlan_udp_packet[] = {
265 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
266 	0x00, 0x00, 0x00, 0x00,
267 	0x00, 0x00, 0x00, 0x00,
268 
269 	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
270 
271 	0x08, 0x00,		/* ICE_ETYPE_OL 16 */
272 
273 	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
274 	0x00, 0x01, 0x00, 0x00,
275 	0x00, 0x11, 0x00, 0x00,
276 	0x00, 0x00, 0x00, 0x00,
277 	0x00, 0x00, 0x00, 0x00,
278 
279 	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
280 	0x00, 0x08, 0x00, 0x00,
281 
282 	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
283 };
284 
285 /* offset info for MAC + IPv4 + TCP dummy packet */
286 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
287 	{ ICE_MAC_OFOS,		0 },
288 	{ ICE_ETYPE_OL,		12 },
289 	{ ICE_IPV4_OFOS,	14 },
290 	{ ICE_TCP_IL,		34 },
291 	{ ICE_PROTOCOL_LAST,	0 },
292 };
293 
294 /* Dummy packet for MAC + IPv4 + TCP */
295 static const u8 dummy_tcp_packet[] = {
296 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
297 	0x00, 0x00, 0x00, 0x00,
298 	0x00, 0x00, 0x00, 0x00,
299 
300 	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
301 
302 	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
303 	0x00, 0x01, 0x00, 0x00,
304 	0x00, 0x06, 0x00, 0x00,
305 	0x00, 0x00, 0x00, 0x00,
306 	0x00, 0x00, 0x00, 0x00,
307 
308 	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
309 	0x00, 0x00, 0x00, 0x00,
310 	0x00, 0x00, 0x00, 0x00,
311 	0x50, 0x00, 0x00, 0x00,
312 	0x00, 0x00, 0x00, 0x00,
313 
314 	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
315 };
316 
317 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
318 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
319 	{ ICE_MAC_OFOS,		0 },
320 	{ ICE_VLAN_OFOS,	12 },
321 	{ ICE_ETYPE_OL,		16 },
322 	{ ICE_IPV4_OFOS,	18 },
323 	{ ICE_TCP_IL,		38 },
324 	{ ICE_PROTOCOL_LAST,	0 },
325 };
326 
327 /* C-tag (801.1Q), IPv4:TCP dummy packet */
328 static const u8 dummy_vlan_tcp_packet[] = {
329 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
330 	0x00, 0x00, 0x00, 0x00,
331 	0x00, 0x00, 0x00, 0x00,
332 
333 	0x81, 0x00, 0x00, 0x00,	/* ICE_VLAN_OFOS 12 */
334 
335 	0x08, 0x00,		/* ICE_ETYPE_OL 16 */
336 
337 	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
338 	0x00, 0x01, 0x00, 0x00,
339 	0x00, 0x06, 0x00, 0x00,
340 	0x00, 0x00, 0x00, 0x00,
341 	0x00, 0x00, 0x00, 0x00,
342 
343 	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
344 	0x00, 0x00, 0x00, 0x00,
345 	0x00, 0x00, 0x00, 0x00,
346 	0x50, 0x00, 0x00, 0x00,
347 	0x00, 0x00, 0x00, 0x00,
348 
349 	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
350 };
351 
352 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
353 	{ ICE_MAC_OFOS,		0 },
354 	{ ICE_ETYPE_OL,		12 },
355 	{ ICE_IPV6_OFOS,	14 },
356 	{ ICE_TCP_IL,		54 },
357 	{ ICE_PROTOCOL_LAST,	0 },
358 };
359 
360 static const u8 dummy_tcp_ipv6_packet[] = {
361 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
362 	0x00, 0x00, 0x00, 0x00,
363 	0x00, 0x00, 0x00, 0x00,
364 
365 	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */
366 
367 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
368 	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
369 	0x00, 0x00, 0x00, 0x00,
370 	0x00, 0x00, 0x00, 0x00,
371 	0x00, 0x00, 0x00, 0x00,
372 	0x00, 0x00, 0x00, 0x00,
373 	0x00, 0x00, 0x00, 0x00,
374 	0x00, 0x00, 0x00, 0x00,
375 	0x00, 0x00, 0x00, 0x00,
376 	0x00, 0x00, 0x00, 0x00,
377 
378 	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
379 	0x00, 0x00, 0x00, 0x00,
380 	0x00, 0x00, 0x00, 0x00,
381 	0x50, 0x00, 0x00, 0x00,
382 	0x00, 0x00, 0x00, 0x00,
383 
384 	0x00, 0x00, /* 2 bytes for 4 byte alignment */
385 };
386 
387 /* C-tag (802.1Q): IPv6 + TCP */
388 static const struct ice_dummy_pkt_offsets
389 dummy_vlan_tcp_ipv6_packet_offsets[] = {
390 	{ ICE_MAC_OFOS,		0 },
391 	{ ICE_VLAN_OFOS,	12 },
392 	{ ICE_ETYPE_OL,		16 },
393 	{ ICE_IPV6_OFOS,	18 },
394 	{ ICE_TCP_IL,		58 },
395 	{ ICE_PROTOCOL_LAST,	0 },
396 };
397 
398 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
399 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
400 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
401 	0x00, 0x00, 0x00, 0x00,
402 	0x00, 0x00, 0x00, 0x00,
403 
404 	0x81, 0x00, 0x00, 0x00,	/* ICE_VLAN_OFOS 12 */
405 
406 	0x86, 0xDD,		/* ICE_ETYPE_OL 16 */
407 
408 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
409 	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
410 	0x00, 0x00, 0x00, 0x00,
411 	0x00, 0x00, 0x00, 0x00,
412 	0x00, 0x00, 0x00, 0x00,
413 	0x00, 0x00, 0x00, 0x00,
414 	0x00, 0x00, 0x00, 0x00,
415 	0x00, 0x00, 0x00, 0x00,
416 	0x00, 0x00, 0x00, 0x00,
417 	0x00, 0x00, 0x00, 0x00,
418 
419 	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
420 	0x00, 0x00, 0x00, 0x00,
421 	0x00, 0x00, 0x00, 0x00,
422 	0x50, 0x00, 0x00, 0x00,
423 	0x00, 0x00, 0x00, 0x00,
424 
425 	0x00, 0x00, /* 2 bytes for 4 byte alignment */
426 };
427 
428 /* IPv6 + UDP */
429 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
430 	{ ICE_MAC_OFOS,		0 },
431 	{ ICE_ETYPE_OL,		12 },
432 	{ ICE_IPV6_OFOS,	14 },
433 	{ ICE_UDP_ILOS,		54 },
434 	{ ICE_PROTOCOL_LAST,	0 },
435 };
436 
437 /* IPv6 + UDP dummy packet */
438 static const u8 dummy_udp_ipv6_packet[] = {
439 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
440 	0x00, 0x00, 0x00, 0x00,
441 	0x00, 0x00, 0x00, 0x00,
442 
443 	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */
444 
445 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
446 	0x00, 0x10, 0x11, 0x00, /* Next header UDP */
447 	0x00, 0x00, 0x00, 0x00,
448 	0x00, 0x00, 0x00, 0x00,
449 	0x00, 0x00, 0x00, 0x00,
450 	0x00, 0x00, 0x00, 0x00,
451 	0x00, 0x00, 0x00, 0x00,
452 	0x00, 0x00, 0x00, 0x00,
453 	0x00, 0x00, 0x00, 0x00,
454 	0x00, 0x00, 0x00, 0x00,
455 
456 	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
457 	0x00, 0x10, 0x00, 0x00,
458 
459 	0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
460 	0x00, 0x00, 0x00, 0x00,
461 
462 	0x00, 0x00, /* 2 bytes for 4 byte alignment */
463 };
464 
465 /* C-tag (802.1Q): IPv6 + UDP */
466 static const struct ice_dummy_pkt_offsets
467 dummy_vlan_udp_ipv6_packet_offsets[] = {
468 	{ ICE_MAC_OFOS,		0 },
469 	{ ICE_VLAN_OFOS,	12 },
470 	{ ICE_ETYPE_OL,		16 },
471 	{ ICE_IPV6_OFOS,	18 },
472 	{ ICE_UDP_ILOS,		58 },
473 	{ ICE_PROTOCOL_LAST,	0 },
474 };
475 
476 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
477 static const u8 dummy_vlan_udp_ipv6_packet[] = {
478 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
479 	0x00, 0x00, 0x00, 0x00,
480 	0x00, 0x00, 0x00, 0x00,
481 
482 	0x81, 0x00, 0x00, 0x00,/* ICE_VLAN_OFOS 12 */
483 
484 	0x86, 0xDD,		/* ICE_ETYPE_OL 16 */
485 
486 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
487 	0x00, 0x08, 0x11, 0x00, /* Next header UDP */
488 	0x00, 0x00, 0x00, 0x00,
489 	0x00, 0x00, 0x00, 0x00,
490 	0x00, 0x00, 0x00, 0x00,
491 	0x00, 0x00, 0x00, 0x00,
492 	0x00, 0x00, 0x00, 0x00,
493 	0x00, 0x00, 0x00, 0x00,
494 	0x00, 0x00, 0x00, 0x00,
495 	0x00, 0x00, 0x00, 0x00,
496 
497 	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
498 	0x00, 0x08, 0x00, 0x00,
499 
500 	0x00, 0x00, /* 2 bytes for 4 byte alignment */
501 };
502 
503 #define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
504 	(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \
505 	 (DUMMY_ETH_HDR_LEN * \
506 	  sizeof(((struct ice_sw_rule_lkup_rx_tx *)0)->hdr[0])))
507 #define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
508 	(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr))
509 #define ICE_SW_RULE_LG_ACT_SIZE(n) \
510 	(offsetof(struct ice_aqc_sw_rules_elem, pdata.lg_act.act) + \
511 	 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act[0])))
512 #define ICE_SW_RULE_VSI_LIST_SIZE(n) \
513 	(offsetof(struct ice_aqc_sw_rules_elem, pdata.vsi_list.vsi) + \
514 	 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi[0])))
515 
516 /* this is a recipe to profile association bitmap */
517 static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES],
518 			  ICE_MAX_NUM_PROFILES);
519 
520 /* this is a profile to recipe association bitmap */
521 static DECLARE_BITMAP(profile_to_recipe[ICE_MAX_NUM_PROFILES],
522 			  ICE_MAX_NUM_RECIPES);
523 
524 /**
525  * ice_init_def_sw_recp - initialize the recipe book keeping tables
526  * @hw: pointer to the HW struct
527  *
528  * Allocate memory for the entire recipe table and initialize the structures/
529  * entries corresponding to basic recipes.
530  */
531 enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
532 {
533 	struct ice_sw_recipe *recps;
534 	u8 i;
535 
536 	recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
537 			     sizeof(*recps), GFP_KERNEL);
538 	if (!recps)
539 		return ICE_ERR_NO_MEMORY;
540 
541 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
542 		recps[i].root_rid = i;
543 		INIT_LIST_HEAD(&recps[i].filt_rules);
544 		INIT_LIST_HEAD(&recps[i].filt_replay_rules);
545 		INIT_LIST_HEAD(&recps[i].rg_list);
546 		mutex_init(&recps[i].filt_rule_lock);
547 	}
548 
549 	hw->switch_info->recp_list = recps;
550 
551 	return 0;
552 }
553 
554 /**
555  * ice_aq_get_sw_cfg - get switch configuration
556  * @hw: pointer to the hardware structure
557  * @buf: pointer to the result buffer
558  * @buf_size: length of the buffer available for response
559  * @req_desc: pointer to requested descriptor
560  * @num_elems: pointer to number of elements
561  * @cd: pointer to command details structure or NULL
562  *
563  * Get switch configuration (0x0200) to be placed in buf.
564  * This admin command returns information such as initial VSI/port number
565  * and switch ID it belongs to.
566  *
567  * NOTE: *req_desc is both an input/output parameter.
568  * The caller of this function first calls this function with *request_desc set
569  * to 0. If the response from f/w has *req_desc set to 0, all the switch
570  * configuration information has been returned; if non-zero (meaning not all
571  * the information was returned), the caller should call this function again
572  * with *req_desc set to the previous value returned by f/w to get the
573  * next block of switch configuration information.
574  *
575  * *num_elems is output only parameter. This reflects the number of elements
576  * in response buffer. The caller of this function to use *num_elems while
577  * parsing the response buffer.
578  */
579 static enum ice_status
580 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
581 		  u16 buf_size, u16 *req_desc, u16 *num_elems,
582 		  struct ice_sq_cd *cd)
583 {
584 	struct ice_aqc_get_sw_cfg *cmd;
585 	struct ice_aq_desc desc;
586 	enum ice_status status;
587 
588 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
589 	cmd = &desc.params.get_sw_conf;
590 	cmd->element = cpu_to_le16(*req_desc);
591 
592 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
593 	if (!status) {
594 		*req_desc = le16_to_cpu(cmd->element);
595 		*num_elems = le16_to_cpu(cmd->num_elems);
596 	}
597 
598 	return status;
599 }
600 
601 /**
602  * ice_aq_add_vsi
603  * @hw: pointer to the HW struct
604  * @vsi_ctx: pointer to a VSI context struct
605  * @cd: pointer to command details structure or NULL
606  *
607  * Add a VSI context to the hardware (0x0210)
608  */
609 static enum ice_status
610 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
611 	       struct ice_sq_cd *cd)
612 {
613 	struct ice_aqc_add_update_free_vsi_resp *res;
614 	struct ice_aqc_add_get_update_free_vsi *cmd;
615 	struct ice_aq_desc desc;
616 	enum ice_status status;
617 
618 	cmd = &desc.params.vsi_cmd;
619 	res = &desc.params.add_update_free_vsi_res;
620 
621 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
622 
623 	if (!vsi_ctx->alloc_from_pool)
624 		cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
625 					   ICE_AQ_VSI_IS_VALID);
626 	cmd->vf_id = vsi_ctx->vf_num;
627 
628 	cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
629 
630 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
631 
632 	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
633 				 sizeof(vsi_ctx->info), cd);
634 
635 	if (!status) {
636 		vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M;
637 		vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used);
638 		vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free);
639 	}
640 
641 	return status;
642 }
643 
644 /**
645  * ice_aq_free_vsi
646  * @hw: pointer to the HW struct
647  * @vsi_ctx: pointer to a VSI context struct
648  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
649  * @cd: pointer to command details structure or NULL
650  *
651  * Free VSI context info from hardware (0x0213)
652  */
653 static enum ice_status
654 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
655 		bool keep_vsi_alloc, struct ice_sq_cd *cd)
656 {
657 	struct ice_aqc_add_update_free_vsi_resp *resp;
658 	struct ice_aqc_add_get_update_free_vsi *cmd;
659 	struct ice_aq_desc desc;
660 	enum ice_status status;
661 
662 	cmd = &desc.params.vsi_cmd;
663 	resp = &desc.params.add_update_free_vsi_res;
664 
665 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
666 
667 	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
668 	if (keep_vsi_alloc)
669 		cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);
670 
671 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
672 	if (!status) {
673 		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
674 		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
675 	}
676 
677 	return status;
678 }
679 
680 /**
681  * ice_aq_update_vsi
682  * @hw: pointer to the HW struct
683  * @vsi_ctx: pointer to a VSI context struct
684  * @cd: pointer to command details structure or NULL
685  *
686  * Update VSI context in the hardware (0x0211)
687  */
688 static enum ice_status
689 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
690 		  struct ice_sq_cd *cd)
691 {
692 	struct ice_aqc_add_update_free_vsi_resp *resp;
693 	struct ice_aqc_add_get_update_free_vsi *cmd;
694 	struct ice_aq_desc desc;
695 	enum ice_status status;
696 
697 	cmd = &desc.params.vsi_cmd;
698 	resp = &desc.params.add_update_free_vsi_res;
699 
700 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
701 
702 	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
703 
704 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
705 
706 	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
707 				 sizeof(vsi_ctx->info), cd);
708 
709 	if (!status) {
710 		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
711 		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
712 	}
713 
714 	return status;
715 }
716 
717 /**
718  * ice_is_vsi_valid - check whether the VSI is valid or not
719  * @hw: pointer to the HW struct
720  * @vsi_handle: VSI handle
721  *
722  * check whether the VSI is valid or not
723  */
724 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
725 {
726 	return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
727 }
728 
729 /**
730  * ice_get_hw_vsi_num - return the HW VSI number
731  * @hw: pointer to the HW struct
732  * @vsi_handle: VSI handle
733  *
734  * return the HW VSI number
735  * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
736  */
737 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
738 {
739 	return hw->vsi_ctx[vsi_handle]->vsi_num;
740 }
741 
742 /**
743  * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
744  * @hw: pointer to the HW struct
745  * @vsi_handle: VSI handle
746  *
747  * return the VSI context entry for a given VSI handle
748  */
749 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
750 {
751 	return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
752 }
753 
754 /**
755  * ice_save_vsi_ctx - save the VSI context for a given VSI handle
756  * @hw: pointer to the HW struct
757  * @vsi_handle: VSI handle
758  * @vsi: VSI context pointer
759  *
760  * save the VSI context entry for a given VSI handle
761  */
762 static void
763 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
764 {
765 	hw->vsi_ctx[vsi_handle] = vsi;
766 }
767 
768 /**
769  * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
770  * @hw: pointer to the HW struct
771  * @vsi_handle: VSI handle
772  */
773 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
774 {
775 	struct ice_vsi_ctx *vsi;
776 	u8 i;
777 
778 	vsi = ice_get_vsi_ctx(hw, vsi_handle);
779 	if (!vsi)
780 		return;
781 	ice_for_each_traffic_class(i) {
782 		if (vsi->lan_q_ctx[i]) {
783 			devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
784 			vsi->lan_q_ctx[i] = NULL;
785 		}
786 		if (vsi->rdma_q_ctx[i]) {
787 			devm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]);
788 			vsi->rdma_q_ctx[i] = NULL;
789 		}
790 	}
791 }
792 
793 /**
794  * ice_clear_vsi_ctx - clear the VSI context entry
795  * @hw: pointer to the HW struct
796  * @vsi_handle: VSI handle
797  *
798  * clear the VSI context entry
799  */
800 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
801 {
802 	struct ice_vsi_ctx *vsi;
803 
804 	vsi = ice_get_vsi_ctx(hw, vsi_handle);
805 	if (vsi) {
806 		ice_clear_vsi_q_ctx(hw, vsi_handle);
807 		devm_kfree(ice_hw_to_dev(hw), vsi);
808 		hw->vsi_ctx[vsi_handle] = NULL;
809 	}
810 }
811 
812 /**
813  * ice_clear_all_vsi_ctx - clear all the VSI context entries
814  * @hw: pointer to the HW struct
815  */
816 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
817 {
818 	u16 i;
819 
820 	for (i = 0; i < ICE_MAX_VSI; i++)
821 		ice_clear_vsi_ctx(hw, i);
822 }
823 
824 /**
825  * ice_add_vsi - add VSI context to the hardware and VSI handle list
826  * @hw: pointer to the HW struct
827  * @vsi_handle: unique VSI handle provided by drivers
828  * @vsi_ctx: pointer to a VSI context struct
829  * @cd: pointer to command details structure or NULL
830  *
831  * Add a VSI context to the hardware also add it into the VSI handle list.
832  * If this function gets called after reset for existing VSIs then update
833  * with the new HW VSI number in the corresponding VSI handle list entry.
834  */
835 enum ice_status
836 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
837 	    struct ice_sq_cd *cd)
838 {
839 	struct ice_vsi_ctx *tmp_vsi_ctx;
840 	enum ice_status status;
841 
842 	if (vsi_handle >= ICE_MAX_VSI)
843 		return ICE_ERR_PARAM;
844 	status = ice_aq_add_vsi(hw, vsi_ctx, cd);
845 	if (status)
846 		return status;
847 	tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
848 	if (!tmp_vsi_ctx) {
849 		/* Create a new VSI context */
850 		tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
851 					   sizeof(*tmp_vsi_ctx), GFP_KERNEL);
852 		if (!tmp_vsi_ctx) {
853 			ice_aq_free_vsi(hw, vsi_ctx, false, cd);
854 			return ICE_ERR_NO_MEMORY;
855 		}
856 		*tmp_vsi_ctx = *vsi_ctx;
857 		ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
858 	} else {
859 		/* update with new HW VSI num */
860 		tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
861 	}
862 
863 	return 0;
864 }
865 
866 /**
867  * ice_free_vsi- free VSI context from hardware and VSI handle list
868  * @hw: pointer to the HW struct
869  * @vsi_handle: unique VSI handle
870  * @vsi_ctx: pointer to a VSI context struct
871  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
872  * @cd: pointer to command details structure or NULL
873  *
874  * Free VSI context info from hardware as well as from VSI handle list
875  */
876 enum ice_status
877 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
878 	     bool keep_vsi_alloc, struct ice_sq_cd *cd)
879 {
880 	enum ice_status status;
881 
882 	if (!ice_is_vsi_valid(hw, vsi_handle))
883 		return ICE_ERR_PARAM;
884 	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
885 	status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
886 	if (!status)
887 		ice_clear_vsi_ctx(hw, vsi_handle);
888 	return status;
889 }
890 
891 /**
892  * ice_update_vsi
893  * @hw: pointer to the HW struct
894  * @vsi_handle: unique VSI handle
895  * @vsi_ctx: pointer to a VSI context struct
896  * @cd: pointer to command details structure or NULL
897  *
898  * Update VSI context in the hardware
899  */
900 enum ice_status
901 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
902 	       struct ice_sq_cd *cd)
903 {
904 	if (!ice_is_vsi_valid(hw, vsi_handle))
905 		return ICE_ERR_PARAM;
906 	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
907 	return ice_aq_update_vsi(hw, vsi_ctx, cd);
908 }
909 
910 /**
911  * ice_cfg_rdma_fltr - enable/disable RDMA filtering on VSI
912  * @hw: pointer to HW struct
913  * @vsi_handle: VSI SW index
914  * @enable: boolean for enable/disable
915  */
916 int
917 ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
918 {
919 	struct ice_vsi_ctx *ctx;
920 
921 	ctx = ice_get_vsi_ctx(hw, vsi_handle);
922 	if (!ctx)
923 		return -EIO;
924 
925 	if (enable)
926 		ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
927 	else
928 		ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
929 
930 	return ice_status_to_errno(ice_update_vsi(hw, vsi_handle, ctx, NULL));
931 }
932 
933 /**
934  * ice_aq_alloc_free_vsi_list
935  * @hw: pointer to the HW struct
936  * @vsi_list_id: VSI list ID returned or used for lookup
937  * @lkup_type: switch rule filter lookup type
938  * @opc: switch rules population command type - pass in the command opcode
939  *
940  * allocates or free a VSI list resource
941  */
942 static enum ice_status
943 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
944 			   enum ice_sw_lkup_type lkup_type,
945 			   enum ice_adminq_opc opc)
946 {
947 	struct ice_aqc_alloc_free_res_elem *sw_buf;
948 	struct ice_aqc_res_elem *vsi_ele;
949 	enum ice_status status;
950 	u16 buf_len;
951 
952 	buf_len = struct_size(sw_buf, elem, 1);
953 	sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
954 	if (!sw_buf)
955 		return ICE_ERR_NO_MEMORY;
956 	sw_buf->num_elems = cpu_to_le16(1);
957 
958 	if (lkup_type == ICE_SW_LKUP_MAC ||
959 	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
960 	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
961 	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
962 	    lkup_type == ICE_SW_LKUP_PROMISC ||
963 	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN) {
964 		sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
965 	} else if (lkup_type == ICE_SW_LKUP_VLAN) {
966 		sw_buf->res_type =
967 			cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
968 	} else {
969 		status = ICE_ERR_PARAM;
970 		goto ice_aq_alloc_free_vsi_list_exit;
971 	}
972 
973 	if (opc == ice_aqc_opc_free_res)
974 		sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);
975 
976 	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
977 	if (status)
978 		goto ice_aq_alloc_free_vsi_list_exit;
979 
980 	if (opc == ice_aqc_opc_alloc_res) {
981 		vsi_ele = &sw_buf->elem[0];
982 		*vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
983 	}
984 
985 ice_aq_alloc_free_vsi_list_exit:
986 	devm_kfree(ice_hw_to_dev(hw), sw_buf);
987 	return status;
988 }
989 
990 /**
991  * ice_aq_sw_rules - add/update/remove switch rules
992  * @hw: pointer to the HW struct
993  * @rule_list: pointer to switch rule population list
994  * @rule_list_sz: total size of the rule list in bytes
995  * @num_rules: number of switch rules in the rule_list
996  * @opc: switch rules population command type - pass in the command opcode
997  * @cd: pointer to command details structure or NULL
998  *
999  * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1000  */
1001 enum ice_status
1002 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1003 		u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1004 {
1005 	struct ice_aq_desc desc;
1006 	enum ice_status status;
1007 
1008 	if (opc != ice_aqc_opc_add_sw_rules &&
1009 	    opc != ice_aqc_opc_update_sw_rules &&
1010 	    opc != ice_aqc_opc_remove_sw_rules)
1011 		return ICE_ERR_PARAM;
1012 
1013 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
1014 
1015 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1016 	desc.params.sw_rules.num_rules_fltr_entry_index =
1017 		cpu_to_le16(num_rules);
1018 	status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1019 	if (opc != ice_aqc_opc_add_sw_rules &&
1020 	    hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
1021 		status = ICE_ERR_DOES_NOT_EXIST;
1022 
1023 	return status;
1024 }
1025 
1026 /**
1027  * ice_aq_add_recipe - add switch recipe
1028  * @hw: pointer to the HW struct
1029  * @s_recipe_list: pointer to switch rule population list
1030  * @num_recipes: number of switch recipes in the list
1031  * @cd: pointer to command details structure or NULL
1032  *
1033  * Add(0x0290)
1034  */
1035 static enum ice_status
1036 ice_aq_add_recipe(struct ice_hw *hw,
1037 		  struct ice_aqc_recipe_data_elem *s_recipe_list,
1038 		  u16 num_recipes, struct ice_sq_cd *cd)
1039 {
1040 	struct ice_aqc_add_get_recipe *cmd;
1041 	struct ice_aq_desc desc;
1042 	u16 buf_size;
1043 
1044 	cmd = &desc.params.add_get_recipe;
1045 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1046 
1047 	cmd->num_sub_recipes = cpu_to_le16(num_recipes);
1048 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1049 
1050 	buf_size = num_recipes * sizeof(*s_recipe_list);
1051 
1052 	return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1053 }
1054 
1055 /**
1056  * ice_aq_get_recipe - get switch recipe
1057  * @hw: pointer to the HW struct
1058  * @s_recipe_list: pointer to switch rule population list
1059  * @num_recipes: pointer to the number of recipes (input and output)
1060  * @recipe_root: root recipe number of recipe(s) to retrieve
1061  * @cd: pointer to command details structure or NULL
1062  *
1063  * Get(0x0292)
1064  *
1065  * On input, *num_recipes should equal the number of entries in s_recipe_list.
1066  * On output, *num_recipes will equal the number of entries returned in
1067  * s_recipe_list.
1068  *
1069  * The caller must supply enough space in s_recipe_list to hold all possible
1070  * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1071  */
1072 static enum ice_status
1073 ice_aq_get_recipe(struct ice_hw *hw,
1074 		  struct ice_aqc_recipe_data_elem *s_recipe_list,
1075 		  u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1076 {
1077 	struct ice_aqc_add_get_recipe *cmd;
1078 	struct ice_aq_desc desc;
1079 	enum ice_status status;
1080 	u16 buf_size;
1081 
1082 	if (*num_recipes != ICE_MAX_NUM_RECIPES)
1083 		return ICE_ERR_PARAM;
1084 
1085 	cmd = &desc.params.add_get_recipe;
1086 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1087 
1088 	cmd->return_index = cpu_to_le16(recipe_root);
1089 	cmd->num_sub_recipes = 0;
1090 
1091 	buf_size = *num_recipes * sizeof(*s_recipe_list);
1092 
1093 	status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1094 	*num_recipes = le16_to_cpu(cmd->num_sub_recipes);
1095 
1096 	return status;
1097 }
1098 
1099 /**
1100  * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1101  * @hw: pointer to the HW struct
1102  * @profile_id: package profile ID to associate the recipe with
1103  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1104  * @cd: pointer to command details structure or NULL
1105  * Recipe to profile association (0x0291)
1106  */
1107 static enum ice_status
1108 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1109 			     struct ice_sq_cd *cd)
1110 {
1111 	struct ice_aqc_recipe_to_profile *cmd;
1112 	struct ice_aq_desc desc;
1113 
1114 	cmd = &desc.params.recipe_to_profile;
1115 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
1116 	cmd->profile_id = cpu_to_le16(profile_id);
1117 	/* Set the recipe ID bit in the bitmask to let the device know which
1118 	 * profile we are associating the recipe to
1119 	 */
1120 	memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc));
1121 
1122 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1123 }
1124 
1125 /**
1126  * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1127  * @hw: pointer to the HW struct
1128  * @profile_id: package profile ID to associate the recipe with
1129  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1130  * @cd: pointer to command details structure or NULL
1131  * Associate profile ID with given recipe (0x0293)
1132  */
1133 static enum ice_status
1134 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1135 			     struct ice_sq_cd *cd)
1136 {
1137 	struct ice_aqc_recipe_to_profile *cmd;
1138 	struct ice_aq_desc desc;
1139 	enum ice_status status;
1140 
1141 	cmd = &desc.params.recipe_to_profile;
1142 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
1143 	cmd->profile_id = cpu_to_le16(profile_id);
1144 
1145 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1146 	if (!status)
1147 		memcpy(r_bitmap, cmd->recipe_assoc, sizeof(cmd->recipe_assoc));
1148 
1149 	return status;
1150 }
1151 
1152 /**
1153  * ice_alloc_recipe - add recipe resource
1154  * @hw: pointer to the hardware structure
1155  * @rid: recipe ID returned as response to AQ call
1156  */
1157 static enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
1158 {
1159 	struct ice_aqc_alloc_free_res_elem *sw_buf;
1160 	enum ice_status status;
1161 	u16 buf_len;
1162 
1163 	buf_len = struct_size(sw_buf, elem, 1);
1164 	sw_buf = kzalloc(buf_len, GFP_KERNEL);
1165 	if (!sw_buf)
1166 		return ICE_ERR_NO_MEMORY;
1167 
1168 	sw_buf->num_elems = cpu_to_le16(1);
1169 	sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
1170 					ICE_AQC_RES_TYPE_S) |
1171 					ICE_AQC_RES_TYPE_FLAG_SHARED);
1172 	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1173 				       ice_aqc_opc_alloc_res, NULL);
1174 	if (!status)
1175 		*rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp);
1176 	kfree(sw_buf);
1177 
1178 	return status;
1179 }
1180 
1181 /**
1182  * ice_get_recp_to_prof_map - updates recipe to profile mapping
1183  * @hw: pointer to hardware structure
1184  *
1185  * This function is used to populate recipe_to_profile matrix where index to
1186  * this array is the recipe ID and the element is the mapping of which profiles
1187  * is this recipe mapped to.
1188  */
1189 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
1190 {
1191 	DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
1192 	u16 i;
1193 
1194 	for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
1195 		u16 j;
1196 
1197 		bitmap_zero(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
1198 		bitmap_zero(r_bitmap, ICE_MAX_NUM_RECIPES);
1199 		if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
1200 			continue;
1201 		bitmap_copy(profile_to_recipe[i], r_bitmap,
1202 			    ICE_MAX_NUM_RECIPES);
1203 		for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
1204 			set_bit(i, recipe_to_profile[j]);
1205 	}
1206 }
1207 
1208 /**
1209  * ice_collect_result_idx - copy result index values
1210  * @buf: buffer that contains the result index
1211  * @recp: the recipe struct to copy data into
1212  */
1213 static void
1214 ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
1215 		       struct ice_sw_recipe *recp)
1216 {
1217 	if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1218 		set_bit(buf->content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
1219 			recp->res_idxs);
1220 }
1221 
1222 /**
1223  * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
1224  * @hw: pointer to hardware structure
1225  * @recps: struct that we need to populate
1226  * @rid: recipe ID that we are populating
1227  * @refresh_required: true if we should get recipe to profile mapping from FW
1228  *
1229  * This function is used to populate all the necessary entries into our
1230  * bookkeeping so that we have a current list of all the recipes that are
1231  * programmed in the firmware.
1232  */
1233 static enum ice_status
1234 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
1235 		    bool *refresh_required)
1236 {
1237 	DECLARE_BITMAP(result_bm, ICE_MAX_FV_WORDS);
1238 	struct ice_aqc_recipe_data_elem *tmp;
1239 	u16 num_recps = ICE_MAX_NUM_RECIPES;
1240 	struct ice_prot_lkup_ext *lkup_exts;
1241 	enum ice_status status;
1242 	u8 fv_word_idx = 0;
1243 	u16 sub_recps;
1244 
1245 	bitmap_zero(result_bm, ICE_MAX_FV_WORDS);
1246 
1247 	/* we need a buffer big enough to accommodate all the recipes */
1248 	tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
1249 	if (!tmp)
1250 		return ICE_ERR_NO_MEMORY;
1251 
1252 	tmp[0].recipe_indx = rid;
1253 	status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
1254 	/* non-zero status meaning recipe doesn't exist */
1255 	if (status)
1256 		goto err_unroll;
1257 
1258 	/* Get recipe to profile map so that we can get the fv from lkups that
1259 	 * we read for a recipe from FW. Since we want to minimize the number of
1260 	 * times we make this FW call, just make one call and cache the copy
1261 	 * until a new recipe is added. This operation is only required the
1262 	 * first time to get the changes from FW. Then to search existing
1263 	 * entries we don't need to update the cache again until another recipe
1264 	 * gets added.
1265 	 */
1266 	if (*refresh_required) {
1267 		ice_get_recp_to_prof_map(hw);
1268 		*refresh_required = false;
1269 	}
1270 
1271 	/* Start populating all the entries for recps[rid] based on lkups from
1272 	 * firmware. Note that we are only creating the root recipe in our
1273 	 * database.
1274 	 */
1275 	lkup_exts = &recps[rid].lkup_exts;
1276 
1277 	for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
1278 		struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
1279 		struct ice_recp_grp_entry *rg_entry;
1280 		u8 i, prof, idx, prot = 0;
1281 		bool is_root;
1282 		u16 off = 0;
1283 
1284 		rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry),
1285 					GFP_KERNEL);
1286 		if (!rg_entry) {
1287 			status = ICE_ERR_NO_MEMORY;
1288 			goto err_unroll;
1289 		}
1290 
1291 		idx = root_bufs.recipe_indx;
1292 		is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
1293 
1294 		/* Mark all result indices in this chain */
1295 		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1296 			set_bit(root_bufs.content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
1297 				result_bm);
1298 
1299 		/* get the first profile that is associated with rid */
1300 		prof = find_first_bit(recipe_to_profile[idx],
1301 				      ICE_MAX_NUM_PROFILES);
1302 		for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
1303 			u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
1304 
1305 			rg_entry->fv_idx[i] = lkup_indx;
1306 			rg_entry->fv_mask[i] =
1307 				le16_to_cpu(root_bufs.content.mask[i + 1]);
1308 
1309 			/* If the recipe is a chained recipe then all its
1310 			 * child recipe's result will have a result index.
1311 			 * To fill fv_words we should not use those result
1312 			 * index, we only need the protocol ids and offsets.
1313 			 * We will skip all the fv_idx which stores result
1314 			 * index in them. We also need to skip any fv_idx which
1315 			 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
1316 			 * valid offset value.
1317 			 */
1318 			if (test_bit(rg_entry->fv_idx[i], hw->switch_info->prof_res_bm[prof]) ||
1319 			    rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
1320 			    rg_entry->fv_idx[i] == 0)
1321 				continue;
1322 
1323 			ice_find_prot_off(hw, ICE_BLK_SW, prof,
1324 					  rg_entry->fv_idx[i], &prot, &off);
1325 			lkup_exts->fv_words[fv_word_idx].prot_id = prot;
1326 			lkup_exts->fv_words[fv_word_idx].off = off;
1327 			lkup_exts->field_mask[fv_word_idx] =
1328 				rg_entry->fv_mask[i];
1329 			fv_word_idx++;
1330 		}
1331 		/* populate rg_list with the data from the child entry of this
1332 		 * recipe
1333 		 */
1334 		list_add(&rg_entry->l_entry, &recps[rid].rg_list);
1335 
1336 		/* Propagate some data to the recipe database */
1337 		recps[idx].is_root = !!is_root;
1338 		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
1339 		bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
1340 		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
1341 			recps[idx].chain_idx = root_bufs.content.result_indx &
1342 				~ICE_AQ_RECIPE_RESULT_EN;
1343 			set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
1344 		} else {
1345 			recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
1346 		}
1347 
1348 		if (!is_root)
1349 			continue;
1350 
1351 		/* Only do the following for root recipes entries */
1352 		memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
1353 		       sizeof(recps[idx].r_bitmap));
1354 		recps[idx].root_rid = root_bufs.content.rid &
1355 			~ICE_AQ_RECIPE_ID_IS_ROOT;
1356 		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
1357 	}
1358 
1359 	/* Complete initialization of the root recipe entry */
1360 	lkup_exts->n_val_words = fv_word_idx;
1361 	recps[rid].big_recp = (num_recps > 1);
1362 	recps[rid].n_grp_count = (u8)num_recps;
1363 	recps[rid].root_buf = devm_kmemdup(ice_hw_to_dev(hw), tmp,
1364 					   recps[rid].n_grp_count * sizeof(*recps[rid].root_buf),
1365 					   GFP_KERNEL);
1366 	if (!recps[rid].root_buf) {
1367 		status = ICE_ERR_NO_MEMORY;
1368 		goto err_unroll;
1369 	}
1370 
1371 	/* Copy result indexes */
1372 	bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
1373 	recps[rid].recp_created = true;
1374 
1375 err_unroll:
1376 	kfree(tmp);
1377 	return status;
1378 }
1379 
1380 /* ice_init_port_info - Initialize port_info with switch configuration data
1381  * @pi: pointer to port_info
1382  * @vsi_port_num: VSI number or port number
1383  * @type: Type of switch element (port or VSI)
1384  * @swid: switch ID of the switch the element is attached to
1385  * @pf_vf_num: PF or VF number
1386  * @is_vf: true if the element is a VF, false otherwise
1387  */
1388 static void
1389 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
1390 		   u16 swid, u16 pf_vf_num, bool is_vf)
1391 {
1392 	switch (type) {
1393 	case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1394 		pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
1395 		pi->sw_id = swid;
1396 		pi->pf_vf_num = pf_vf_num;
1397 		pi->is_vf = is_vf;
1398 		pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
1399 		pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
1400 		break;
1401 	default:
1402 		ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
1403 		break;
1404 	}
1405 }
1406 
1407 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
1408  * @hw: pointer to the hardware structure
1409  */
1410 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
1411 {
1412 	struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
1413 	enum ice_status status;
1414 	u16 req_desc = 0;
1415 	u16 num_elems;
1416 	u16 i;
1417 
1418 	rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN,
1419 			    GFP_KERNEL);
1420 
1421 	if (!rbuf)
1422 		return ICE_ERR_NO_MEMORY;
1423 
1424 	/* Multiple calls to ice_aq_get_sw_cfg may be required
1425 	 * to get all the switch configuration information. The need
1426 	 * for additional calls is indicated by ice_aq_get_sw_cfg
1427 	 * writing a non-zero value in req_desc
1428 	 */
1429 	do {
1430 		struct ice_aqc_get_sw_cfg_resp_elem *ele;
1431 
1432 		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
1433 					   &req_desc, &num_elems, NULL);
1434 
1435 		if (status)
1436 			break;
1437 
1438 		for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
1439 			u16 pf_vf_num, swid, vsi_port_num;
1440 			bool is_vf = false;
1441 			u8 res_type;
1442 
1443 			vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
1444 				ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
1445 
1446 			pf_vf_num = le16_to_cpu(ele->pf_vf_num) &
1447 				ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
1448 
1449 			swid = le16_to_cpu(ele->swid);
1450 
1451 			if (le16_to_cpu(ele->pf_vf_num) &
1452 			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
1453 				is_vf = true;
1454 
1455 			res_type = (u8)(le16_to_cpu(ele->vsi_port_num) >>
1456 					ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
1457 
1458 			if (res_type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
1459 				/* FW VSI is not needed. Just continue. */
1460 				continue;
1461 			}
1462 
1463 			ice_init_port_info(hw->port_info, vsi_port_num,
1464 					   res_type, swid, pf_vf_num, is_vf);
1465 		}
1466 	} while (req_desc && !status);
1467 
1468 	devm_kfree(ice_hw_to_dev(hw), rbuf);
1469 	return status;
1470 }
1471 
1472 /**
1473  * ice_fill_sw_info - Helper function to populate lb_en and lan_en
1474  * @hw: pointer to the hardware structure
1475  * @fi: filter info structure to fill/update
1476  *
1477  * This helper function populates the lb_en and lan_en elements of the provided
1478  * ice_fltr_info struct using the switch's type and characteristics of the
1479  * switch rule being configured.
1480  */
1481 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
1482 {
1483 	fi->lb_en = false;
1484 	fi->lan_en = false;
1485 	if ((fi->flag & ICE_FLTR_TX) &&
1486 	    (fi->fltr_act == ICE_FWD_TO_VSI ||
1487 	     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
1488 	     fi->fltr_act == ICE_FWD_TO_Q ||
1489 	     fi->fltr_act == ICE_FWD_TO_QGRP)) {
1490 		/* Setting LB for prune actions will result in replicated
1491 		 * packets to the internal switch that will be dropped.
1492 		 */
1493 		if (fi->lkup_type != ICE_SW_LKUP_VLAN)
1494 			fi->lb_en = true;
1495 
1496 		/* Set lan_en to TRUE if
1497 		 * 1. The switch is a VEB AND
1498 		 * 2
1499 		 * 2.1 The lookup is a directional lookup like ethertype,
1500 		 * promiscuous, ethertype-MAC, promiscuous-VLAN
1501 		 * and default-port OR
1502 		 * 2.2 The lookup is VLAN, OR
1503 		 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
1504 		 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
1505 		 *
1506 		 * OR
1507 		 *
1508 		 * The switch is a VEPA.
1509 		 *
1510 		 * In all other cases, the LAN enable has to be set to false.
1511 		 */
1512 		if (hw->evb_veb) {
1513 			if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1514 			    fi->lkup_type == ICE_SW_LKUP_PROMISC ||
1515 			    fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1516 			    fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1517 			    fi->lkup_type == ICE_SW_LKUP_DFLT ||
1518 			    fi->lkup_type == ICE_SW_LKUP_VLAN ||
1519 			    (fi->lkup_type == ICE_SW_LKUP_MAC &&
1520 			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
1521 			    (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
1522 			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)))
1523 				fi->lan_en = true;
1524 		} else {
1525 			fi->lan_en = true;
1526 		}
1527 	}
1528 }
1529 
1530 /**
1531  * ice_fill_sw_rule - Helper function to fill switch rule structure
1532  * @hw: pointer to the hardware structure
1533  * @f_info: entry containing packet forwarding information
1534  * @s_rule: switch rule structure to be filled in based on mac_entry
1535  * @opc: switch rules population command type - pass in the command opcode
1536  */
1537 static void
1538 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
1539 		 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
1540 {
1541 	u16 vlan_id = ICE_MAX_VLAN_ID + 1;
1542 	void *daddr = NULL;
1543 	u16 eth_hdr_sz;
1544 	u8 *eth_hdr;
1545 	u32 act = 0;
1546 	__be16 *off;
1547 	u8 q_rgn;
1548 
1549 	if (opc == ice_aqc_opc_remove_sw_rules) {
1550 		s_rule->pdata.lkup_tx_rx.act = 0;
1551 		s_rule->pdata.lkup_tx_rx.index =
1552 			cpu_to_le16(f_info->fltr_rule_id);
1553 		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
1554 		return;
1555 	}
1556 
1557 	eth_hdr_sz = sizeof(dummy_eth_header);
1558 	eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
1559 
1560 	/* initialize the ether header with a dummy header */
1561 	memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);
1562 	ice_fill_sw_info(hw, f_info);
1563 
1564 	switch (f_info->fltr_act) {
1565 	case ICE_FWD_TO_VSI:
1566 		act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
1567 			ICE_SINGLE_ACT_VSI_ID_M;
1568 		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1569 			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1570 				ICE_SINGLE_ACT_VALID_BIT;
1571 		break;
1572 	case ICE_FWD_TO_VSI_LIST:
1573 		act |= ICE_SINGLE_ACT_VSI_LIST;
1574 		act |= (f_info->fwd_id.vsi_list_id <<
1575 			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
1576 			ICE_SINGLE_ACT_VSI_LIST_ID_M;
1577 		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1578 			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1579 				ICE_SINGLE_ACT_VALID_BIT;
1580 		break;
1581 	case ICE_FWD_TO_Q:
1582 		act |= ICE_SINGLE_ACT_TO_Q;
1583 		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1584 			ICE_SINGLE_ACT_Q_INDEX_M;
1585 		break;
1586 	case ICE_DROP_PACKET:
1587 		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
1588 			ICE_SINGLE_ACT_VALID_BIT;
1589 		break;
1590 	case ICE_FWD_TO_QGRP:
1591 		q_rgn = f_info->qgrp_size > 0 ?
1592 			(u8)ilog2(f_info->qgrp_size) : 0;
1593 		act |= ICE_SINGLE_ACT_TO_Q;
1594 		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1595 			ICE_SINGLE_ACT_Q_INDEX_M;
1596 		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
1597 			ICE_SINGLE_ACT_Q_REGION_M;
1598 		break;
1599 	default:
1600 		return;
1601 	}
1602 
1603 	if (f_info->lb_en)
1604 		act |= ICE_SINGLE_ACT_LB_ENABLE;
1605 	if (f_info->lan_en)
1606 		act |= ICE_SINGLE_ACT_LAN_ENABLE;
1607 
1608 	switch (f_info->lkup_type) {
1609 	case ICE_SW_LKUP_MAC:
1610 		daddr = f_info->l_data.mac.mac_addr;
1611 		break;
1612 	case ICE_SW_LKUP_VLAN:
1613 		vlan_id = f_info->l_data.vlan.vlan_id;
1614 		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
1615 		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
1616 			act |= ICE_SINGLE_ACT_PRUNE;
1617 			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
1618 		}
1619 		break;
1620 	case ICE_SW_LKUP_ETHERTYPE_MAC:
1621 		daddr = f_info->l_data.ethertype_mac.mac_addr;
1622 		fallthrough;
1623 	case ICE_SW_LKUP_ETHERTYPE:
1624 		off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
1625 		*off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
1626 		break;
1627 	case ICE_SW_LKUP_MAC_VLAN:
1628 		daddr = f_info->l_data.mac_vlan.mac_addr;
1629 		vlan_id = f_info->l_data.mac_vlan.vlan_id;
1630 		break;
1631 	case ICE_SW_LKUP_PROMISC_VLAN:
1632 		vlan_id = f_info->l_data.mac_vlan.vlan_id;
1633 		fallthrough;
1634 	case ICE_SW_LKUP_PROMISC:
1635 		daddr = f_info->l_data.mac_vlan.mac_addr;
1636 		break;
1637 	default:
1638 		break;
1639 	}
1640 
1641 	s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
1642 		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
1643 		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
1644 
1645 	/* Recipe set depending on lookup type */
1646 	s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(f_info->lkup_type);
1647 	s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(f_info->src);
1648 	s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);
1649 
1650 	if (daddr)
1651 		ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);
1652 
1653 	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
1654 		off = (__force __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
1655 		*off = cpu_to_be16(vlan_id);
1656 	}
1657 
1658 	/* Create the switch rule with the final dummy Ethernet header */
1659 	if (opc != ice_aqc_opc_update_sw_rules)
1660 		s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(eth_hdr_sz);
1661 }
1662 
1663 /**
1664  * ice_add_marker_act
1665  * @hw: pointer to the hardware structure
1666  * @m_ent: the management entry for which sw marker needs to be added
1667  * @sw_marker: sw marker to tag the Rx descriptor with
1668  * @l_id: large action resource ID
1669  *
1670  * Create a large action to hold software marker and update the switch rule
1671  * entry pointed by m_ent with newly created large action
1672  */
1673 static enum ice_status
1674 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
1675 		   u16 sw_marker, u16 l_id)
1676 {
1677 	struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
1678 	/* For software marker we need 3 large actions
1679 	 * 1. FWD action: FWD TO VSI or VSI LIST
1680 	 * 2. GENERIC VALUE action to hold the profile ID
1681 	 * 3. GENERIC VALUE action to hold the software marker ID
1682 	 */
1683 	const u16 num_lg_acts = 3;
1684 	enum ice_status status;
1685 	u16 lg_act_size;
1686 	u16 rules_size;
1687 	u32 act;
1688 	u16 id;
1689 
1690 	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
1691 		return ICE_ERR_PARAM;
1692 
1693 	/* Create two back-to-back switch rules and submit them to the HW using
1694 	 * one memory buffer:
1695 	 *    1. Large Action
1696 	 *    2. Look up Tx Rx
1697 	 */
1698 	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
1699 	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
1700 	lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
1701 	if (!lg_act)
1702 		return ICE_ERR_NO_MEMORY;
1703 
1704 	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
1705 
1706 	/* Fill in the first switch rule i.e. large action */
1707 	lg_act->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
1708 	lg_act->pdata.lg_act.index = cpu_to_le16(l_id);
1709 	lg_act->pdata.lg_act.size = cpu_to_le16(num_lg_acts);
1710 
1711 	/* First action VSI forwarding or VSI list forwarding depending on how
1712 	 * many VSIs
1713 	 */
1714 	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
1715 		m_ent->fltr_info.fwd_id.hw_vsi_id;
1716 
1717 	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
1718 	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
1719 	if (m_ent->vsi_count > 1)
1720 		act |= ICE_LG_ACT_VSI_LIST;
1721 	lg_act->pdata.lg_act.act[0] = cpu_to_le32(act);
1722 
1723 	/* Second action descriptor type */
1724 	act = ICE_LG_ACT_GENERIC;
1725 
1726 	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
1727 	lg_act->pdata.lg_act.act[1] = cpu_to_le32(act);
1728 
1729 	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
1730 	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
1731 
1732 	/* Third action Marker value */
1733 	act |= ICE_LG_ACT_GENERIC;
1734 	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
1735 		ICE_LG_ACT_GENERIC_VALUE_M;
1736 
1737 	lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);
1738 
1739 	/* call the fill switch rule to fill the lookup Tx Rx structure */
1740 	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
1741 			 ice_aqc_opc_update_sw_rules);
1742 
1743 	/* Update the action to point to the large action ID */
1744 	rx_tx->pdata.lkup_tx_rx.act =
1745 		cpu_to_le32(ICE_SINGLE_ACT_PTR |
1746 			    ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
1747 			     ICE_SINGLE_ACT_PTR_VAL_M));
1748 
1749 	/* Use the filter rule ID of the previously created rule with single
1750 	 * act. Once the update happens, hardware will treat this as large
1751 	 * action
1752 	 */
1753 	rx_tx->pdata.lkup_tx_rx.index =
1754 		cpu_to_le16(m_ent->fltr_info.fltr_rule_id);
1755 
1756 	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
1757 				 ice_aqc_opc_update_sw_rules, NULL);
1758 	if (!status) {
1759 		m_ent->lg_act_idx = l_id;
1760 		m_ent->sw_marker_id = sw_marker;
1761 	}
1762 
1763 	devm_kfree(ice_hw_to_dev(hw), lg_act);
1764 	return status;
1765 }
1766 
1767 /**
1768  * ice_create_vsi_list_map
1769  * @hw: pointer to the hardware structure
1770  * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
1771  * @num_vsi: number of VSI handles in the array
1772  * @vsi_list_id: VSI list ID generated as part of allocate resource
1773  *
1774  * Helper function to create a new entry of VSI list ID to VSI mapping
1775  * using the given VSI list ID
1776  */
1777 static struct ice_vsi_list_map_info *
1778 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
1779 			u16 vsi_list_id)
1780 {
1781 	struct ice_switch_info *sw = hw->switch_info;
1782 	struct ice_vsi_list_map_info *v_map;
1783 	int i;
1784 
1785 	v_map = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*v_map), GFP_KERNEL);
1786 	if (!v_map)
1787 		return NULL;
1788 
1789 	v_map->vsi_list_id = vsi_list_id;
1790 	v_map->ref_cnt = 1;
1791 	for (i = 0; i < num_vsi; i++)
1792 		set_bit(vsi_handle_arr[i], v_map->vsi_map);
1793 
1794 	list_add(&v_map->list_entry, &sw->vsi_list_map_head);
1795 	return v_map;
1796 }
1797 
1798 /**
1799  * ice_update_vsi_list_rule
1800  * @hw: pointer to the hardware structure
1801  * @vsi_handle_arr: array of VSI handles to form a VSI list
1802  * @num_vsi: number of VSI handles in the array
1803  * @vsi_list_id: VSI list ID generated as part of allocate resource
1804  * @remove: Boolean value to indicate if this is a remove action
1805  * @opc: switch rules population command type - pass in the command opcode
1806  * @lkup_type: lookup type of the filter
1807  *
1808  * Call AQ command to add a new switch rule or update existing switch rule
1809  * using the given VSI list ID
1810  */
1811 static enum ice_status
1812 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
1813 			 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
1814 			 enum ice_sw_lkup_type lkup_type)
1815 {
1816 	struct ice_aqc_sw_rules_elem *s_rule;
1817 	enum ice_status status;
1818 	u16 s_rule_size;
1819 	u16 rule_type;
1820 	int i;
1821 
1822 	if (!num_vsi)
1823 		return ICE_ERR_PARAM;
1824 
1825 	if (lkup_type == ICE_SW_LKUP_MAC ||
1826 	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1827 	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1828 	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1829 	    lkup_type == ICE_SW_LKUP_PROMISC ||
1830 	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN)
1831 		rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
1832 			ICE_AQC_SW_RULES_T_VSI_LIST_SET;
1833 	else if (lkup_type == ICE_SW_LKUP_VLAN)
1834 		rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
1835 			ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
1836 	else
1837 		return ICE_ERR_PARAM;
1838 
1839 	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
1840 	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
1841 	if (!s_rule)
1842 		return ICE_ERR_NO_MEMORY;
1843 	for (i = 0; i < num_vsi; i++) {
1844 		if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
1845 			status = ICE_ERR_PARAM;
1846 			goto exit;
1847 		}
1848 		/* AQ call requires hw_vsi_id(s) */
1849 		s_rule->pdata.vsi_list.vsi[i] =
1850 			cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
1851 	}
1852 
1853 	s_rule->type = cpu_to_le16(rule_type);
1854 	s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi);
1855 	s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
1856 
1857 	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
1858 
1859 exit:
1860 	devm_kfree(ice_hw_to_dev(hw), s_rule);
1861 	return status;
1862 }
1863 
1864 /**
1865  * ice_create_vsi_list_rule - Creates and populates a VSI list rule
1866  * @hw: pointer to the HW struct
1867  * @vsi_handle_arr: array of VSI handles to form a VSI list
1868  * @num_vsi: number of VSI handles in the array
1869  * @vsi_list_id: stores the ID of the VSI list to be created
1870  * @lkup_type: switch rule filter's lookup type
1871  */
1872 static enum ice_status
1873 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
1874 			 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
1875 {
1876 	enum ice_status status;
1877 
1878 	status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
1879 					    ice_aqc_opc_alloc_res);
1880 	if (status)
1881 		return status;
1882 
1883 	/* Update the newly created VSI list to include the specified VSIs */
1884 	return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
1885 					*vsi_list_id, false,
1886 					ice_aqc_opc_add_sw_rules, lkup_type);
1887 }
1888 
1889 /**
1890  * ice_create_pkt_fwd_rule
1891  * @hw: pointer to the hardware structure
1892  * @f_entry: entry containing packet forwarding information
1893  *
1894  * Create switch rule with given filter information and add an entry
1895  * to the corresponding filter management list to track this switch rule
1896  * and VSI mapping
1897  */
1898 static enum ice_status
1899 ice_create_pkt_fwd_rule(struct ice_hw *hw,
1900 			struct ice_fltr_list_entry *f_entry)
1901 {
1902 	struct ice_fltr_mgmt_list_entry *fm_entry;
1903 	struct ice_aqc_sw_rules_elem *s_rule;
1904 	enum ice_sw_lkup_type l_type;
1905 	struct ice_sw_recipe *recp;
1906 	enum ice_status status;
1907 
1908 	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
1909 			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
1910 	if (!s_rule)
1911 		return ICE_ERR_NO_MEMORY;
1912 	fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
1913 				GFP_KERNEL);
1914 	if (!fm_entry) {
1915 		status = ICE_ERR_NO_MEMORY;
1916 		goto ice_create_pkt_fwd_rule_exit;
1917 	}
1918 
1919 	fm_entry->fltr_info = f_entry->fltr_info;
1920 
1921 	/* Initialize all the fields for the management entry */
1922 	fm_entry->vsi_count = 1;
1923 	fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
1924 	fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
1925 	fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
1926 
1927 	ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
1928 			 ice_aqc_opc_add_sw_rules);
1929 
1930 	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
1931 				 ice_aqc_opc_add_sw_rules, NULL);
1932 	if (status) {
1933 		devm_kfree(ice_hw_to_dev(hw), fm_entry);
1934 		goto ice_create_pkt_fwd_rule_exit;
1935 	}
1936 
1937 	f_entry->fltr_info.fltr_rule_id =
1938 		le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
1939 	fm_entry->fltr_info.fltr_rule_id =
1940 		le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
1941 
1942 	/* The book keeping entries will get removed when base driver
1943 	 * calls remove filter AQ command
1944 	 */
1945 	l_type = fm_entry->fltr_info.lkup_type;
1946 	recp = &hw->switch_info->recp_list[l_type];
1947 	list_add(&fm_entry->list_entry, &recp->filt_rules);
1948 
1949 ice_create_pkt_fwd_rule_exit:
1950 	devm_kfree(ice_hw_to_dev(hw), s_rule);
1951 	return status;
1952 }
1953 
1954 /**
1955  * ice_update_pkt_fwd_rule
1956  * @hw: pointer to the hardware structure
1957  * @f_info: filter information for switch rule
1958  *
1959  * Call AQ command to update a previously created switch rule with a
1960  * VSI list ID
1961  */
1962 static enum ice_status
1963 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
1964 {
1965 	struct ice_aqc_sw_rules_elem *s_rule;
1966 	enum ice_status status;
1967 
1968 	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
1969 			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
1970 	if (!s_rule)
1971 		return ICE_ERR_NO_MEMORY;
1972 
1973 	ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
1974 
1975 	s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(f_info->fltr_rule_id);
1976 
1977 	/* Update switch rule with new rule set to forward VSI list */
1978 	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
1979 				 ice_aqc_opc_update_sw_rules, NULL);
1980 
1981 	devm_kfree(ice_hw_to_dev(hw), s_rule);
1982 	return status;
1983 }
1984 
1985 /**
1986  * ice_update_sw_rule_bridge_mode
1987  * @hw: pointer to the HW struct
1988  *
1989  * Updates unicast switch filter rules based on VEB/VEPA mode
1990  */
1991 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
1992 {
1993 	struct ice_switch_info *sw = hw->switch_info;
1994 	struct ice_fltr_mgmt_list_entry *fm_entry;
1995 	enum ice_status status = 0;
1996 	struct list_head *rule_head;
1997 	struct mutex *rule_lock; /* Lock to protect filter rule list */
1998 
1999 	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2000 	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2001 
2002 	mutex_lock(rule_lock);
2003 	list_for_each_entry(fm_entry, rule_head, list_entry) {
2004 		struct ice_fltr_info *fi = &fm_entry->fltr_info;
2005 		u8 *addr = fi->l_data.mac.mac_addr;
2006 
2007 		/* Update unicast Tx rules to reflect the selected
2008 		 * VEB/VEPA mode
2009 		 */
2010 		if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) &&
2011 		    (fi->fltr_act == ICE_FWD_TO_VSI ||
2012 		     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2013 		     fi->fltr_act == ICE_FWD_TO_Q ||
2014 		     fi->fltr_act == ICE_FWD_TO_QGRP)) {
2015 			status = ice_update_pkt_fwd_rule(hw, fi);
2016 			if (status)
2017 				break;
2018 		}
2019 	}
2020 
2021 	mutex_unlock(rule_lock);
2022 
2023 	return status;
2024 }
2025 
2026 /**
2027  * ice_add_update_vsi_list
2028  * @hw: pointer to the hardware structure
2029  * @m_entry: pointer to current filter management list entry
2030  * @cur_fltr: filter information from the book keeping entry
2031  * @new_fltr: filter information with the new VSI to be added
2032  *
2033  * Call AQ command to add or update previously created VSI list with new VSI.
2034  *
2035  * Helper function to do book keeping associated with adding filter information
2036  * The algorithm to do the book keeping is described below :
2037  * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2038  *	if only one VSI has been added till now
2039  *		Allocate a new VSI list and add two VSIs
2040  *		to this list using switch rule command
2041  *		Update the previously created switch rule with the
2042  *		newly created VSI list ID
2043  *	if a VSI list was previously created
2044  *		Add the new VSI to the previously created VSI list set
2045  *		using the update switch rule command
2046  */
2047 static enum ice_status
2048 ice_add_update_vsi_list(struct ice_hw *hw,
2049 			struct ice_fltr_mgmt_list_entry *m_entry,
2050 			struct ice_fltr_info *cur_fltr,
2051 			struct ice_fltr_info *new_fltr)
2052 {
2053 	enum ice_status status = 0;
2054 	u16 vsi_list_id = 0;
2055 
2056 	if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2057 	     cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2058 		return ICE_ERR_NOT_IMPL;
2059 
2060 	if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2061 	     new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2062 	    (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2063 	     cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2064 		return ICE_ERR_NOT_IMPL;
2065 
2066 	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2067 		/* Only one entry existed in the mapping and it was not already
2068 		 * a part of a VSI list. So, create a VSI list with the old and
2069 		 * new VSIs.
2070 		 */
2071 		struct ice_fltr_info tmp_fltr;
2072 		u16 vsi_handle_arr[2];
2073 
2074 		/* A rule already exists with the new VSI being added */
2075 		if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2076 			return ICE_ERR_ALREADY_EXISTS;
2077 
2078 		vsi_handle_arr[0] = cur_fltr->vsi_handle;
2079 		vsi_handle_arr[1] = new_fltr->vsi_handle;
2080 		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2081 						  &vsi_list_id,
2082 						  new_fltr->lkup_type);
2083 		if (status)
2084 			return status;
2085 
2086 		tmp_fltr = *new_fltr;
2087 		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2088 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2089 		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2090 		/* Update the previous switch rule of "MAC forward to VSI" to
2091 		 * "MAC fwd to VSI list"
2092 		 */
2093 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
2094 		if (status)
2095 			return status;
2096 
2097 		cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2098 		cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2099 		m_entry->vsi_list_info =
2100 			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2101 						vsi_list_id);
2102 
2103 		if (!m_entry->vsi_list_info)
2104 			return ICE_ERR_NO_MEMORY;
2105 
2106 		/* If this entry was large action then the large action needs
2107 		 * to be updated to point to FWD to VSI list
2108 		 */
2109 		if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2110 			status =
2111 			    ice_add_marker_act(hw, m_entry,
2112 					       m_entry->sw_marker_id,
2113 					       m_entry->lg_act_idx);
2114 	} else {
2115 		u16 vsi_handle = new_fltr->vsi_handle;
2116 		enum ice_adminq_opc opcode;
2117 
2118 		if (!m_entry->vsi_list_info)
2119 			return ICE_ERR_CFG;
2120 
2121 		/* A rule already exists with the new VSI being added */
2122 		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
2123 			return 0;
2124 
2125 		/* Update the previously created VSI list set with
2126 		 * the new VSI ID passed in
2127 		 */
2128 		vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2129 		opcode = ice_aqc_opc_update_sw_rules;
2130 
2131 		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2132 						  vsi_list_id, false, opcode,
2133 						  new_fltr->lkup_type);
2134 		/* update VSI list mapping info with new VSI ID */
2135 		if (!status)
2136 			set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
2137 	}
2138 	if (!status)
2139 		m_entry->vsi_count++;
2140 	return status;
2141 }
2142 
2143 /**
2144  * ice_find_rule_entry - Search a rule entry
2145  * @hw: pointer to the hardware structure
2146  * @recp_id: lookup type for which the specified rule needs to be searched
2147  * @f_info: rule information
2148  *
2149  * Helper function to search for a given rule entry
2150  * Returns pointer to entry storing the rule if found
2151  */
2152 static struct ice_fltr_mgmt_list_entry *
2153 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
2154 {
2155 	struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2156 	struct ice_switch_info *sw = hw->switch_info;
2157 	struct list_head *list_head;
2158 
2159 	list_head = &sw->recp_list[recp_id].filt_rules;
2160 	list_for_each_entry(list_itr, list_head, list_entry) {
2161 		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2162 			    sizeof(f_info->l_data)) &&
2163 		    f_info->flag == list_itr->fltr_info.flag) {
2164 			ret = list_itr;
2165 			break;
2166 		}
2167 	}
2168 	return ret;
2169 }
2170 
2171 /**
2172  * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2173  * @hw: pointer to the hardware structure
2174  * @recp_id: lookup type for which VSI lists needs to be searched
2175  * @vsi_handle: VSI handle to be found in VSI list
2176  * @vsi_list_id: VSI list ID found containing vsi_handle
2177  *
2178  * Helper function to search a VSI list with single entry containing given VSI
2179  * handle element. This can be extended further to search VSI list with more
2180  * than 1 vsi_count. Returns pointer to VSI list entry if found.
2181  */
2182 static struct ice_vsi_list_map_info *
2183 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
2184 			u16 *vsi_list_id)
2185 {
2186 	struct ice_vsi_list_map_info *map_info = NULL;
2187 	struct ice_switch_info *sw = hw->switch_info;
2188 	struct ice_fltr_mgmt_list_entry *list_itr;
2189 	struct list_head *list_head;
2190 
2191 	list_head = &sw->recp_list[recp_id].filt_rules;
2192 	list_for_each_entry(list_itr, list_head, list_entry) {
2193 		if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
2194 			map_info = list_itr->vsi_list_info;
2195 			if (test_bit(vsi_handle, map_info->vsi_map)) {
2196 				*vsi_list_id = map_info->vsi_list_id;
2197 				return map_info;
2198 			}
2199 		}
2200 	}
2201 	return NULL;
2202 }
2203 
2204 /**
2205  * ice_add_rule_internal - add rule for a given lookup type
2206  * @hw: pointer to the hardware structure
2207  * @recp_id: lookup type (recipe ID) for which rule has to be added
2208  * @f_entry: structure containing MAC forwarding information
2209  *
2210  * Adds or updates the rule lists for a given recipe
2211  */
2212 static enum ice_status
2213 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
2214 		      struct ice_fltr_list_entry *f_entry)
2215 {
2216 	struct ice_switch_info *sw = hw->switch_info;
2217 	struct ice_fltr_info *new_fltr, *cur_fltr;
2218 	struct ice_fltr_mgmt_list_entry *m_entry;
2219 	struct mutex *rule_lock; /* Lock to protect filter rule list */
2220 	enum ice_status status = 0;
2221 
2222 	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2223 		return ICE_ERR_PARAM;
2224 	f_entry->fltr_info.fwd_id.hw_vsi_id =
2225 		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2226 
2227 	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2228 
2229 	mutex_lock(rule_lock);
2230 	new_fltr = &f_entry->fltr_info;
2231 	if (new_fltr->flag & ICE_FLTR_RX)
2232 		new_fltr->src = hw->port_info->lport;
2233 	else if (new_fltr->flag & ICE_FLTR_TX)
2234 		new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id;
2235 
2236 	m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
2237 	if (!m_entry) {
2238 		mutex_unlock(rule_lock);
2239 		return ice_create_pkt_fwd_rule(hw, f_entry);
2240 	}
2241 
2242 	cur_fltr = &m_entry->fltr_info;
2243 	status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
2244 	mutex_unlock(rule_lock);
2245 
2246 	return status;
2247 }
2248 
2249 /**
2250  * ice_remove_vsi_list_rule
2251  * @hw: pointer to the hardware structure
2252  * @vsi_list_id: VSI list ID generated as part of allocate resource
2253  * @lkup_type: switch rule filter lookup type
2254  *
2255  * The VSI list should be emptied before this function is called to remove the
2256  * VSI list.
2257  */
2258 static enum ice_status
2259 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
2260 			 enum ice_sw_lkup_type lkup_type)
2261 {
2262 	struct ice_aqc_sw_rules_elem *s_rule;
2263 	enum ice_status status;
2264 	u16 s_rule_size;
2265 
2266 	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
2267 	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
2268 	if (!s_rule)
2269 		return ICE_ERR_NO_MEMORY;
2270 
2271 	s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
2272 	s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
2273 
2274 	/* Free the vsi_list resource that we allocated. It is assumed that the
2275 	 * list is empty at this point.
2276 	 */
2277 	status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
2278 					    ice_aqc_opc_free_res);
2279 
2280 	devm_kfree(ice_hw_to_dev(hw), s_rule);
2281 	return status;
2282 }
2283 
2284 /**
2285  * ice_rem_update_vsi_list
2286  * @hw: pointer to the hardware structure
2287  * @vsi_handle: VSI handle of the VSI to remove
2288  * @fm_list: filter management entry for which the VSI list management needs to
2289  *           be done
2290  */
2291 static enum ice_status
2292 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
2293 			struct ice_fltr_mgmt_list_entry *fm_list)
2294 {
2295 	enum ice_sw_lkup_type lkup_type;
2296 	enum ice_status status = 0;
2297 	u16 vsi_list_id;
2298 
2299 	if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
2300 	    fm_list->vsi_count == 0)
2301 		return ICE_ERR_PARAM;
2302 
2303 	/* A rule with the VSI being removed does not exist */
2304 	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
2305 		return ICE_ERR_DOES_NOT_EXIST;
2306 
2307 	lkup_type = fm_list->fltr_info.lkup_type;
2308 	vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
2309 	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
2310 					  ice_aqc_opc_update_sw_rules,
2311 					  lkup_type);
2312 	if (status)
2313 		return status;
2314 
2315 	fm_list->vsi_count--;
2316 	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
2317 
2318 	if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
2319 		struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
2320 		struct ice_vsi_list_map_info *vsi_list_info =
2321 			fm_list->vsi_list_info;
2322 		u16 rem_vsi_handle;
2323 
2324 		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
2325 						ICE_MAX_VSI);
2326 		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
2327 			return ICE_ERR_OUT_OF_RANGE;
2328 
2329 		/* Make sure VSI list is empty before removing it below */
2330 		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
2331 						  vsi_list_id, true,
2332 						  ice_aqc_opc_update_sw_rules,
2333 						  lkup_type);
2334 		if (status)
2335 			return status;
2336 
2337 		tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
2338 		tmp_fltr_info.fwd_id.hw_vsi_id =
2339 			ice_get_hw_vsi_num(hw, rem_vsi_handle);
2340 		tmp_fltr_info.vsi_handle = rem_vsi_handle;
2341 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
2342 		if (status) {
2343 			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
2344 				  tmp_fltr_info.fwd_id.hw_vsi_id, status);
2345 			return status;
2346 		}
2347 
2348 		fm_list->fltr_info = tmp_fltr_info;
2349 	}
2350 
2351 	if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
2352 	    (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
2353 		struct ice_vsi_list_map_info *vsi_list_info =
2354 			fm_list->vsi_list_info;
2355 
2356 		/* Remove the VSI list since it is no longer used */
2357 		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
2358 		if (status) {
2359 			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
2360 				  vsi_list_id, status);
2361 			return status;
2362 		}
2363 
2364 		list_del(&vsi_list_info->list_entry);
2365 		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
2366 		fm_list->vsi_list_info = NULL;
2367 	}
2368 
2369 	return status;
2370 }
2371 
2372 /**
2373  * ice_remove_rule_internal - Remove a filter rule of a given type
2374  * @hw: pointer to the hardware structure
2375  * @recp_id: recipe ID for which the rule needs to removed
2376  * @f_entry: rule entry containing filter information
2377  */
2378 static enum ice_status
2379 ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
2380 			 struct ice_fltr_list_entry *f_entry)
2381 {
2382 	struct ice_switch_info *sw = hw->switch_info;
2383 	struct ice_fltr_mgmt_list_entry *list_elem;
2384 	struct mutex *rule_lock; /* Lock to protect filter rule list */
2385 	enum ice_status status = 0;
2386 	bool remove_rule = false;
2387 	u16 vsi_handle;
2388 
2389 	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2390 		return ICE_ERR_PARAM;
2391 	f_entry->fltr_info.fwd_id.hw_vsi_id =
2392 		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2393 
2394 	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2395 	mutex_lock(rule_lock);
2396 	list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
2397 	if (!list_elem) {
2398 		status = ICE_ERR_DOES_NOT_EXIST;
2399 		goto exit;
2400 	}
2401 
2402 	if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
2403 		remove_rule = true;
2404 	} else if (!list_elem->vsi_list_info) {
2405 		status = ICE_ERR_DOES_NOT_EXIST;
2406 		goto exit;
2407 	} else if (list_elem->vsi_list_info->ref_cnt > 1) {
2408 		/* a ref_cnt > 1 indicates that the vsi_list is being
2409 		 * shared by multiple rules. Decrement the ref_cnt and
2410 		 * remove this rule, but do not modify the list, as it
2411 		 * is in-use by other rules.
2412 		 */
2413 		list_elem->vsi_list_info->ref_cnt--;
2414 		remove_rule = true;
2415 	} else {
2416 		/* a ref_cnt of 1 indicates the vsi_list is only used
2417 		 * by one rule. However, the original removal request is only
2418 		 * for a single VSI. Update the vsi_list first, and only
2419 		 * remove the rule if there are no further VSIs in this list.
2420 		 */
2421 		vsi_handle = f_entry->fltr_info.vsi_handle;
2422 		status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
2423 		if (status)
2424 			goto exit;
2425 		/* if VSI count goes to zero after updating the VSI list */
2426 		if (list_elem->vsi_count == 0)
2427 			remove_rule = true;
2428 	}
2429 
2430 	if (remove_rule) {
2431 		/* Remove the lookup rule */
2432 		struct ice_aqc_sw_rules_elem *s_rule;
2433 
2434 		s_rule = devm_kzalloc(ice_hw_to_dev(hw),
2435 				      ICE_SW_RULE_RX_TX_NO_HDR_SIZE,
2436 				      GFP_KERNEL);
2437 		if (!s_rule) {
2438 			status = ICE_ERR_NO_MEMORY;
2439 			goto exit;
2440 		}
2441 
2442 		ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
2443 				 ice_aqc_opc_remove_sw_rules);
2444 
2445 		status = ice_aq_sw_rules(hw, s_rule,
2446 					 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
2447 					 ice_aqc_opc_remove_sw_rules, NULL);
2448 
2449 		/* Remove a book keeping from the list */
2450 		devm_kfree(ice_hw_to_dev(hw), s_rule);
2451 
2452 		if (status)
2453 			goto exit;
2454 
2455 		list_del(&list_elem->list_entry);
2456 		devm_kfree(ice_hw_to_dev(hw), list_elem);
2457 	}
2458 exit:
2459 	mutex_unlock(rule_lock);
2460 	return status;
2461 }
2462 
2463 /**
2464  * ice_mac_fltr_exist - does this MAC filter exist for given VSI
2465  * @hw: pointer to the hardware structure
2466  * @mac: MAC address to be checked (for MAC filter)
2467  * @vsi_handle: check MAC filter for this VSI
2468  */
2469 bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle)
2470 {
2471 	struct ice_fltr_mgmt_list_entry *entry;
2472 	struct list_head *rule_head;
2473 	struct ice_switch_info *sw;
2474 	struct mutex *rule_lock; /* Lock to protect filter rule list */
2475 	u16 hw_vsi_id;
2476 
2477 	if (!ice_is_vsi_valid(hw, vsi_handle))
2478 		return false;
2479 
2480 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
2481 	sw = hw->switch_info;
2482 	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2483 	if (!rule_head)
2484 		return false;
2485 
2486 	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2487 	mutex_lock(rule_lock);
2488 	list_for_each_entry(entry, rule_head, list_entry) {
2489 		struct ice_fltr_info *f_info = &entry->fltr_info;
2490 		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
2491 
2492 		if (is_zero_ether_addr(mac_addr))
2493 			continue;
2494 
2495 		if (f_info->flag != ICE_FLTR_TX ||
2496 		    f_info->src_id != ICE_SRC_ID_VSI ||
2497 		    f_info->lkup_type != ICE_SW_LKUP_MAC ||
2498 		    f_info->fltr_act != ICE_FWD_TO_VSI ||
2499 		    hw_vsi_id != f_info->fwd_id.hw_vsi_id)
2500 			continue;
2501 
2502 		if (ether_addr_equal(mac, mac_addr)) {
2503 			mutex_unlock(rule_lock);
2504 			return true;
2505 		}
2506 	}
2507 	mutex_unlock(rule_lock);
2508 	return false;
2509 }
2510 
2511 /**
2512  * ice_vlan_fltr_exist - does this VLAN filter exist for given VSI
2513  * @hw: pointer to the hardware structure
2514  * @vlan_id: VLAN ID
2515  * @vsi_handle: check MAC filter for this VSI
2516  */
2517 bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle)
2518 {
2519 	struct ice_fltr_mgmt_list_entry *entry;
2520 	struct list_head *rule_head;
2521 	struct ice_switch_info *sw;
2522 	struct mutex *rule_lock; /* Lock to protect filter rule list */
2523 	u16 hw_vsi_id;
2524 
2525 	if (vlan_id > ICE_MAX_VLAN_ID)
2526 		return false;
2527 
2528 	if (!ice_is_vsi_valid(hw, vsi_handle))
2529 		return false;
2530 
2531 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
2532 	sw = hw->switch_info;
2533 	rule_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
2534 	if (!rule_head)
2535 		return false;
2536 
2537 	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
2538 	mutex_lock(rule_lock);
2539 	list_for_each_entry(entry, rule_head, list_entry) {
2540 		struct ice_fltr_info *f_info = &entry->fltr_info;
2541 		u16 entry_vlan_id = f_info->l_data.vlan.vlan_id;
2542 		struct ice_vsi_list_map_info *map_info;
2543 
2544 		if (entry_vlan_id > ICE_MAX_VLAN_ID)
2545 			continue;
2546 
2547 		if (f_info->flag != ICE_FLTR_TX ||
2548 		    f_info->src_id != ICE_SRC_ID_VSI ||
2549 		    f_info->lkup_type != ICE_SW_LKUP_VLAN)
2550 			continue;
2551 
2552 		/* Only allowed filter action are FWD_TO_VSI/_VSI_LIST */
2553 		if (f_info->fltr_act != ICE_FWD_TO_VSI &&
2554 		    f_info->fltr_act != ICE_FWD_TO_VSI_LIST)
2555 			continue;
2556 
2557 		if (f_info->fltr_act == ICE_FWD_TO_VSI) {
2558 			if (hw_vsi_id != f_info->fwd_id.hw_vsi_id)
2559 				continue;
2560 		} else if (f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
2561 			/* If filter_action is FWD_TO_VSI_LIST, make sure
2562 			 * that VSI being checked is part of VSI list
2563 			 */
2564 			if (entry->vsi_count == 1 &&
2565 			    entry->vsi_list_info) {
2566 				map_info = entry->vsi_list_info;
2567 				if (!test_bit(vsi_handle, map_info->vsi_map))
2568 					continue;
2569 			}
2570 		}
2571 
2572 		if (vlan_id == entry_vlan_id) {
2573 			mutex_unlock(rule_lock);
2574 			return true;
2575 		}
2576 	}
2577 	mutex_unlock(rule_lock);
2578 
2579 	return false;
2580 }
2581 
2582 /**
2583  * ice_add_mac - Add a MAC address based filter rule
2584  * @hw: pointer to the hardware structure
2585  * @m_list: list of MAC addresses and forwarding information
2586  *
2587  * IMPORTANT: When the ucast_shared flag is set to false and m_list has
2588  * multiple unicast addresses, the function assumes that all the
2589  * addresses are unique in a given add_mac call. It doesn't
2590  * check for duplicates in this case, removing duplicates from a given
2591  * list should be taken care of in the caller of this function.
2592  */
2593 enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
2594 {
2595 	struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
2596 	struct ice_fltr_list_entry *m_list_itr;
2597 	struct list_head *rule_head;
2598 	u16 total_elem_left, s_rule_size;
2599 	struct ice_switch_info *sw;
2600 	struct mutex *rule_lock; /* Lock to protect filter rule list */
2601 	enum ice_status status = 0;
2602 	u16 num_unicast = 0;
2603 	u8 elem_sent;
2604 
2605 	if (!m_list || !hw)
2606 		return ICE_ERR_PARAM;
2607 
2608 	s_rule = NULL;
2609 	sw = hw->switch_info;
2610 	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2611 	list_for_each_entry(m_list_itr, m_list, list_entry) {
2612 		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
2613 		u16 vsi_handle;
2614 		u16 hw_vsi_id;
2615 
2616 		m_list_itr->fltr_info.flag = ICE_FLTR_TX;
2617 		vsi_handle = m_list_itr->fltr_info.vsi_handle;
2618 		if (!ice_is_vsi_valid(hw, vsi_handle))
2619 			return ICE_ERR_PARAM;
2620 		hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
2621 		m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
2622 		/* update the src in case it is VSI num */
2623 		if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
2624 			return ICE_ERR_PARAM;
2625 		m_list_itr->fltr_info.src = hw_vsi_id;
2626 		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
2627 		    is_zero_ether_addr(add))
2628 			return ICE_ERR_PARAM;
2629 		if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
2630 			/* Don't overwrite the unicast address */
2631 			mutex_lock(rule_lock);
2632 			if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
2633 						&m_list_itr->fltr_info)) {
2634 				mutex_unlock(rule_lock);
2635 				return ICE_ERR_ALREADY_EXISTS;
2636 			}
2637 			mutex_unlock(rule_lock);
2638 			num_unicast++;
2639 		} else if (is_multicast_ether_addr(add) ||
2640 			   (is_unicast_ether_addr(add) && hw->ucast_shared)) {
2641 			m_list_itr->status =
2642 				ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
2643 						      m_list_itr);
2644 			if (m_list_itr->status)
2645 				return m_list_itr->status;
2646 		}
2647 	}
2648 
2649 	mutex_lock(rule_lock);
2650 	/* Exit if no suitable entries were found for adding bulk switch rule */
2651 	if (!num_unicast) {
2652 		status = 0;
2653 		goto ice_add_mac_exit;
2654 	}
2655 
2656 	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2657 
2658 	/* Allocate switch rule buffer for the bulk update for unicast */
2659 	s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2660 	s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
2661 			      GFP_KERNEL);
2662 	if (!s_rule) {
2663 		status = ICE_ERR_NO_MEMORY;
2664 		goto ice_add_mac_exit;
2665 	}
2666 
2667 	r_iter = s_rule;
2668 	list_for_each_entry(m_list_itr, m_list, list_entry) {
2669 		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
2670 		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
2671 
2672 		if (is_unicast_ether_addr(mac_addr)) {
2673 			ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
2674 					 ice_aqc_opc_add_sw_rules);
2675 			r_iter = (struct ice_aqc_sw_rules_elem *)
2676 				((u8 *)r_iter + s_rule_size);
2677 		}
2678 	}
2679 
2680 	/* Call AQ bulk switch rule update for all unicast addresses */
2681 	r_iter = s_rule;
2682 	/* Call AQ switch rule in AQ_MAX chunk */
2683 	for (total_elem_left = num_unicast; total_elem_left > 0;
2684 	     total_elem_left -= elem_sent) {
2685 		struct ice_aqc_sw_rules_elem *entry = r_iter;
2686 
2687 		elem_sent = min_t(u8, total_elem_left,
2688 				  (ICE_AQ_MAX_BUF_LEN / s_rule_size));
2689 		status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
2690 					 elem_sent, ice_aqc_opc_add_sw_rules,
2691 					 NULL);
2692 		if (status)
2693 			goto ice_add_mac_exit;
2694 		r_iter = (struct ice_aqc_sw_rules_elem *)
2695 			((u8 *)r_iter + (elem_sent * s_rule_size));
2696 	}
2697 
2698 	/* Fill up rule ID based on the value returned from FW */
2699 	r_iter = s_rule;
2700 	list_for_each_entry(m_list_itr, m_list, list_entry) {
2701 		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
2702 		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
2703 		struct ice_fltr_mgmt_list_entry *fm_entry;
2704 
2705 		if (is_unicast_ether_addr(mac_addr)) {
2706 			f_info->fltr_rule_id =
2707 				le16_to_cpu(r_iter->pdata.lkup_tx_rx.index);
2708 			f_info->fltr_act = ICE_FWD_TO_VSI;
2709 			/* Create an entry to track this MAC address */
2710 			fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
2711 						sizeof(*fm_entry), GFP_KERNEL);
2712 			if (!fm_entry) {
2713 				status = ICE_ERR_NO_MEMORY;
2714 				goto ice_add_mac_exit;
2715 			}
2716 			fm_entry->fltr_info = *f_info;
2717 			fm_entry->vsi_count = 1;
2718 			/* The book keeping entries will get removed when
2719 			 * base driver calls remove filter AQ command
2720 			 */
2721 
2722 			list_add(&fm_entry->list_entry, rule_head);
2723 			r_iter = (struct ice_aqc_sw_rules_elem *)
2724 				((u8 *)r_iter + s_rule_size);
2725 		}
2726 	}
2727 
2728 ice_add_mac_exit:
2729 	mutex_unlock(rule_lock);
2730 	if (s_rule)
2731 		devm_kfree(ice_hw_to_dev(hw), s_rule);
2732 	return status;
2733 }
2734 
2735 /**
2736  * ice_add_vlan_internal - Add one VLAN based filter rule
2737  * @hw: pointer to the hardware structure
2738  * @f_entry: filter entry containing one VLAN information
2739  */
2740 static enum ice_status
2741 ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
2742 {
2743 	struct ice_switch_info *sw = hw->switch_info;
2744 	struct ice_fltr_mgmt_list_entry *v_list_itr;
2745 	struct ice_fltr_info *new_fltr, *cur_fltr;
2746 	enum ice_sw_lkup_type lkup_type;
2747 	u16 vsi_list_id = 0, vsi_handle;
2748 	struct mutex *rule_lock; /* Lock to protect filter rule list */
2749 	enum ice_status status = 0;
2750 
2751 	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2752 		return ICE_ERR_PARAM;
2753 
2754 	f_entry->fltr_info.fwd_id.hw_vsi_id =
2755 		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2756 	new_fltr = &f_entry->fltr_info;
2757 
2758 	/* VLAN ID should only be 12 bits */
2759 	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
2760 		return ICE_ERR_PARAM;
2761 
2762 	if (new_fltr->src_id != ICE_SRC_ID_VSI)
2763 		return ICE_ERR_PARAM;
2764 
2765 	new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
2766 	lkup_type = new_fltr->lkup_type;
2767 	vsi_handle = new_fltr->vsi_handle;
2768 	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
2769 	mutex_lock(rule_lock);
2770 	v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
2771 	if (!v_list_itr) {
2772 		struct ice_vsi_list_map_info *map_info = NULL;
2773 
2774 		if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
2775 			/* All VLAN pruning rules use a VSI list. Check if
2776 			 * there is already a VSI list containing VSI that we
2777 			 * want to add. If found, use the same vsi_list_id for
2778 			 * this new VLAN rule or else create a new list.
2779 			 */
2780 			map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
2781 							   vsi_handle,
2782 							   &vsi_list_id);
2783 			if (!map_info) {
2784 				status = ice_create_vsi_list_rule(hw,
2785 								  &vsi_handle,
2786 								  1,
2787 								  &vsi_list_id,
2788 								  lkup_type);
2789 				if (status)
2790 					goto exit;
2791 			}
2792 			/* Convert the action to forwarding to a VSI list. */
2793 			new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2794 			new_fltr->fwd_id.vsi_list_id = vsi_list_id;
2795 		}
2796 
2797 		status = ice_create_pkt_fwd_rule(hw, f_entry);
2798 		if (!status) {
2799 			v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
2800 							 new_fltr);
2801 			if (!v_list_itr) {
2802 				status = ICE_ERR_DOES_NOT_EXIST;
2803 				goto exit;
2804 			}
2805 			/* reuse VSI list for new rule and increment ref_cnt */
2806 			if (map_info) {
2807 				v_list_itr->vsi_list_info = map_info;
2808 				map_info->ref_cnt++;
2809 			} else {
2810 				v_list_itr->vsi_list_info =
2811 					ice_create_vsi_list_map(hw, &vsi_handle,
2812 								1, vsi_list_id);
2813 			}
2814 		}
2815 	} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
2816 		/* Update existing VSI list to add new VSI ID only if it used
2817 		 * by one VLAN rule.
2818 		 */
2819 		cur_fltr = &v_list_itr->fltr_info;
2820 		status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
2821 						 new_fltr);
2822 	} else {
2823 		/* If VLAN rule exists and VSI list being used by this rule is
2824 		 * referenced by more than 1 VLAN rule. Then create a new VSI
2825 		 * list appending previous VSI with new VSI and update existing
2826 		 * VLAN rule to point to new VSI list ID
2827 		 */
2828 		struct ice_fltr_info tmp_fltr;
2829 		u16 vsi_handle_arr[2];
2830 		u16 cur_handle;
2831 
2832 		/* Current implementation only supports reusing VSI list with
2833 		 * one VSI count. We should never hit below condition
2834 		 */
2835 		if (v_list_itr->vsi_count > 1 &&
2836 		    v_list_itr->vsi_list_info->ref_cnt > 1) {
2837 			ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
2838 			status = ICE_ERR_CFG;
2839 			goto exit;
2840 		}
2841 
2842 		cur_handle =
2843 			find_first_bit(v_list_itr->vsi_list_info->vsi_map,
2844 				       ICE_MAX_VSI);
2845 
2846 		/* A rule already exists with the new VSI being added */
2847 		if (cur_handle == vsi_handle) {
2848 			status = ICE_ERR_ALREADY_EXISTS;
2849 			goto exit;
2850 		}
2851 
2852 		vsi_handle_arr[0] = cur_handle;
2853 		vsi_handle_arr[1] = vsi_handle;
2854 		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2855 						  &vsi_list_id, lkup_type);
2856 		if (status)
2857 			goto exit;
2858 
2859 		tmp_fltr = v_list_itr->fltr_info;
2860 		tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
2861 		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2862 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2863 		/* Update the previous switch rule to a new VSI list which
2864 		 * includes current VSI that is requested
2865 		 */
2866 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
2867 		if (status)
2868 			goto exit;
2869 
2870 		/* before overriding VSI list map info. decrement ref_cnt of
2871 		 * previous VSI list
2872 		 */
2873 		v_list_itr->vsi_list_info->ref_cnt--;
2874 
2875 		/* now update to newly created list */
2876 		v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
2877 		v_list_itr->vsi_list_info =
2878 			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2879 						vsi_list_id);
2880 		v_list_itr->vsi_count++;
2881 	}
2882 
2883 exit:
2884 	mutex_unlock(rule_lock);
2885 	return status;
2886 }
2887 
2888 /**
2889  * ice_add_vlan - Add VLAN based filter rule
2890  * @hw: pointer to the hardware structure
2891  * @v_list: list of VLAN entries and forwarding information
2892  */
2893 enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
2894 {
2895 	struct ice_fltr_list_entry *v_list_itr;
2896 
2897 	if (!v_list || !hw)
2898 		return ICE_ERR_PARAM;
2899 
2900 	list_for_each_entry(v_list_itr, v_list, list_entry) {
2901 		if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
2902 			return ICE_ERR_PARAM;
2903 		v_list_itr->fltr_info.flag = ICE_FLTR_TX;
2904 		v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
2905 		if (v_list_itr->status)
2906 			return v_list_itr->status;
2907 	}
2908 	return 0;
2909 }
2910 
2911 /**
2912  * ice_add_eth_mac - Add ethertype and MAC based filter rule
2913  * @hw: pointer to the hardware structure
2914  * @em_list: list of ether type MAC filter, MAC is optional
2915  *
2916  * This function requires the caller to populate the entries in
2917  * the filter list with the necessary fields (including flags to
2918  * indicate Tx or Rx rules).
2919  */
2920 enum ice_status
2921 ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
2922 {
2923 	struct ice_fltr_list_entry *em_list_itr;
2924 
2925 	if (!em_list || !hw)
2926 		return ICE_ERR_PARAM;
2927 
2928 	list_for_each_entry(em_list_itr, em_list, list_entry) {
2929 		enum ice_sw_lkup_type l_type =
2930 			em_list_itr->fltr_info.lkup_type;
2931 
2932 		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
2933 		    l_type != ICE_SW_LKUP_ETHERTYPE)
2934 			return ICE_ERR_PARAM;
2935 
2936 		em_list_itr->status = ice_add_rule_internal(hw, l_type,
2937 							    em_list_itr);
2938 		if (em_list_itr->status)
2939 			return em_list_itr->status;
2940 	}
2941 	return 0;
2942 }
2943 
2944 /**
2945  * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
2946  * @hw: pointer to the hardware structure
2947  * @em_list: list of ethertype or ethertype MAC entries
2948  */
2949 enum ice_status
2950 ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
2951 {
2952 	struct ice_fltr_list_entry *em_list_itr, *tmp;
2953 
2954 	if (!em_list || !hw)
2955 		return ICE_ERR_PARAM;
2956 
2957 	list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) {
2958 		enum ice_sw_lkup_type l_type =
2959 			em_list_itr->fltr_info.lkup_type;
2960 
2961 		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
2962 		    l_type != ICE_SW_LKUP_ETHERTYPE)
2963 			return ICE_ERR_PARAM;
2964 
2965 		em_list_itr->status = ice_remove_rule_internal(hw, l_type,
2966 							       em_list_itr);
2967 		if (em_list_itr->status)
2968 			return em_list_itr->status;
2969 	}
2970 	return 0;
2971 }
2972 
2973 /**
2974  * ice_rem_sw_rule_info
2975  * @hw: pointer to the hardware structure
2976  * @rule_head: pointer to the switch list structure that we want to delete
2977  */
2978 static void
2979 ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
2980 {
2981 	if (!list_empty(rule_head)) {
2982 		struct ice_fltr_mgmt_list_entry *entry;
2983 		struct ice_fltr_mgmt_list_entry *tmp;
2984 
2985 		list_for_each_entry_safe(entry, tmp, rule_head, list_entry) {
2986 			list_del(&entry->list_entry);
2987 			devm_kfree(ice_hw_to_dev(hw), entry);
2988 		}
2989 	}
2990 }
2991 
2992 /**
2993  * ice_rem_adv_rule_info
2994  * @hw: pointer to the hardware structure
2995  * @rule_head: pointer to the switch list structure that we want to delete
2996  */
2997 static void
2998 ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
2999 {
3000 	struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3001 	struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3002 
3003 	if (list_empty(rule_head))
3004 		return;
3005 
3006 	list_for_each_entry_safe(lst_itr, tmp_entry, rule_head, list_entry) {
3007 		list_del(&lst_itr->list_entry);
3008 		devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
3009 		devm_kfree(ice_hw_to_dev(hw), lst_itr);
3010 	}
3011 }
3012 
3013 /**
3014  * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3015  * @hw: pointer to the hardware structure
3016  * @vsi_handle: VSI handle to set as default
3017  * @set: true to add the above mentioned switch rule, false to remove it
3018  * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3019  *
3020  * add filter rule to set/unset given VSI as default VSI for the switch
3021  * (represented by swid)
3022  */
3023 enum ice_status
3024 ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
3025 {
3026 	struct ice_aqc_sw_rules_elem *s_rule;
3027 	struct ice_fltr_info f_info;
3028 	enum ice_adminq_opc opcode;
3029 	enum ice_status status;
3030 	u16 s_rule_size;
3031 	u16 hw_vsi_id;
3032 
3033 	if (!ice_is_vsi_valid(hw, vsi_handle))
3034 		return ICE_ERR_PARAM;
3035 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3036 
3037 	s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3038 		ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3039 
3040 	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
3041 	if (!s_rule)
3042 		return ICE_ERR_NO_MEMORY;
3043 
3044 	memset(&f_info, 0, sizeof(f_info));
3045 
3046 	f_info.lkup_type = ICE_SW_LKUP_DFLT;
3047 	f_info.flag = direction;
3048 	f_info.fltr_act = ICE_FWD_TO_VSI;
3049 	f_info.fwd_id.hw_vsi_id = hw_vsi_id;
3050 
3051 	if (f_info.flag & ICE_FLTR_RX) {
3052 		f_info.src = hw->port_info->lport;
3053 		f_info.src_id = ICE_SRC_ID_LPORT;
3054 		if (!set)
3055 			f_info.fltr_rule_id =
3056 				hw->port_info->dflt_rx_vsi_rule_id;
3057 	} else if (f_info.flag & ICE_FLTR_TX) {
3058 		f_info.src_id = ICE_SRC_ID_VSI;
3059 		f_info.src = hw_vsi_id;
3060 		if (!set)
3061 			f_info.fltr_rule_id =
3062 				hw->port_info->dflt_tx_vsi_rule_id;
3063 	}
3064 
3065 	if (set)
3066 		opcode = ice_aqc_opc_add_sw_rules;
3067 	else
3068 		opcode = ice_aqc_opc_remove_sw_rules;
3069 
3070 	ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3071 
3072 	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
3073 	if (status || !(f_info.flag & ICE_FLTR_TX_RX))
3074 		goto out;
3075 	if (set) {
3076 		u16 index = le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
3077 
3078 		if (f_info.flag & ICE_FLTR_TX) {
3079 			hw->port_info->dflt_tx_vsi_num = hw_vsi_id;
3080 			hw->port_info->dflt_tx_vsi_rule_id = index;
3081 		} else if (f_info.flag & ICE_FLTR_RX) {
3082 			hw->port_info->dflt_rx_vsi_num = hw_vsi_id;
3083 			hw->port_info->dflt_rx_vsi_rule_id = index;
3084 		}
3085 	} else {
3086 		if (f_info.flag & ICE_FLTR_TX) {
3087 			hw->port_info->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3088 			hw->port_info->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
3089 		} else if (f_info.flag & ICE_FLTR_RX) {
3090 			hw->port_info->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3091 			hw->port_info->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
3092 		}
3093 	}
3094 
3095 out:
3096 	devm_kfree(ice_hw_to_dev(hw), s_rule);
3097 	return status;
3098 }
3099 
3100 /**
3101  * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3102  * @hw: pointer to the hardware structure
3103  * @recp_id: lookup type for which the specified rule needs to be searched
3104  * @f_info: rule information
3105  *
3106  * Helper function to search for a unicast rule entry - this is to be used
3107  * to remove unicast MAC filter that is not shared with other VSIs on the
3108  * PF switch.
3109  *
3110  * Returns pointer to entry storing the rule if found
3111  */
3112 static struct ice_fltr_mgmt_list_entry *
3113 ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
3114 			  struct ice_fltr_info *f_info)
3115 {
3116 	struct ice_switch_info *sw = hw->switch_info;
3117 	struct ice_fltr_mgmt_list_entry *list_itr;
3118 	struct list_head *list_head;
3119 
3120 	list_head = &sw->recp_list[recp_id].filt_rules;
3121 	list_for_each_entry(list_itr, list_head, list_entry) {
3122 		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3123 			    sizeof(f_info->l_data)) &&
3124 		    f_info->fwd_id.hw_vsi_id ==
3125 		    list_itr->fltr_info.fwd_id.hw_vsi_id &&
3126 		    f_info->flag == list_itr->fltr_info.flag)
3127 			return list_itr;
3128 	}
3129 	return NULL;
3130 }
3131 
3132 /**
3133  * ice_remove_mac - remove a MAC address based filter rule
3134  * @hw: pointer to the hardware structure
3135  * @m_list: list of MAC addresses and forwarding information
3136  *
3137  * This function removes either a MAC filter rule or a specific VSI from a
3138  * VSI list for a multicast MAC address.
3139  *
3140  * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
3141  * ice_add_mac. Caller should be aware that this call will only work if all
3142  * the entries passed into m_list were added previously. It will not attempt to
3143  * do a partial remove of entries that were found.
3144  */
3145 enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
3146 {
3147 	struct ice_fltr_list_entry *list_itr, *tmp;
3148 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3149 
3150 	if (!m_list)
3151 		return ICE_ERR_PARAM;
3152 
3153 	rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3154 	list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
3155 		enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3156 		u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3157 		u16 vsi_handle;
3158 
3159 		if (l_type != ICE_SW_LKUP_MAC)
3160 			return ICE_ERR_PARAM;
3161 
3162 		vsi_handle = list_itr->fltr_info.vsi_handle;
3163 		if (!ice_is_vsi_valid(hw, vsi_handle))
3164 			return ICE_ERR_PARAM;
3165 
3166 		list_itr->fltr_info.fwd_id.hw_vsi_id =
3167 					ice_get_hw_vsi_num(hw, vsi_handle);
3168 		if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
3169 			/* Don't remove the unicast address that belongs to
3170 			 * another VSI on the switch, since it is not being
3171 			 * shared...
3172 			 */
3173 			mutex_lock(rule_lock);
3174 			if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
3175 						       &list_itr->fltr_info)) {
3176 				mutex_unlock(rule_lock);
3177 				return ICE_ERR_DOES_NOT_EXIST;
3178 			}
3179 			mutex_unlock(rule_lock);
3180 		}
3181 		list_itr->status = ice_remove_rule_internal(hw,
3182 							    ICE_SW_LKUP_MAC,
3183 							    list_itr);
3184 		if (list_itr->status)
3185 			return list_itr->status;
3186 	}
3187 	return 0;
3188 }
3189 
3190 /**
3191  * ice_remove_vlan - Remove VLAN based filter rule
3192  * @hw: pointer to the hardware structure
3193  * @v_list: list of VLAN entries and forwarding information
3194  */
3195 enum ice_status
3196 ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
3197 {
3198 	struct ice_fltr_list_entry *v_list_itr, *tmp;
3199 
3200 	if (!v_list || !hw)
3201 		return ICE_ERR_PARAM;
3202 
3203 	list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
3204 		enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3205 
3206 		if (l_type != ICE_SW_LKUP_VLAN)
3207 			return ICE_ERR_PARAM;
3208 		v_list_itr->status = ice_remove_rule_internal(hw,
3209 							      ICE_SW_LKUP_VLAN,
3210 							      v_list_itr);
3211 		if (v_list_itr->status)
3212 			return v_list_itr->status;
3213 	}
3214 	return 0;
3215 }
3216 
3217 /**
3218  * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3219  * @fm_entry: filter entry to inspect
3220  * @vsi_handle: VSI handle to compare with filter info
3221  */
3222 static bool
3223 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3224 {
3225 	return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3226 		 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3227 		(fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3228 		 fm_entry->vsi_list_info &&
3229 		 (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
3230 }
3231 
3232 /**
3233  * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
3234  * @hw: pointer to the hardware structure
3235  * @vsi_handle: VSI handle to remove filters from
3236  * @vsi_list_head: pointer to the list to add entry to
3237  * @fi: pointer to fltr_info of filter entry to copy & add
3238  *
3239  * Helper function, used when creating a list of filters to remove from
3240  * a specific VSI. The entry added to vsi_list_head is a COPY of the
3241  * original filter entry, with the exception of fltr_info.fltr_act and
3242  * fltr_info.fwd_id fields. These are set such that later logic can
3243  * extract which VSI to remove the fltr from, and pass on that information.
3244  */
3245 static enum ice_status
3246 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3247 			       struct list_head *vsi_list_head,
3248 			       struct ice_fltr_info *fi)
3249 {
3250 	struct ice_fltr_list_entry *tmp;
3251 
3252 	/* this memory is freed up in the caller function
3253 	 * once filters for this VSI are removed
3254 	 */
3255 	tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL);
3256 	if (!tmp)
3257 		return ICE_ERR_NO_MEMORY;
3258 
3259 	tmp->fltr_info = *fi;
3260 
3261 	/* Overwrite these fields to indicate which VSI to remove filter from,
3262 	 * so find and remove logic can extract the information from the
3263 	 * list entries. Note that original entries will still have proper
3264 	 * values.
3265 	 */
3266 	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
3267 	tmp->fltr_info.vsi_handle = vsi_handle;
3268 	tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3269 
3270 	list_add(&tmp->list_entry, vsi_list_head);
3271 
3272 	return 0;
3273 }
3274 
3275 /**
3276  * ice_add_to_vsi_fltr_list - Add VSI filters to the list
3277  * @hw: pointer to the hardware structure
3278  * @vsi_handle: VSI handle to remove filters from
3279  * @lkup_list_head: pointer to the list that has certain lookup type filters
3280  * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
3281  *
3282  * Locates all filters in lkup_list_head that are used by the given VSI,
3283  * and adds COPIES of those entries to vsi_list_head (intended to be used
3284  * to remove the listed filters).
3285  * Note that this means all entries in vsi_list_head must be explicitly
3286  * deallocated by the caller when done with list.
3287  */
3288 static enum ice_status
3289 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3290 			 struct list_head *lkup_list_head,
3291 			 struct list_head *vsi_list_head)
3292 {
3293 	struct ice_fltr_mgmt_list_entry *fm_entry;
3294 	enum ice_status status = 0;
3295 
3296 	/* check to make sure VSI ID is valid and within boundary */
3297 	if (!ice_is_vsi_valid(hw, vsi_handle))
3298 		return ICE_ERR_PARAM;
3299 
3300 	list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
3301 		if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
3302 			continue;
3303 
3304 		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3305 							vsi_list_head,
3306 							&fm_entry->fltr_info);
3307 		if (status)
3308 			return status;
3309 	}
3310 	return status;
3311 }
3312 
3313 /**
3314  * ice_determine_promisc_mask
3315  * @fi: filter info to parse
3316  *
3317  * Helper function to determine which ICE_PROMISC_ mask corresponds
3318  * to given filter into.
3319  */
3320 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
3321 {
3322 	u16 vid = fi->l_data.mac_vlan.vlan_id;
3323 	u8 *macaddr = fi->l_data.mac.mac_addr;
3324 	bool is_tx_fltr = false;
3325 	u8 promisc_mask = 0;
3326 
3327 	if (fi->flag == ICE_FLTR_TX)
3328 		is_tx_fltr = true;
3329 
3330 	if (is_broadcast_ether_addr(macaddr))
3331 		promisc_mask |= is_tx_fltr ?
3332 			ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
3333 	else if (is_multicast_ether_addr(macaddr))
3334 		promisc_mask |= is_tx_fltr ?
3335 			ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
3336 	else if (is_unicast_ether_addr(macaddr))
3337 		promisc_mask |= is_tx_fltr ?
3338 			ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
3339 	if (vid)
3340 		promisc_mask |= is_tx_fltr ?
3341 			ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
3342 
3343 	return promisc_mask;
3344 }
3345 
3346 /**
3347  * ice_remove_promisc - Remove promisc based filter rules
3348  * @hw: pointer to the hardware structure
3349  * @recp_id: recipe ID for which the rule needs to removed
3350  * @v_list: list of promisc entries
3351  */
3352 static enum ice_status
3353 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
3354 		   struct list_head *v_list)
3355 {
3356 	struct ice_fltr_list_entry *v_list_itr, *tmp;
3357 
3358 	list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
3359 		v_list_itr->status =
3360 			ice_remove_rule_internal(hw, recp_id, v_list_itr);
3361 		if (v_list_itr->status)
3362 			return v_list_itr->status;
3363 	}
3364 	return 0;
3365 }
3366 
3367 /**
3368  * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
3369  * @hw: pointer to the hardware structure
3370  * @vsi_handle: VSI handle to clear mode
3371  * @promisc_mask: mask of promiscuous config bits to clear
3372  * @vid: VLAN ID to clear VLAN promiscuous
3373  */
3374 enum ice_status
3375 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
3376 		      u16 vid)
3377 {
3378 	struct ice_switch_info *sw = hw->switch_info;
3379 	struct ice_fltr_list_entry *fm_entry, *tmp;
3380 	struct list_head remove_list_head;
3381 	struct ice_fltr_mgmt_list_entry *itr;
3382 	struct list_head *rule_head;
3383 	struct mutex *rule_lock;	/* Lock to protect filter rule list */
3384 	enum ice_status status = 0;
3385 	u8 recipe_id;
3386 
3387 	if (!ice_is_vsi_valid(hw, vsi_handle))
3388 		return ICE_ERR_PARAM;
3389 
3390 	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
3391 		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
3392 	else
3393 		recipe_id = ICE_SW_LKUP_PROMISC;
3394 
3395 	rule_head = &sw->recp_list[recipe_id].filt_rules;
3396 	rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
3397 
3398 	INIT_LIST_HEAD(&remove_list_head);
3399 
3400 	mutex_lock(rule_lock);
3401 	list_for_each_entry(itr, rule_head, list_entry) {
3402 		struct ice_fltr_info *fltr_info;
3403 		u8 fltr_promisc_mask = 0;
3404 
3405 		if (!ice_vsi_uses_fltr(itr, vsi_handle))
3406 			continue;
3407 		fltr_info = &itr->fltr_info;
3408 
3409 		if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
3410 		    vid != fltr_info->l_data.mac_vlan.vlan_id)
3411 			continue;
3412 
3413 		fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
3414 
3415 		/* Skip if filter is not completely specified by given mask */
3416 		if (fltr_promisc_mask & ~promisc_mask)
3417 			continue;
3418 
3419 		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3420 							&remove_list_head,
3421 							fltr_info);
3422 		if (status) {
3423 			mutex_unlock(rule_lock);
3424 			goto free_fltr_list;
3425 		}
3426 	}
3427 	mutex_unlock(rule_lock);
3428 
3429 	status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
3430 
3431 free_fltr_list:
3432 	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
3433 		list_del(&fm_entry->list_entry);
3434 		devm_kfree(ice_hw_to_dev(hw), fm_entry);
3435 	}
3436 
3437 	return status;
3438 }
3439 
3440 /**
3441  * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
3442  * @hw: pointer to the hardware structure
3443  * @vsi_handle: VSI handle to configure
3444  * @promisc_mask: mask of promiscuous config bits
3445  * @vid: VLAN ID to set VLAN promiscuous
3446  */
3447 enum ice_status
3448 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
3449 {
3450 	enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
3451 	struct ice_fltr_list_entry f_list_entry;
3452 	struct ice_fltr_info new_fltr;
3453 	enum ice_status status = 0;
3454 	bool is_tx_fltr;
3455 	u16 hw_vsi_id;
3456 	int pkt_type;
3457 	u8 recipe_id;
3458 
3459 	if (!ice_is_vsi_valid(hw, vsi_handle))
3460 		return ICE_ERR_PARAM;
3461 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3462 
3463 	memset(&new_fltr, 0, sizeof(new_fltr));
3464 
3465 	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
3466 		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
3467 		new_fltr.l_data.mac_vlan.vlan_id = vid;
3468 		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
3469 	} else {
3470 		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
3471 		recipe_id = ICE_SW_LKUP_PROMISC;
3472 	}
3473 
3474 	/* Separate filters must be set for each direction/packet type
3475 	 * combination, so we will loop over the mask value, store the
3476 	 * individual type, and clear it out in the input mask as it
3477 	 * is found.
3478 	 */
3479 	while (promisc_mask) {
3480 		u8 *mac_addr;
3481 
3482 		pkt_type = 0;
3483 		is_tx_fltr = false;
3484 
3485 		if (promisc_mask & ICE_PROMISC_UCAST_RX) {
3486 			promisc_mask &= ~ICE_PROMISC_UCAST_RX;
3487 			pkt_type = UCAST_FLTR;
3488 		} else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
3489 			promisc_mask &= ~ICE_PROMISC_UCAST_TX;
3490 			pkt_type = UCAST_FLTR;
3491 			is_tx_fltr = true;
3492 		} else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
3493 			promisc_mask &= ~ICE_PROMISC_MCAST_RX;
3494 			pkt_type = MCAST_FLTR;
3495 		} else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
3496 			promisc_mask &= ~ICE_PROMISC_MCAST_TX;
3497 			pkt_type = MCAST_FLTR;
3498 			is_tx_fltr = true;
3499 		} else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
3500 			promisc_mask &= ~ICE_PROMISC_BCAST_RX;
3501 			pkt_type = BCAST_FLTR;
3502 		} else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
3503 			promisc_mask &= ~ICE_PROMISC_BCAST_TX;
3504 			pkt_type = BCAST_FLTR;
3505 			is_tx_fltr = true;
3506 		}
3507 
3508 		/* Check for VLAN promiscuous flag */
3509 		if (promisc_mask & ICE_PROMISC_VLAN_RX) {
3510 			promisc_mask &= ~ICE_PROMISC_VLAN_RX;
3511 		} else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
3512 			promisc_mask &= ~ICE_PROMISC_VLAN_TX;
3513 			is_tx_fltr = true;
3514 		}
3515 
3516 		/* Set filter DA based on packet type */
3517 		mac_addr = new_fltr.l_data.mac.mac_addr;
3518 		if (pkt_type == BCAST_FLTR) {
3519 			eth_broadcast_addr(mac_addr);
3520 		} else if (pkt_type == MCAST_FLTR ||
3521 			   pkt_type == UCAST_FLTR) {
3522 			/* Use the dummy ether header DA */
3523 			ether_addr_copy(mac_addr, dummy_eth_header);
3524 			if (pkt_type == MCAST_FLTR)
3525 				mac_addr[0] |= 0x1;	/* Set multicast bit */
3526 		}
3527 
3528 		/* Need to reset this to zero for all iterations */
3529 		new_fltr.flag = 0;
3530 		if (is_tx_fltr) {
3531 			new_fltr.flag |= ICE_FLTR_TX;
3532 			new_fltr.src = hw_vsi_id;
3533 		} else {
3534 			new_fltr.flag |= ICE_FLTR_RX;
3535 			new_fltr.src = hw->port_info->lport;
3536 		}
3537 
3538 		new_fltr.fltr_act = ICE_FWD_TO_VSI;
3539 		new_fltr.vsi_handle = vsi_handle;
3540 		new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
3541 		f_list_entry.fltr_info = new_fltr;
3542 
3543 		status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
3544 		if (status)
3545 			goto set_promisc_exit;
3546 	}
3547 
3548 set_promisc_exit:
3549 	return status;
3550 }
3551 
3552 /**
3553  * ice_set_vlan_vsi_promisc
3554  * @hw: pointer to the hardware structure
3555  * @vsi_handle: VSI handle to configure
3556  * @promisc_mask: mask of promiscuous config bits
3557  * @rm_vlan_promisc: Clear VLANs VSI promisc mode
3558  *
3559  * Configure VSI with all associated VLANs to given promiscuous mode(s)
3560  */
3561 enum ice_status
3562 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
3563 			 bool rm_vlan_promisc)
3564 {
3565 	struct ice_switch_info *sw = hw->switch_info;
3566 	struct ice_fltr_list_entry *list_itr, *tmp;
3567 	struct list_head vsi_list_head;
3568 	struct list_head *vlan_head;
3569 	struct mutex *vlan_lock; /* Lock to protect filter rule list */
3570 	enum ice_status status;
3571 	u16 vlan_id;
3572 
3573 	INIT_LIST_HEAD(&vsi_list_head);
3574 	vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3575 	vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
3576 	mutex_lock(vlan_lock);
3577 	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
3578 					  &vsi_list_head);
3579 	mutex_unlock(vlan_lock);
3580 	if (status)
3581 		goto free_fltr_list;
3582 
3583 	list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
3584 		vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
3585 		if (rm_vlan_promisc)
3586 			status = ice_clear_vsi_promisc(hw, vsi_handle,
3587 						       promisc_mask, vlan_id);
3588 		else
3589 			status = ice_set_vsi_promisc(hw, vsi_handle,
3590 						     promisc_mask, vlan_id);
3591 		if (status)
3592 			break;
3593 	}
3594 
3595 free_fltr_list:
3596 	list_for_each_entry_safe(list_itr, tmp, &vsi_list_head, list_entry) {
3597 		list_del(&list_itr->list_entry);
3598 		devm_kfree(ice_hw_to_dev(hw), list_itr);
3599 	}
3600 	return status;
3601 }
3602 
3603 /**
3604  * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
3605  * @hw: pointer to the hardware structure
3606  * @vsi_handle: VSI handle to remove filters from
3607  * @lkup: switch rule filter lookup type
3608  */
3609 static void
3610 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
3611 			 enum ice_sw_lkup_type lkup)
3612 {
3613 	struct ice_switch_info *sw = hw->switch_info;
3614 	struct ice_fltr_list_entry *fm_entry;
3615 	struct list_head remove_list_head;
3616 	struct list_head *rule_head;
3617 	struct ice_fltr_list_entry *tmp;
3618 	struct mutex *rule_lock;	/* Lock to protect filter rule list */
3619 	enum ice_status status;
3620 
3621 	INIT_LIST_HEAD(&remove_list_head);
3622 	rule_lock = &sw->recp_list[lkup].filt_rule_lock;
3623 	rule_head = &sw->recp_list[lkup].filt_rules;
3624 	mutex_lock(rule_lock);
3625 	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
3626 					  &remove_list_head);
3627 	mutex_unlock(rule_lock);
3628 	if (status)
3629 		goto free_fltr_list;
3630 
3631 	switch (lkup) {
3632 	case ICE_SW_LKUP_MAC:
3633 		ice_remove_mac(hw, &remove_list_head);
3634 		break;
3635 	case ICE_SW_LKUP_VLAN:
3636 		ice_remove_vlan(hw, &remove_list_head);
3637 		break;
3638 	case ICE_SW_LKUP_PROMISC:
3639 	case ICE_SW_LKUP_PROMISC_VLAN:
3640 		ice_remove_promisc(hw, lkup, &remove_list_head);
3641 		break;
3642 	case ICE_SW_LKUP_MAC_VLAN:
3643 	case ICE_SW_LKUP_ETHERTYPE:
3644 	case ICE_SW_LKUP_ETHERTYPE_MAC:
3645 	case ICE_SW_LKUP_DFLT:
3646 	case ICE_SW_LKUP_LAST:
3647 	default:
3648 		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
3649 		break;
3650 	}
3651 
3652 free_fltr_list:
3653 	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
3654 		list_del(&fm_entry->list_entry);
3655 		devm_kfree(ice_hw_to_dev(hw), fm_entry);
3656 	}
3657 }
3658 
3659 /**
3660  * ice_remove_vsi_fltr - Remove all filters for a VSI
3661  * @hw: pointer to the hardware structure
3662  * @vsi_handle: VSI handle to remove filters from
3663  */
3664 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
3665 {
3666 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
3667 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
3668 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
3669 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
3670 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
3671 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
3672 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
3673 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
3674 }
3675 
3676 /**
3677  * ice_alloc_res_cntr - allocating resource counter
3678  * @hw: pointer to the hardware structure
3679  * @type: type of resource
3680  * @alloc_shared: if set it is shared else dedicated
3681  * @num_items: number of entries requested for FD resource type
3682  * @counter_id: counter index returned by AQ call
3683  */
3684 enum ice_status
3685 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
3686 		   u16 *counter_id)
3687 {
3688 	struct ice_aqc_alloc_free_res_elem *buf;
3689 	enum ice_status status;
3690 	u16 buf_len;
3691 
3692 	/* Allocate resource */
3693 	buf_len = struct_size(buf, elem, 1);
3694 	buf = kzalloc(buf_len, GFP_KERNEL);
3695 	if (!buf)
3696 		return ICE_ERR_NO_MEMORY;
3697 
3698 	buf->num_elems = cpu_to_le16(num_items);
3699 	buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
3700 				      ICE_AQC_RES_TYPE_M) | alloc_shared);
3701 
3702 	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
3703 				       ice_aqc_opc_alloc_res, NULL);
3704 	if (status)
3705 		goto exit;
3706 
3707 	*counter_id = le16_to_cpu(buf->elem[0].e.sw_resp);
3708 
3709 exit:
3710 	kfree(buf);
3711 	return status;
3712 }
3713 
3714 /**
3715  * ice_free_res_cntr - free resource counter
3716  * @hw: pointer to the hardware structure
3717  * @type: type of resource
3718  * @alloc_shared: if set it is shared else dedicated
3719  * @num_items: number of entries to be freed for FD resource type
3720  * @counter_id: counter ID resource which needs to be freed
3721  */
3722 enum ice_status
3723 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
3724 		  u16 counter_id)
3725 {
3726 	struct ice_aqc_alloc_free_res_elem *buf;
3727 	enum ice_status status;
3728 	u16 buf_len;
3729 
3730 	/* Free resource */
3731 	buf_len = struct_size(buf, elem, 1);
3732 	buf = kzalloc(buf_len, GFP_KERNEL);
3733 	if (!buf)
3734 		return ICE_ERR_NO_MEMORY;
3735 
3736 	buf->num_elems = cpu_to_le16(num_items);
3737 	buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
3738 				      ICE_AQC_RES_TYPE_M) | alloc_shared);
3739 	buf->elem[0].e.sw_resp = cpu_to_le16(counter_id);
3740 
3741 	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
3742 				       ice_aqc_opc_free_res, NULL);
3743 	if (status)
3744 		ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
3745 
3746 	kfree(buf);
3747 	return status;
3748 }
3749 
3750 /* This is mapping table entry that maps every word within a given protocol
3751  * structure to the real byte offset as per the specification of that
3752  * protocol header.
3753  * for example dst address is 3 words in ethertype header and corresponding
3754  * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
3755  * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
3756  * matching entry describing its field. This needs to be updated if new
3757  * structure is added to that union.
3758  */
3759 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
3760 	{ ICE_MAC_OFOS,		{ 0, 2, 4, 6, 8, 10, 12 } },
3761 	{ ICE_MAC_IL,		{ 0, 2, 4, 6, 8, 10, 12 } },
3762 	{ ICE_ETYPE_OL,		{ 0 } },
3763 	{ ICE_VLAN_OFOS,	{ 2, 0 } },
3764 	{ ICE_IPV4_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
3765 	{ ICE_IPV4_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
3766 	{ ICE_IPV6_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
3767 				 26, 28, 30, 32, 34, 36, 38 } },
3768 	{ ICE_IPV6_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
3769 				 26, 28, 30, 32, 34, 36, 38 } },
3770 	{ ICE_TCP_IL,		{ 0, 2 } },
3771 	{ ICE_UDP_OF,		{ 0, 2 } },
3772 	{ ICE_UDP_ILOS,		{ 0, 2 } },
3773 	{ ICE_VXLAN,		{ 8, 10, 12, 14 } },
3774 	{ ICE_GENEVE,		{ 8, 10, 12, 14 } },
3775 	{ ICE_NVGRE,            { 0, 2, 4, 6 } },
3776 };
3777 
3778 static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
3779 	{ ICE_MAC_OFOS,		ICE_MAC_OFOS_HW },
3780 	{ ICE_MAC_IL,		ICE_MAC_IL_HW },
3781 	{ ICE_ETYPE_OL,		ICE_ETYPE_OL_HW },
3782 	{ ICE_VLAN_OFOS,	ICE_VLAN_OL_HW },
3783 	{ ICE_IPV4_OFOS,	ICE_IPV4_OFOS_HW },
3784 	{ ICE_IPV4_IL,		ICE_IPV4_IL_HW },
3785 	{ ICE_IPV6_OFOS,	ICE_IPV6_OFOS_HW },
3786 	{ ICE_IPV6_IL,		ICE_IPV6_IL_HW },
3787 	{ ICE_TCP_IL,		ICE_TCP_IL_HW },
3788 	{ ICE_UDP_OF,		ICE_UDP_OF_HW },
3789 	{ ICE_UDP_ILOS,		ICE_UDP_ILOS_HW },
3790 	{ ICE_VXLAN,		ICE_UDP_OF_HW },
3791 	{ ICE_GENEVE,		ICE_UDP_OF_HW },
3792 	{ ICE_NVGRE,            ICE_GRE_OF_HW },
3793 };
3794 
3795 /**
3796  * ice_find_recp - find a recipe
3797  * @hw: pointer to the hardware structure
3798  * @lkup_exts: extension sequence to match
3799  *
3800  * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
3801  */
3802 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
3803 {
3804 	bool refresh_required = true;
3805 	struct ice_sw_recipe *recp;
3806 	u8 i;
3807 
3808 	/* Walk through existing recipes to find a match */
3809 	recp = hw->switch_info->recp_list;
3810 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3811 		/* If recipe was not created for this ID, in SW bookkeeping,
3812 		 * check if FW has an entry for this recipe. If the FW has an
3813 		 * entry update it in our SW bookkeeping and continue with the
3814 		 * matching.
3815 		 */
3816 		if (!recp[i].recp_created)
3817 			if (ice_get_recp_frm_fw(hw,
3818 						hw->switch_info->recp_list, i,
3819 						&refresh_required))
3820 				continue;
3821 
3822 		/* Skip inverse action recipes */
3823 		if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
3824 		    ICE_AQ_RECIPE_ACT_INV_ACT)
3825 			continue;
3826 
3827 		/* if number of words we are looking for match */
3828 		if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
3829 			struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
3830 			struct ice_fv_word *be = lkup_exts->fv_words;
3831 			u16 *cr = recp[i].lkup_exts.field_mask;
3832 			u16 *de = lkup_exts->field_mask;
3833 			bool found = true;
3834 			u8 pe, qr;
3835 
3836 			/* ar, cr, and qr are related to the recipe words, while
3837 			 * be, de, and pe are related to the lookup words
3838 			 */
3839 			for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
3840 				for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
3841 				     qr++) {
3842 					if (ar[qr].off == be[pe].off &&
3843 					    ar[qr].prot_id == be[pe].prot_id &&
3844 					    cr[qr] == de[pe])
3845 						/* Found the "pe"th word in the
3846 						 * given recipe
3847 						 */
3848 						break;
3849 				}
3850 				/* After walking through all the words in the
3851 				 * "i"th recipe if "p"th word was not found then
3852 				 * this recipe is not what we are looking for.
3853 				 * So break out from this loop and try the next
3854 				 * recipe
3855 				 */
3856 				if (qr >= recp[i].lkup_exts.n_val_words) {
3857 					found = false;
3858 					break;
3859 				}
3860 			}
3861 			/* If for "i"th recipe the found was never set to false
3862 			 * then it means we found our match
3863 			 */
3864 			if (found)
3865 				return i; /* Return the recipe ID */
3866 		}
3867 	}
3868 	return ICE_MAX_NUM_RECIPES;
3869 }
3870 
3871 /**
3872  * ice_prot_type_to_id - get protocol ID from protocol type
3873  * @type: protocol type
3874  * @id: pointer to variable that will receive the ID
3875  *
3876  * Returns true if found, false otherwise
3877  */
3878 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
3879 {
3880 	u8 i;
3881 
3882 	for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
3883 		if (ice_prot_id_tbl[i].type == type) {
3884 			*id = ice_prot_id_tbl[i].protocol_id;
3885 			return true;
3886 		}
3887 	return false;
3888 }
3889 
3890 /**
3891  * ice_fill_valid_words - count valid words
3892  * @rule: advanced rule with lookup information
3893  * @lkup_exts: byte offset extractions of the words that are valid
3894  *
3895  * calculate valid words in a lookup rule using mask value
3896  */
3897 static u8
3898 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
3899 		     struct ice_prot_lkup_ext *lkup_exts)
3900 {
3901 	u8 j, word, prot_id, ret_val;
3902 
3903 	if (!ice_prot_type_to_id(rule->type, &prot_id))
3904 		return 0;
3905 
3906 	word = lkup_exts->n_val_words;
3907 
3908 	for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
3909 		if (((u16 *)&rule->m_u)[j] &&
3910 		    rule->type < ARRAY_SIZE(ice_prot_ext)) {
3911 			/* No more space to accommodate */
3912 			if (word >= ICE_MAX_CHAIN_WORDS)
3913 				return 0;
3914 			lkup_exts->fv_words[word].off =
3915 				ice_prot_ext[rule->type].offs[j];
3916 			lkup_exts->fv_words[word].prot_id =
3917 				ice_prot_id_tbl[rule->type].protocol_id;
3918 			lkup_exts->field_mask[word] =
3919 				be16_to_cpu(((__force __be16 *)&rule->m_u)[j]);
3920 			word++;
3921 		}
3922 
3923 	ret_val = word - lkup_exts->n_val_words;
3924 	lkup_exts->n_val_words = word;
3925 
3926 	return ret_val;
3927 }
3928 
3929 /**
3930  * ice_create_first_fit_recp_def - Create a recipe grouping
3931  * @hw: pointer to the hardware structure
3932  * @lkup_exts: an array of protocol header extractions
3933  * @rg_list: pointer to a list that stores new recipe groups
3934  * @recp_cnt: pointer to a variable that stores returned number of recipe groups
3935  *
3936  * Using first fit algorithm, take all the words that are still not done
3937  * and start grouping them in 4-word groups. Each group makes up one
3938  * recipe.
3939  */
3940 static enum ice_status
3941 ice_create_first_fit_recp_def(struct ice_hw *hw,
3942 			      struct ice_prot_lkup_ext *lkup_exts,
3943 			      struct list_head *rg_list,
3944 			      u8 *recp_cnt)
3945 {
3946 	struct ice_pref_recipe_group *grp = NULL;
3947 	u8 j;
3948 
3949 	*recp_cnt = 0;
3950 
3951 	/* Walk through every word in the rule to check if it is not done. If so
3952 	 * then this word needs to be part of a new recipe.
3953 	 */
3954 	for (j = 0; j < lkup_exts->n_val_words; j++)
3955 		if (!test_bit(j, lkup_exts->done)) {
3956 			if (!grp ||
3957 			    grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
3958 				struct ice_recp_grp_entry *entry;
3959 
3960 				entry = devm_kzalloc(ice_hw_to_dev(hw),
3961 						     sizeof(*entry),
3962 						     GFP_KERNEL);
3963 				if (!entry)
3964 					return ICE_ERR_NO_MEMORY;
3965 				list_add(&entry->l_entry, rg_list);
3966 				grp = &entry->r_group;
3967 				(*recp_cnt)++;
3968 			}
3969 
3970 			grp->pairs[grp->n_val_pairs].prot_id =
3971 				lkup_exts->fv_words[j].prot_id;
3972 			grp->pairs[grp->n_val_pairs].off =
3973 				lkup_exts->fv_words[j].off;
3974 			grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
3975 			grp->n_val_pairs++;
3976 		}
3977 
3978 	return 0;
3979 }
3980 
3981 /**
3982  * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
3983  * @hw: pointer to the hardware structure
3984  * @fv_list: field vector with the extraction sequence information
3985  * @rg_list: recipe groupings with protocol-offset pairs
3986  *
3987  * Helper function to fill in the field vector indices for protocol-offset
3988  * pairs. These indexes are then ultimately programmed into a recipe.
3989  */
3990 static enum ice_status
3991 ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list,
3992 		       struct list_head *rg_list)
3993 {
3994 	struct ice_sw_fv_list_entry *fv;
3995 	struct ice_recp_grp_entry *rg;
3996 	struct ice_fv_word *fv_ext;
3997 
3998 	if (list_empty(fv_list))
3999 		return 0;
4000 
4001 	fv = list_first_entry(fv_list, struct ice_sw_fv_list_entry,
4002 			      list_entry);
4003 	fv_ext = fv->fv_ptr->ew;
4004 
4005 	list_for_each_entry(rg, rg_list, l_entry) {
4006 		u8 i;
4007 
4008 		for (i = 0; i < rg->r_group.n_val_pairs; i++) {
4009 			struct ice_fv_word *pr;
4010 			bool found = false;
4011 			u16 mask;
4012 			u8 j;
4013 
4014 			pr = &rg->r_group.pairs[i];
4015 			mask = rg->r_group.mask[i];
4016 
4017 			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
4018 				if (fv_ext[j].prot_id == pr->prot_id &&
4019 				    fv_ext[j].off == pr->off) {
4020 					found = true;
4021 
4022 					/* Store index of field vector */
4023 					rg->fv_idx[i] = j;
4024 					rg->fv_mask[i] = mask;
4025 					break;
4026 				}
4027 
4028 			/* Protocol/offset could not be found, caller gave an
4029 			 * invalid pair
4030 			 */
4031 			if (!found)
4032 				return ICE_ERR_PARAM;
4033 		}
4034 	}
4035 
4036 	return 0;
4037 }
4038 
4039 /**
4040  * ice_find_free_recp_res_idx - find free result indexes for recipe
4041  * @hw: pointer to hardware structure
4042  * @profiles: bitmap of profiles that will be associated with the new recipe
4043  * @free_idx: pointer to variable to receive the free index bitmap
4044  *
4045  * The algorithm used here is:
4046  *	1. When creating a new recipe, create a set P which contains all
4047  *	   Profiles that will be associated with our new recipe
4048  *
4049  *	2. For each Profile p in set P:
4050  *	    a. Add all recipes associated with Profile p into set R
4051  *	    b. Optional : PossibleIndexes &= profile[p].possibleIndexes
4052  *		[initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
4053  *		i. Or just assume they all have the same possible indexes:
4054  *			44, 45, 46, 47
4055  *			i.e., PossibleIndexes = 0x0000F00000000000
4056  *
4057  *	3. For each Recipe r in set R:
4058  *	    a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
4059  *	    b. FreeIndexes = UsedIndexes ^ PossibleIndexes
4060  *
4061  *	FreeIndexes will contain the bits indicating the indexes free for use,
4062  *      then the code needs to update the recipe[r].used_result_idx_bits to
4063  *      indicate which indexes were selected for use by this recipe.
4064  */
4065 static u16
4066 ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
4067 			   unsigned long *free_idx)
4068 {
4069 	DECLARE_BITMAP(possible_idx, ICE_MAX_FV_WORDS);
4070 	DECLARE_BITMAP(recipes, ICE_MAX_NUM_RECIPES);
4071 	DECLARE_BITMAP(used_idx, ICE_MAX_FV_WORDS);
4072 	u16 bit;
4073 
4074 	bitmap_zero(possible_idx, ICE_MAX_FV_WORDS);
4075 	bitmap_zero(recipes, ICE_MAX_NUM_RECIPES);
4076 	bitmap_zero(used_idx, ICE_MAX_FV_WORDS);
4077 	bitmap_zero(free_idx, ICE_MAX_FV_WORDS);
4078 
4079 	bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
4080 
4081 	/* For each profile we are going to associate the recipe with, add the
4082 	 * recipes that are associated with that profile. This will give us
4083 	 * the set of recipes that our recipe may collide with. Also, determine
4084 	 * what possible result indexes are usable given this set of profiles.
4085 	 */
4086 	for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
4087 		bitmap_or(recipes, recipes, profile_to_recipe[bit],
4088 			  ICE_MAX_NUM_RECIPES);
4089 		bitmap_and(possible_idx, possible_idx,
4090 			   hw->switch_info->prof_res_bm[bit],
4091 			   ICE_MAX_FV_WORDS);
4092 	}
4093 
4094 	/* For each recipe that our new recipe may collide with, determine
4095 	 * which indexes have been used.
4096 	 */
4097 	for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
4098 		bitmap_or(used_idx, used_idx,
4099 			  hw->switch_info->recp_list[bit].res_idxs,
4100 			  ICE_MAX_FV_WORDS);
4101 
4102 	bitmap_xor(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
4103 
4104 	/* return number of free indexes */
4105 	return (u16)bitmap_weight(free_idx, ICE_MAX_FV_WORDS);
4106 }
4107 
4108 /**
4109  * ice_add_sw_recipe - function to call AQ calls to create switch recipe
4110  * @hw: pointer to hardware structure
4111  * @rm: recipe management list entry
4112  * @profiles: bitmap of profiles that will be associated.
4113  */
4114 static enum ice_status
4115 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
4116 		  unsigned long *profiles)
4117 {
4118 	DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS);
4119 	struct ice_aqc_recipe_data_elem *tmp;
4120 	struct ice_aqc_recipe_data_elem *buf;
4121 	struct ice_recp_grp_entry *entry;
4122 	enum ice_status status;
4123 	u16 free_res_idx;
4124 	u16 recipe_count;
4125 	u8 chain_idx;
4126 	u8 recps = 0;
4127 
4128 	/* When more than one recipe are required, another recipe is needed to
4129 	 * chain them together. Matching a tunnel metadata ID takes up one of
4130 	 * the match fields in the chaining recipe reducing the number of
4131 	 * chained recipes by one.
4132 	 */
4133 	 /* check number of free result indices */
4134 	bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS);
4135 	free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
4136 
4137 	ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
4138 		  free_res_idx, rm->n_grp_count);
4139 
4140 	if (rm->n_grp_count > 1) {
4141 		if (rm->n_grp_count > free_res_idx)
4142 			return ICE_ERR_MAX_LIMIT;
4143 
4144 		rm->n_grp_count++;
4145 	}
4146 
4147 	if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
4148 		return ICE_ERR_MAX_LIMIT;
4149 
4150 	tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
4151 	if (!tmp)
4152 		return ICE_ERR_NO_MEMORY;
4153 
4154 	buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf),
4155 			   GFP_KERNEL);
4156 	if (!buf) {
4157 		status = ICE_ERR_NO_MEMORY;
4158 		goto err_mem;
4159 	}
4160 
4161 	bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
4162 	recipe_count = ICE_MAX_NUM_RECIPES;
4163 	status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
4164 				   NULL);
4165 	if (status || recipe_count == 0)
4166 		goto err_unroll;
4167 
4168 	/* Allocate the recipe resources, and configure them according to the
4169 	 * match fields from protocol headers and extracted field vectors.
4170 	 */
4171 	chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
4172 	list_for_each_entry(entry, &rm->rg_list, l_entry) {
4173 		u8 i;
4174 
4175 		status = ice_alloc_recipe(hw, &entry->rid);
4176 		if (status)
4177 			goto err_unroll;
4178 
4179 		/* Clear the result index of the located recipe, as this will be
4180 		 * updated, if needed, later in the recipe creation process.
4181 		 */
4182 		tmp[0].content.result_indx = 0;
4183 
4184 		buf[recps] = tmp[0];
4185 		buf[recps].recipe_indx = (u8)entry->rid;
4186 		/* if the recipe is a non-root recipe RID should be programmed
4187 		 * as 0 for the rules to be applied correctly.
4188 		 */
4189 		buf[recps].content.rid = 0;
4190 		memset(&buf[recps].content.lkup_indx, 0,
4191 		       sizeof(buf[recps].content.lkup_indx));
4192 
4193 		/* All recipes use look-up index 0 to match switch ID. */
4194 		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
4195 		buf[recps].content.mask[0] =
4196 			cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
4197 		/* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
4198 		 * to be 0
4199 		 */
4200 		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
4201 			buf[recps].content.lkup_indx[i] = 0x80;
4202 			buf[recps].content.mask[i] = 0;
4203 		}
4204 
4205 		for (i = 0; i < entry->r_group.n_val_pairs; i++) {
4206 			buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
4207 			buf[recps].content.mask[i + 1] =
4208 				cpu_to_le16(entry->fv_mask[i]);
4209 		}
4210 
4211 		if (rm->n_grp_count > 1) {
4212 			/* Checks to see if there really is a valid result index
4213 			 * that can be used.
4214 			 */
4215 			if (chain_idx >= ICE_MAX_FV_WORDS) {
4216 				ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
4217 				status = ICE_ERR_MAX_LIMIT;
4218 				goto err_unroll;
4219 			}
4220 
4221 			entry->chain_idx = chain_idx;
4222 			buf[recps].content.result_indx =
4223 				ICE_AQ_RECIPE_RESULT_EN |
4224 				((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
4225 				 ICE_AQ_RECIPE_RESULT_DATA_M);
4226 			clear_bit(chain_idx, result_idx_bm);
4227 			chain_idx = find_first_bit(result_idx_bm,
4228 						   ICE_MAX_FV_WORDS);
4229 		}
4230 
4231 		/* fill recipe dependencies */
4232 		bitmap_zero((unsigned long *)buf[recps].recipe_bitmap,
4233 			    ICE_MAX_NUM_RECIPES);
4234 		set_bit(buf[recps].recipe_indx,
4235 			(unsigned long *)buf[recps].recipe_bitmap);
4236 		buf[recps].content.act_ctrl_fwd_priority = rm->priority;
4237 		recps++;
4238 	}
4239 
4240 	if (rm->n_grp_count == 1) {
4241 		rm->root_rid = buf[0].recipe_indx;
4242 		set_bit(buf[0].recipe_indx, rm->r_bitmap);
4243 		buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
4244 		if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
4245 			memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
4246 			       sizeof(buf[0].recipe_bitmap));
4247 		} else {
4248 			status = ICE_ERR_BAD_PTR;
4249 			goto err_unroll;
4250 		}
4251 		/* Applicable only for ROOT_RECIPE, set the fwd_priority for
4252 		 * the recipe which is getting created if specified
4253 		 * by user. Usually any advanced switch filter, which results
4254 		 * into new extraction sequence, ended up creating a new recipe
4255 		 * of type ROOT and usually recipes are associated with profiles
4256 		 * Switch rule referreing newly created recipe, needs to have
4257 		 * either/or 'fwd' or 'join' priority, otherwise switch rule
4258 		 * evaluation will not happen correctly. In other words, if
4259 		 * switch rule to be evaluated on priority basis, then recipe
4260 		 * needs to have priority, otherwise it will be evaluated last.
4261 		 */
4262 		buf[0].content.act_ctrl_fwd_priority = rm->priority;
4263 	} else {
4264 		struct ice_recp_grp_entry *last_chain_entry;
4265 		u16 rid, i;
4266 
4267 		/* Allocate the last recipe that will chain the outcomes of the
4268 		 * other recipes together
4269 		 */
4270 		status = ice_alloc_recipe(hw, &rid);
4271 		if (status)
4272 			goto err_unroll;
4273 
4274 		buf[recps].recipe_indx = (u8)rid;
4275 		buf[recps].content.rid = (u8)rid;
4276 		buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
4277 		/* the new entry created should also be part of rg_list to
4278 		 * make sure we have complete recipe
4279 		 */
4280 		last_chain_entry = devm_kzalloc(ice_hw_to_dev(hw),
4281 						sizeof(*last_chain_entry),
4282 						GFP_KERNEL);
4283 		if (!last_chain_entry) {
4284 			status = ICE_ERR_NO_MEMORY;
4285 			goto err_unroll;
4286 		}
4287 		last_chain_entry->rid = rid;
4288 		memset(&buf[recps].content.lkup_indx, 0,
4289 		       sizeof(buf[recps].content.lkup_indx));
4290 		/* All recipes use look-up index 0 to match switch ID. */
4291 		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
4292 		buf[recps].content.mask[0] =
4293 			cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
4294 		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
4295 			buf[recps].content.lkup_indx[i] =
4296 				ICE_AQ_RECIPE_LKUP_IGNORE;
4297 			buf[recps].content.mask[i] = 0;
4298 		}
4299 
4300 		i = 1;
4301 		/* update r_bitmap with the recp that is used for chaining */
4302 		set_bit(rid, rm->r_bitmap);
4303 		/* this is the recipe that chains all the other recipes so it
4304 		 * should not have a chaining ID to indicate the same
4305 		 */
4306 		last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
4307 		list_for_each_entry(entry, &rm->rg_list, l_entry) {
4308 			last_chain_entry->fv_idx[i] = entry->chain_idx;
4309 			buf[recps].content.lkup_indx[i] = entry->chain_idx;
4310 			buf[recps].content.mask[i++] = cpu_to_le16(0xFFFF);
4311 			set_bit(entry->rid, rm->r_bitmap);
4312 		}
4313 		list_add(&last_chain_entry->l_entry, &rm->rg_list);
4314 		if (sizeof(buf[recps].recipe_bitmap) >=
4315 		    sizeof(rm->r_bitmap)) {
4316 			memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
4317 			       sizeof(buf[recps].recipe_bitmap));
4318 		} else {
4319 			status = ICE_ERR_BAD_PTR;
4320 			goto err_unroll;
4321 		}
4322 		buf[recps].content.act_ctrl_fwd_priority = rm->priority;
4323 
4324 		recps++;
4325 		rm->root_rid = (u8)rid;
4326 	}
4327 	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
4328 	if (status)
4329 		goto err_unroll;
4330 
4331 	status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
4332 	ice_release_change_lock(hw);
4333 	if (status)
4334 		goto err_unroll;
4335 
4336 	/* Every recipe that just got created add it to the recipe
4337 	 * book keeping list
4338 	 */
4339 	list_for_each_entry(entry, &rm->rg_list, l_entry) {
4340 		struct ice_switch_info *sw = hw->switch_info;
4341 		bool is_root, idx_found = false;
4342 		struct ice_sw_recipe *recp;
4343 		u16 idx, buf_idx = 0;
4344 
4345 		/* find buffer index for copying some data */
4346 		for (idx = 0; idx < rm->n_grp_count; idx++)
4347 			if (buf[idx].recipe_indx == entry->rid) {
4348 				buf_idx = idx;
4349 				idx_found = true;
4350 			}
4351 
4352 		if (!idx_found) {
4353 			status = ICE_ERR_OUT_OF_RANGE;
4354 			goto err_unroll;
4355 		}
4356 
4357 		recp = &sw->recp_list[entry->rid];
4358 		is_root = (rm->root_rid == entry->rid);
4359 		recp->is_root = is_root;
4360 
4361 		recp->root_rid = entry->rid;
4362 		recp->big_recp = (is_root && rm->n_grp_count > 1);
4363 
4364 		memcpy(&recp->ext_words, entry->r_group.pairs,
4365 		       entry->r_group.n_val_pairs * sizeof(struct ice_fv_word));
4366 
4367 		memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
4368 		       sizeof(recp->r_bitmap));
4369 
4370 		/* Copy non-result fv index values and masks to recipe. This
4371 		 * call will also update the result recipe bitmask.
4372 		 */
4373 		ice_collect_result_idx(&buf[buf_idx], recp);
4374 
4375 		/* for non-root recipes, also copy to the root, this allows
4376 		 * easier matching of a complete chained recipe
4377 		 */
4378 		if (!is_root)
4379 			ice_collect_result_idx(&buf[buf_idx],
4380 					       &sw->recp_list[rm->root_rid]);
4381 
4382 		recp->n_ext_words = entry->r_group.n_val_pairs;
4383 		recp->chain_idx = entry->chain_idx;
4384 		recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
4385 		recp->n_grp_count = rm->n_grp_count;
4386 		recp->tun_type = rm->tun_type;
4387 		recp->recp_created = true;
4388 	}
4389 	rm->root_buf = buf;
4390 	kfree(tmp);
4391 	return status;
4392 
4393 err_unroll:
4394 err_mem:
4395 	kfree(tmp);
4396 	devm_kfree(ice_hw_to_dev(hw), buf);
4397 	return status;
4398 }
4399 
4400 /**
4401  * ice_create_recipe_group - creates recipe group
4402  * @hw: pointer to hardware structure
4403  * @rm: recipe management list entry
4404  * @lkup_exts: lookup elements
4405  */
4406 static enum ice_status
4407 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
4408 			struct ice_prot_lkup_ext *lkup_exts)
4409 {
4410 	enum ice_status status;
4411 	u8 recp_count = 0;
4412 
4413 	rm->n_grp_count = 0;
4414 
4415 	/* Create recipes for words that are marked not done by packing them
4416 	 * as best fit.
4417 	 */
4418 	status = ice_create_first_fit_recp_def(hw, lkup_exts,
4419 					       &rm->rg_list, &recp_count);
4420 	if (!status) {
4421 		rm->n_grp_count += recp_count;
4422 		rm->n_ext_words = lkup_exts->n_val_words;
4423 		memcpy(&rm->ext_words, lkup_exts->fv_words,
4424 		       sizeof(rm->ext_words));
4425 		memcpy(rm->word_masks, lkup_exts->field_mask,
4426 		       sizeof(rm->word_masks));
4427 	}
4428 
4429 	return status;
4430 }
4431 
4432 /**
4433  * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
4434  * @hw: pointer to hardware structure
4435  * @lkups: lookup elements or match criteria for the advanced recipe, one
4436  *	   structure per protocol header
4437  * @lkups_cnt: number of protocols
4438  * @bm: bitmap of field vectors to consider
4439  * @fv_list: pointer to a list that holds the returned field vectors
4440  */
4441 static enum ice_status
4442 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
4443 	   unsigned long *bm, struct list_head *fv_list)
4444 {
4445 	enum ice_status status;
4446 	u8 *prot_ids;
4447 	u16 i;
4448 
4449 	prot_ids = kcalloc(lkups_cnt, sizeof(*prot_ids), GFP_KERNEL);
4450 	if (!prot_ids)
4451 		return ICE_ERR_NO_MEMORY;
4452 
4453 	for (i = 0; i < lkups_cnt; i++)
4454 		if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
4455 			status = ICE_ERR_CFG;
4456 			goto free_mem;
4457 		}
4458 
4459 	/* Find field vectors that include all specified protocol types */
4460 	status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
4461 
4462 free_mem:
4463 	kfree(prot_ids);
4464 	return status;
4465 }
4466 
4467 /**
4468  * ice_tun_type_match_word - determine if tun type needs a match mask
4469  * @tun_type: tunnel type
4470  * @mask: mask to be used for the tunnel
4471  */
4472 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
4473 {
4474 	switch (tun_type) {
4475 	case ICE_SW_TUN_GENEVE:
4476 	case ICE_SW_TUN_VXLAN:
4477 	case ICE_SW_TUN_NVGRE:
4478 		*mask = ICE_TUN_FLAG_MASK;
4479 		return true;
4480 
4481 	default:
4482 		*mask = 0;
4483 		return false;
4484 	}
4485 }
4486 
4487 /**
4488  * ice_add_special_words - Add words that are not protocols, such as metadata
4489  * @rinfo: other information regarding the rule e.g. priority and action info
4490  * @lkup_exts: lookup word structure
4491  */
4492 static enum ice_status
4493 ice_add_special_words(struct ice_adv_rule_info *rinfo,
4494 		      struct ice_prot_lkup_ext *lkup_exts)
4495 {
4496 	u16 mask;
4497 
4498 	/* If this is a tunneled packet, then add recipe index to match the
4499 	 * tunnel bit in the packet metadata flags.
4500 	 */
4501 	if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
4502 		if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
4503 			u8 word = lkup_exts->n_val_words++;
4504 
4505 			lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
4506 			lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
4507 			lkup_exts->field_mask[word] = mask;
4508 		} else {
4509 			return ICE_ERR_MAX_LIMIT;
4510 		}
4511 	}
4512 
4513 	return 0;
4514 }
4515 
4516 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
4517  * @hw: pointer to hardware structure
4518  * @rinfo: other information regarding the rule e.g. priority and action info
4519  * @bm: pointer to memory for returning the bitmap of field vectors
4520  */
4521 static void
4522 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
4523 			 unsigned long *bm)
4524 {
4525 	enum ice_prof_type prof_type;
4526 
4527 	bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
4528 
4529 	switch (rinfo->tun_type) {
4530 	case ICE_NON_TUN:
4531 		prof_type = ICE_PROF_NON_TUN;
4532 		break;
4533 	case ICE_ALL_TUNNELS:
4534 		prof_type = ICE_PROF_TUN_ALL;
4535 		break;
4536 	case ICE_SW_TUN_GENEVE:
4537 	case ICE_SW_TUN_VXLAN:
4538 		prof_type = ICE_PROF_TUN_UDP;
4539 		break;
4540 	case ICE_SW_TUN_NVGRE:
4541 		prof_type = ICE_PROF_TUN_GRE;
4542 		break;
4543 	default:
4544 		prof_type = ICE_PROF_ALL;
4545 		break;
4546 	}
4547 
4548 	ice_get_sw_fv_bitmap(hw, prof_type, bm);
4549 }
4550 
4551 /**
4552  * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
4553  * @hw: pointer to hardware structure
4554  * @lkups: lookup elements or match criteria for the advanced recipe, one
4555  *  structure per protocol header
4556  * @lkups_cnt: number of protocols
4557  * @rinfo: other information regarding the rule e.g. priority and action info
4558  * @rid: return the recipe ID of the recipe created
4559  */
4560 static enum ice_status
4561 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
4562 		   u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
4563 {
4564 	DECLARE_BITMAP(fv_bitmap, ICE_MAX_NUM_PROFILES);
4565 	DECLARE_BITMAP(profiles, ICE_MAX_NUM_PROFILES);
4566 	struct ice_prot_lkup_ext *lkup_exts;
4567 	struct ice_recp_grp_entry *r_entry;
4568 	struct ice_sw_fv_list_entry *fvit;
4569 	struct ice_recp_grp_entry *r_tmp;
4570 	struct ice_sw_fv_list_entry *tmp;
4571 	enum ice_status status = 0;
4572 	struct ice_sw_recipe *rm;
4573 	u8 i;
4574 
4575 	if (!lkups_cnt)
4576 		return ICE_ERR_PARAM;
4577 
4578 	lkup_exts = kzalloc(sizeof(*lkup_exts), GFP_KERNEL);
4579 	if (!lkup_exts)
4580 		return ICE_ERR_NO_MEMORY;
4581 
4582 	/* Determine the number of words to be matched and if it exceeds a
4583 	 * recipe's restrictions
4584 	 */
4585 	for (i = 0; i < lkups_cnt; i++) {
4586 		u16 count;
4587 
4588 		if (lkups[i].type >= ICE_PROTOCOL_LAST) {
4589 			status = ICE_ERR_CFG;
4590 			goto err_free_lkup_exts;
4591 		}
4592 
4593 		count = ice_fill_valid_words(&lkups[i], lkup_exts);
4594 		if (!count) {
4595 			status = ICE_ERR_CFG;
4596 			goto err_free_lkup_exts;
4597 		}
4598 	}
4599 
4600 	rm = kzalloc(sizeof(*rm), GFP_KERNEL);
4601 	if (!rm) {
4602 		status = ICE_ERR_NO_MEMORY;
4603 		goto err_free_lkup_exts;
4604 	}
4605 
4606 	/* Get field vectors that contain fields extracted from all the protocol
4607 	 * headers being programmed.
4608 	 */
4609 	INIT_LIST_HEAD(&rm->fv_list);
4610 	INIT_LIST_HEAD(&rm->rg_list);
4611 
4612 	/* Get bitmap of field vectors (profiles) that are compatible with the
4613 	 * rule request; only these will be searched in the subsequent call to
4614 	 * ice_get_fv.
4615 	 */
4616 	ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
4617 
4618 	status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
4619 	if (status)
4620 		goto err_unroll;
4621 
4622 	/* Create any special protocol/offset pairs, such as looking at tunnel
4623 	 * bits by extracting metadata
4624 	 */
4625 	status = ice_add_special_words(rinfo, lkup_exts);
4626 	if (status)
4627 		goto err_free_lkup_exts;
4628 
4629 	/* Group match words into recipes using preferred recipe grouping
4630 	 * criteria.
4631 	 */
4632 	status = ice_create_recipe_group(hw, rm, lkup_exts);
4633 	if (status)
4634 		goto err_unroll;
4635 
4636 	/* set the recipe priority if specified */
4637 	rm->priority = (u8)rinfo->priority;
4638 
4639 	/* Find offsets from the field vector. Pick the first one for all the
4640 	 * recipes.
4641 	 */
4642 	status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
4643 	if (status)
4644 		goto err_unroll;
4645 
4646 	/* get bitmap of all profiles the recipe will be associated with */
4647 	bitmap_zero(profiles, ICE_MAX_NUM_PROFILES);
4648 	list_for_each_entry(fvit, &rm->fv_list, list_entry) {
4649 		ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
4650 		set_bit((u16)fvit->profile_id, profiles);
4651 	}
4652 
4653 	/* Look for a recipe which matches our requested fv / mask list */
4654 	*rid = ice_find_recp(hw, lkup_exts);
4655 	if (*rid < ICE_MAX_NUM_RECIPES)
4656 		/* Success if found a recipe that match the existing criteria */
4657 		goto err_unroll;
4658 
4659 	/* Recipe we need does not exist, add a recipe */
4660 	status = ice_add_sw_recipe(hw, rm, profiles);
4661 	if (status)
4662 		goto err_unroll;
4663 
4664 	/* Associate all the recipes created with all the profiles in the
4665 	 * common field vector.
4666 	 */
4667 	list_for_each_entry(fvit, &rm->fv_list, list_entry) {
4668 		DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
4669 		u16 j;
4670 
4671 		status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
4672 						      (u8 *)r_bitmap, NULL);
4673 		if (status)
4674 			goto err_unroll;
4675 
4676 		bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap,
4677 			  ICE_MAX_NUM_RECIPES);
4678 		status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
4679 		if (status)
4680 			goto err_unroll;
4681 
4682 		status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
4683 						      (u8 *)r_bitmap,
4684 						      NULL);
4685 		ice_release_change_lock(hw);
4686 
4687 		if (status)
4688 			goto err_unroll;
4689 
4690 		/* Update profile to recipe bitmap array */
4691 		bitmap_copy(profile_to_recipe[fvit->profile_id], r_bitmap,
4692 			    ICE_MAX_NUM_RECIPES);
4693 
4694 		/* Update recipe to profile bitmap array */
4695 		for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
4696 			set_bit((u16)fvit->profile_id, recipe_to_profile[j]);
4697 	}
4698 
4699 	*rid = rm->root_rid;
4700 	memcpy(&hw->switch_info->recp_list[*rid].lkup_exts, lkup_exts,
4701 	       sizeof(*lkup_exts));
4702 err_unroll:
4703 	list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) {
4704 		list_del(&r_entry->l_entry);
4705 		devm_kfree(ice_hw_to_dev(hw), r_entry);
4706 	}
4707 
4708 	list_for_each_entry_safe(fvit, tmp, &rm->fv_list, list_entry) {
4709 		list_del(&fvit->list_entry);
4710 		devm_kfree(ice_hw_to_dev(hw), fvit);
4711 	}
4712 
4713 	if (rm->root_buf)
4714 		devm_kfree(ice_hw_to_dev(hw), rm->root_buf);
4715 
4716 	kfree(rm);
4717 
4718 err_free_lkup_exts:
4719 	kfree(lkup_exts);
4720 
4721 	return status;
4722 }
4723 
4724 /**
4725  * ice_find_dummy_packet - find dummy packet
4726  *
4727  * @lkups: lookup elements or match criteria for the advanced recipe, one
4728  *	   structure per protocol header
4729  * @lkups_cnt: number of protocols
4730  * @tun_type: tunnel type
4731  * @pkt: dummy packet to fill according to filter match criteria
4732  * @pkt_len: packet length of dummy packet
4733  * @offsets: pointer to receive the pointer to the offsets for the packet
4734  */
4735 static void
4736 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
4737 		      enum ice_sw_tunnel_type tun_type,
4738 		      const u8 **pkt, u16 *pkt_len,
4739 		      const struct ice_dummy_pkt_offsets **offsets)
4740 {
4741 	bool tcp = false, udp = false, ipv6 = false, vlan = false;
4742 	u16 i;
4743 
4744 	for (i = 0; i < lkups_cnt; i++) {
4745 		if (lkups[i].type == ICE_UDP_ILOS)
4746 			udp = true;
4747 		else if (lkups[i].type == ICE_TCP_IL)
4748 			tcp = true;
4749 		else if (lkups[i].type == ICE_IPV6_OFOS)
4750 			ipv6 = true;
4751 		else if (lkups[i].type == ICE_VLAN_OFOS)
4752 			vlan = true;
4753 		else if (lkups[i].type == ICE_ETYPE_OL &&
4754 			 lkups[i].h_u.ethertype.ethtype_id ==
4755 				cpu_to_be16(ICE_IPV6_ETHER_ID) &&
4756 			 lkups[i].m_u.ethertype.ethtype_id ==
4757 					cpu_to_be16(0xFFFF))
4758 			ipv6 = true;
4759 	}
4760 
4761 	if (tun_type == ICE_SW_TUN_NVGRE) {
4762 		if (tcp) {
4763 			*pkt = dummy_gre_tcp_packet;
4764 			*pkt_len = sizeof(dummy_gre_tcp_packet);
4765 			*offsets = dummy_gre_tcp_packet_offsets;
4766 			return;
4767 		}
4768 
4769 		*pkt = dummy_gre_udp_packet;
4770 		*pkt_len = sizeof(dummy_gre_udp_packet);
4771 		*offsets = dummy_gre_udp_packet_offsets;
4772 		return;
4773 	}
4774 
4775 	if (tun_type == ICE_SW_TUN_VXLAN ||
4776 	    tun_type == ICE_SW_TUN_GENEVE) {
4777 		if (tcp) {
4778 			*pkt = dummy_udp_tun_tcp_packet;
4779 			*pkt_len = sizeof(dummy_udp_tun_tcp_packet);
4780 			*offsets = dummy_udp_tun_tcp_packet_offsets;
4781 			return;
4782 		}
4783 
4784 		*pkt = dummy_udp_tun_udp_packet;
4785 		*pkt_len = sizeof(dummy_udp_tun_udp_packet);
4786 		*offsets = dummy_udp_tun_udp_packet_offsets;
4787 		return;
4788 	}
4789 
4790 	if (udp && !ipv6) {
4791 		if (vlan) {
4792 			*pkt = dummy_vlan_udp_packet;
4793 			*pkt_len = sizeof(dummy_vlan_udp_packet);
4794 			*offsets = dummy_vlan_udp_packet_offsets;
4795 			return;
4796 		}
4797 		*pkt = dummy_udp_packet;
4798 		*pkt_len = sizeof(dummy_udp_packet);
4799 		*offsets = dummy_udp_packet_offsets;
4800 		return;
4801 	} else if (udp && ipv6) {
4802 		if (vlan) {
4803 			*pkt = dummy_vlan_udp_ipv6_packet;
4804 			*pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
4805 			*offsets = dummy_vlan_udp_ipv6_packet_offsets;
4806 			return;
4807 		}
4808 		*pkt = dummy_udp_ipv6_packet;
4809 		*pkt_len = sizeof(dummy_udp_ipv6_packet);
4810 		*offsets = dummy_udp_ipv6_packet_offsets;
4811 		return;
4812 	} else if ((tcp && ipv6) || ipv6) {
4813 		if (vlan) {
4814 			*pkt = dummy_vlan_tcp_ipv6_packet;
4815 			*pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
4816 			*offsets = dummy_vlan_tcp_ipv6_packet_offsets;
4817 			return;
4818 		}
4819 		*pkt = dummy_tcp_ipv6_packet;
4820 		*pkt_len = sizeof(dummy_tcp_ipv6_packet);
4821 		*offsets = dummy_tcp_ipv6_packet_offsets;
4822 		return;
4823 	}
4824 
4825 	if (vlan) {
4826 		*pkt = dummy_vlan_tcp_packet;
4827 		*pkt_len = sizeof(dummy_vlan_tcp_packet);
4828 		*offsets = dummy_vlan_tcp_packet_offsets;
4829 	} else {
4830 		*pkt = dummy_tcp_packet;
4831 		*pkt_len = sizeof(dummy_tcp_packet);
4832 		*offsets = dummy_tcp_packet_offsets;
4833 	}
4834 }
4835 
4836 /**
4837  * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
4838  *
4839  * @lkups: lookup elements or match criteria for the advanced recipe, one
4840  *	   structure per protocol header
4841  * @lkups_cnt: number of protocols
4842  * @s_rule: stores rule information from the match criteria
4843  * @dummy_pkt: dummy packet to fill according to filter match criteria
4844  * @pkt_len: packet length of dummy packet
4845  * @offsets: offset info for the dummy packet
4846  */
4847 static enum ice_status
4848 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
4849 			  struct ice_aqc_sw_rules_elem *s_rule,
4850 			  const u8 *dummy_pkt, u16 pkt_len,
4851 			  const struct ice_dummy_pkt_offsets *offsets)
4852 {
4853 	u8 *pkt;
4854 	u16 i;
4855 
4856 	/* Start with a packet with a pre-defined/dummy content. Then, fill
4857 	 * in the header values to be looked up or matched.
4858 	 */
4859 	pkt = s_rule->pdata.lkup_tx_rx.hdr;
4860 
4861 	memcpy(pkt, dummy_pkt, pkt_len);
4862 
4863 	for (i = 0; i < lkups_cnt; i++) {
4864 		enum ice_protocol_type type;
4865 		u16 offset = 0, len = 0, j;
4866 		bool found = false;
4867 
4868 		/* find the start of this layer; it should be found since this
4869 		 * was already checked when search for the dummy packet
4870 		 */
4871 		type = lkups[i].type;
4872 		for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
4873 			if (type == offsets[j].type) {
4874 				offset = offsets[j].offset;
4875 				found = true;
4876 				break;
4877 			}
4878 		}
4879 		/* this should never happen in a correct calling sequence */
4880 		if (!found)
4881 			return ICE_ERR_PARAM;
4882 
4883 		switch (lkups[i].type) {
4884 		case ICE_MAC_OFOS:
4885 		case ICE_MAC_IL:
4886 			len = sizeof(struct ice_ether_hdr);
4887 			break;
4888 		case ICE_ETYPE_OL:
4889 			len = sizeof(struct ice_ethtype_hdr);
4890 			break;
4891 		case ICE_VLAN_OFOS:
4892 			len = sizeof(struct ice_vlan_hdr);
4893 			break;
4894 		case ICE_IPV4_OFOS:
4895 		case ICE_IPV4_IL:
4896 			len = sizeof(struct ice_ipv4_hdr);
4897 			break;
4898 		case ICE_IPV6_OFOS:
4899 		case ICE_IPV6_IL:
4900 			len = sizeof(struct ice_ipv6_hdr);
4901 			break;
4902 		case ICE_TCP_IL:
4903 		case ICE_UDP_OF:
4904 		case ICE_UDP_ILOS:
4905 			len = sizeof(struct ice_l4_hdr);
4906 			break;
4907 		case ICE_SCTP_IL:
4908 			len = sizeof(struct ice_sctp_hdr);
4909 			break;
4910 		case ICE_NVGRE:
4911 			len = sizeof(struct ice_nvgre_hdr);
4912 			break;
4913 		case ICE_VXLAN:
4914 		case ICE_GENEVE:
4915 			len = sizeof(struct ice_udp_tnl_hdr);
4916 			break;
4917 		default:
4918 			return ICE_ERR_PARAM;
4919 		}
4920 
4921 		/* the length should be a word multiple */
4922 		if (len % ICE_BYTES_PER_WORD)
4923 			return ICE_ERR_CFG;
4924 
4925 		/* We have the offset to the header start, the length, the
4926 		 * caller's header values and mask. Use this information to
4927 		 * copy the data into the dummy packet appropriately based on
4928 		 * the mask. Note that we need to only write the bits as
4929 		 * indicated by the mask to make sure we don't improperly write
4930 		 * over any significant packet data.
4931 		 */
4932 		for (j = 0; j < len / sizeof(u16); j++)
4933 			if (((u16 *)&lkups[i].m_u)[j])
4934 				((u16 *)(pkt + offset))[j] =
4935 					(((u16 *)(pkt + offset))[j] &
4936 					 ~((u16 *)&lkups[i].m_u)[j]) |
4937 					(((u16 *)&lkups[i].h_u)[j] &
4938 					 ((u16 *)&lkups[i].m_u)[j]);
4939 	}
4940 
4941 	s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(pkt_len);
4942 
4943 	return 0;
4944 }
4945 
4946 /**
4947  * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
4948  * @hw: pointer to the hardware structure
4949  * @tun_type: tunnel type
4950  * @pkt: dummy packet to fill in
4951  * @offsets: offset info for the dummy packet
4952  */
4953 static enum ice_status
4954 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
4955 			u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
4956 {
4957 	u16 open_port, i;
4958 
4959 	switch (tun_type) {
4960 	case ICE_SW_TUN_VXLAN:
4961 	case ICE_SW_TUN_GENEVE:
4962 		if (!ice_get_open_tunnel_port(hw, &open_port))
4963 			return ICE_ERR_CFG;
4964 		break;
4965 
4966 	default:
4967 		/* Nothing needs to be done for this tunnel type */
4968 		return 0;
4969 	}
4970 
4971 	/* Find the outer UDP protocol header and insert the port number */
4972 	for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
4973 		if (offsets[i].type == ICE_UDP_OF) {
4974 			struct ice_l4_hdr *hdr;
4975 			u16 offset;
4976 
4977 			offset = offsets[i].offset;
4978 			hdr = (struct ice_l4_hdr *)&pkt[offset];
4979 			hdr->dst_port = cpu_to_be16(open_port);
4980 
4981 			return 0;
4982 		}
4983 	}
4984 
4985 	return ICE_ERR_CFG;
4986 }
4987 
4988 /**
4989  * ice_find_adv_rule_entry - Search a rule entry
4990  * @hw: pointer to the hardware structure
4991  * @lkups: lookup elements or match criteria for the advanced recipe, one
4992  *	   structure per protocol header
4993  * @lkups_cnt: number of protocols
4994  * @recp_id: recipe ID for which we are finding the rule
4995  * @rinfo: other information regarding the rule e.g. priority and action info
4996  *
4997  * Helper function to search for a given advance rule entry
4998  * Returns pointer to entry storing the rule if found
4999  */
5000 static struct ice_adv_fltr_mgmt_list_entry *
5001 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5002 			u16 lkups_cnt, u16 recp_id,
5003 			struct ice_adv_rule_info *rinfo)
5004 {
5005 	struct ice_adv_fltr_mgmt_list_entry *list_itr;
5006 	struct ice_switch_info *sw = hw->switch_info;
5007 	int i;
5008 
5009 	list_for_each_entry(list_itr, &sw->recp_list[recp_id].filt_rules,
5010 			    list_entry) {
5011 		bool lkups_matched = true;
5012 
5013 		if (lkups_cnt != list_itr->lkups_cnt)
5014 			continue;
5015 		for (i = 0; i < list_itr->lkups_cnt; i++)
5016 			if (memcmp(&list_itr->lkups[i], &lkups[i],
5017 				   sizeof(*lkups))) {
5018 				lkups_matched = false;
5019 				break;
5020 			}
5021 		if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
5022 		    rinfo->tun_type == list_itr->rule_info.tun_type &&
5023 		    lkups_matched)
5024 			return list_itr;
5025 	}
5026 	return NULL;
5027 }
5028 
5029 /**
5030  * ice_adv_add_update_vsi_list
5031  * @hw: pointer to the hardware structure
5032  * @m_entry: pointer to current adv filter management list entry
5033  * @cur_fltr: filter information from the book keeping entry
5034  * @new_fltr: filter information with the new VSI to be added
5035  *
5036  * Call AQ command to add or update previously created VSI list with new VSI.
5037  *
5038  * Helper function to do book keeping associated with adding filter information
5039  * The algorithm to do the booking keeping is described below :
5040  * When a VSI needs to subscribe to a given advanced filter
5041  *	if only one VSI has been added till now
5042  *		Allocate a new VSI list and add two VSIs
5043  *		to this list using switch rule command
5044  *		Update the previously created switch rule with the
5045  *		newly created VSI list ID
5046  *	if a VSI list was previously created
5047  *		Add the new VSI to the previously created VSI list set
5048  *		using the update switch rule command
5049  */
5050 static enum ice_status
5051 ice_adv_add_update_vsi_list(struct ice_hw *hw,
5052 			    struct ice_adv_fltr_mgmt_list_entry *m_entry,
5053 			    struct ice_adv_rule_info *cur_fltr,
5054 			    struct ice_adv_rule_info *new_fltr)
5055 {
5056 	enum ice_status status;
5057 	u16 vsi_list_id = 0;
5058 
5059 	if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5060 	    cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
5061 	    cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
5062 		return ICE_ERR_NOT_IMPL;
5063 
5064 	if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5065 	     new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
5066 	    (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5067 	     cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
5068 		return ICE_ERR_NOT_IMPL;
5069 
5070 	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
5071 		 /* Only one entry existed in the mapping and it was not already
5072 		  * a part of a VSI list. So, create a VSI list with the old and
5073 		  * new VSIs.
5074 		  */
5075 		struct ice_fltr_info tmp_fltr;
5076 		u16 vsi_handle_arr[2];
5077 
5078 		/* A rule already exists with the new VSI being added */
5079 		if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
5080 		    new_fltr->sw_act.fwd_id.hw_vsi_id)
5081 			return ICE_ERR_ALREADY_EXISTS;
5082 
5083 		vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
5084 		vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
5085 		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
5086 						  &vsi_list_id,
5087 						  ICE_SW_LKUP_LAST);
5088 		if (status)
5089 			return status;
5090 
5091 		memset(&tmp_fltr, 0, sizeof(tmp_fltr));
5092 		tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
5093 		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
5094 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
5095 		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
5096 		tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
5097 
5098 		/* Update the previous switch rule of "forward to VSI" to
5099 		 * "fwd to VSI list"
5100 		 */
5101 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
5102 		if (status)
5103 			return status;
5104 
5105 		cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
5106 		cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
5107 		m_entry->vsi_list_info =
5108 			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
5109 						vsi_list_id);
5110 	} else {
5111 		u16 vsi_handle = new_fltr->sw_act.vsi_handle;
5112 
5113 		if (!m_entry->vsi_list_info)
5114 			return ICE_ERR_CFG;
5115 
5116 		/* A rule already exists with the new VSI being added */
5117 		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
5118 			return 0;
5119 
5120 		/* Update the previously created VSI list set with
5121 		 * the new VSI ID passed in
5122 		 */
5123 		vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
5124 
5125 		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
5126 						  vsi_list_id, false,
5127 						  ice_aqc_opc_update_sw_rules,
5128 						  ICE_SW_LKUP_LAST);
5129 		/* update VSI list mapping info with new VSI ID */
5130 		if (!status)
5131 			set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
5132 	}
5133 	if (!status)
5134 		m_entry->vsi_count++;
5135 	return status;
5136 }
5137 
5138 /**
5139  * ice_add_adv_rule - helper function to create an advanced switch rule
5140  * @hw: pointer to the hardware structure
5141  * @lkups: information on the words that needs to be looked up. All words
5142  * together makes one recipe
5143  * @lkups_cnt: num of entries in the lkups array
5144  * @rinfo: other information related to the rule that needs to be programmed
5145  * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
5146  *               ignored is case of error.
5147  *
5148  * This function can program only 1 rule at a time. The lkups is used to
5149  * describe the all the words that forms the "lookup" portion of the recipe.
5150  * These words can span multiple protocols. Callers to this function need to
5151  * pass in a list of protocol headers with lookup information along and mask
5152  * that determines which words are valid from the given protocol header.
5153  * rinfo describes other information related to this rule such as forwarding
5154  * IDs, priority of this rule, etc.
5155  */
5156 enum ice_status
5157 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5158 		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
5159 		 struct ice_rule_query_data *added_entry)
5160 {
5161 	struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
5162 	u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
5163 	const struct ice_dummy_pkt_offsets *pkt_offsets;
5164 	struct ice_aqc_sw_rules_elem *s_rule = NULL;
5165 	struct list_head *rule_head;
5166 	struct ice_switch_info *sw;
5167 	enum ice_status status;
5168 	const u8 *pkt = NULL;
5169 	u16 word_cnt;
5170 	u32 act = 0;
5171 	u8 q_rgn;
5172 
5173 	/* Initialize profile to result index bitmap */
5174 	if (!hw->switch_info->prof_res_bm_init) {
5175 		hw->switch_info->prof_res_bm_init = 1;
5176 		ice_init_prof_result_bm(hw);
5177 	}
5178 
5179 	if (!lkups_cnt)
5180 		return ICE_ERR_PARAM;
5181 
5182 	/* get # of words we need to match */
5183 	word_cnt = 0;
5184 	for (i = 0; i < lkups_cnt; i++) {
5185 		u16 j, *ptr;
5186 
5187 		ptr = (u16 *)&lkups[i].m_u;
5188 		for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
5189 			if (ptr[j] != 0)
5190 				word_cnt++;
5191 	}
5192 
5193 	if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
5194 		return ICE_ERR_PARAM;
5195 
5196 	/* make sure that we can locate a dummy packet */
5197 	ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
5198 			      &pkt_offsets);
5199 	if (!pkt) {
5200 		status = ICE_ERR_PARAM;
5201 		goto err_ice_add_adv_rule;
5202 	}
5203 
5204 	if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5205 	      rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
5206 	      rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
5207 	      rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
5208 		return ICE_ERR_CFG;
5209 
5210 	vsi_handle = rinfo->sw_act.vsi_handle;
5211 	if (!ice_is_vsi_valid(hw, vsi_handle))
5212 		return ICE_ERR_PARAM;
5213 
5214 	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
5215 		rinfo->sw_act.fwd_id.hw_vsi_id =
5216 			ice_get_hw_vsi_num(hw, vsi_handle);
5217 	if (rinfo->sw_act.flag & ICE_FLTR_TX)
5218 		rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
5219 
5220 	status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
5221 	if (status)
5222 		return status;
5223 	m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
5224 	if (m_entry) {
5225 		/* we have to add VSI to VSI_LIST and increment vsi_count.
5226 		 * Also Update VSI list so that we can change forwarding rule
5227 		 * if the rule already exists, we will check if it exists with
5228 		 * same vsi_id, if not then add it to the VSI list if it already
5229 		 * exists if not then create a VSI list and add the existing VSI
5230 		 * ID and the new VSI ID to the list
5231 		 * We will add that VSI to the list
5232 		 */
5233 		status = ice_adv_add_update_vsi_list(hw, m_entry,
5234 						     &m_entry->rule_info,
5235 						     rinfo);
5236 		if (added_entry) {
5237 			added_entry->rid = rid;
5238 			added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
5239 			added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
5240 		}
5241 		return status;
5242 	}
5243 	rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
5244 	s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
5245 	if (!s_rule)
5246 		return ICE_ERR_NO_MEMORY;
5247 	if (!rinfo->flags_info.act_valid) {
5248 		act |= ICE_SINGLE_ACT_LAN_ENABLE;
5249 		act |= ICE_SINGLE_ACT_LB_ENABLE;
5250 	} else {
5251 		act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE |
5252 						ICE_SINGLE_ACT_LB_ENABLE);
5253 	}
5254 
5255 	switch (rinfo->sw_act.fltr_act) {
5256 	case ICE_FWD_TO_VSI:
5257 		act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
5258 			ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
5259 		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
5260 		break;
5261 	case ICE_FWD_TO_Q:
5262 		act |= ICE_SINGLE_ACT_TO_Q;
5263 		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
5264 		       ICE_SINGLE_ACT_Q_INDEX_M;
5265 		break;
5266 	case ICE_FWD_TO_QGRP:
5267 		q_rgn = rinfo->sw_act.qgrp_size > 0 ?
5268 			(u8)ilog2(rinfo->sw_act.qgrp_size) : 0;
5269 		act |= ICE_SINGLE_ACT_TO_Q;
5270 		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
5271 		       ICE_SINGLE_ACT_Q_INDEX_M;
5272 		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
5273 		       ICE_SINGLE_ACT_Q_REGION_M;
5274 		break;
5275 	case ICE_DROP_PACKET:
5276 		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
5277 		       ICE_SINGLE_ACT_VALID_BIT;
5278 		break;
5279 	default:
5280 		status = ICE_ERR_CFG;
5281 		goto err_ice_add_adv_rule;
5282 	}
5283 
5284 	/* set the rule LOOKUP type based on caller specified 'Rx'
5285 	 * instead of hardcoding it to be either LOOKUP_TX/RX
5286 	 *
5287 	 * for 'Rx' set the source to be the port number
5288 	 * for 'Tx' set the source to be the source HW VSI number (determined
5289 	 * by caller)
5290 	 */
5291 	if (rinfo->rx) {
5292 		s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
5293 		s_rule->pdata.lkup_tx_rx.src =
5294 			cpu_to_le16(hw->port_info->lport);
5295 	} else {
5296 		s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
5297 		s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(rinfo->sw_act.src);
5298 	}
5299 
5300 	s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(rid);
5301 	s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);
5302 
5303 	status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
5304 					   pkt_len, pkt_offsets);
5305 	if (status)
5306 		goto err_ice_add_adv_rule;
5307 
5308 	if (rinfo->tun_type != ICE_NON_TUN) {
5309 		status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
5310 						 s_rule->pdata.lkup_tx_rx.hdr,
5311 						 pkt_offsets);
5312 		if (status)
5313 			goto err_ice_add_adv_rule;
5314 	}
5315 
5316 	status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
5317 				 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
5318 				 NULL);
5319 	if (status)
5320 		goto err_ice_add_adv_rule;
5321 	adv_fltr = devm_kzalloc(ice_hw_to_dev(hw),
5322 				sizeof(struct ice_adv_fltr_mgmt_list_entry),
5323 				GFP_KERNEL);
5324 	if (!adv_fltr) {
5325 		status = ICE_ERR_NO_MEMORY;
5326 		goto err_ice_add_adv_rule;
5327 	}
5328 
5329 	adv_fltr->lkups = devm_kmemdup(ice_hw_to_dev(hw), lkups,
5330 				       lkups_cnt * sizeof(*lkups), GFP_KERNEL);
5331 	if (!adv_fltr->lkups) {
5332 		status = ICE_ERR_NO_MEMORY;
5333 		goto err_ice_add_adv_rule;
5334 	}
5335 
5336 	adv_fltr->lkups_cnt = lkups_cnt;
5337 	adv_fltr->rule_info = *rinfo;
5338 	adv_fltr->rule_info.fltr_rule_id =
5339 		le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
5340 	sw = hw->switch_info;
5341 	sw->recp_list[rid].adv_rule = true;
5342 	rule_head = &sw->recp_list[rid].filt_rules;
5343 
5344 	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
5345 		adv_fltr->vsi_count = 1;
5346 
5347 	/* Add rule entry to book keeping list */
5348 	list_add(&adv_fltr->list_entry, rule_head);
5349 	if (added_entry) {
5350 		added_entry->rid = rid;
5351 		added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
5352 		added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
5353 	}
5354 err_ice_add_adv_rule:
5355 	if (status && adv_fltr) {
5356 		devm_kfree(ice_hw_to_dev(hw), adv_fltr->lkups);
5357 		devm_kfree(ice_hw_to_dev(hw), adv_fltr);
5358 	}
5359 
5360 	kfree(s_rule);
5361 
5362 	return status;
5363 }
5364 
5365 /**
5366  * ice_replay_vsi_fltr - Replay filters for requested VSI
5367  * @hw: pointer to the hardware structure
5368  * @vsi_handle: driver VSI handle
5369  * @recp_id: Recipe ID for which rules need to be replayed
5370  * @list_head: list for which filters need to be replayed
5371  *
5372  * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
5373  * It is required to pass valid VSI handle.
5374  */
5375 static enum ice_status
5376 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
5377 		    struct list_head *list_head)
5378 {
5379 	struct ice_fltr_mgmt_list_entry *itr;
5380 	enum ice_status status = 0;
5381 	u16 hw_vsi_id;
5382 
5383 	if (list_empty(list_head))
5384 		return status;
5385 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5386 
5387 	list_for_each_entry(itr, list_head, list_entry) {
5388 		struct ice_fltr_list_entry f_entry;
5389 
5390 		f_entry.fltr_info = itr->fltr_info;
5391 		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
5392 		    itr->fltr_info.vsi_handle == vsi_handle) {
5393 			/* update the src in case it is VSI num */
5394 			if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
5395 				f_entry.fltr_info.src = hw_vsi_id;
5396 			status = ice_add_rule_internal(hw, recp_id, &f_entry);
5397 			if (status)
5398 				goto end;
5399 			continue;
5400 		}
5401 		if (!itr->vsi_list_info ||
5402 		    !test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
5403 			continue;
5404 		/* Clearing it so that the logic can add it back */
5405 		clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
5406 		f_entry.fltr_info.vsi_handle = vsi_handle;
5407 		f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
5408 		/* update the src in case it is VSI num */
5409 		if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
5410 			f_entry.fltr_info.src = hw_vsi_id;
5411 		if (recp_id == ICE_SW_LKUP_VLAN)
5412 			status = ice_add_vlan_internal(hw, &f_entry);
5413 		else
5414 			status = ice_add_rule_internal(hw, recp_id, &f_entry);
5415 		if (status)
5416 			goto end;
5417 	}
5418 end:
5419 	return status;
5420 }
5421 
5422 /**
5423  * ice_adv_rem_update_vsi_list
5424  * @hw: pointer to the hardware structure
5425  * @vsi_handle: VSI handle of the VSI to remove
5426  * @fm_list: filter management entry for which the VSI list management needs to
5427  *	     be done
5428  */
5429 static enum ice_status
5430 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
5431 			    struct ice_adv_fltr_mgmt_list_entry *fm_list)
5432 {
5433 	struct ice_vsi_list_map_info *vsi_list_info;
5434 	enum ice_sw_lkup_type lkup_type;
5435 	enum ice_status status;
5436 	u16 vsi_list_id;
5437 
5438 	if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
5439 	    fm_list->vsi_count == 0)
5440 		return ICE_ERR_PARAM;
5441 
5442 	/* A rule with the VSI being removed does not exist */
5443 	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
5444 		return ICE_ERR_DOES_NOT_EXIST;
5445 
5446 	lkup_type = ICE_SW_LKUP_LAST;
5447 	vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
5448 	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
5449 					  ice_aqc_opc_update_sw_rules,
5450 					  lkup_type);
5451 	if (status)
5452 		return status;
5453 
5454 	fm_list->vsi_count--;
5455 	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
5456 	vsi_list_info = fm_list->vsi_list_info;
5457 	if (fm_list->vsi_count == 1) {
5458 		struct ice_fltr_info tmp_fltr;
5459 		u16 rem_vsi_handle;
5460 
5461 		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
5462 						ICE_MAX_VSI);
5463 		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
5464 			return ICE_ERR_OUT_OF_RANGE;
5465 
5466 		/* Make sure VSI list is empty before removing it below */
5467 		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
5468 						  vsi_list_id, true,
5469 						  ice_aqc_opc_update_sw_rules,
5470 						  lkup_type);
5471 		if (status)
5472 			return status;
5473 
5474 		memset(&tmp_fltr, 0, sizeof(tmp_fltr));
5475 		tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
5476 		tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
5477 		fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
5478 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
5479 		tmp_fltr.fwd_id.hw_vsi_id =
5480 			ice_get_hw_vsi_num(hw, rem_vsi_handle);
5481 		fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
5482 			ice_get_hw_vsi_num(hw, rem_vsi_handle);
5483 		fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
5484 
5485 		/* Update the previous switch rule of "MAC forward to VSI" to
5486 		 * "MAC fwd to VSI list"
5487 		 */
5488 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
5489 		if (status) {
5490 			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
5491 				  tmp_fltr.fwd_id.hw_vsi_id, status);
5492 			return status;
5493 		}
5494 		fm_list->vsi_list_info->ref_cnt--;
5495 
5496 		/* Remove the VSI list since it is no longer used */
5497 		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
5498 		if (status) {
5499 			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
5500 				  vsi_list_id, status);
5501 			return status;
5502 		}
5503 
5504 		list_del(&vsi_list_info->list_entry);
5505 		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
5506 		fm_list->vsi_list_info = NULL;
5507 	}
5508 
5509 	return status;
5510 }
5511 
5512 /**
5513  * ice_rem_adv_rule - removes existing advanced switch rule
5514  * @hw: pointer to the hardware structure
5515  * @lkups: information on the words that needs to be looked up. All words
5516  *         together makes one recipe
5517  * @lkups_cnt: num of entries in the lkups array
5518  * @rinfo: Its the pointer to the rule information for the rule
5519  *
5520  * This function can be used to remove 1 rule at a time. The lkups is
5521  * used to describe all the words that forms the "lookup" portion of the
5522  * rule. These words can span multiple protocols. Callers to this function
5523  * need to pass in a list of protocol headers with lookup information along
5524  * and mask that determines which words are valid from the given protocol
5525  * header. rinfo describes other information related to this rule such as
5526  * forwarding IDs, priority of this rule, etc.
5527  */
5528 static enum ice_status
5529 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5530 		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
5531 {
5532 	struct ice_adv_fltr_mgmt_list_entry *list_elem;
5533 	struct ice_prot_lkup_ext lkup_exts;
5534 	enum ice_status status = 0;
5535 	bool remove_rule = false;
5536 	struct mutex *rule_lock; /* Lock to protect filter rule list */
5537 	u16 i, rid, vsi_handle;
5538 
5539 	memset(&lkup_exts, 0, sizeof(lkup_exts));
5540 	for (i = 0; i < lkups_cnt; i++) {
5541 		u16 count;
5542 
5543 		if (lkups[i].type >= ICE_PROTOCOL_LAST)
5544 			return ICE_ERR_CFG;
5545 
5546 		count = ice_fill_valid_words(&lkups[i], &lkup_exts);
5547 		if (!count)
5548 			return ICE_ERR_CFG;
5549 	}
5550 
5551 	/* Create any special protocol/offset pairs, such as looking at tunnel
5552 	 * bits by extracting metadata
5553 	 */
5554 	status = ice_add_special_words(rinfo, &lkup_exts);
5555 	if (status)
5556 		return status;
5557 
5558 	rid = ice_find_recp(hw, &lkup_exts);
5559 	/* If did not find a recipe that match the existing criteria */
5560 	if (rid == ICE_MAX_NUM_RECIPES)
5561 		return ICE_ERR_PARAM;
5562 
5563 	rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
5564 	list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
5565 	/* the rule is already removed */
5566 	if (!list_elem)
5567 		return 0;
5568 	mutex_lock(rule_lock);
5569 	if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
5570 		remove_rule = true;
5571 	} else if (list_elem->vsi_count > 1) {
5572 		remove_rule = false;
5573 		vsi_handle = rinfo->sw_act.vsi_handle;
5574 		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
5575 	} else {
5576 		vsi_handle = rinfo->sw_act.vsi_handle;
5577 		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
5578 		if (status) {
5579 			mutex_unlock(rule_lock);
5580 			return status;
5581 		}
5582 		if (list_elem->vsi_count == 0)
5583 			remove_rule = true;
5584 	}
5585 	mutex_unlock(rule_lock);
5586 	if (remove_rule) {
5587 		struct ice_aqc_sw_rules_elem *s_rule;
5588 		u16 rule_buf_sz;
5589 
5590 		rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
5591 		s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
5592 		if (!s_rule)
5593 			return ICE_ERR_NO_MEMORY;
5594 		s_rule->pdata.lkup_tx_rx.act = 0;
5595 		s_rule->pdata.lkup_tx_rx.index =
5596 			cpu_to_le16(list_elem->rule_info.fltr_rule_id);
5597 		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
5598 		status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
5599 					 rule_buf_sz, 1,
5600 					 ice_aqc_opc_remove_sw_rules, NULL);
5601 		if (!status || status == ICE_ERR_DOES_NOT_EXIST) {
5602 			struct ice_switch_info *sw = hw->switch_info;
5603 
5604 			mutex_lock(rule_lock);
5605 			list_del(&list_elem->list_entry);
5606 			devm_kfree(ice_hw_to_dev(hw), list_elem->lkups);
5607 			devm_kfree(ice_hw_to_dev(hw), list_elem);
5608 			mutex_unlock(rule_lock);
5609 			if (list_empty(&sw->recp_list[rid].filt_rules))
5610 				sw->recp_list[rid].adv_rule = false;
5611 		}
5612 		kfree(s_rule);
5613 	}
5614 	return status;
5615 }
5616 
5617 /**
5618  * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
5619  * @hw: pointer to the hardware structure
5620  * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
5621  *
5622  * This function is used to remove 1 rule at a time. The removal is based on
5623  * the remove_entry parameter. This function will remove rule for a given
5624  * vsi_handle with a given rule_id which is passed as parameter in remove_entry
5625  */
5626 enum ice_status
5627 ice_rem_adv_rule_by_id(struct ice_hw *hw,
5628 		       struct ice_rule_query_data *remove_entry)
5629 {
5630 	struct ice_adv_fltr_mgmt_list_entry *list_itr;
5631 	struct list_head *list_head;
5632 	struct ice_adv_rule_info rinfo;
5633 	struct ice_switch_info *sw;
5634 
5635 	sw = hw->switch_info;
5636 	if (!sw->recp_list[remove_entry->rid].recp_created)
5637 		return ICE_ERR_PARAM;
5638 	list_head = &sw->recp_list[remove_entry->rid].filt_rules;
5639 	list_for_each_entry(list_itr, list_head, list_entry) {
5640 		if (list_itr->rule_info.fltr_rule_id ==
5641 		    remove_entry->rule_id) {
5642 			rinfo = list_itr->rule_info;
5643 			rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
5644 			return ice_rem_adv_rule(hw, list_itr->lkups,
5645 						list_itr->lkups_cnt, &rinfo);
5646 		}
5647 	}
5648 	/* either list is empty or unable to find rule */
5649 	return ICE_ERR_DOES_NOT_EXIST;
5650 }
5651 
5652 /**
5653  * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
5654  * @hw: pointer to the hardware structure
5655  * @vsi_handle: driver VSI handle
5656  *
5657  * Replays filters for requested VSI via vsi_handle.
5658  */
5659 enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
5660 {
5661 	struct ice_switch_info *sw = hw->switch_info;
5662 	enum ice_status status = 0;
5663 	u8 i;
5664 
5665 	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
5666 		struct list_head *head;
5667 
5668 		head = &sw->recp_list[i].filt_replay_rules;
5669 		status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
5670 		if (status)
5671 			return status;
5672 	}
5673 	return status;
5674 }
5675 
5676 /**
5677  * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
5678  * @hw: pointer to the HW struct
5679  *
5680  * Deletes the filter replay rules.
5681  */
5682 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
5683 {
5684 	struct ice_switch_info *sw = hw->switch_info;
5685 	u8 i;
5686 
5687 	if (!sw)
5688 		return;
5689 
5690 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5691 		if (!list_empty(&sw->recp_list[i].filt_replay_rules)) {
5692 			struct list_head *l_head;
5693 
5694 			l_head = &sw->recp_list[i].filt_replay_rules;
5695 			if (!sw->recp_list[i].adv_rule)
5696 				ice_rem_sw_rule_info(hw, l_head);
5697 			else
5698 				ice_rem_adv_rule_info(hw, l_head);
5699 		}
5700 	}
5701 }
5702