1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3
4 #include "ice_lib.h"
5 #include "ice_switch.h"
6
7 #define ICE_ETH_DA_OFFSET 0
8 #define ICE_ETH_ETHTYPE_OFFSET 12
9 #define ICE_ETH_VLAN_TCI_OFFSET 14
10 #define ICE_MAX_VLAN_ID 0xFFF
11 #define ICE_IPV6_ETHER_ID 0x86DD
12
13 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
14 * struct to configure any switch filter rules.
15 * {DA (6 bytes), SA(6 bytes),
16 * Ether type (2 bytes for header without VLAN tag) OR
17 * VLAN tag (4 bytes for header with VLAN tag) }
18 *
19 * Word on Hardcoded values
20 * byte 0 = 0x2: to identify it as locally administered DA MAC
21 * byte 6 = 0x2: to identify it as locally administered SA MAC
22 * byte 12 = 0x81 & byte 13 = 0x00:
23 * In case of VLAN filter first two bytes defines ether type (0x8100)
24 * and remaining two bytes are placeholder for programming a given VLAN ID
25 * In case of Ether type filter it is treated as header without VLAN tag
26 * and byte 12 and 13 is used to program a given Ether type instead
27 */
28 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
29 0x2, 0, 0, 0, 0, 0,
30 0x81, 0, 0, 0};
31
32 enum {
33 ICE_PKT_OUTER_IPV6 = BIT(0),
34 ICE_PKT_TUN_GTPC = BIT(1),
35 ICE_PKT_TUN_GTPU = BIT(2),
36 ICE_PKT_TUN_NVGRE = BIT(3),
37 ICE_PKT_TUN_UDP = BIT(4),
38 ICE_PKT_INNER_IPV6 = BIT(5),
39 ICE_PKT_INNER_TCP = BIT(6),
40 ICE_PKT_INNER_UDP = BIT(7),
41 ICE_PKT_GTP_NOPAY = BIT(8),
42 ICE_PKT_KMALLOC = BIT(9),
43 ICE_PKT_PPPOE = BIT(10),
44 ICE_PKT_L2TPV3 = BIT(11),
45 };
46
47 struct ice_dummy_pkt_offsets {
48 enum ice_protocol_type type;
49 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
50 };
51
52 struct ice_dummy_pkt_profile {
53 const struct ice_dummy_pkt_offsets *offsets;
54 const u8 *pkt;
55 u32 match;
56 u16 pkt_len;
57 u16 offsets_len;
58 };
59
60 #define ICE_DECLARE_PKT_OFFSETS(type) \
61 static const struct ice_dummy_pkt_offsets \
62 ice_dummy_##type##_packet_offsets[]
63
64 #define ICE_DECLARE_PKT_TEMPLATE(type) \
65 static const u8 ice_dummy_##type##_packet[]
66
67 #define ICE_PKT_PROFILE(type, m) { \
68 .match = (m), \
69 .pkt = ice_dummy_##type##_packet, \
70 .pkt_len = sizeof(ice_dummy_##type##_packet), \
71 .offsets = ice_dummy_##type##_packet_offsets, \
72 .offsets_len = sizeof(ice_dummy_##type##_packet_offsets), \
73 }
74
75 ICE_DECLARE_PKT_OFFSETS(vlan) = {
76 { ICE_VLAN_OFOS, 12 },
77 };
78
79 ICE_DECLARE_PKT_TEMPLATE(vlan) = {
80 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
81 };
82
83 ICE_DECLARE_PKT_OFFSETS(qinq) = {
84 { ICE_VLAN_EX, 12 },
85 { ICE_VLAN_IN, 16 },
86 };
87
88 ICE_DECLARE_PKT_TEMPLATE(qinq) = {
89 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
90 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
91 };
92
93 ICE_DECLARE_PKT_OFFSETS(gre_tcp) = {
94 { ICE_MAC_OFOS, 0 },
95 { ICE_ETYPE_OL, 12 },
96 { ICE_IPV4_OFOS, 14 },
97 { ICE_NVGRE, 34 },
98 { ICE_MAC_IL, 42 },
99 { ICE_ETYPE_IL, 54 },
100 { ICE_IPV4_IL, 56 },
101 { ICE_TCP_IL, 76 },
102 { ICE_PROTOCOL_LAST, 0 },
103 };
104
105 ICE_DECLARE_PKT_TEMPLATE(gre_tcp) = {
106 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
107 0x00, 0x00, 0x00, 0x00,
108 0x00, 0x00, 0x00, 0x00,
109
110 0x08, 0x00, /* ICE_ETYPE_OL 12 */
111
112 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
113 0x00, 0x00, 0x00, 0x00,
114 0x00, 0x2F, 0x00, 0x00,
115 0x00, 0x00, 0x00, 0x00,
116 0x00, 0x00, 0x00, 0x00,
117
118 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
119 0x00, 0x00, 0x00, 0x00,
120
121 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
122 0x00, 0x00, 0x00, 0x00,
123 0x00, 0x00, 0x00, 0x00,
124
125 0x08, 0x00, /* ICE_ETYPE_IL 54 */
126
127 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
128 0x00, 0x00, 0x00, 0x00,
129 0x00, 0x06, 0x00, 0x00,
130 0x00, 0x00, 0x00, 0x00,
131 0x00, 0x00, 0x00, 0x00,
132
133 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
134 0x00, 0x00, 0x00, 0x00,
135 0x00, 0x00, 0x00, 0x00,
136 0x50, 0x02, 0x20, 0x00,
137 0x00, 0x00, 0x00, 0x00
138 };
139
140 ICE_DECLARE_PKT_OFFSETS(gre_udp) = {
141 { ICE_MAC_OFOS, 0 },
142 { ICE_ETYPE_OL, 12 },
143 { ICE_IPV4_OFOS, 14 },
144 { ICE_NVGRE, 34 },
145 { ICE_MAC_IL, 42 },
146 { ICE_ETYPE_IL, 54 },
147 { ICE_IPV4_IL, 56 },
148 { ICE_UDP_ILOS, 76 },
149 { ICE_PROTOCOL_LAST, 0 },
150 };
151
152 ICE_DECLARE_PKT_TEMPLATE(gre_udp) = {
153 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
154 0x00, 0x00, 0x00, 0x00,
155 0x00, 0x00, 0x00, 0x00,
156
157 0x08, 0x00, /* ICE_ETYPE_OL 12 */
158
159 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
160 0x00, 0x00, 0x00, 0x00,
161 0x00, 0x2F, 0x00, 0x00,
162 0x00, 0x00, 0x00, 0x00,
163 0x00, 0x00, 0x00, 0x00,
164
165 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
166 0x00, 0x00, 0x00, 0x00,
167
168 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
169 0x00, 0x00, 0x00, 0x00,
170 0x00, 0x00, 0x00, 0x00,
171
172 0x08, 0x00, /* ICE_ETYPE_IL 54 */
173
174 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
175 0x00, 0x00, 0x00, 0x00,
176 0x00, 0x11, 0x00, 0x00,
177 0x00, 0x00, 0x00, 0x00,
178 0x00, 0x00, 0x00, 0x00,
179
180 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
181 0x00, 0x08, 0x00, 0x00,
182 };
183
184 ICE_DECLARE_PKT_OFFSETS(udp_tun_tcp) = {
185 { ICE_MAC_OFOS, 0 },
186 { ICE_ETYPE_OL, 12 },
187 { ICE_IPV4_OFOS, 14 },
188 { ICE_UDP_OF, 34 },
189 { ICE_VXLAN, 42 },
190 { ICE_GENEVE, 42 },
191 { ICE_VXLAN_GPE, 42 },
192 { ICE_MAC_IL, 50 },
193 { ICE_ETYPE_IL, 62 },
194 { ICE_IPV4_IL, 64 },
195 { ICE_TCP_IL, 84 },
196 { ICE_PROTOCOL_LAST, 0 },
197 };
198
199 ICE_DECLARE_PKT_TEMPLATE(udp_tun_tcp) = {
200 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
201 0x00, 0x00, 0x00, 0x00,
202 0x00, 0x00, 0x00, 0x00,
203
204 0x08, 0x00, /* ICE_ETYPE_OL 12 */
205
206 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
207 0x00, 0x01, 0x00, 0x00,
208 0x40, 0x11, 0x00, 0x00,
209 0x00, 0x00, 0x00, 0x00,
210 0x00, 0x00, 0x00, 0x00,
211
212 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
213 0x00, 0x46, 0x00, 0x00,
214
215 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
216 0x00, 0x00, 0x00, 0x00,
217
218 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
219 0x00, 0x00, 0x00, 0x00,
220 0x00, 0x00, 0x00, 0x00,
221
222 0x08, 0x00, /* ICE_ETYPE_IL 62 */
223
224 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
225 0x00, 0x01, 0x00, 0x00,
226 0x40, 0x06, 0x00, 0x00,
227 0x00, 0x00, 0x00, 0x00,
228 0x00, 0x00, 0x00, 0x00,
229
230 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
231 0x00, 0x00, 0x00, 0x00,
232 0x00, 0x00, 0x00, 0x00,
233 0x50, 0x02, 0x20, 0x00,
234 0x00, 0x00, 0x00, 0x00
235 };
236
237 ICE_DECLARE_PKT_OFFSETS(udp_tun_udp) = {
238 { ICE_MAC_OFOS, 0 },
239 { ICE_ETYPE_OL, 12 },
240 { ICE_IPV4_OFOS, 14 },
241 { ICE_UDP_OF, 34 },
242 { ICE_VXLAN, 42 },
243 { ICE_GENEVE, 42 },
244 { ICE_VXLAN_GPE, 42 },
245 { ICE_MAC_IL, 50 },
246 { ICE_ETYPE_IL, 62 },
247 { ICE_IPV4_IL, 64 },
248 { ICE_UDP_ILOS, 84 },
249 { ICE_PROTOCOL_LAST, 0 },
250 };
251
252 ICE_DECLARE_PKT_TEMPLATE(udp_tun_udp) = {
253 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
254 0x00, 0x00, 0x00, 0x00,
255 0x00, 0x00, 0x00, 0x00,
256
257 0x08, 0x00, /* ICE_ETYPE_OL 12 */
258
259 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
260 0x00, 0x01, 0x00, 0x00,
261 0x00, 0x11, 0x00, 0x00,
262 0x00, 0x00, 0x00, 0x00,
263 0x00, 0x00, 0x00, 0x00,
264
265 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
266 0x00, 0x3a, 0x00, 0x00,
267
268 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
269 0x00, 0x00, 0x00, 0x00,
270
271 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
272 0x00, 0x00, 0x00, 0x00,
273 0x00, 0x00, 0x00, 0x00,
274
275 0x08, 0x00, /* ICE_ETYPE_IL 62 */
276
277 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
278 0x00, 0x01, 0x00, 0x00,
279 0x00, 0x11, 0x00, 0x00,
280 0x00, 0x00, 0x00, 0x00,
281 0x00, 0x00, 0x00, 0x00,
282
283 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
284 0x00, 0x08, 0x00, 0x00,
285 };
286
287 ICE_DECLARE_PKT_OFFSETS(gre_ipv6_tcp) = {
288 { ICE_MAC_OFOS, 0 },
289 { ICE_ETYPE_OL, 12 },
290 { ICE_IPV4_OFOS, 14 },
291 { ICE_NVGRE, 34 },
292 { ICE_MAC_IL, 42 },
293 { ICE_ETYPE_IL, 54 },
294 { ICE_IPV6_IL, 56 },
295 { ICE_TCP_IL, 96 },
296 { ICE_PROTOCOL_LAST, 0 },
297 };
298
299 ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_tcp) = {
300 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
301 0x00, 0x00, 0x00, 0x00,
302 0x00, 0x00, 0x00, 0x00,
303
304 0x08, 0x00, /* ICE_ETYPE_OL 12 */
305
306 0x45, 0x00, 0x00, 0x66, /* ICE_IPV4_OFOS 14 */
307 0x00, 0x00, 0x00, 0x00,
308 0x00, 0x2F, 0x00, 0x00,
309 0x00, 0x00, 0x00, 0x00,
310 0x00, 0x00, 0x00, 0x00,
311
312 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
313 0x00, 0x00, 0x00, 0x00,
314
315 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
316 0x00, 0x00, 0x00, 0x00,
317 0x00, 0x00, 0x00, 0x00,
318
319 0x86, 0xdd, /* ICE_ETYPE_IL 54 */
320
321 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */
322 0x00, 0x08, 0x06, 0x40,
323 0x00, 0x00, 0x00, 0x00,
324 0x00, 0x00, 0x00, 0x00,
325 0x00, 0x00, 0x00, 0x00,
326 0x00, 0x00, 0x00, 0x00,
327 0x00, 0x00, 0x00, 0x00,
328 0x00, 0x00, 0x00, 0x00,
329 0x00, 0x00, 0x00, 0x00,
330 0x00, 0x00, 0x00, 0x00,
331
332 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 96 */
333 0x00, 0x00, 0x00, 0x00,
334 0x00, 0x00, 0x00, 0x00,
335 0x50, 0x02, 0x20, 0x00,
336 0x00, 0x00, 0x00, 0x00
337 };
338
339 ICE_DECLARE_PKT_OFFSETS(gre_ipv6_udp) = {
340 { ICE_MAC_OFOS, 0 },
341 { ICE_ETYPE_OL, 12 },
342 { ICE_IPV4_OFOS, 14 },
343 { ICE_NVGRE, 34 },
344 { ICE_MAC_IL, 42 },
345 { ICE_ETYPE_IL, 54 },
346 { ICE_IPV6_IL, 56 },
347 { ICE_UDP_ILOS, 96 },
348 { ICE_PROTOCOL_LAST, 0 },
349 };
350
351 ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_udp) = {
352 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
353 0x00, 0x00, 0x00, 0x00,
354 0x00, 0x00, 0x00, 0x00,
355
356 0x08, 0x00, /* ICE_ETYPE_OL 12 */
357
358 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
359 0x00, 0x00, 0x00, 0x00,
360 0x00, 0x2F, 0x00, 0x00,
361 0x00, 0x00, 0x00, 0x00,
362 0x00, 0x00, 0x00, 0x00,
363
364 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
365 0x00, 0x00, 0x00, 0x00,
366
367 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
368 0x00, 0x00, 0x00, 0x00,
369 0x00, 0x00, 0x00, 0x00,
370
371 0x86, 0xdd, /* ICE_ETYPE_IL 54 */
372
373 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */
374 0x00, 0x08, 0x11, 0x40,
375 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00,
378 0x00, 0x00, 0x00, 0x00,
379 0x00, 0x00, 0x00, 0x00,
380 0x00, 0x00, 0x00, 0x00,
381 0x00, 0x00, 0x00, 0x00,
382 0x00, 0x00, 0x00, 0x00,
383
384 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 96 */
385 0x00, 0x08, 0x00, 0x00,
386 };
387
388 ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_tcp) = {
389 { ICE_MAC_OFOS, 0 },
390 { ICE_ETYPE_OL, 12 },
391 { ICE_IPV4_OFOS, 14 },
392 { ICE_UDP_OF, 34 },
393 { ICE_VXLAN, 42 },
394 { ICE_GENEVE, 42 },
395 { ICE_VXLAN_GPE, 42 },
396 { ICE_MAC_IL, 50 },
397 { ICE_ETYPE_IL, 62 },
398 { ICE_IPV6_IL, 64 },
399 { ICE_TCP_IL, 104 },
400 { ICE_PROTOCOL_LAST, 0 },
401 };
402
403 ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_tcp) = {
404 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
405 0x00, 0x00, 0x00, 0x00,
406 0x00, 0x00, 0x00, 0x00,
407
408 0x08, 0x00, /* ICE_ETYPE_OL 12 */
409
410 0x45, 0x00, 0x00, 0x6e, /* ICE_IPV4_OFOS 14 */
411 0x00, 0x01, 0x00, 0x00,
412 0x40, 0x11, 0x00, 0x00,
413 0x00, 0x00, 0x00, 0x00,
414 0x00, 0x00, 0x00, 0x00,
415
416 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
417 0x00, 0x5a, 0x00, 0x00,
418
419 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
420 0x00, 0x00, 0x00, 0x00,
421
422 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
423 0x00, 0x00, 0x00, 0x00,
424 0x00, 0x00, 0x00, 0x00,
425
426 0x86, 0xdd, /* ICE_ETYPE_IL 62 */
427
428 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */
429 0x00, 0x08, 0x06, 0x40,
430 0x00, 0x00, 0x00, 0x00,
431 0x00, 0x00, 0x00, 0x00,
432 0x00, 0x00, 0x00, 0x00,
433 0x00, 0x00, 0x00, 0x00,
434 0x00, 0x00, 0x00, 0x00,
435 0x00, 0x00, 0x00, 0x00,
436 0x00, 0x00, 0x00, 0x00,
437 0x00, 0x00, 0x00, 0x00,
438
439 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 104 */
440 0x00, 0x00, 0x00, 0x00,
441 0x00, 0x00, 0x00, 0x00,
442 0x50, 0x02, 0x20, 0x00,
443 0x00, 0x00, 0x00, 0x00
444 };
445
446 ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_udp) = {
447 { ICE_MAC_OFOS, 0 },
448 { ICE_ETYPE_OL, 12 },
449 { ICE_IPV4_OFOS, 14 },
450 { ICE_UDP_OF, 34 },
451 { ICE_VXLAN, 42 },
452 { ICE_GENEVE, 42 },
453 { ICE_VXLAN_GPE, 42 },
454 { ICE_MAC_IL, 50 },
455 { ICE_ETYPE_IL, 62 },
456 { ICE_IPV6_IL, 64 },
457 { ICE_UDP_ILOS, 104 },
458 { ICE_PROTOCOL_LAST, 0 },
459 };
460
461 ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_udp) = {
462 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
463 0x00, 0x00, 0x00, 0x00,
464 0x00, 0x00, 0x00, 0x00,
465
466 0x08, 0x00, /* ICE_ETYPE_OL 12 */
467
468 0x45, 0x00, 0x00, 0x62, /* ICE_IPV4_OFOS 14 */
469 0x00, 0x01, 0x00, 0x00,
470 0x00, 0x11, 0x00, 0x00,
471 0x00, 0x00, 0x00, 0x00,
472 0x00, 0x00, 0x00, 0x00,
473
474 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
475 0x00, 0x4e, 0x00, 0x00,
476
477 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
478 0x00, 0x00, 0x00, 0x00,
479
480 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
481 0x00, 0x00, 0x00, 0x00,
482 0x00, 0x00, 0x00, 0x00,
483
484 0x86, 0xdd, /* ICE_ETYPE_IL 62 */
485
486 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */
487 0x00, 0x08, 0x11, 0x40,
488 0x00, 0x00, 0x00, 0x00,
489 0x00, 0x00, 0x00, 0x00,
490 0x00, 0x00, 0x00, 0x00,
491 0x00, 0x00, 0x00, 0x00,
492 0x00, 0x00, 0x00, 0x00,
493 0x00, 0x00, 0x00, 0x00,
494 0x00, 0x00, 0x00, 0x00,
495 0x00, 0x00, 0x00, 0x00,
496
497 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 104 */
498 0x00, 0x08, 0x00, 0x00,
499 };
500
501 /* offset info for MAC + IPv4 + UDP dummy packet */
502 ICE_DECLARE_PKT_OFFSETS(udp) = {
503 { ICE_MAC_OFOS, 0 },
504 { ICE_ETYPE_OL, 12 },
505 { ICE_IPV4_OFOS, 14 },
506 { ICE_UDP_ILOS, 34 },
507 { ICE_PROTOCOL_LAST, 0 },
508 };
509
510 /* Dummy packet for MAC + IPv4 + UDP */
511 ICE_DECLARE_PKT_TEMPLATE(udp) = {
512 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
513 0x00, 0x00, 0x00, 0x00,
514 0x00, 0x00, 0x00, 0x00,
515
516 0x08, 0x00, /* ICE_ETYPE_OL 12 */
517
518 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
519 0x00, 0x01, 0x00, 0x00,
520 0x00, 0x11, 0x00, 0x00,
521 0x00, 0x00, 0x00, 0x00,
522 0x00, 0x00, 0x00, 0x00,
523
524 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
525 0x00, 0x08, 0x00, 0x00,
526
527 0x00, 0x00, /* 2 bytes for 4 byte alignment */
528 };
529
530 /* offset info for MAC + IPv4 + TCP dummy packet */
531 ICE_DECLARE_PKT_OFFSETS(tcp) = {
532 { ICE_MAC_OFOS, 0 },
533 { ICE_ETYPE_OL, 12 },
534 { ICE_IPV4_OFOS, 14 },
535 { ICE_TCP_IL, 34 },
536 { ICE_PROTOCOL_LAST, 0 },
537 };
538
539 /* Dummy packet for MAC + IPv4 + TCP */
540 ICE_DECLARE_PKT_TEMPLATE(tcp) = {
541 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
542 0x00, 0x00, 0x00, 0x00,
543 0x00, 0x00, 0x00, 0x00,
544
545 0x08, 0x00, /* ICE_ETYPE_OL 12 */
546
547 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
548 0x00, 0x01, 0x00, 0x00,
549 0x00, 0x06, 0x00, 0x00,
550 0x00, 0x00, 0x00, 0x00,
551 0x00, 0x00, 0x00, 0x00,
552
553 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
554 0x00, 0x00, 0x00, 0x00,
555 0x00, 0x00, 0x00, 0x00,
556 0x50, 0x00, 0x00, 0x00,
557 0x00, 0x00, 0x00, 0x00,
558
559 0x00, 0x00, /* 2 bytes for 4 byte alignment */
560 };
561
562 ICE_DECLARE_PKT_OFFSETS(tcp_ipv6) = {
563 { ICE_MAC_OFOS, 0 },
564 { ICE_ETYPE_OL, 12 },
565 { ICE_IPV6_OFOS, 14 },
566 { ICE_TCP_IL, 54 },
567 { ICE_PROTOCOL_LAST, 0 },
568 };
569
570 ICE_DECLARE_PKT_TEMPLATE(tcp_ipv6) = {
571 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
572 0x00, 0x00, 0x00, 0x00,
573 0x00, 0x00, 0x00, 0x00,
574
575 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
576
577 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
578 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
579 0x00, 0x00, 0x00, 0x00,
580 0x00, 0x00, 0x00, 0x00,
581 0x00, 0x00, 0x00, 0x00,
582 0x00, 0x00, 0x00, 0x00,
583 0x00, 0x00, 0x00, 0x00,
584 0x00, 0x00, 0x00, 0x00,
585 0x00, 0x00, 0x00, 0x00,
586 0x00, 0x00, 0x00, 0x00,
587
588 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
589 0x00, 0x00, 0x00, 0x00,
590 0x00, 0x00, 0x00, 0x00,
591 0x50, 0x00, 0x00, 0x00,
592 0x00, 0x00, 0x00, 0x00,
593
594 0x00, 0x00, /* 2 bytes for 4 byte alignment */
595 };
596
597 /* IPv6 + UDP */
598 ICE_DECLARE_PKT_OFFSETS(udp_ipv6) = {
599 { ICE_MAC_OFOS, 0 },
600 { ICE_ETYPE_OL, 12 },
601 { ICE_IPV6_OFOS, 14 },
602 { ICE_UDP_ILOS, 54 },
603 { ICE_PROTOCOL_LAST, 0 },
604 };
605
606 /* IPv6 + UDP dummy packet */
607 ICE_DECLARE_PKT_TEMPLATE(udp_ipv6) = {
608 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
609 0x00, 0x00, 0x00, 0x00,
610 0x00, 0x00, 0x00, 0x00,
611
612 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
613
614 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
615 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
616 0x00, 0x00, 0x00, 0x00,
617 0x00, 0x00, 0x00, 0x00,
618 0x00, 0x00, 0x00, 0x00,
619 0x00, 0x00, 0x00, 0x00,
620 0x00, 0x00, 0x00, 0x00,
621 0x00, 0x00, 0x00, 0x00,
622 0x00, 0x00, 0x00, 0x00,
623 0x00, 0x00, 0x00, 0x00,
624
625 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
626 0x00, 0x10, 0x00, 0x00,
627
628 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
629 0x00, 0x00, 0x00, 0x00,
630
631 0x00, 0x00, /* 2 bytes for 4 byte alignment */
632 };
633
634 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
635 ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_tcp) = {
636 { ICE_MAC_OFOS, 0 },
637 { ICE_IPV4_OFOS, 14 },
638 { ICE_UDP_OF, 34 },
639 { ICE_GTP, 42 },
640 { ICE_IPV4_IL, 62 },
641 { ICE_TCP_IL, 82 },
642 { ICE_PROTOCOL_LAST, 0 },
643 };
644
645 ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_tcp) = {
646 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
647 0x00, 0x00, 0x00, 0x00,
648 0x00, 0x00, 0x00, 0x00,
649 0x08, 0x00,
650
651 0x45, 0x00, 0x00, 0x58, /* IP 14 */
652 0x00, 0x00, 0x00, 0x00,
653 0x00, 0x11, 0x00, 0x00,
654 0x00, 0x00, 0x00, 0x00,
655 0x00, 0x00, 0x00, 0x00,
656
657 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
658 0x00, 0x44, 0x00, 0x00,
659
660 0x34, 0xff, 0x00, 0x34, /* ICE_GTP Header 42 */
661 0x00, 0x00, 0x00, 0x00,
662 0x00, 0x00, 0x00, 0x85,
663
664 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
665 0x00, 0x00, 0x00, 0x00,
666
667 0x45, 0x00, 0x00, 0x28, /* IP 62 */
668 0x00, 0x00, 0x00, 0x00,
669 0x00, 0x06, 0x00, 0x00,
670 0x00, 0x00, 0x00, 0x00,
671 0x00, 0x00, 0x00, 0x00,
672
673 0x00, 0x00, 0x00, 0x00, /* TCP 82 */
674 0x00, 0x00, 0x00, 0x00,
675 0x00, 0x00, 0x00, 0x00,
676 0x50, 0x00, 0x00, 0x00,
677 0x00, 0x00, 0x00, 0x00,
678
679 0x00, 0x00, /* 2 bytes for 4 byte alignment */
680 };
681
682 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
683 ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_udp) = {
684 { ICE_MAC_OFOS, 0 },
685 { ICE_IPV4_OFOS, 14 },
686 { ICE_UDP_OF, 34 },
687 { ICE_GTP, 42 },
688 { ICE_IPV4_IL, 62 },
689 { ICE_UDP_ILOS, 82 },
690 { ICE_PROTOCOL_LAST, 0 },
691 };
692
693 ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_udp) = {
694 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
695 0x00, 0x00, 0x00, 0x00,
696 0x00, 0x00, 0x00, 0x00,
697 0x08, 0x00,
698
699 0x45, 0x00, 0x00, 0x4c, /* IP 14 */
700 0x00, 0x00, 0x00, 0x00,
701 0x00, 0x11, 0x00, 0x00,
702 0x00, 0x00, 0x00, 0x00,
703 0x00, 0x00, 0x00, 0x00,
704
705 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
706 0x00, 0x38, 0x00, 0x00,
707
708 0x34, 0xff, 0x00, 0x28, /* ICE_GTP Header 42 */
709 0x00, 0x00, 0x00, 0x00,
710 0x00, 0x00, 0x00, 0x85,
711
712 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
713 0x00, 0x00, 0x00, 0x00,
714
715 0x45, 0x00, 0x00, 0x1c, /* IP 62 */
716 0x00, 0x00, 0x00, 0x00,
717 0x00, 0x11, 0x00, 0x00,
718 0x00, 0x00, 0x00, 0x00,
719 0x00, 0x00, 0x00, 0x00,
720
721 0x00, 0x00, 0x00, 0x00, /* UDP 82 */
722 0x00, 0x08, 0x00, 0x00,
723
724 0x00, 0x00, /* 2 bytes for 4 byte alignment */
725 };
726
727 /* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
728 ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_tcp) = {
729 { ICE_MAC_OFOS, 0 },
730 { ICE_IPV4_OFOS, 14 },
731 { ICE_UDP_OF, 34 },
732 { ICE_GTP, 42 },
733 { ICE_IPV6_IL, 62 },
734 { ICE_TCP_IL, 102 },
735 { ICE_PROTOCOL_LAST, 0 },
736 };
737
738 ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_tcp) = {
739 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
740 0x00, 0x00, 0x00, 0x00,
741 0x00, 0x00, 0x00, 0x00,
742 0x08, 0x00,
743
744 0x45, 0x00, 0x00, 0x6c, /* IP 14 */
745 0x00, 0x00, 0x00, 0x00,
746 0x00, 0x11, 0x00, 0x00,
747 0x00, 0x00, 0x00, 0x00,
748 0x00, 0x00, 0x00, 0x00,
749
750 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
751 0x00, 0x58, 0x00, 0x00,
752
753 0x34, 0xff, 0x00, 0x48, /* ICE_GTP Header 42 */
754 0x00, 0x00, 0x00, 0x00,
755 0x00, 0x00, 0x00, 0x85,
756
757 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
758 0x00, 0x00, 0x00, 0x00,
759
760 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
761 0x00, 0x14, 0x06, 0x00,
762 0x00, 0x00, 0x00, 0x00,
763 0x00, 0x00, 0x00, 0x00,
764 0x00, 0x00, 0x00, 0x00,
765 0x00, 0x00, 0x00, 0x00,
766 0x00, 0x00, 0x00, 0x00,
767 0x00, 0x00, 0x00, 0x00,
768 0x00, 0x00, 0x00, 0x00,
769 0x00, 0x00, 0x00, 0x00,
770
771 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
772 0x00, 0x00, 0x00, 0x00,
773 0x00, 0x00, 0x00, 0x00,
774 0x50, 0x00, 0x00, 0x00,
775 0x00, 0x00, 0x00, 0x00,
776
777 0x00, 0x00, /* 2 bytes for 4 byte alignment */
778 };
779
780 ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_udp) = {
781 { ICE_MAC_OFOS, 0 },
782 { ICE_IPV4_OFOS, 14 },
783 { ICE_UDP_OF, 34 },
784 { ICE_GTP, 42 },
785 { ICE_IPV6_IL, 62 },
786 { ICE_UDP_ILOS, 102 },
787 { ICE_PROTOCOL_LAST, 0 },
788 };
789
790 ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_udp) = {
791 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
792 0x00, 0x00, 0x00, 0x00,
793 0x00, 0x00, 0x00, 0x00,
794 0x08, 0x00,
795
796 0x45, 0x00, 0x00, 0x60, /* IP 14 */
797 0x00, 0x00, 0x00, 0x00,
798 0x00, 0x11, 0x00, 0x00,
799 0x00, 0x00, 0x00, 0x00,
800 0x00, 0x00, 0x00, 0x00,
801
802 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
803 0x00, 0x4c, 0x00, 0x00,
804
805 0x34, 0xff, 0x00, 0x3c, /* ICE_GTP Header 42 */
806 0x00, 0x00, 0x00, 0x00,
807 0x00, 0x00, 0x00, 0x85,
808
809 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
810 0x00, 0x00, 0x00, 0x00,
811
812 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
813 0x00, 0x08, 0x11, 0x00,
814 0x00, 0x00, 0x00, 0x00,
815 0x00, 0x00, 0x00, 0x00,
816 0x00, 0x00, 0x00, 0x00,
817 0x00, 0x00, 0x00, 0x00,
818 0x00, 0x00, 0x00, 0x00,
819 0x00, 0x00, 0x00, 0x00,
820 0x00, 0x00, 0x00, 0x00,
821 0x00, 0x00, 0x00, 0x00,
822
823 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
824 0x00, 0x08, 0x00, 0x00,
825
826 0x00, 0x00, /* 2 bytes for 4 byte alignment */
827 };
828
829 ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_tcp) = {
830 { ICE_MAC_OFOS, 0 },
831 { ICE_IPV6_OFOS, 14 },
832 { ICE_UDP_OF, 54 },
833 { ICE_GTP, 62 },
834 { ICE_IPV4_IL, 82 },
835 { ICE_TCP_IL, 102 },
836 { ICE_PROTOCOL_LAST, 0 },
837 };
838
839 ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_tcp) = {
840 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
841 0x00, 0x00, 0x00, 0x00,
842 0x00, 0x00, 0x00, 0x00,
843 0x86, 0xdd,
844
845 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
846 0x00, 0x44, 0x11, 0x00,
847 0x00, 0x00, 0x00, 0x00,
848 0x00, 0x00, 0x00, 0x00,
849 0x00, 0x00, 0x00, 0x00,
850 0x00, 0x00, 0x00, 0x00,
851 0x00, 0x00, 0x00, 0x00,
852 0x00, 0x00, 0x00, 0x00,
853 0x00, 0x00, 0x00, 0x00,
854 0x00, 0x00, 0x00, 0x00,
855
856 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
857 0x00, 0x44, 0x00, 0x00,
858
859 0x34, 0xff, 0x00, 0x34, /* ICE_GTP Header 62 */
860 0x00, 0x00, 0x00, 0x00,
861 0x00, 0x00, 0x00, 0x85,
862
863 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
864 0x00, 0x00, 0x00, 0x00,
865
866 0x45, 0x00, 0x00, 0x28, /* IP 82 */
867 0x00, 0x00, 0x00, 0x00,
868 0x00, 0x06, 0x00, 0x00,
869 0x00, 0x00, 0x00, 0x00,
870 0x00, 0x00, 0x00, 0x00,
871
872 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
873 0x00, 0x00, 0x00, 0x00,
874 0x00, 0x00, 0x00, 0x00,
875 0x50, 0x00, 0x00, 0x00,
876 0x00, 0x00, 0x00, 0x00,
877
878 0x00, 0x00, /* 2 bytes for 4 byte alignment */
879 };
880
881 ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_udp) = {
882 { ICE_MAC_OFOS, 0 },
883 { ICE_IPV6_OFOS, 14 },
884 { ICE_UDP_OF, 54 },
885 { ICE_GTP, 62 },
886 { ICE_IPV4_IL, 82 },
887 { ICE_UDP_ILOS, 102 },
888 { ICE_PROTOCOL_LAST, 0 },
889 };
890
891 ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_udp) = {
892 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
893 0x00, 0x00, 0x00, 0x00,
894 0x00, 0x00, 0x00, 0x00,
895 0x86, 0xdd,
896
897 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
898 0x00, 0x38, 0x11, 0x00,
899 0x00, 0x00, 0x00, 0x00,
900 0x00, 0x00, 0x00, 0x00,
901 0x00, 0x00, 0x00, 0x00,
902 0x00, 0x00, 0x00, 0x00,
903 0x00, 0x00, 0x00, 0x00,
904 0x00, 0x00, 0x00, 0x00,
905 0x00, 0x00, 0x00, 0x00,
906 0x00, 0x00, 0x00, 0x00,
907
908 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
909 0x00, 0x38, 0x00, 0x00,
910
911 0x34, 0xff, 0x00, 0x28, /* ICE_GTP Header 62 */
912 0x00, 0x00, 0x00, 0x00,
913 0x00, 0x00, 0x00, 0x85,
914
915 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
916 0x00, 0x00, 0x00, 0x00,
917
918 0x45, 0x00, 0x00, 0x1c, /* IP 82 */
919 0x00, 0x00, 0x00, 0x00,
920 0x00, 0x11, 0x00, 0x00,
921 0x00, 0x00, 0x00, 0x00,
922 0x00, 0x00, 0x00, 0x00,
923
924 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
925 0x00, 0x08, 0x00, 0x00,
926
927 0x00, 0x00, /* 2 bytes for 4 byte alignment */
928 };
929
930 ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_tcp) = {
931 { ICE_MAC_OFOS, 0 },
932 { ICE_IPV6_OFOS, 14 },
933 { ICE_UDP_OF, 54 },
934 { ICE_GTP, 62 },
935 { ICE_IPV6_IL, 82 },
936 { ICE_TCP_IL, 122 },
937 { ICE_PROTOCOL_LAST, 0 },
938 };
939
940 ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_tcp) = {
941 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
942 0x00, 0x00, 0x00, 0x00,
943 0x00, 0x00, 0x00, 0x00,
944 0x86, 0xdd,
945
946 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
947 0x00, 0x58, 0x11, 0x00,
948 0x00, 0x00, 0x00, 0x00,
949 0x00, 0x00, 0x00, 0x00,
950 0x00, 0x00, 0x00, 0x00,
951 0x00, 0x00, 0x00, 0x00,
952 0x00, 0x00, 0x00, 0x00,
953 0x00, 0x00, 0x00, 0x00,
954 0x00, 0x00, 0x00, 0x00,
955 0x00, 0x00, 0x00, 0x00,
956
957 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
958 0x00, 0x58, 0x00, 0x00,
959
960 0x34, 0xff, 0x00, 0x48, /* ICE_GTP Header 62 */
961 0x00, 0x00, 0x00, 0x00,
962 0x00, 0x00, 0x00, 0x85,
963
964 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
965 0x00, 0x00, 0x00, 0x00,
966
967 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
968 0x00, 0x14, 0x06, 0x00,
969 0x00, 0x00, 0x00, 0x00,
970 0x00, 0x00, 0x00, 0x00,
971 0x00, 0x00, 0x00, 0x00,
972 0x00, 0x00, 0x00, 0x00,
973 0x00, 0x00, 0x00, 0x00,
974 0x00, 0x00, 0x00, 0x00,
975 0x00, 0x00, 0x00, 0x00,
976 0x00, 0x00, 0x00, 0x00,
977
978 0x00, 0x00, 0x00, 0x00, /* TCP 122 */
979 0x00, 0x00, 0x00, 0x00,
980 0x00, 0x00, 0x00, 0x00,
981 0x50, 0x00, 0x00, 0x00,
982 0x00, 0x00, 0x00, 0x00,
983
984 0x00, 0x00, /* 2 bytes for 4 byte alignment */
985 };
986
987 ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_udp) = {
988 { ICE_MAC_OFOS, 0 },
989 { ICE_IPV6_OFOS, 14 },
990 { ICE_UDP_OF, 54 },
991 { ICE_GTP, 62 },
992 { ICE_IPV6_IL, 82 },
993 { ICE_UDP_ILOS, 122 },
994 { ICE_PROTOCOL_LAST, 0 },
995 };
996
997 ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_udp) = {
998 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
999 0x00, 0x00, 0x00, 0x00,
1000 0x00, 0x00, 0x00, 0x00,
1001 0x86, 0xdd,
1002
1003 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
1004 0x00, 0x4c, 0x11, 0x00,
1005 0x00, 0x00, 0x00, 0x00,
1006 0x00, 0x00, 0x00, 0x00,
1007 0x00, 0x00, 0x00, 0x00,
1008 0x00, 0x00, 0x00, 0x00,
1009 0x00, 0x00, 0x00, 0x00,
1010 0x00, 0x00, 0x00, 0x00,
1011 0x00, 0x00, 0x00, 0x00,
1012 0x00, 0x00, 0x00, 0x00,
1013
1014 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
1015 0x00, 0x4c, 0x00, 0x00,
1016
1017 0x34, 0xff, 0x00, 0x3c, /* ICE_GTP Header 62 */
1018 0x00, 0x00, 0x00, 0x00,
1019 0x00, 0x00, 0x00, 0x85,
1020
1021 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
1022 0x00, 0x00, 0x00, 0x00,
1023
1024 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
1025 0x00, 0x08, 0x11, 0x00,
1026 0x00, 0x00, 0x00, 0x00,
1027 0x00, 0x00, 0x00, 0x00,
1028 0x00, 0x00, 0x00, 0x00,
1029 0x00, 0x00, 0x00, 0x00,
1030 0x00, 0x00, 0x00, 0x00,
1031 0x00, 0x00, 0x00, 0x00,
1032 0x00, 0x00, 0x00, 0x00,
1033 0x00, 0x00, 0x00, 0x00,
1034
1035 0x00, 0x00, 0x00, 0x00, /* UDP 122 */
1036 0x00, 0x08, 0x00, 0x00,
1037
1038 0x00, 0x00, /* 2 bytes for 4 byte alignment */
1039 };
1040
1041 ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4) = {
1042 { ICE_MAC_OFOS, 0 },
1043 { ICE_IPV4_OFOS, 14 },
1044 { ICE_UDP_OF, 34 },
1045 { ICE_GTP_NO_PAY, 42 },
1046 { ICE_PROTOCOL_LAST, 0 },
1047 };
1048
1049 ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4) = {
1050 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1051 0x00, 0x00, 0x00, 0x00,
1052 0x00, 0x00, 0x00, 0x00,
1053 0x08, 0x00,
1054
1055 0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
1056 0x00, 0x00, 0x40, 0x00,
1057 0x40, 0x11, 0x00, 0x00,
1058 0x00, 0x00, 0x00, 0x00,
1059 0x00, 0x00, 0x00, 0x00,
1060
1061 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
1062 0x00, 0x00, 0x00, 0x00,
1063
1064 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
1065 0x00, 0x00, 0x00, 0x00,
1066 0x00, 0x00, 0x00, 0x85,
1067
1068 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1069 0x00, 0x00, 0x00, 0x00,
1070
1071 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
1072 0x00, 0x00, 0x40, 0x00,
1073 0x40, 0x00, 0x00, 0x00,
1074 0x00, 0x00, 0x00, 0x00,
1075 0x00, 0x00, 0x00, 0x00,
1076 0x00, 0x00,
1077 };
1078
1079 ICE_DECLARE_PKT_OFFSETS(ipv6_gtp) = {
1080 { ICE_MAC_OFOS, 0 },
1081 { ICE_IPV6_OFOS, 14 },
1082 { ICE_UDP_OF, 54 },
1083 { ICE_GTP_NO_PAY, 62 },
1084 { ICE_PROTOCOL_LAST, 0 },
1085 };
1086
1087 ICE_DECLARE_PKT_TEMPLATE(ipv6_gtp) = {
1088 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1089 0x00, 0x00, 0x00, 0x00,
1090 0x00, 0x00, 0x00, 0x00,
1091 0x86, 0xdd,
1092
1093 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1094 0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
1095 0x00, 0x00, 0x00, 0x00,
1096 0x00, 0x00, 0x00, 0x00,
1097 0x00, 0x00, 0x00, 0x00,
1098 0x00, 0x00, 0x00, 0x00,
1099 0x00, 0x00, 0x00, 0x00,
1100 0x00, 0x00, 0x00, 0x00,
1101 0x00, 0x00, 0x00, 0x00,
1102 0x00, 0x00, 0x00, 0x00,
1103
1104 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
1105 0x00, 0x00, 0x00, 0x00,
1106
1107 0x30, 0x00, 0x00, 0x28, /* ICE_GTP 62 */
1108 0x00, 0x00, 0x00, 0x00,
1109
1110 0x00, 0x00,
1111 };
1112
1113 ICE_DECLARE_PKT_OFFSETS(pppoe_ipv4_tcp) = {
1114 { ICE_MAC_OFOS, 0 },
1115 { ICE_ETYPE_OL, 12 },
1116 { ICE_PPPOE, 14 },
1117 { ICE_IPV4_OFOS, 22 },
1118 { ICE_TCP_IL, 42 },
1119 { ICE_PROTOCOL_LAST, 0 },
1120 };
1121
1122 ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv4_tcp) = {
1123 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1124 0x00, 0x00, 0x00, 0x00,
1125 0x00, 0x00, 0x00, 0x00,
1126
1127 0x88, 0x64, /* ICE_ETYPE_OL 12 */
1128
1129 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
1130 0x00, 0x16,
1131
1132 0x00, 0x21, /* PPP Link Layer 20 */
1133
1134 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 22 */
1135 0x00, 0x01, 0x00, 0x00,
1136 0x00, 0x06, 0x00, 0x00,
1137 0x00, 0x00, 0x00, 0x00,
1138 0x00, 0x00, 0x00, 0x00,
1139
1140 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 42 */
1141 0x00, 0x00, 0x00, 0x00,
1142 0x00, 0x00, 0x00, 0x00,
1143 0x50, 0x00, 0x00, 0x00,
1144 0x00, 0x00, 0x00, 0x00,
1145
1146 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1147 };
1148
1149 ICE_DECLARE_PKT_OFFSETS(pppoe_ipv4_udp) = {
1150 { ICE_MAC_OFOS, 0 },
1151 { ICE_ETYPE_OL, 12 },
1152 { ICE_PPPOE, 14 },
1153 { ICE_IPV4_OFOS, 22 },
1154 { ICE_UDP_ILOS, 42 },
1155 { ICE_PROTOCOL_LAST, 0 },
1156 };
1157
1158 ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv4_udp) = {
1159 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1160 0x00, 0x00, 0x00, 0x00,
1161 0x00, 0x00, 0x00, 0x00,
1162
1163 0x88, 0x64, /* ICE_ETYPE_OL 12 */
1164
1165 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
1166 0x00, 0x16,
1167
1168 0x00, 0x21, /* PPP Link Layer 20 */
1169
1170 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */
1171 0x00, 0x01, 0x00, 0x00,
1172 0x00, 0x11, 0x00, 0x00,
1173 0x00, 0x00, 0x00, 0x00,
1174 0x00, 0x00, 0x00, 0x00,
1175
1176 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
1177 0x00, 0x08, 0x00, 0x00,
1178
1179 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1180 };
1181
ICE_DECLARE_PKT_OFFSETS(pppoe_ipv6_tcp) = {
	{ ICE_MAC_OFOS, 0 },
	{ ICE_ETYPE_OL, 12 },
	{ ICE_PPPOE, 14 },
	{ ICE_IPV6_OFOS, 22 },
	{ ICE_TCP_IL, 62 },
	{ ICE_PROTOCOL_LAST, 0 },
};

ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv6_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x88, 0x64,		/* ICE_ETYPE_OL 12 (PPPoE session stage) */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
	0x00, 0x2a,		/* PPPoE payload length = 0x2a */

	0x00, 0x57,		/* PPP Link Layer 20 (PPP protocol = IPv6) */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 (payload len = 0x14) */
	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00, /* data offset = 5 (20-byte TCP header) */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
};
1222
ICE_DECLARE_PKT_OFFSETS(pppoe_ipv6_udp) = {
	{ ICE_MAC_OFOS, 0 },
	{ ICE_ETYPE_OL, 12 },
	{ ICE_PPPOE, 14 },
	{ ICE_IPV6_OFOS, 22 },
	{ ICE_UDP_ILOS, 62 },
	{ ICE_PROTOCOL_LAST, 0 },
};

ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv6_udp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x88, 0x64,		/* ICE_ETYPE_OL 12 (PPPoE session stage) */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
	0x00, 0x2a,		/* PPPoE payload length = 0x2a */

	0x00, 0x57,		/* PPP Link Layer 20 (PPP protocol = IPv6) */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 (payload len = 0x08) */
	0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
	0x00, 0x08, 0x00, 0x00, /* UDP length = 8 (header only) */

	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
};
1260
ICE_DECLARE_PKT_OFFSETS(ipv4_l2tpv3) = {
	{ ICE_MAC_OFOS, 0 },
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_L2TPV3, 34 },
	{ ICE_PROTOCOL_LAST, 0 },
};

ICE_DECLARE_PKT_TEMPLATE(ipv4_l2tpv3) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 (IPv4) */

	/* was mislabeled "ICE_IPV4_IL 14"; offsets table above uses
	 * ICE_IPV4_OFOS at offset 14
	 */
	0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x73, 0x00, 0x00, /* protocol = 0x73 (L2TPv3 over IP) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
};
1287
ICE_DECLARE_PKT_OFFSETS(ipv6_l2tpv3) = {
	{ ICE_MAC_OFOS, 0 },
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV6_OFOS, 14 },
	{ ICE_L2TPV3, 54 },
	{ ICE_PROTOCOL_LAST, 0 },
};

ICE_DECLARE_PKT_TEMPLATE(ipv6_l2tpv3) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 (IPv6) */

	/* was mislabeled "ICE_IPV6_IL 14"; offsets table above uses
	 * ICE_IPV6_OFOS at offset 14
	 */
	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x0c, 0x73, 0x40, /* next header = 0x73 (L2TPv3 over IP) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
};
1319
/* Table of dummy-packet profiles keyed by ICE_PKT_* match flags.
 * NOTE(review): entries appear ordered from most-specific flag combinations
 * to least-specific (ending with plain tcp) — presumably the lookup walks
 * this array and takes the first matching entry; confirm against the
 * profile-selection code before reordering.
 */
static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
	ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPU | ICE_PKT_OUTER_IPV6 |
				  ICE_PKT_GTP_NOPAY),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6 |
					    ICE_PKT_INNER_IPV6 |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6 |
					    ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6 |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPU | ICE_PKT_GTP_NOPAY),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_INNER_IPV6 |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU),
	ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPC | ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPC),
	ICE_PKT_PROFILE(pppoe_ipv6_udp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6 |
					ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(pppoe_ipv6_tcp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(pppoe_ipv4_udp, ICE_PKT_PPPOE | ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(pppoe_ipv4_tcp, ICE_PKT_PPPOE),
	ICE_PKT_PROFILE(gre_ipv6_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6 |
				      ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(gre_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(gre_ipv6_udp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(gre_udp, ICE_PKT_TUN_NVGRE),
	ICE_PKT_PROFILE(udp_tun_ipv6_tcp, ICE_PKT_TUN_UDP |
					  ICE_PKT_INNER_IPV6 |
					  ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(ipv6_l2tpv3, ICE_PKT_L2TPV3 | ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(ipv4_l2tpv3, ICE_PKT_L2TPV3),
	ICE_PKT_PROFILE(udp_tun_tcp, ICE_PKT_TUN_UDP | ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(udp_tun_ipv6_udp, ICE_PKT_TUN_UDP |
					  ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(udp_tun_udp, ICE_PKT_TUN_UDP),
	ICE_PKT_PROFILE(udp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(udp, ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(tcp_ipv6, ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(tcp, 0),
};
1370
/* this is a recipe to profile association bitmap: one bitmap per recipe,
 * with a bit set for each package profile the recipe is associated with
 */
static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES],
		      ICE_MAX_NUM_PROFILES);

/* this is a profile to recipe association bitmap: the mirror of the table
 * above, indexed by profile ID (both are filled by ice_get_recp_to_prof_map)
 */
static DECLARE_BITMAP(profile_to_recipe[ICE_MAX_NUM_PROFILES],
		      ICE_MAX_NUM_RECIPES);
1378
1379 /**
1380 * ice_init_def_sw_recp - initialize the recipe book keeping tables
1381 * @hw: pointer to the HW struct
1382 *
1383 * Allocate memory for the entire recipe table and initialize the structures/
1384 * entries corresponding to basic recipes.
1385 */
ice_init_def_sw_recp(struct ice_hw * hw)1386 int ice_init_def_sw_recp(struct ice_hw *hw)
1387 {
1388 struct ice_sw_recipe *recps;
1389 u8 i;
1390
1391 recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
1392 sizeof(*recps), GFP_KERNEL);
1393 if (!recps)
1394 return -ENOMEM;
1395
1396 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
1397 recps[i].root_rid = i;
1398 INIT_LIST_HEAD(&recps[i].filt_rules);
1399 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
1400 INIT_LIST_HEAD(&recps[i].rg_list);
1401 mutex_init(&recps[i].filt_rule_lock);
1402 }
1403
1404 hw->switch_info->recp_list = recps;
1405
1406 return 0;
1407 }
1408
1409 /**
1410 * ice_aq_get_sw_cfg - get switch configuration
1411 * @hw: pointer to the hardware structure
1412 * @buf: pointer to the result buffer
1413 * @buf_size: length of the buffer available for response
1414 * @req_desc: pointer to requested descriptor
1415 * @num_elems: pointer to number of elements
1416 * @cd: pointer to command details structure or NULL
1417 *
1418 * Get switch configuration (0x0200) to be placed in buf.
1419 * This admin command returns information such as initial VSI/port number
1420 * and switch ID it belongs to.
1421 *
1422 * NOTE: *req_desc is both an input/output parameter.
1423 * The caller of this function first calls this function with *request_desc set
1424 * to 0. If the response from f/w has *req_desc set to 0, all the switch
1425 * configuration information has been returned; if non-zero (meaning not all
1426 * the information was returned), the caller should call this function again
1427 * with *req_desc set to the previous value returned by f/w to get the
1428 * next block of switch configuration information.
1429 *
1430 * *num_elems is output only parameter. This reflects the number of elements
1431 * in response buffer. The caller of this function to use *num_elems while
1432 * parsing the response buffer.
1433 */
1434 static int
ice_aq_get_sw_cfg(struct ice_hw * hw,struct ice_aqc_get_sw_cfg_resp_elem * buf,u16 buf_size,u16 * req_desc,u16 * num_elems,struct ice_sq_cd * cd)1435 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
1436 u16 buf_size, u16 *req_desc, u16 *num_elems,
1437 struct ice_sq_cd *cd)
1438 {
1439 struct ice_aqc_get_sw_cfg *cmd;
1440 struct ice_aq_desc desc;
1441 int status;
1442
1443 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
1444 cmd = &desc.params.get_sw_conf;
1445 cmd->element = cpu_to_le16(*req_desc);
1446
1447 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1448 if (!status) {
1449 *req_desc = le16_to_cpu(cmd->element);
1450 *num_elems = le16_to_cpu(cmd->num_elems);
1451 }
1452
1453 return status;
1454 }
1455
1456 /**
1457 * ice_aq_add_vsi
1458 * @hw: pointer to the HW struct
1459 * @vsi_ctx: pointer to a VSI context struct
1460 * @cd: pointer to command details structure or NULL
1461 *
1462 * Add a VSI context to the hardware (0x0210)
1463 */
1464 static int
ice_aq_add_vsi(struct ice_hw * hw,struct ice_vsi_ctx * vsi_ctx,struct ice_sq_cd * cd)1465 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1466 struct ice_sq_cd *cd)
1467 {
1468 struct ice_aqc_add_update_free_vsi_resp *res;
1469 struct ice_aqc_add_get_update_free_vsi *cmd;
1470 struct ice_aq_desc desc;
1471 int status;
1472
1473 cmd = &desc.params.vsi_cmd;
1474 res = &desc.params.add_update_free_vsi_res;
1475
1476 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
1477
1478 if (!vsi_ctx->alloc_from_pool)
1479 cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
1480 ICE_AQ_VSI_IS_VALID);
1481 cmd->vf_id = vsi_ctx->vf_num;
1482
1483 cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
1484
1485 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1486
1487 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1488 sizeof(vsi_ctx->info), cd);
1489
1490 if (!status) {
1491 vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M;
1492 vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used);
1493 vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free);
1494 }
1495
1496 return status;
1497 }
1498
1499 /**
1500 * ice_aq_free_vsi
1501 * @hw: pointer to the HW struct
1502 * @vsi_ctx: pointer to a VSI context struct
1503 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1504 * @cd: pointer to command details structure or NULL
1505 *
1506 * Free VSI context info from hardware (0x0213)
1507 */
1508 static int
ice_aq_free_vsi(struct ice_hw * hw,struct ice_vsi_ctx * vsi_ctx,bool keep_vsi_alloc,struct ice_sq_cd * cd)1509 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1510 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1511 {
1512 struct ice_aqc_add_update_free_vsi_resp *resp;
1513 struct ice_aqc_add_get_update_free_vsi *cmd;
1514 struct ice_aq_desc desc;
1515 int status;
1516
1517 cmd = &desc.params.vsi_cmd;
1518 resp = &desc.params.add_update_free_vsi_res;
1519
1520 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
1521
1522 cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1523 if (keep_vsi_alloc)
1524 cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);
1525
1526 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1527 if (!status) {
1528 vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
1529 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
1530 }
1531
1532 return status;
1533 }
1534
1535 /**
1536 * ice_aq_update_vsi
1537 * @hw: pointer to the HW struct
1538 * @vsi_ctx: pointer to a VSI context struct
1539 * @cd: pointer to command details structure or NULL
1540 *
1541 * Update VSI context in the hardware (0x0211)
1542 */
1543 static int
ice_aq_update_vsi(struct ice_hw * hw,struct ice_vsi_ctx * vsi_ctx,struct ice_sq_cd * cd)1544 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1545 struct ice_sq_cd *cd)
1546 {
1547 struct ice_aqc_add_update_free_vsi_resp *resp;
1548 struct ice_aqc_add_get_update_free_vsi *cmd;
1549 struct ice_aq_desc desc;
1550 int status;
1551
1552 cmd = &desc.params.vsi_cmd;
1553 resp = &desc.params.add_update_free_vsi_res;
1554
1555 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
1556
1557 cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1558
1559 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1560
1561 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1562 sizeof(vsi_ctx->info), cd);
1563
1564 if (!status) {
1565 vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
1566 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
1567 }
1568
1569 return status;
1570 }
1571
1572 /**
1573 * ice_is_vsi_valid - check whether the VSI is valid or not
1574 * @hw: pointer to the HW struct
1575 * @vsi_handle: VSI handle
1576 *
1577 * check whether the VSI is valid or not
1578 */
ice_is_vsi_valid(struct ice_hw * hw,u16 vsi_handle)1579 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1580 {
1581 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1582 }
1583
/**
 * ice_get_hw_vsi_num - return the HW VSI number
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * return the HW VSI number
 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
 */
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
{
	/* no bounds or NULL check here: the caller must have validated
	 * the handle via ice_is_vsi_valid()
	 */
	return hw->vsi_ctx[vsi_handle]->vsi_num;
}
1596
1597 /**
1598 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1599 * @hw: pointer to the HW struct
1600 * @vsi_handle: VSI handle
1601 *
1602 * return the VSI context entry for a given VSI handle
1603 */
ice_get_vsi_ctx(struct ice_hw * hw,u16 vsi_handle)1604 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1605 {
1606 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1607 }
1608
/**
 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @vsi: VSI context pointer
 *
 * save the VSI context entry for a given VSI handle
 */
static void
ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
{
	/* overwrites any previous entry; the saved pointer is later freed
	 * by ice_clear_vsi_ctx()
	 */
	hw->vsi_ctx[vsi_handle] = vsi;
}
1622
1623 /**
1624 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1625 * @hw: pointer to the HW struct
1626 * @vsi_handle: VSI handle
1627 */
ice_clear_vsi_q_ctx(struct ice_hw * hw,u16 vsi_handle)1628 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1629 {
1630 struct ice_vsi_ctx *vsi = ice_get_vsi_ctx(hw, vsi_handle);
1631 u8 i;
1632
1633 if (!vsi)
1634 return;
1635 ice_for_each_traffic_class(i) {
1636 devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
1637 vsi->lan_q_ctx[i] = NULL;
1638 devm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]);
1639 vsi->rdma_q_ctx[i] = NULL;
1640 }
1641 }
1642
1643 /**
1644 * ice_clear_vsi_ctx - clear the VSI context entry
1645 * @hw: pointer to the HW struct
1646 * @vsi_handle: VSI handle
1647 *
1648 * clear the VSI context entry
1649 */
ice_clear_vsi_ctx(struct ice_hw * hw,u16 vsi_handle)1650 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1651 {
1652 struct ice_vsi_ctx *vsi;
1653
1654 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1655 if (vsi) {
1656 ice_clear_vsi_q_ctx(hw, vsi_handle);
1657 devm_kfree(ice_hw_to_dev(hw), vsi);
1658 hw->vsi_ctx[vsi_handle] = NULL;
1659 }
1660 }
1661
1662 /**
1663 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1664 * @hw: pointer to the HW struct
1665 */
ice_clear_all_vsi_ctx(struct ice_hw * hw)1666 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1667 {
1668 u16 i;
1669
1670 for (i = 0; i < ICE_MAX_VSI; i++)
1671 ice_clear_vsi_ctx(hw, i);
1672 }
1673
1674 /**
1675 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1676 * @hw: pointer to the HW struct
1677 * @vsi_handle: unique VSI handle provided by drivers
1678 * @vsi_ctx: pointer to a VSI context struct
1679 * @cd: pointer to command details structure or NULL
1680 *
1681 * Add a VSI context to the hardware also add it into the VSI handle list.
1682 * If this function gets called after reset for existing VSIs then update
1683 * with the new HW VSI number in the corresponding VSI handle list entry.
1684 */
1685 int
ice_add_vsi(struct ice_hw * hw,u16 vsi_handle,struct ice_vsi_ctx * vsi_ctx,struct ice_sq_cd * cd)1686 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1687 struct ice_sq_cd *cd)
1688 {
1689 struct ice_vsi_ctx *tmp_vsi_ctx;
1690 int status;
1691
1692 if (vsi_handle >= ICE_MAX_VSI)
1693 return -EINVAL;
1694 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1695 if (status)
1696 return status;
1697 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1698 if (!tmp_vsi_ctx) {
1699 /* Create a new VSI context */
1700 tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
1701 sizeof(*tmp_vsi_ctx), GFP_KERNEL);
1702 if (!tmp_vsi_ctx) {
1703 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1704 return -ENOMEM;
1705 }
1706 *tmp_vsi_ctx = *vsi_ctx;
1707 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1708 } else {
1709 /* update with new HW VSI num */
1710 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1711 }
1712
1713 return 0;
1714 }
1715
1716 /**
1717 * ice_free_vsi- free VSI context from hardware and VSI handle list
1718 * @hw: pointer to the HW struct
1719 * @vsi_handle: unique VSI handle
1720 * @vsi_ctx: pointer to a VSI context struct
1721 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1722 * @cd: pointer to command details structure or NULL
1723 *
1724 * Free VSI context info from hardware as well as from VSI handle list
1725 */
1726 int
ice_free_vsi(struct ice_hw * hw,u16 vsi_handle,struct ice_vsi_ctx * vsi_ctx,bool keep_vsi_alloc,struct ice_sq_cd * cd)1727 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1728 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1729 {
1730 int status;
1731
1732 if (!ice_is_vsi_valid(hw, vsi_handle))
1733 return -EINVAL;
1734 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1735 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1736 if (!status)
1737 ice_clear_vsi_ctx(hw, vsi_handle);
1738 return status;
1739 }
1740
1741 /**
1742 * ice_update_vsi
1743 * @hw: pointer to the HW struct
1744 * @vsi_handle: unique VSI handle
1745 * @vsi_ctx: pointer to a VSI context struct
1746 * @cd: pointer to command details structure or NULL
1747 *
1748 * Update VSI context in the hardware
1749 */
1750 int
ice_update_vsi(struct ice_hw * hw,u16 vsi_handle,struct ice_vsi_ctx * vsi_ctx,struct ice_sq_cd * cd)1751 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1752 struct ice_sq_cd *cd)
1753 {
1754 if (!ice_is_vsi_valid(hw, vsi_handle))
1755 return -EINVAL;
1756 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1757 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1758 }
1759
1760 /**
1761 * ice_cfg_rdma_fltr - enable/disable RDMA filtering on VSI
1762 * @hw: pointer to HW struct
1763 * @vsi_handle: VSI SW index
1764 * @enable: boolean for enable/disable
1765 */
1766 int
ice_cfg_rdma_fltr(struct ice_hw * hw,u16 vsi_handle,bool enable)1767 ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
1768 {
1769 struct ice_vsi_ctx *ctx, *cached_ctx;
1770 int status;
1771
1772 cached_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1773 if (!cached_ctx)
1774 return -ENOENT;
1775
1776 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1777 if (!ctx)
1778 return -ENOMEM;
1779
1780 ctx->info.q_opt_rss = cached_ctx->info.q_opt_rss;
1781 ctx->info.q_opt_tc = cached_ctx->info.q_opt_tc;
1782 ctx->info.q_opt_flags = cached_ctx->info.q_opt_flags;
1783
1784 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
1785
1786 if (enable)
1787 ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
1788 else
1789 ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
1790
1791 status = ice_update_vsi(hw, vsi_handle, ctx, NULL);
1792 if (!status) {
1793 cached_ctx->info.q_opt_flags = ctx->info.q_opt_flags;
1794 cached_ctx->info.valid_sections |= ctx->info.valid_sections;
1795 }
1796
1797 kfree(ctx);
1798 return status;
1799 }
1800
1801 /**
1802 * ice_aq_alloc_free_vsi_list
1803 * @hw: pointer to the HW struct
1804 * @vsi_list_id: VSI list ID returned or used for lookup
1805 * @lkup_type: switch rule filter lookup type
1806 * @opc: switch rules population command type - pass in the command opcode
1807 *
1808 * allocates or free a VSI list resource
1809 */
1810 static int
ice_aq_alloc_free_vsi_list(struct ice_hw * hw,u16 * vsi_list_id,enum ice_sw_lkup_type lkup_type,enum ice_adminq_opc opc)1811 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1812 enum ice_sw_lkup_type lkup_type,
1813 enum ice_adminq_opc opc)
1814 {
1815 struct ice_aqc_alloc_free_res_elem *sw_buf;
1816 struct ice_aqc_res_elem *vsi_ele;
1817 u16 buf_len;
1818 int status;
1819
1820 buf_len = struct_size(sw_buf, elem, 1);
1821 sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
1822 if (!sw_buf)
1823 return -ENOMEM;
1824 sw_buf->num_elems = cpu_to_le16(1);
1825
1826 if (lkup_type == ICE_SW_LKUP_MAC ||
1827 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1828 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1829 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1830 lkup_type == ICE_SW_LKUP_PROMISC ||
1831 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1832 lkup_type == ICE_SW_LKUP_DFLT ||
1833 lkup_type == ICE_SW_LKUP_LAST) {
1834 sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1835 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1836 if (opc == ice_aqc_opc_alloc_res)
1837 sw_buf->res_type =
1838 cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE |
1839 ICE_AQC_RES_TYPE_FLAG_SHARED);
1840 else
1841 sw_buf->res_type =
1842 cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1843 } else {
1844 status = -EINVAL;
1845 goto ice_aq_alloc_free_vsi_list_exit;
1846 }
1847
1848 if (opc == ice_aqc_opc_free_res)
1849 sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);
1850
1851 status = ice_aq_alloc_free_res(hw, sw_buf, buf_len, opc);
1852 if (status)
1853 goto ice_aq_alloc_free_vsi_list_exit;
1854
1855 if (opc == ice_aqc_opc_alloc_res) {
1856 vsi_ele = &sw_buf->elem[0];
1857 *vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
1858 }
1859
1860 ice_aq_alloc_free_vsi_list_exit:
1861 devm_kfree(ice_hw_to_dev(hw), sw_buf);
1862 return status;
1863 }
1864
1865 /**
1866 * ice_aq_sw_rules - add/update/remove switch rules
1867 * @hw: pointer to the HW struct
1868 * @rule_list: pointer to switch rule population list
1869 * @rule_list_sz: total size of the rule list in bytes
1870 * @num_rules: number of switch rules in the rule_list
1871 * @opc: switch rules population command type - pass in the command opcode
1872 * @cd: pointer to command details structure or NULL
1873 *
1874 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1875 */
1876 int
ice_aq_sw_rules(struct ice_hw * hw,void * rule_list,u16 rule_list_sz,u8 num_rules,enum ice_adminq_opc opc,struct ice_sq_cd * cd)1877 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1878 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1879 {
1880 struct ice_aq_desc desc;
1881 int status;
1882
1883 if (opc != ice_aqc_opc_add_sw_rules &&
1884 opc != ice_aqc_opc_update_sw_rules &&
1885 opc != ice_aqc_opc_remove_sw_rules)
1886 return -EINVAL;
1887
1888 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1889
1890 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1891 desc.params.sw_rules.num_rules_fltr_entry_index =
1892 cpu_to_le16(num_rules);
1893 status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1894 if (opc != ice_aqc_opc_add_sw_rules &&
1895 hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
1896 status = -ENOENT;
1897
1898 return status;
1899 }
1900
1901 /**
1902 * ice_aq_add_recipe - add switch recipe
1903 * @hw: pointer to the HW struct
1904 * @s_recipe_list: pointer to switch rule population list
1905 * @num_recipes: number of switch recipes in the list
1906 * @cd: pointer to command details structure or NULL
1907 *
1908 * Add(0x0290)
1909 */
1910 int
ice_aq_add_recipe(struct ice_hw * hw,struct ice_aqc_recipe_data_elem * s_recipe_list,u16 num_recipes,struct ice_sq_cd * cd)1911 ice_aq_add_recipe(struct ice_hw *hw,
1912 struct ice_aqc_recipe_data_elem *s_recipe_list,
1913 u16 num_recipes, struct ice_sq_cd *cd)
1914 {
1915 struct ice_aqc_add_get_recipe *cmd;
1916 struct ice_aq_desc desc;
1917 u16 buf_size;
1918
1919 cmd = &desc.params.add_get_recipe;
1920 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1921
1922 cmd->num_sub_recipes = cpu_to_le16(num_recipes);
1923 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1924
1925 buf_size = num_recipes * sizeof(*s_recipe_list);
1926
1927 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1928 }
1929
1930 /**
1931 * ice_aq_get_recipe - get switch recipe
1932 * @hw: pointer to the HW struct
1933 * @s_recipe_list: pointer to switch rule population list
1934 * @num_recipes: pointer to the number of recipes (input and output)
1935 * @recipe_root: root recipe number of recipe(s) to retrieve
1936 * @cd: pointer to command details structure or NULL
1937 *
1938 * Get(0x0292)
1939 *
1940 * On input, *num_recipes should equal the number of entries in s_recipe_list.
1941 * On output, *num_recipes will equal the number of entries returned in
1942 * s_recipe_list.
1943 *
1944 * The caller must supply enough space in s_recipe_list to hold all possible
1945 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1946 */
1947 int
ice_aq_get_recipe(struct ice_hw * hw,struct ice_aqc_recipe_data_elem * s_recipe_list,u16 * num_recipes,u16 recipe_root,struct ice_sq_cd * cd)1948 ice_aq_get_recipe(struct ice_hw *hw,
1949 struct ice_aqc_recipe_data_elem *s_recipe_list,
1950 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1951 {
1952 struct ice_aqc_add_get_recipe *cmd;
1953 struct ice_aq_desc desc;
1954 u16 buf_size;
1955 int status;
1956
1957 if (*num_recipes != ICE_MAX_NUM_RECIPES)
1958 return -EINVAL;
1959
1960 cmd = &desc.params.add_get_recipe;
1961 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1962
1963 cmd->return_index = cpu_to_le16(recipe_root);
1964 cmd->num_sub_recipes = 0;
1965
1966 buf_size = *num_recipes * sizeof(*s_recipe_list);
1967
1968 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1969 *num_recipes = le16_to_cpu(cmd->num_sub_recipes);
1970
1971 return status;
1972 }
1973
/**
 * ice_update_recipe_lkup_idx - update a default recipe based on the lkup_idx
 * @hw: pointer to the HW struct
 * @params: parameters used to update the default recipe
 *
 * This function only supports updating default recipes and it only supports
 * updating a single recipe based on the lkup_idx at a time.
 *
 * This is done as a read-modify-write operation. First, get the current recipe
 * contents based on the recipe's ID. Then modify the field vector index and
 * mask if it's valid at the lkup_idx. Finally, use the add recipe AQ to update
 * the pre-existing recipe with the modifications.
 */
int
ice_update_recipe_lkup_idx(struct ice_hw *hw,
			   struct ice_update_recipe_lkup_idx_params *params)
{
	struct ice_aqc_recipe_data_elem *rcp_list;
	u16 num_recps = ICE_MAX_NUM_RECIPES;
	int status;

	/* buffer must hold all possible recipes: ice_aq_get_recipe()
	 * requires num_recps == ICE_MAX_NUM_RECIPES
	 */
	rcp_list = kcalloc(num_recps, sizeof(*rcp_list), GFP_KERNEL);
	if (!rcp_list)
		return -ENOMEM;

	/* read current recipe list from firmware */
	rcp_list->recipe_indx = params->rid;
	status = ice_aq_get_recipe(hw, rcp_list, &num_recps, params->rid, NULL);
	if (status) {
		ice_debug(hw, ICE_DBG_SW, "Failed to get recipe %d, status %d\n",
			  params->rid, status);
		goto error_out;
	}

	/* only modify existing recipe's lkup_idx and mask if valid, while
	 * leaving all other fields the same, then update the recipe firmware
	 */
	rcp_list->content.lkup_indx[params->lkup_idx] = params->fv_idx;
	if (params->mask_valid)
		rcp_list->content.mask[params->lkup_idx] =
			cpu_to_le16(params->mask);

	if (params->ignore_valid)
		rcp_list->content.lkup_indx[params->lkup_idx] |=
			ICE_AQ_RECIPE_LKUP_IGNORE;

	/* write back only the first (modified) element */
	status = ice_aq_add_recipe(hw, &rcp_list[0], 1, NULL);
	if (status)
		ice_debug(hw, ICE_DBG_SW, "Failed to update recipe %d lkup_idx %d fv_idx %d mask %d mask_valid %s, status %d\n",
			  params->rid, params->lkup_idx, params->fv_idx,
			  params->mask, params->mask_valid ? "true" : "false",
			  status);

error_out:
	kfree(rcp_list);
	return status;
}
2031
2032 /**
2033 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
2034 * @hw: pointer to the HW struct
2035 * @profile_id: package profile ID to associate the recipe with
2036 * @r_assoc: Recipe bitmap filled in and need to be returned as response
2037 * @cd: pointer to command details structure or NULL
2038 * Recipe to profile association (0x0291)
2039 */
2040 int
ice_aq_map_recipe_to_profile(struct ice_hw * hw,u32 profile_id,u64 r_assoc,struct ice_sq_cd * cd)2041 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 r_assoc,
2042 struct ice_sq_cd *cd)
2043 {
2044 struct ice_aqc_recipe_to_profile *cmd;
2045 struct ice_aq_desc desc;
2046
2047 cmd = &desc.params.recipe_to_profile;
2048 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
2049 cmd->profile_id = cpu_to_le16(profile_id);
2050 /* Set the recipe ID bit in the bitmask to let the device know which
2051 * profile we are associating the recipe to
2052 */
2053 cmd->recipe_assoc = cpu_to_le64(r_assoc);
2054
2055 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2056 }
2057
2058 /**
2059 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
2060 * @hw: pointer to the HW struct
2061 * @profile_id: package profile ID to associate the recipe with
2062 * @r_assoc: Recipe bitmap filled in and need to be returned as response
2063 * @cd: pointer to command details structure or NULL
2064 * Associate profile ID with given recipe (0x0293)
2065 */
2066 int
ice_aq_get_recipe_to_profile(struct ice_hw * hw,u32 profile_id,u64 * r_assoc,struct ice_sq_cd * cd)2067 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 *r_assoc,
2068 struct ice_sq_cd *cd)
2069 {
2070 struct ice_aqc_recipe_to_profile *cmd;
2071 struct ice_aq_desc desc;
2072 int status;
2073
2074 cmd = &desc.params.recipe_to_profile;
2075 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
2076 cmd->profile_id = cpu_to_le16(profile_id);
2077
2078 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2079 if (!status)
2080 *r_assoc = le64_to_cpu(cmd->recipe_assoc);
2081
2082 return status;
2083 }
2084
2085 /**
2086 * ice_alloc_recipe - add recipe resource
2087 * @hw: pointer to the hardware structure
2088 * @rid: recipe ID returned as response to AQ call
2089 */
ice_alloc_recipe(struct ice_hw * hw,u16 * rid)2090 int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
2091 {
2092 struct ice_aqc_alloc_free_res_elem *sw_buf;
2093 u16 buf_len;
2094 int status;
2095
2096 buf_len = struct_size(sw_buf, elem, 1);
2097 sw_buf = kzalloc(buf_len, GFP_KERNEL);
2098 if (!sw_buf)
2099 return -ENOMEM;
2100
2101 sw_buf->num_elems = cpu_to_le16(1);
2102 sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
2103 ICE_AQC_RES_TYPE_S) |
2104 ICE_AQC_RES_TYPE_FLAG_SHARED);
2105 status = ice_aq_alloc_free_res(hw, sw_buf, buf_len,
2106 ice_aqc_opc_alloc_res);
2107 if (!status)
2108 *rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp);
2109 kfree(sw_buf);
2110
2111 return status;
2112 }
2113
2114 /**
2115 * ice_get_recp_to_prof_map - updates recipe to profile mapping
2116 * @hw: pointer to hardware structure
2117 *
2118 * This function is used to populate recipe_to_profile matrix where index to
2119 * this array is the recipe ID and the element is the mapping of which profiles
2120 * is this recipe mapped to.
2121 */
ice_get_recp_to_prof_map(struct ice_hw * hw)2122 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
2123 {
2124 DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
2125 u64 recp_assoc;
2126 u16 i;
2127
2128 for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
2129 u16 j;
2130
2131 bitmap_zero(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
2132 bitmap_zero(r_bitmap, ICE_MAX_NUM_RECIPES);
2133 if (ice_aq_get_recipe_to_profile(hw, i, &recp_assoc, NULL))
2134 continue;
2135 bitmap_from_arr64(r_bitmap, &recp_assoc, ICE_MAX_NUM_RECIPES);
2136 bitmap_copy(profile_to_recipe[i], r_bitmap,
2137 ICE_MAX_NUM_RECIPES);
2138 for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
2139 set_bit(i, recipe_to_profile[j]);
2140 }
2141 }
2142
2143 /**
2144 * ice_collect_result_idx - copy result index values
2145 * @buf: buffer that contains the result index
2146 * @recp: the recipe struct to copy data into
2147 */
2148 static void
ice_collect_result_idx(struct ice_aqc_recipe_data_elem * buf,struct ice_sw_recipe * recp)2149 ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
2150 struct ice_sw_recipe *recp)
2151 {
2152 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
2153 set_bit(buf->content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
2154 recp->res_idxs);
2155 }
2156
/**
 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
 * @hw: pointer to hardware structure
 * @recps: struct that we need to populate
 * @rid: recipe ID that we are populating
 * @refresh_required: true if we should get recipe to profile mapping from FW
 *
 * This function is used to populate all the necessary entries into our
 * bookkeeping so that we have a current list of all the recipes that are
 * programmed in the firmware.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, or the non-zero
 * status from ice_aq_get_recipe() when the recipe does not exist in FW.
 */
static int
ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
		    bool *refresh_required)
{
	DECLARE_BITMAP(result_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_data_elem *tmp;
	u16 num_recps = ICE_MAX_NUM_RECIPES;
	struct ice_prot_lkup_ext *lkup_exts;
	u8 fv_word_idx = 0;
	u16 sub_recps;
	int status;

	bitmap_zero(result_bm, ICE_MAX_FV_WORDS);

	/* we need a buffer big enough to accommodate all the recipes */
	tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	tmp[0].recipe_indx = rid;
	status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
	/* non-zero status meaning recipe doesn't exist */
	if (status)
		goto err_unroll;

	/* Get recipe to profile map so that we can get the fv from lkups that
	 * we read for a recipe from FW. Since we want to minimize the number of
	 * times we make this FW call, just make one call and cache the copy
	 * until a new recipe is added. This operation is only required the
	 * first time to get the changes from FW. Then to search existing
	 * entries we don't need to update the cache again until another recipe
	 * gets added.
	 */
	if (*refresh_required) {
		ice_get_recp_to_prof_map(hw);
		*refresh_required = false;
	}

	/* Start populating all the entries for recps[rid] based on lkups from
	 * firmware. Note that we are only creating the root recipe in our
	 * database.
	 */
	lkup_exts = &recps[rid].lkup_exts;

	/* num_recps was updated by ice_aq_get_recipe() to the number of
	 * sub-recipe entries actually returned for this recipe chain
	 */
	for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
		struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
		struct ice_recp_grp_entry *rg_entry;
		u8 i, prof, idx, prot = 0;
		bool is_root;
		u16 off = 0;

		rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry),
					GFP_KERNEL);
		if (!rg_entry) {
			status = -ENOMEM;
			goto err_unroll;
		}

		idx = root_bufs.recipe_indx;
		is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;

		/* Mark all result indices in this chain */
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
			set_bit(root_bufs.content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
				result_bm);

		/* get the first profile that is associated with rid */
		prof = find_first_bit(recipe_to_profile[idx],
				      ICE_MAX_NUM_PROFILES);
		for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
			/* lookup words are read starting at index 1;
			 * index 0 of lkup_indx/mask is not consumed here
			 */
			u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];

			rg_entry->fv_idx[i] = lkup_indx;
			rg_entry->fv_mask[i] =
				le16_to_cpu(root_bufs.content.mask[i + 1]);

			/* If the recipe is a chained recipe then all its
			 * child recipe's result will have a result index.
			 * To fill fv_words we should not use those result
			 * index, we only need the protocol ids and offsets.
			 * We will skip all the fv_idx which stores result
			 * index in them. We also need to skip any fv_idx which
			 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
			 * valid offset value.
			 */
			if (test_bit(rg_entry->fv_idx[i], hw->switch_info->prof_res_bm[prof]) ||
			    rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
			    rg_entry->fv_idx[i] == 0)
				continue;

			/* translate the FV index into a protocol ID/offset
			 * pair for the recipe's lookup extraction table
			 */
			ice_find_prot_off(hw, ICE_BLK_SW, prof,
					  rg_entry->fv_idx[i], &prot, &off);
			lkup_exts->fv_words[fv_word_idx].prot_id = prot;
			lkup_exts->fv_words[fv_word_idx].off = off;
			lkup_exts->field_mask[fv_word_idx] =
				rg_entry->fv_mask[i];
			fv_word_idx++;
		}
		/* populate rg_list with the data from the child entry of this
		 * recipe
		 */
		list_add(&rg_entry->l_entry, &recps[rid].rg_list);

		/* Propagate some data to the recipe database */
		recps[idx].is_root = !!is_root;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
		recps[idx].need_pass_l2 = !!(root_bufs.content.act_ctrl &
					     ICE_AQ_RECIPE_ACT_NEED_PASS_L2);
		recps[idx].allow_pass_l2 = !!(root_bufs.content.act_ctrl &
					      ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2);
		bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
			recps[idx].chain_idx = root_bufs.content.result_indx &
					       ~ICE_AQ_RECIPE_RESULT_EN;
			set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
		} else {
			recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
		}

		if (!is_root)
			continue;

		/* Only do the following for root recipes entries */
		memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
		       sizeof(recps[idx].r_bitmap));
		recps[idx].root_rid = root_bufs.content.rid &
				      ~ICE_AQ_RECIPE_ID_IS_ROOT;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
	}

	/* Complete initialization of the root recipe entry */
	lkup_exts->n_val_words = fv_word_idx;
	recps[rid].big_recp = (num_recps > 1);
	recps[rid].n_grp_count = (u8)num_recps;
	/* keep a copy of the raw FW recipe data for later reprogramming */
	recps[rid].root_buf = devm_kmemdup(ice_hw_to_dev(hw), tmp,
					   recps[rid].n_grp_count * sizeof(*recps[rid].root_buf),
					   GFP_KERNEL);
	if (!recps[rid].root_buf) {
		status = -ENOMEM;
		goto err_unroll;
	}

	/* Copy result indexes */
	bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
	recps[rid].recp_created = true;

err_unroll:
	kfree(tmp);
	return status;
}
2318
2319 /* ice_init_port_info - Initialize port_info with switch configuration data
2320 * @pi: pointer to port_info
2321 * @vsi_port_num: VSI number or port number
2322 * @type: Type of switch element (port or VSI)
2323 * @swid: switch ID of the switch the element is attached to
2324 * @pf_vf_num: PF or VF number
2325 * @is_vf: true if the element is a VF, false otherwise
2326 */
2327 static void
ice_init_port_info(struct ice_port_info * pi,u16 vsi_port_num,u8 type,u16 swid,u16 pf_vf_num,bool is_vf)2328 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
2329 u16 swid, u16 pf_vf_num, bool is_vf)
2330 {
2331 switch (type) {
2332 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2333 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
2334 pi->sw_id = swid;
2335 pi->pf_vf_num = pf_vf_num;
2336 pi->is_vf = is_vf;
2337 break;
2338 default:
2339 ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
2340 break;
2341 }
2342 }
2343
2344 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
2345 * @hw: pointer to the hardware structure
2346 */
ice_get_initial_sw_cfg(struct ice_hw * hw)2347 int ice_get_initial_sw_cfg(struct ice_hw *hw)
2348 {
2349 struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
2350 u16 req_desc = 0;
2351 u16 num_elems;
2352 int status;
2353 u16 i;
2354
2355 rbuf = kzalloc(ICE_SW_CFG_MAX_BUF_LEN, GFP_KERNEL);
2356 if (!rbuf)
2357 return -ENOMEM;
2358
2359 /* Multiple calls to ice_aq_get_sw_cfg may be required
2360 * to get all the switch configuration information. The need
2361 * for additional calls is indicated by ice_aq_get_sw_cfg
2362 * writing a non-zero value in req_desc
2363 */
2364 do {
2365 struct ice_aqc_get_sw_cfg_resp_elem *ele;
2366
2367 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
2368 &req_desc, &num_elems, NULL);
2369
2370 if (status)
2371 break;
2372
2373 for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
2374 u16 pf_vf_num, swid, vsi_port_num;
2375 bool is_vf = false;
2376 u8 res_type;
2377
2378 vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
2379 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
2380
2381 pf_vf_num = le16_to_cpu(ele->pf_vf_num) &
2382 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
2383
2384 swid = le16_to_cpu(ele->swid);
2385
2386 if (le16_to_cpu(ele->pf_vf_num) &
2387 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
2388 is_vf = true;
2389
2390 res_type = (u8)(le16_to_cpu(ele->vsi_port_num) >>
2391 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
2392
2393 if (res_type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
2394 /* FW VSI is not needed. Just continue. */
2395 continue;
2396 }
2397
2398 ice_init_port_info(hw->port_info, vsi_port_num,
2399 res_type, swid, pf_vf_num, is_vf);
2400 }
2401 } while (req_desc && !status);
2402
2403 kfree(rbuf);
2404 return status;
2405 }
2406
2407 /**
2408 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
2409 * @hw: pointer to the hardware structure
2410 * @fi: filter info structure to fill/update
2411 *
2412 * This helper function populates the lb_en and lan_en elements of the provided
2413 * ice_fltr_info struct using the switch's type and characteristics of the
2414 * switch rule being configured.
2415 */
ice_fill_sw_info(struct ice_hw * hw,struct ice_fltr_info * fi)2416 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
2417 {
2418 fi->lb_en = false;
2419 fi->lan_en = false;
2420 if ((fi->flag & ICE_FLTR_TX) &&
2421 (fi->fltr_act == ICE_FWD_TO_VSI ||
2422 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2423 fi->fltr_act == ICE_FWD_TO_Q ||
2424 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2425 /* Setting LB for prune actions will result in replicated
2426 * packets to the internal switch that will be dropped.
2427 */
2428 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
2429 fi->lb_en = true;
2430
2431 /* Set lan_en to TRUE if
2432 * 1. The switch is a VEB AND
2433 * 2
2434 * 2.1 The lookup is a directional lookup like ethertype,
2435 * promiscuous, ethertype-MAC, promiscuous-VLAN
2436 * and default-port OR
2437 * 2.2 The lookup is VLAN, OR
2438 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
2439 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
2440 *
2441 * OR
2442 *
2443 * The switch is a VEPA.
2444 *
2445 * In all other cases, the LAN enable has to be set to false.
2446 */
2447 if (hw->evb_veb) {
2448 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2449 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
2450 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2451 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2452 fi->lkup_type == ICE_SW_LKUP_DFLT ||
2453 fi->lkup_type == ICE_SW_LKUP_VLAN ||
2454 (fi->lkup_type == ICE_SW_LKUP_MAC &&
2455 !is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
2456 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
2457 !is_unicast_ether_addr(fi->l_data.mac.mac_addr)))
2458 fi->lan_en = true;
2459 } else {
2460 fi->lan_en = true;
2461 }
2462 }
2463 }
2464
2465 /**
2466 * ice_fill_eth_hdr - helper to copy dummy_eth_hdr into supplied buffer
2467 * @eth_hdr: pointer to buffer to populate
2468 */
ice_fill_eth_hdr(u8 * eth_hdr)2469 void ice_fill_eth_hdr(u8 *eth_hdr)
2470 {
2471 memcpy(eth_hdr, dummy_eth_header, DUMMY_ETH_HDR_LEN);
2472 }
2473
/**
 * ice_fill_sw_rule - Helper function to fill switch rule structure
 * @hw: pointer to the hardware structure
 * @f_info: entry containing packet forwarding information
 * @s_rule: switch rule structure to be filled in based on mac_entry
 * @opc: switch rules population command type - pass in the command opcode
 *
 * Builds the action word and the dummy Ethernet header of a lookup Rx/Tx
 * switch rule from @f_info. For the remove opcode only the rule index is
 * needed and no header or action data is written.
 */
static void
ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
		 struct ice_sw_rule_lkup_rx_tx *s_rule,
		 enum ice_adminq_opc opc)
{
	/* a vlan_id above ICE_MAX_VLAN_ID means "no VLAN to program" */
	u16 vlan_id = ICE_MAX_VLAN_ID + 1;
	u16 vlan_tpid = ETH_P_8021Q;
	void *daddr = NULL;
	u16 eth_hdr_sz;
	u8 *eth_hdr;
	u32 act = 0;
	__be16 *off;
	u8 q_rgn;

	/* rule removal only needs the existing FW rule ID */
	if (opc == ice_aqc_opc_remove_sw_rules) {
		s_rule->act = 0;
		s_rule->index = cpu_to_le16(f_info->fltr_rule_id);
		s_rule->hdr_len = 0;
		return;
	}

	eth_hdr_sz = sizeof(dummy_eth_header);
	eth_hdr = s_rule->hdr_data;

	/* initialize the ether header with a dummy header */
	memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);
	ice_fill_sw_info(hw, f_info);

	/* encode the forwarding behavior into the action word; an
	 * unrecognized action leaves the rule unfilled
	 */
	switch (f_info->fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
			ICE_SINGLE_ACT_VSI_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_VSI_LIST:
		act |= ICE_SINGLE_ACT_VSI_LIST;
		act |= (f_info->fwd_id.vsi_list_id <<
			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
			ICE_SINGLE_ACT_VSI_LIST_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
			ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_QGRP:
		/* queue region size is expressed as log2 of the group size */
		q_rgn = f_info->qgrp_size > 0 ?
			(u8)ilog2(f_info->qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
			ICE_SINGLE_ACT_Q_REGION_M;
		break;
	default:
		return;
	}

	if (f_info->lb_en)
		act |= ICE_SINGLE_ACT_LB_ENABLE;
	if (f_info->lan_en)
		act |= ICE_SINGLE_ACT_LAN_ENABLE;

	/* extract lookup-specific data (dest MAC, VLAN ID/TPID, ethertype)
	 * that gets patched into the dummy header below
	 */
	switch (f_info->lkup_type) {
	case ICE_SW_LKUP_MAC:
		daddr = f_info->l_data.mac.mac_addr;
		break;
	case ICE_SW_LKUP_VLAN:
		vlan_id = f_info->l_data.vlan.vlan_id;
		if (f_info->l_data.vlan.tpid_valid)
			vlan_tpid = f_info->l_data.vlan.tpid;
		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			act |= ICE_SINGLE_ACT_PRUNE;
			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
		}
		break;
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		daddr = f_info->l_data.ethertype_mac.mac_addr;
		fallthrough;
	case ICE_SW_LKUP_ETHERTYPE:
		off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		break;
	case ICE_SW_LKUP_PROMISC_VLAN:
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		fallthrough;
	case ICE_SW_LKUP_PROMISC:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		break;
	default:
		break;
	}

	s_rule->hdr.type = (f_info->flag & ICE_FLTR_RX) ?
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);

	/* Recipe set depending on lookup type */
	s_rule->recipe_id = cpu_to_le16(f_info->lkup_type);
	s_rule->src = cpu_to_le16(f_info->src);
	s_rule->act = cpu_to_le32(act);

	if (daddr)
		ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);

	/* program VLAN TCI and TPID only when a valid VLAN ID was set above */
	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
		off = (__force __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
		*off = cpu_to_be16(vlan_id);
		off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = cpu_to_be16(vlan_tpid);
	}

	/* Create the switch rule with the final dummy Ethernet header */
	if (opc != ice_aqc_opc_update_sw_rules)
		s_rule->hdr_len = cpu_to_le16(eth_hdr_sz);
}
2611
/**
 * ice_add_marker_act
 * @hw: pointer to the hardware structure
 * @m_ent: the management entry for which sw marker needs to be added
 * @sw_marker: sw marker to tag the Rx descriptor with
 * @l_id: large action resource ID
 *
 * Create a large action to hold software marker and update the switch rule
 * entry pointed by m_ent with newly created large action
 *
 * Return: 0 on success, -EINVAL for a non-MAC lookup entry, -ENOMEM on
 * allocation failure, or the ice_aq_sw_rules() status.
 */
static int
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		   u16 sw_marker, u16 l_id)
{
	struct ice_sw_rule_lkup_rx_tx *rx_tx;
	struct ice_sw_rule_lg_act *lg_act;
	/* For software marker we need 3 large actions
	 * 1. FWD action: FWD TO VSI or VSI LIST
	 * 2. GENERIC VALUE action to hold the profile ID
	 * 3. GENERIC VALUE action to hold the software marker ID
	 */
	const u16 num_lg_acts = 3;
	u16 lg_act_size;
	u16 rules_size;
	int status;
	u32 act;
	u16 id;

	/* only MAC lookup entries are supported here */
	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return -EINVAL;

	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	 * 1. Large Action
	 * 2. Look up Tx Rx
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(lg_act, num_lg_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(rx_tx);
	lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
	if (!lg_act)
		return -ENOMEM;

	/* the lookup rule lives immediately after the large action in the
	 * same buffer
	 */
	rx_tx = (typeof(rx_tx))((u8 *)lg_act + lg_act_size);

	/* Fill in the first switch rule i.e. large action */
	lg_act->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->index = cpu_to_le16(l_id);
	lg_act->size = cpu_to_le16(num_lg_acts);

	/* First action VSI forwarding or VSI list forwarding depending on how
	 * many VSIs
	 */
	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;

	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->act[0] = cpu_to_le32(act);

	/* Second action descriptor type */
	act = ICE_LG_ACT_GENERIC;

	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->act[1] = cpu_to_le32(act);

	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;

	/* Third action Marker value */
	act |= ICE_LG_ACT_GENERIC;
	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
		ICE_LG_ACT_GENERIC_VALUE_M;

	lg_act->act[2] = cpu_to_le32(act);

	/* call the fill switch rule to fill the lookup Tx Rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);

	/* Update the action to point to the large action ID */
	rx_tx->act = cpu_to_le32(ICE_SINGLE_ACT_PTR |
				 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
				  ICE_SINGLE_ACT_PTR_VAL_M));

	/* Use the filter rule ID of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 * action
	 */
	rx_tx->index = cpu_to_le16(m_ent->fltr_info.fltr_rule_id);

	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	if (!status) {
		/* record the marker assignment in the management entry */
		m_ent->lg_act_idx = l_id;
		m_ent->sw_marker_id = sw_marker;
	}

	devm_kfree(ice_hw_to_dev(hw), lg_act);
	return status;
}
2714
2715 /**
2716 * ice_create_vsi_list_map
2717 * @hw: pointer to the hardware structure
2718 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2719 * @num_vsi: number of VSI handles in the array
2720 * @vsi_list_id: VSI list ID generated as part of allocate resource
2721 *
2722 * Helper function to create a new entry of VSI list ID to VSI mapping
2723 * using the given VSI list ID
2724 */
2725 static struct ice_vsi_list_map_info *
ice_create_vsi_list_map(struct ice_hw * hw,u16 * vsi_handle_arr,u16 num_vsi,u16 vsi_list_id)2726 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2727 u16 vsi_list_id)
2728 {
2729 struct ice_switch_info *sw = hw->switch_info;
2730 struct ice_vsi_list_map_info *v_map;
2731 int i;
2732
2733 v_map = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*v_map), GFP_KERNEL);
2734 if (!v_map)
2735 return NULL;
2736
2737 v_map->vsi_list_id = vsi_list_id;
2738 v_map->ref_cnt = 1;
2739 for (i = 0; i < num_vsi; i++)
2740 set_bit(vsi_handle_arr[i], v_map->vsi_map);
2741
2742 list_add(&v_map->list_entry, &sw->vsi_list_map_head);
2743 return v_map;
2744 }
2745
2746 /**
2747 * ice_update_vsi_list_rule
2748 * @hw: pointer to the hardware structure
2749 * @vsi_handle_arr: array of VSI handles to form a VSI list
2750 * @num_vsi: number of VSI handles in the array
2751 * @vsi_list_id: VSI list ID generated as part of allocate resource
2752 * @remove: Boolean value to indicate if this is a remove action
2753 * @opc: switch rules population command type - pass in the command opcode
2754 * @lkup_type: lookup type of the filter
2755 *
2756 * Call AQ command to add a new switch rule or update existing switch rule
2757 * using the given VSI list ID
2758 */
2759 static int
ice_update_vsi_list_rule(struct ice_hw * hw,u16 * vsi_handle_arr,u16 num_vsi,u16 vsi_list_id,bool remove,enum ice_adminq_opc opc,enum ice_sw_lkup_type lkup_type)2760 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2761 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2762 enum ice_sw_lkup_type lkup_type)
2763 {
2764 struct ice_sw_rule_vsi_list *s_rule;
2765 u16 s_rule_size;
2766 u16 rule_type;
2767 int status;
2768 int i;
2769
2770 if (!num_vsi)
2771 return -EINVAL;
2772
2773 if (lkup_type == ICE_SW_LKUP_MAC ||
2774 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2775 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2776 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2777 lkup_type == ICE_SW_LKUP_PROMISC ||
2778 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2779 lkup_type == ICE_SW_LKUP_DFLT ||
2780 lkup_type == ICE_SW_LKUP_LAST)
2781 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2782 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2783 else if (lkup_type == ICE_SW_LKUP_VLAN)
2784 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2785 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2786 else
2787 return -EINVAL;
2788
2789 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, num_vsi);
2790 s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
2791 if (!s_rule)
2792 return -ENOMEM;
2793 for (i = 0; i < num_vsi; i++) {
2794 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2795 status = -EINVAL;
2796 goto exit;
2797 }
2798 /* AQ call requires hw_vsi_id(s) */
2799 s_rule->vsi[i] =
2800 cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2801 }
2802
2803 s_rule->hdr.type = cpu_to_le16(rule_type);
2804 s_rule->number_vsi = cpu_to_le16(num_vsi);
2805 s_rule->index = cpu_to_le16(vsi_list_id);
2806
2807 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2808
2809 exit:
2810 devm_kfree(ice_hw_to_dev(hw), s_rule);
2811 return status;
2812 }
2813
2814 /**
2815 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2816 * @hw: pointer to the HW struct
2817 * @vsi_handle_arr: array of VSI handles to form a VSI list
2818 * @num_vsi: number of VSI handles in the array
2819 * @vsi_list_id: stores the ID of the VSI list to be created
2820 * @lkup_type: switch rule filter's lookup type
2821 */
2822 static int
ice_create_vsi_list_rule(struct ice_hw * hw,u16 * vsi_handle_arr,u16 num_vsi,u16 * vsi_list_id,enum ice_sw_lkup_type lkup_type)2823 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2824 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2825 {
2826 int status;
2827
2828 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2829 ice_aqc_opc_alloc_res);
2830 if (status)
2831 return status;
2832
2833 /* Update the newly created VSI list to include the specified VSIs */
2834 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2835 *vsi_list_id, false,
2836 ice_aqc_opc_add_sw_rules, lkup_type);
2837 }
2838
2839 /**
2840 * ice_create_pkt_fwd_rule
2841 * @hw: pointer to the hardware structure
2842 * @f_entry: entry containing packet forwarding information
2843 *
2844 * Create switch rule with given filter information and add an entry
2845 * to the corresponding filter management list to track this switch rule
2846 * and VSI mapping
2847 */
2848 static int
ice_create_pkt_fwd_rule(struct ice_hw * hw,struct ice_fltr_list_entry * f_entry)2849 ice_create_pkt_fwd_rule(struct ice_hw *hw,
2850 struct ice_fltr_list_entry *f_entry)
2851 {
2852 struct ice_fltr_mgmt_list_entry *fm_entry;
2853 struct ice_sw_rule_lkup_rx_tx *s_rule;
2854 enum ice_sw_lkup_type l_type;
2855 struct ice_sw_recipe *recp;
2856 int status;
2857
2858 s_rule = devm_kzalloc(ice_hw_to_dev(hw),
2859 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule),
2860 GFP_KERNEL);
2861 if (!s_rule)
2862 return -ENOMEM;
2863 fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
2864 GFP_KERNEL);
2865 if (!fm_entry) {
2866 status = -ENOMEM;
2867 goto ice_create_pkt_fwd_rule_exit;
2868 }
2869
2870 fm_entry->fltr_info = f_entry->fltr_info;
2871
2872 /* Initialize all the fields for the management entry */
2873 fm_entry->vsi_count = 1;
2874 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2875 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2876 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2877
2878 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2879 ice_aqc_opc_add_sw_rules);
2880
2881 status = ice_aq_sw_rules(hw, s_rule,
2882 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), 1,
2883 ice_aqc_opc_add_sw_rules, NULL);
2884 if (status) {
2885 devm_kfree(ice_hw_to_dev(hw), fm_entry);
2886 goto ice_create_pkt_fwd_rule_exit;
2887 }
2888
2889 f_entry->fltr_info.fltr_rule_id = le16_to_cpu(s_rule->index);
2890 fm_entry->fltr_info.fltr_rule_id = le16_to_cpu(s_rule->index);
2891
2892 /* The book keeping entries will get removed when base driver
2893 * calls remove filter AQ command
2894 */
2895 l_type = fm_entry->fltr_info.lkup_type;
2896 recp = &hw->switch_info->recp_list[l_type];
2897 list_add(&fm_entry->list_entry, &recp->filt_rules);
2898
2899 ice_create_pkt_fwd_rule_exit:
2900 devm_kfree(ice_hw_to_dev(hw), s_rule);
2901 return status;
2902 }
2903
2904 /**
2905 * ice_update_pkt_fwd_rule
2906 * @hw: pointer to the hardware structure
2907 * @f_info: filter information for switch rule
2908 *
2909 * Call AQ command to update a previously created switch rule with a
2910 * VSI list ID
2911 */
2912 static int
ice_update_pkt_fwd_rule(struct ice_hw * hw,struct ice_fltr_info * f_info)2913 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2914 {
2915 struct ice_sw_rule_lkup_rx_tx *s_rule;
2916 int status;
2917
2918 s_rule = devm_kzalloc(ice_hw_to_dev(hw),
2919 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule),
2920 GFP_KERNEL);
2921 if (!s_rule)
2922 return -ENOMEM;
2923
2924 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
2925
2926 s_rule->index = cpu_to_le16(f_info->fltr_rule_id);
2927
2928 /* Update switch rule with new rule set to forward VSI list */
2929 status = ice_aq_sw_rules(hw, s_rule,
2930 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), 1,
2931 ice_aqc_opc_update_sw_rules, NULL);
2932
2933 devm_kfree(ice_hw_to_dev(hw), s_rule);
2934 return status;
2935 }
2936
2937 /**
2938 * ice_update_sw_rule_bridge_mode
2939 * @hw: pointer to the HW struct
2940 *
2941 * Updates unicast switch filter rules based on VEB/VEPA mode
2942 */
ice_update_sw_rule_bridge_mode(struct ice_hw * hw)2943 int ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2944 {
2945 struct ice_switch_info *sw = hw->switch_info;
2946 struct ice_fltr_mgmt_list_entry *fm_entry;
2947 struct list_head *rule_head;
2948 struct mutex *rule_lock; /* Lock to protect filter rule list */
2949 int status = 0;
2950
2951 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2952 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2953
2954 mutex_lock(rule_lock);
2955 list_for_each_entry(fm_entry, rule_head, list_entry) {
2956 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2957 u8 *addr = fi->l_data.mac.mac_addr;
2958
2959 /* Update unicast Tx rules to reflect the selected
2960 * VEB/VEPA mode
2961 */
2962 if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) &&
2963 (fi->fltr_act == ICE_FWD_TO_VSI ||
2964 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2965 fi->fltr_act == ICE_FWD_TO_Q ||
2966 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2967 status = ice_update_pkt_fwd_rule(hw, fi);
2968 if (status)
2969 break;
2970 }
2971 }
2972
2973 mutex_unlock(rule_lock);
2974
2975 return status;
2976 }
2977
2978 /**
2979 * ice_add_update_vsi_list
2980 * @hw: pointer to the hardware structure
2981 * @m_entry: pointer to current filter management list entry
2982 * @cur_fltr: filter information from the book keeping entry
2983 * @new_fltr: filter information with the new VSI to be added
2984 *
2985 * Call AQ command to add or update previously created VSI list with new VSI.
2986 *
2987 * Helper function to do book keeping associated with adding filter information
2988 * The algorithm to do the book keeping is described below :
2989 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2990 * if only one VSI has been added till now
2991 * Allocate a new VSI list and add two VSIs
2992 * to this list using switch rule command
2993 * Update the previously created switch rule with the
2994 * newly created VSI list ID
2995 * if a VSI list was previously created
2996 * Add the new VSI to the previously created VSI list set
2997 * using the update switch rule command
2998 */
2999 static int
ice_add_update_vsi_list(struct ice_hw * hw,struct ice_fltr_mgmt_list_entry * m_entry,struct ice_fltr_info * cur_fltr,struct ice_fltr_info * new_fltr)3000 ice_add_update_vsi_list(struct ice_hw *hw,
3001 struct ice_fltr_mgmt_list_entry *m_entry,
3002 struct ice_fltr_info *cur_fltr,
3003 struct ice_fltr_info *new_fltr)
3004 {
3005 u16 vsi_list_id = 0;
3006 int status = 0;
3007
3008 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
3009 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
3010 return -EOPNOTSUPP;
3011
3012 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
3013 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
3014 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
3015 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
3016 return -EOPNOTSUPP;
3017
3018 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
3019 /* Only one entry existed in the mapping and it was not already
3020 * a part of a VSI list. So, create a VSI list with the old and
3021 * new VSIs.
3022 */
3023 struct ice_fltr_info tmp_fltr;
3024 u16 vsi_handle_arr[2];
3025
3026 /* A rule already exists with the new VSI being added */
3027 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
3028 return -EEXIST;
3029
3030 vsi_handle_arr[0] = cur_fltr->vsi_handle;
3031 vsi_handle_arr[1] = new_fltr->vsi_handle;
3032 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3033 &vsi_list_id,
3034 new_fltr->lkup_type);
3035 if (status)
3036 return status;
3037
3038 tmp_fltr = *new_fltr;
3039 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
3040 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3041 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3042 /* Update the previous switch rule of "MAC forward to VSI" to
3043 * "MAC fwd to VSI list"
3044 */
3045 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3046 if (status)
3047 return status;
3048
3049 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
3050 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3051 m_entry->vsi_list_info =
3052 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3053 vsi_list_id);
3054
3055 if (!m_entry->vsi_list_info)
3056 return -ENOMEM;
3057
3058 /* If this entry was large action then the large action needs
3059 * to be updated to point to FWD to VSI list
3060 */
3061 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
3062 status =
3063 ice_add_marker_act(hw, m_entry,
3064 m_entry->sw_marker_id,
3065 m_entry->lg_act_idx);
3066 } else {
3067 u16 vsi_handle = new_fltr->vsi_handle;
3068 enum ice_adminq_opc opcode;
3069
3070 if (!m_entry->vsi_list_info)
3071 return -EIO;
3072
3073 /* A rule already exists with the new VSI being added */
3074 if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
3075 return -EEXIST;
3076
3077 /* Update the previously created VSI list set with
3078 * the new VSI ID passed in
3079 */
3080 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
3081 opcode = ice_aqc_opc_update_sw_rules;
3082
3083 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
3084 vsi_list_id, false, opcode,
3085 new_fltr->lkup_type);
3086 /* update VSI list mapping info with new VSI ID */
3087 if (!status)
3088 set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
3089 }
3090 if (!status)
3091 m_entry->vsi_count++;
3092 return status;
3093 }
3094
3095 /**
3096 * ice_find_rule_entry - Search a rule entry
3097 * @hw: pointer to the hardware structure
3098 * @recp_id: lookup type for which the specified rule needs to be searched
3099 * @f_info: rule information
3100 *
3101 * Helper function to search for a given rule entry
3102 * Returns pointer to entry storing the rule if found
3103 */
3104 static struct ice_fltr_mgmt_list_entry *
ice_find_rule_entry(struct ice_hw * hw,u8 recp_id,struct ice_fltr_info * f_info)3105 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
3106 {
3107 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
3108 struct ice_switch_info *sw = hw->switch_info;
3109 struct list_head *list_head;
3110
3111 list_head = &sw->recp_list[recp_id].filt_rules;
3112 list_for_each_entry(list_itr, list_head, list_entry) {
3113 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3114 sizeof(f_info->l_data)) &&
3115 f_info->flag == list_itr->fltr_info.flag) {
3116 ret = list_itr;
3117 break;
3118 }
3119 }
3120 return ret;
3121 }
3122
3123 /**
3124 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
3125 * @hw: pointer to the hardware structure
3126 * @recp_id: lookup type for which VSI lists needs to be searched
3127 * @vsi_handle: VSI handle to be found in VSI list
3128 * @vsi_list_id: VSI list ID found containing vsi_handle
3129 *
3130 * Helper function to search a VSI list with single entry containing given VSI
3131 * handle element. This can be extended further to search VSI list with more
3132 * than 1 vsi_count. Returns pointer to VSI list entry if found.
3133 */
3134 struct ice_vsi_list_map_info *
ice_find_vsi_list_entry(struct ice_hw * hw,u8 recp_id,u16 vsi_handle,u16 * vsi_list_id)3135 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
3136 u16 *vsi_list_id)
3137 {
3138 struct ice_vsi_list_map_info *map_info = NULL;
3139 struct ice_switch_info *sw = hw->switch_info;
3140 struct ice_fltr_mgmt_list_entry *list_itr;
3141 struct list_head *list_head;
3142
3143 list_head = &sw->recp_list[recp_id].filt_rules;
3144 list_for_each_entry(list_itr, list_head, list_entry) {
3145 if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
3146 map_info = list_itr->vsi_list_info;
3147 if (test_bit(vsi_handle, map_info->vsi_map)) {
3148 *vsi_list_id = map_info->vsi_list_id;
3149 return map_info;
3150 }
3151 }
3152 }
3153 return NULL;
3154 }
3155
3156 /**
3157 * ice_add_rule_internal - add rule for a given lookup type
3158 * @hw: pointer to the hardware structure
3159 * @recp_id: lookup type (recipe ID) for which rule has to be added
3160 * @f_entry: structure containing MAC forwarding information
3161 *
3162 * Adds or updates the rule lists for a given recipe
3163 */
3164 static int
ice_add_rule_internal(struct ice_hw * hw,u8 recp_id,struct ice_fltr_list_entry * f_entry)3165 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
3166 struct ice_fltr_list_entry *f_entry)
3167 {
3168 struct ice_switch_info *sw = hw->switch_info;
3169 struct ice_fltr_info *new_fltr, *cur_fltr;
3170 struct ice_fltr_mgmt_list_entry *m_entry;
3171 struct mutex *rule_lock; /* Lock to protect filter rule list */
3172 int status = 0;
3173
3174 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3175 return -EINVAL;
3176 f_entry->fltr_info.fwd_id.hw_vsi_id =
3177 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3178
3179 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
3180
3181 mutex_lock(rule_lock);
3182 new_fltr = &f_entry->fltr_info;
3183 if (new_fltr->flag & ICE_FLTR_RX)
3184 new_fltr->src = hw->port_info->lport;
3185 else if (new_fltr->flag & ICE_FLTR_TX)
3186 new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id;
3187
3188 m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
3189 if (!m_entry) {
3190 mutex_unlock(rule_lock);
3191 return ice_create_pkt_fwd_rule(hw, f_entry);
3192 }
3193
3194 cur_fltr = &m_entry->fltr_info;
3195 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
3196 mutex_unlock(rule_lock);
3197
3198 return status;
3199 }
3200
3201 /**
3202 * ice_remove_vsi_list_rule
3203 * @hw: pointer to the hardware structure
3204 * @vsi_list_id: VSI list ID generated as part of allocate resource
3205 * @lkup_type: switch rule filter lookup type
3206 *
3207 * The VSI list should be emptied before this function is called to remove the
3208 * VSI list.
3209 */
3210 static int
ice_remove_vsi_list_rule(struct ice_hw * hw,u16 vsi_list_id,enum ice_sw_lkup_type lkup_type)3211 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
3212 enum ice_sw_lkup_type lkup_type)
3213 {
3214 struct ice_sw_rule_vsi_list *s_rule;
3215 u16 s_rule_size;
3216 int status;
3217
3218 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, 0);
3219 s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
3220 if (!s_rule)
3221 return -ENOMEM;
3222
3223 s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
3224 s_rule->index = cpu_to_le16(vsi_list_id);
3225
3226 /* Free the vsi_list resource that we allocated. It is assumed that the
3227 * list is empty at this point.
3228 */
3229 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
3230 ice_aqc_opc_free_res);
3231
3232 devm_kfree(ice_hw_to_dev(hw), s_rule);
3233 return status;
3234 }
3235
3236 /**
3237 * ice_rem_update_vsi_list
3238 * @hw: pointer to the hardware structure
3239 * @vsi_handle: VSI handle of the VSI to remove
3240 * @fm_list: filter management entry for which the VSI list management needs to
3241 * be done
3242 */
3243 static int
ice_rem_update_vsi_list(struct ice_hw * hw,u16 vsi_handle,struct ice_fltr_mgmt_list_entry * fm_list)3244 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
3245 struct ice_fltr_mgmt_list_entry *fm_list)
3246 {
3247 enum ice_sw_lkup_type lkup_type;
3248 u16 vsi_list_id;
3249 int status = 0;
3250
3251 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
3252 fm_list->vsi_count == 0)
3253 return -EINVAL;
3254
3255 /* A rule with the VSI being removed does not exist */
3256 if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
3257 return -ENOENT;
3258
3259 lkup_type = fm_list->fltr_info.lkup_type;
3260 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
3261 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
3262 ice_aqc_opc_update_sw_rules,
3263 lkup_type);
3264 if (status)
3265 return status;
3266
3267 fm_list->vsi_count--;
3268 clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
3269
3270 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
3271 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
3272 struct ice_vsi_list_map_info *vsi_list_info =
3273 fm_list->vsi_list_info;
3274 u16 rem_vsi_handle;
3275
3276 rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
3277 ICE_MAX_VSI);
3278 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
3279 return -EIO;
3280
3281 /* Make sure VSI list is empty before removing it below */
3282 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
3283 vsi_list_id, true,
3284 ice_aqc_opc_update_sw_rules,
3285 lkup_type);
3286 if (status)
3287 return status;
3288
3289 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
3290 tmp_fltr_info.fwd_id.hw_vsi_id =
3291 ice_get_hw_vsi_num(hw, rem_vsi_handle);
3292 tmp_fltr_info.vsi_handle = rem_vsi_handle;
3293 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
3294 if (status) {
3295 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
3296 tmp_fltr_info.fwd_id.hw_vsi_id, status);
3297 return status;
3298 }
3299
3300 fm_list->fltr_info = tmp_fltr_info;
3301 }
3302
3303 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
3304 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
3305 struct ice_vsi_list_map_info *vsi_list_info =
3306 fm_list->vsi_list_info;
3307
3308 /* Remove the VSI list since it is no longer used */
3309 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
3310 if (status) {
3311 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
3312 vsi_list_id, status);
3313 return status;
3314 }
3315
3316 list_del(&vsi_list_info->list_entry);
3317 devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
3318 fm_list->vsi_list_info = NULL;
3319 }
3320
3321 return status;
3322 }
3323
3324 /**
3325 * ice_remove_rule_internal - Remove a filter rule of a given type
3326 * @hw: pointer to the hardware structure
3327 * @recp_id: recipe ID for which the rule needs to removed
3328 * @f_entry: rule entry containing filter information
3329 */
3330 static int
ice_remove_rule_internal(struct ice_hw * hw,u8 recp_id,struct ice_fltr_list_entry * f_entry)3331 ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
3332 struct ice_fltr_list_entry *f_entry)
3333 {
3334 struct ice_switch_info *sw = hw->switch_info;
3335 struct ice_fltr_mgmt_list_entry *list_elem;
3336 struct mutex *rule_lock; /* Lock to protect filter rule list */
3337 bool remove_rule = false;
3338 u16 vsi_handle;
3339 int status = 0;
3340
3341 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3342 return -EINVAL;
3343 f_entry->fltr_info.fwd_id.hw_vsi_id =
3344 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3345
3346 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
3347 mutex_lock(rule_lock);
3348 list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
3349 if (!list_elem) {
3350 status = -ENOENT;
3351 goto exit;
3352 }
3353
3354 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
3355 remove_rule = true;
3356 } else if (!list_elem->vsi_list_info) {
3357 status = -ENOENT;
3358 goto exit;
3359 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
3360 /* a ref_cnt > 1 indicates that the vsi_list is being
3361 * shared by multiple rules. Decrement the ref_cnt and
3362 * remove this rule, but do not modify the list, as it
3363 * is in-use by other rules.
3364 */
3365 list_elem->vsi_list_info->ref_cnt--;
3366 remove_rule = true;
3367 } else {
3368 /* a ref_cnt of 1 indicates the vsi_list is only used
3369 * by one rule. However, the original removal request is only
3370 * for a single VSI. Update the vsi_list first, and only
3371 * remove the rule if there are no further VSIs in this list.
3372 */
3373 vsi_handle = f_entry->fltr_info.vsi_handle;
3374 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
3375 if (status)
3376 goto exit;
3377 /* if VSI count goes to zero after updating the VSI list */
3378 if (list_elem->vsi_count == 0)
3379 remove_rule = true;
3380 }
3381
3382 if (remove_rule) {
3383 /* Remove the lookup rule */
3384 struct ice_sw_rule_lkup_rx_tx *s_rule;
3385
3386 s_rule = devm_kzalloc(ice_hw_to_dev(hw),
3387 ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule),
3388 GFP_KERNEL);
3389 if (!s_rule) {
3390 status = -ENOMEM;
3391 goto exit;
3392 }
3393
3394 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
3395 ice_aqc_opc_remove_sw_rules);
3396
3397 status = ice_aq_sw_rules(hw, s_rule,
3398 ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule),
3399 1, ice_aqc_opc_remove_sw_rules, NULL);
3400
3401 /* Remove a book keeping from the list */
3402 devm_kfree(ice_hw_to_dev(hw), s_rule);
3403
3404 if (status)
3405 goto exit;
3406
3407 list_del(&list_elem->list_entry);
3408 devm_kfree(ice_hw_to_dev(hw), list_elem);
3409 }
3410 exit:
3411 mutex_unlock(rule_lock);
3412 return status;
3413 }
3414
3415 /**
3416 * ice_vlan_fltr_exist - does this VLAN filter exist for given VSI
3417 * @hw: pointer to the hardware structure
3418 * @vlan_id: VLAN ID
3419 * @vsi_handle: check MAC filter for this VSI
3420 */
ice_vlan_fltr_exist(struct ice_hw * hw,u16 vlan_id,u16 vsi_handle)3421 bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle)
3422 {
3423 struct ice_fltr_mgmt_list_entry *entry;
3424 struct list_head *rule_head;
3425 struct ice_switch_info *sw;
3426 struct mutex *rule_lock; /* Lock to protect filter rule list */
3427 u16 hw_vsi_id;
3428
3429 if (vlan_id > ICE_MAX_VLAN_ID)
3430 return false;
3431
3432 if (!ice_is_vsi_valid(hw, vsi_handle))
3433 return false;
3434
3435 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3436 sw = hw->switch_info;
3437 rule_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
3438 if (!rule_head)
3439 return false;
3440
3441 rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3442 mutex_lock(rule_lock);
3443 list_for_each_entry(entry, rule_head, list_entry) {
3444 struct ice_fltr_info *f_info = &entry->fltr_info;
3445 u16 entry_vlan_id = f_info->l_data.vlan.vlan_id;
3446 struct ice_vsi_list_map_info *map_info;
3447
3448 if (entry_vlan_id > ICE_MAX_VLAN_ID)
3449 continue;
3450
3451 if (f_info->flag != ICE_FLTR_TX ||
3452 f_info->src_id != ICE_SRC_ID_VSI ||
3453 f_info->lkup_type != ICE_SW_LKUP_VLAN)
3454 continue;
3455
3456 /* Only allowed filter action are FWD_TO_VSI/_VSI_LIST */
3457 if (f_info->fltr_act != ICE_FWD_TO_VSI &&
3458 f_info->fltr_act != ICE_FWD_TO_VSI_LIST)
3459 continue;
3460
3461 if (f_info->fltr_act == ICE_FWD_TO_VSI) {
3462 if (hw_vsi_id != f_info->fwd_id.hw_vsi_id)
3463 continue;
3464 } else if (f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
3465 /* If filter_action is FWD_TO_VSI_LIST, make sure
3466 * that VSI being checked is part of VSI list
3467 */
3468 if (entry->vsi_count == 1 &&
3469 entry->vsi_list_info) {
3470 map_info = entry->vsi_list_info;
3471 if (!test_bit(vsi_handle, map_info->vsi_map))
3472 continue;
3473 }
3474 }
3475
3476 if (vlan_id == entry_vlan_id) {
3477 mutex_unlock(rule_lock);
3478 return true;
3479 }
3480 }
3481 mutex_unlock(rule_lock);
3482
3483 return false;
3484 }
3485
3486 /**
3487 * ice_add_mac - Add a MAC address based filter rule
3488 * @hw: pointer to the hardware structure
3489 * @m_list: list of MAC addresses and forwarding information
3490 */
ice_add_mac(struct ice_hw * hw,struct list_head * m_list)3491 int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
3492 {
3493 struct ice_fltr_list_entry *m_list_itr;
3494 int status = 0;
3495
3496 if (!m_list || !hw)
3497 return -EINVAL;
3498
3499 list_for_each_entry(m_list_itr, m_list, list_entry) {
3500 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3501 u16 vsi_handle;
3502 u16 hw_vsi_id;
3503
3504 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3505 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3506 if (!ice_is_vsi_valid(hw, vsi_handle))
3507 return -EINVAL;
3508 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3509 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3510 /* update the src in case it is VSI num */
3511 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3512 return -EINVAL;
3513 m_list_itr->fltr_info.src = hw_vsi_id;
3514 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3515 is_zero_ether_addr(add))
3516 return -EINVAL;
3517
3518 m_list_itr->status = ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
3519 m_list_itr);
3520 if (m_list_itr->status)
3521 return m_list_itr->status;
3522 }
3523
3524 return status;
3525 }
3526
3527 /**
3528 * ice_add_vlan_internal - Add one VLAN based filter rule
3529 * @hw: pointer to the hardware structure
3530 * @f_entry: filter entry containing one VLAN information
3531 */
3532 static int
ice_add_vlan_internal(struct ice_hw * hw,struct ice_fltr_list_entry * f_entry)3533 ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
3534 {
3535 struct ice_switch_info *sw = hw->switch_info;
3536 struct ice_fltr_mgmt_list_entry *v_list_itr;
3537 struct ice_fltr_info *new_fltr, *cur_fltr;
3538 enum ice_sw_lkup_type lkup_type;
3539 u16 vsi_list_id = 0, vsi_handle;
3540 struct mutex *rule_lock; /* Lock to protect filter rule list */
3541 int status = 0;
3542
3543 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3544 return -EINVAL;
3545
3546 f_entry->fltr_info.fwd_id.hw_vsi_id =
3547 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3548 new_fltr = &f_entry->fltr_info;
3549
3550 /* VLAN ID should only be 12 bits */
3551 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3552 return -EINVAL;
3553
3554 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3555 return -EINVAL;
3556
3557 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3558 lkup_type = new_fltr->lkup_type;
3559 vsi_handle = new_fltr->vsi_handle;
3560 rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3561 mutex_lock(rule_lock);
3562 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
3563 if (!v_list_itr) {
3564 struct ice_vsi_list_map_info *map_info = NULL;
3565
3566 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3567 /* All VLAN pruning rules use a VSI list. Check if
3568 * there is already a VSI list containing VSI that we
3569 * want to add. If found, use the same vsi_list_id for
3570 * this new VLAN rule or else create a new list.
3571 */
3572 map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
3573 vsi_handle,
3574 &vsi_list_id);
3575 if (!map_info) {
3576 status = ice_create_vsi_list_rule(hw,
3577 &vsi_handle,
3578 1,
3579 &vsi_list_id,
3580 lkup_type);
3581 if (status)
3582 goto exit;
3583 }
3584 /* Convert the action to forwarding to a VSI list. */
3585 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3586 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3587 }
3588
3589 status = ice_create_pkt_fwd_rule(hw, f_entry);
3590 if (!status) {
3591 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
3592 new_fltr);
3593 if (!v_list_itr) {
3594 status = -ENOENT;
3595 goto exit;
3596 }
3597 /* reuse VSI list for new rule and increment ref_cnt */
3598 if (map_info) {
3599 v_list_itr->vsi_list_info = map_info;
3600 map_info->ref_cnt++;
3601 } else {
3602 v_list_itr->vsi_list_info =
3603 ice_create_vsi_list_map(hw, &vsi_handle,
3604 1, vsi_list_id);
3605 }
3606 }
3607 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3608 /* Update existing VSI list to add new VSI ID only if it used
3609 * by one VLAN rule.
3610 */
3611 cur_fltr = &v_list_itr->fltr_info;
3612 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3613 new_fltr);
3614 } else {
3615 /* If VLAN rule exists and VSI list being used by this rule is
3616 * referenced by more than 1 VLAN rule. Then create a new VSI
3617 * list appending previous VSI with new VSI and update existing
3618 * VLAN rule to point to new VSI list ID
3619 */
3620 struct ice_fltr_info tmp_fltr;
3621 u16 vsi_handle_arr[2];
3622 u16 cur_handle;
3623
3624 /* Current implementation only supports reusing VSI list with
3625 * one VSI count. We should never hit below condition
3626 */
3627 if (v_list_itr->vsi_count > 1 &&
3628 v_list_itr->vsi_list_info->ref_cnt > 1) {
3629 ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3630 status = -EIO;
3631 goto exit;
3632 }
3633
3634 cur_handle =
3635 find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3636 ICE_MAX_VSI);
3637
3638 /* A rule already exists with the new VSI being added */
3639 if (cur_handle == vsi_handle) {
3640 status = -EEXIST;
3641 goto exit;
3642 }
3643
3644 vsi_handle_arr[0] = cur_handle;
3645 vsi_handle_arr[1] = vsi_handle;
3646 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3647 &vsi_list_id, lkup_type);
3648 if (status)
3649 goto exit;
3650
3651 tmp_fltr = v_list_itr->fltr_info;
3652 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3653 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3654 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3655 /* Update the previous switch rule to a new VSI list which
3656 * includes current VSI that is requested
3657 */
3658 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3659 if (status)
3660 goto exit;
3661
3662 /* before overriding VSI list map info. decrement ref_cnt of
3663 * previous VSI list
3664 */
3665 v_list_itr->vsi_list_info->ref_cnt--;
3666
3667 /* now update to newly created list */
3668 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3669 v_list_itr->vsi_list_info =
3670 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3671 vsi_list_id);
3672 v_list_itr->vsi_count++;
3673 }
3674
3675 exit:
3676 mutex_unlock(rule_lock);
3677 return status;
3678 }
3679
3680 /**
3681 * ice_add_vlan - Add VLAN based filter rule
3682 * @hw: pointer to the hardware structure
3683 * @v_list: list of VLAN entries and forwarding information
3684 */
ice_add_vlan(struct ice_hw * hw,struct list_head * v_list)3685 int ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
3686 {
3687 struct ice_fltr_list_entry *v_list_itr;
3688
3689 if (!v_list || !hw)
3690 return -EINVAL;
3691
3692 list_for_each_entry(v_list_itr, v_list, list_entry) {
3693 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3694 return -EINVAL;
3695 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3696 v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
3697 if (v_list_itr->status)
3698 return v_list_itr->status;
3699 }
3700 return 0;
3701 }
3702
3703 /**
3704 * ice_add_eth_mac - Add ethertype and MAC based filter rule
3705 * @hw: pointer to the hardware structure
3706 * @em_list: list of ether type MAC filter, MAC is optional
3707 *
3708 * This function requires the caller to populate the entries in
3709 * the filter list with the necessary fields (including flags to
3710 * indicate Tx or Rx rules).
3711 */
ice_add_eth_mac(struct ice_hw * hw,struct list_head * em_list)3712 int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
3713 {
3714 struct ice_fltr_list_entry *em_list_itr;
3715
3716 if (!em_list || !hw)
3717 return -EINVAL;
3718
3719 list_for_each_entry(em_list_itr, em_list, list_entry) {
3720 enum ice_sw_lkup_type l_type =
3721 em_list_itr->fltr_info.lkup_type;
3722
3723 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3724 l_type != ICE_SW_LKUP_ETHERTYPE)
3725 return -EINVAL;
3726
3727 em_list_itr->status = ice_add_rule_internal(hw, l_type,
3728 em_list_itr);
3729 if (em_list_itr->status)
3730 return em_list_itr->status;
3731 }
3732 return 0;
3733 }
3734
3735 /**
3736 * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
3737 * @hw: pointer to the hardware structure
3738 * @em_list: list of ethertype or ethertype MAC entries
3739 */
ice_remove_eth_mac(struct ice_hw * hw,struct list_head * em_list)3740 int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
3741 {
3742 struct ice_fltr_list_entry *em_list_itr, *tmp;
3743
3744 if (!em_list || !hw)
3745 return -EINVAL;
3746
3747 list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) {
3748 enum ice_sw_lkup_type l_type =
3749 em_list_itr->fltr_info.lkup_type;
3750
3751 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3752 l_type != ICE_SW_LKUP_ETHERTYPE)
3753 return -EINVAL;
3754
3755 em_list_itr->status = ice_remove_rule_internal(hw, l_type,
3756 em_list_itr);
3757 if (em_list_itr->status)
3758 return em_list_itr->status;
3759 }
3760 return 0;
3761 }
3762
3763 /**
3764 * ice_rem_sw_rule_info
3765 * @hw: pointer to the hardware structure
3766 * @rule_head: pointer to the switch list structure that we want to delete
3767 */
3768 static void
ice_rem_sw_rule_info(struct ice_hw * hw,struct list_head * rule_head)3769 ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
3770 {
3771 if (!list_empty(rule_head)) {
3772 struct ice_fltr_mgmt_list_entry *entry;
3773 struct ice_fltr_mgmt_list_entry *tmp;
3774
3775 list_for_each_entry_safe(entry, tmp, rule_head, list_entry) {
3776 list_del(&entry->list_entry);
3777 devm_kfree(ice_hw_to_dev(hw), entry);
3778 }
3779 }
3780 }
3781
3782 /**
3783 * ice_rem_adv_rule_info
3784 * @hw: pointer to the hardware structure
3785 * @rule_head: pointer to the switch list structure that we want to delete
3786 */
3787 static void
ice_rem_adv_rule_info(struct ice_hw * hw,struct list_head * rule_head)3788 ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
3789 {
3790 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3791 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3792
3793 if (list_empty(rule_head))
3794 return;
3795
3796 list_for_each_entry_safe(lst_itr, tmp_entry, rule_head, list_entry) {
3797 list_del(&lst_itr->list_entry);
3798 devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
3799 devm_kfree(ice_hw_to_dev(hw), lst_itr);
3800 }
3801 }
3802
3803 /**
3804 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3805 * @pi: pointer to the port_info structure
3806 * @vsi_handle: VSI handle to set as default
3807 * @set: true to add the above mentioned switch rule, false to remove it
3808 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3809 *
3810 * add filter rule to set/unset given VSI as default VSI for the switch
3811 * (represented by swid)
3812 */
3813 int
ice_cfg_dflt_vsi(struct ice_port_info * pi,u16 vsi_handle,bool set,u8 direction)3814 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3815 u8 direction)
3816 {
3817 struct ice_fltr_list_entry f_list_entry;
3818 struct ice_fltr_info f_info;
3819 struct ice_hw *hw = pi->hw;
3820 u16 hw_vsi_id;
3821 int status;
3822
3823 if (!ice_is_vsi_valid(hw, vsi_handle))
3824 return -EINVAL;
3825
3826 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3827
3828 memset(&f_info, 0, sizeof(f_info));
3829
3830 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3831 f_info.flag = direction;
3832 f_info.fltr_act = ICE_FWD_TO_VSI;
3833 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
3834 f_info.vsi_handle = vsi_handle;
3835
3836 if (f_info.flag & ICE_FLTR_RX) {
3837 f_info.src = hw->port_info->lport;
3838 f_info.src_id = ICE_SRC_ID_LPORT;
3839 } else if (f_info.flag & ICE_FLTR_TX) {
3840 f_info.src_id = ICE_SRC_ID_VSI;
3841 f_info.src = hw_vsi_id;
3842 }
3843 f_list_entry.fltr_info = f_info;
3844
3845 if (set)
3846 status = ice_add_rule_internal(hw, ICE_SW_LKUP_DFLT,
3847 &f_list_entry);
3848 else
3849 status = ice_remove_rule_internal(hw, ICE_SW_LKUP_DFLT,
3850 &f_list_entry);
3851
3852 return status;
3853 }
3854
3855 /**
3856 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3857 * @fm_entry: filter entry to inspect
3858 * @vsi_handle: VSI handle to compare with filter info
3859 */
3860 static bool
ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry * fm_entry,u16 vsi_handle)3861 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3862 {
3863 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3864 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3865 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3866 fm_entry->vsi_list_info &&
3867 (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
3868 }
3869
3870 /**
3871 * ice_check_if_dflt_vsi - check if VSI is default VSI
3872 * @pi: pointer to the port_info structure
3873 * @vsi_handle: vsi handle to check for in filter list
3874 * @rule_exists: indicates if there are any VSI's in the rule list
3875 *
3876 * checks if the VSI is in a default VSI list, and also indicates
3877 * if the default VSI list is empty
3878 */
3879 bool
ice_check_if_dflt_vsi(struct ice_port_info * pi,u16 vsi_handle,bool * rule_exists)3880 ice_check_if_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle,
3881 bool *rule_exists)
3882 {
3883 struct ice_fltr_mgmt_list_entry *fm_entry;
3884 struct ice_sw_recipe *recp_list;
3885 struct list_head *rule_head;
3886 struct mutex *rule_lock; /* Lock to protect filter rule list */
3887 bool ret = false;
3888
3889 recp_list = &pi->hw->switch_info->recp_list[ICE_SW_LKUP_DFLT];
3890 rule_lock = &recp_list->filt_rule_lock;
3891 rule_head = &recp_list->filt_rules;
3892
3893 mutex_lock(rule_lock);
3894
3895 if (rule_exists && !list_empty(rule_head))
3896 *rule_exists = true;
3897
3898 list_for_each_entry(fm_entry, rule_head, list_entry) {
3899 if (ice_vsi_uses_fltr(fm_entry, vsi_handle)) {
3900 ret = true;
3901 break;
3902 }
3903 }
3904
3905 mutex_unlock(rule_lock);
3906
3907 return ret;
3908 }
3909
3910 /**
3911 * ice_remove_mac - remove a MAC address based filter rule
3912 * @hw: pointer to the hardware structure
3913 * @m_list: list of MAC addresses and forwarding information
3914 *
3915 * This function removes either a MAC filter rule or a specific VSI from a
3916 * VSI list for a multicast MAC address.
3917 *
3918 * Returns -ENOENT if a given entry was not added by ice_add_mac. Caller should
3919 * be aware that this call will only work if all the entries passed into m_list
3920 * were added previously. It will not attempt to do a partial remove of entries
3921 * that were found.
3922 */
ice_remove_mac(struct ice_hw * hw,struct list_head * m_list)3923 int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
3924 {
3925 struct ice_fltr_list_entry *list_itr, *tmp;
3926
3927 if (!m_list)
3928 return -EINVAL;
3929
3930 list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
3931 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3932 u16 vsi_handle;
3933
3934 if (l_type != ICE_SW_LKUP_MAC)
3935 return -EINVAL;
3936
3937 vsi_handle = list_itr->fltr_info.vsi_handle;
3938 if (!ice_is_vsi_valid(hw, vsi_handle))
3939 return -EINVAL;
3940
3941 list_itr->fltr_info.fwd_id.hw_vsi_id =
3942 ice_get_hw_vsi_num(hw, vsi_handle);
3943
3944 list_itr->status = ice_remove_rule_internal(hw,
3945 ICE_SW_LKUP_MAC,
3946 list_itr);
3947 if (list_itr->status)
3948 return list_itr->status;
3949 }
3950 return 0;
3951 }
3952
3953 /**
3954 * ice_remove_vlan - Remove VLAN based filter rule
3955 * @hw: pointer to the hardware structure
3956 * @v_list: list of VLAN entries and forwarding information
3957 */
ice_remove_vlan(struct ice_hw * hw,struct list_head * v_list)3958 int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
3959 {
3960 struct ice_fltr_list_entry *v_list_itr, *tmp;
3961
3962 if (!v_list || !hw)
3963 return -EINVAL;
3964
3965 list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
3966 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3967
3968 if (l_type != ICE_SW_LKUP_VLAN)
3969 return -EINVAL;
3970 v_list_itr->status = ice_remove_rule_internal(hw,
3971 ICE_SW_LKUP_VLAN,
3972 v_list_itr);
3973 if (v_list_itr->status)
3974 return v_list_itr->status;
3975 }
3976 return 0;
3977 }
3978
3979 /**
3980 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
3981 * @hw: pointer to the hardware structure
3982 * @vsi_handle: VSI handle to remove filters from
3983 * @vsi_list_head: pointer to the list to add entry to
3984 * @fi: pointer to fltr_info of filter entry to copy & add
3985 *
3986 * Helper function, used when creating a list of filters to remove from
3987 * a specific VSI. The entry added to vsi_list_head is a COPY of the
3988 * original filter entry, with the exception of fltr_info.fltr_act and
3989 * fltr_info.fwd_id fields. These are set such that later logic can
3990 * extract which VSI to remove the fltr from, and pass on that information.
3991 */
3992 static int
ice_add_entry_to_vsi_fltr_list(struct ice_hw * hw,u16 vsi_handle,struct list_head * vsi_list_head,struct ice_fltr_info * fi)3993 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3994 struct list_head *vsi_list_head,
3995 struct ice_fltr_info *fi)
3996 {
3997 struct ice_fltr_list_entry *tmp;
3998
3999 /* this memory is freed up in the caller function
4000 * once filters for this VSI are removed
4001 */
4002 tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL);
4003 if (!tmp)
4004 return -ENOMEM;
4005
4006 tmp->fltr_info = *fi;
4007
4008 /* Overwrite these fields to indicate which VSI to remove filter from,
4009 * so find and remove logic can extract the information from the
4010 * list entries. Note that original entries will still have proper
4011 * values.
4012 */
4013 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
4014 tmp->fltr_info.vsi_handle = vsi_handle;
4015 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4016
4017 list_add(&tmp->list_entry, vsi_list_head);
4018
4019 return 0;
4020 }
4021
4022 /**
4023 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
4024 * @hw: pointer to the hardware structure
4025 * @vsi_handle: VSI handle to remove filters from
4026 * @lkup_list_head: pointer to the list that has certain lookup type filters
4027 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
4028 *
4029 * Locates all filters in lkup_list_head that are used by the given VSI,
4030 * and adds COPIES of those entries to vsi_list_head (intended to be used
4031 * to remove the listed filters).
4032 * Note that this means all entries in vsi_list_head must be explicitly
4033 * deallocated by the caller when done with list.
4034 */
4035 static int
ice_add_to_vsi_fltr_list(struct ice_hw * hw,u16 vsi_handle,struct list_head * lkup_list_head,struct list_head * vsi_list_head)4036 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4037 struct list_head *lkup_list_head,
4038 struct list_head *vsi_list_head)
4039 {
4040 struct ice_fltr_mgmt_list_entry *fm_entry;
4041 int status = 0;
4042
4043 /* check to make sure VSI ID is valid and within boundary */
4044 if (!ice_is_vsi_valid(hw, vsi_handle))
4045 return -EINVAL;
4046
4047 list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
4048 if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
4049 continue;
4050
4051 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4052 vsi_list_head,
4053 &fm_entry->fltr_info);
4054 if (status)
4055 return status;
4056 }
4057 return status;
4058 }
4059
4060 /**
4061 * ice_determine_promisc_mask
4062 * @fi: filter info to parse
4063 *
4064 * Helper function to determine which ICE_PROMISC_ mask corresponds
4065 * to given filter into.
4066 */
ice_determine_promisc_mask(struct ice_fltr_info * fi)4067 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
4068 {
4069 u16 vid = fi->l_data.mac_vlan.vlan_id;
4070 u8 *macaddr = fi->l_data.mac.mac_addr;
4071 bool is_tx_fltr = false;
4072 u8 promisc_mask = 0;
4073
4074 if (fi->flag == ICE_FLTR_TX)
4075 is_tx_fltr = true;
4076
4077 if (is_broadcast_ether_addr(macaddr))
4078 promisc_mask |= is_tx_fltr ?
4079 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
4080 else if (is_multicast_ether_addr(macaddr))
4081 promisc_mask |= is_tx_fltr ?
4082 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
4083 else if (is_unicast_ether_addr(macaddr))
4084 promisc_mask |= is_tx_fltr ?
4085 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
4086 if (vid)
4087 promisc_mask |= is_tx_fltr ?
4088 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
4089
4090 return promisc_mask;
4091 }
4092
4093 /**
4094 * ice_remove_promisc - Remove promisc based filter rules
4095 * @hw: pointer to the hardware structure
4096 * @recp_id: recipe ID for which the rule needs to removed
4097 * @v_list: list of promisc entries
4098 */
4099 static int
ice_remove_promisc(struct ice_hw * hw,u8 recp_id,struct list_head * v_list)4100 ice_remove_promisc(struct ice_hw *hw, u8 recp_id, struct list_head *v_list)
4101 {
4102 struct ice_fltr_list_entry *v_list_itr, *tmp;
4103
4104 list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
4105 v_list_itr->status =
4106 ice_remove_rule_internal(hw, recp_id, v_list_itr);
4107 if (v_list_itr->status)
4108 return v_list_itr->status;
4109 }
4110 return 0;
4111 }
4112
/**
 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to clear mode
 * @promisc_mask: mask of promiscuous config bits to clear
 * @vid: VLAN ID to clear VLAN promiscuous
 *
 * Collects (under the recipe's rule lock) copies of every promiscuous
 * filter on this VSI that is fully covered by @promisc_mask, then removes
 * those rules outside the lock and frees the temporary copies.
 */
int
ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
		      u16 vid)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *fm_entry, *tmp;
	struct list_head remove_list_head;
	struct ice_fltr_mgmt_list_entry *itr;
	struct list_head *rule_head;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	int status = 0;
	u8 recipe_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	/* VLAN promiscuous rules live under their own recipe */
	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
	else
		recipe_id = ICE_SW_LKUP_PROMISC;

	rule_head = &sw->recp_list[recipe_id].filt_rules;
	rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;

	INIT_LIST_HEAD(&remove_list_head);

	/* snapshot matching rules while holding the lock; removal happens
	 * later on the copies so the lock is not held across AQ calls
	 */
	mutex_lock(rule_lock);
	list_for_each_entry(itr, rule_head, list_entry) {
		struct ice_fltr_info *fltr_info;
		u8 fltr_promisc_mask = 0;

		if (!ice_vsi_uses_fltr(itr, vsi_handle))
			continue;
		fltr_info = &itr->fltr_info;

		/* for VLAN promisc, only match rules for the requested VID */
		if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
		    vid != fltr_info->l_data.mac_vlan.vlan_id)
			continue;

		fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);

		/* Skip if filter is not completely specified by given mask */
		if (fltr_promisc_mask & ~promisc_mask)
			continue;

		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
							&remove_list_head,
							fltr_info);
		if (status) {
			mutex_unlock(rule_lock);
			goto free_fltr_list;
		}
	}
	mutex_unlock(rule_lock);

	status = ice_remove_promisc(hw, recipe_id, &remove_list_head);

free_fltr_list:
	/* free the temporary copies regardless of success or failure */
	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
		list_del(&fm_entry->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
	}

	return status;
}
4185
/**
 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @vid: VLAN ID to set VLAN promiscuous
 *
 * Adds one switch rule per direction/packet-type combination present in
 * @promisc_mask, consuming bits from the mask until it is empty. Returns
 * the first failing add status, or 0 on success.
 */
int
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
{
	enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
	struct ice_fltr_list_entry f_list_entry;
	struct ice_fltr_info new_fltr;
	bool is_tx_fltr;
	int status = 0;
	u16 hw_vsi_id;
	int pkt_type;
	u8 recipe_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	memset(&new_fltr, 0, sizeof(new_fltr));

	/* VLAN promiscuous rules use a dedicated recipe and carry the VID */
	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
		new_fltr.l_data.mac_vlan.vlan_id = vid;
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
	} else {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
		recipe_id = ICE_SW_LKUP_PROMISC;
	}

	/* Separate filters must be set for each direction/packet type
	 * combination, so we will loop over the mask value, store the
	 * individual type, and clear it out in the input mask as it
	 * is found.
	 */
	while (promisc_mask) {
		u8 *mac_addr;

		pkt_type = 0;
		is_tx_fltr = false;

		/* pick off exactly one unicast/multicast/broadcast bit per
		 * iteration, preferring RX over TX of the same class
		 */
		if (promisc_mask & ICE_PROMISC_UCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_RX;
			pkt_type = UCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_TX;
			pkt_type = UCAST_FLTR;
			is_tx_fltr = true;
		} else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_RX;
			pkt_type = MCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_TX;
			pkt_type = MCAST_FLTR;
			is_tx_fltr = true;
		} else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_RX;
			pkt_type = BCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_TX;
			pkt_type = BCAST_FLTR;
			is_tx_fltr = true;
		}

		/* Check for VLAN promiscuous flag */
		if (promisc_mask & ICE_PROMISC_VLAN_RX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_RX;
		} else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_TX;
			is_tx_fltr = true;
		}

		/* Set filter DA based on packet type */
		mac_addr = new_fltr.l_data.mac.mac_addr;
		if (pkt_type == BCAST_FLTR) {
			eth_broadcast_addr(mac_addr);
		} else if (pkt_type == MCAST_FLTR ||
			   pkt_type == UCAST_FLTR) {
			/* Use the dummy ether header DA */
			ether_addr_copy(mac_addr, dummy_eth_header);
			if (pkt_type == MCAST_FLTR)
				mac_addr[0] |= 0x1; /* Set multicast bit */
		}

		/* Need to reset this to zero for all iterations */
		new_fltr.flag = 0;
		if (is_tx_fltr) {
			new_fltr.flag |= ICE_FLTR_TX;
			new_fltr.src = hw_vsi_id;
		} else {
			new_fltr.flag |= ICE_FLTR_RX;
			new_fltr.src = hw->port_info->lport;
		}

		new_fltr.fltr_act = ICE_FWD_TO_VSI;
		new_fltr.vsi_handle = vsi_handle;
		new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
		f_list_entry.fltr_info = new_fltr;

		status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
		if (status)
			goto set_promisc_exit;
	}

set_promisc_exit:
	return status;
}
4297
/**
 * ice_set_vlan_vsi_promisc
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
 *
 * Configure VSI with all associated VLANs to given promiscuous mode(s).
 * Builds a temporary copy-list of the VSI's VLAN filters under the VLAN
 * rule lock, then sets or clears promiscuous mode per VLAN ID without
 * holding the lock. The copies are always freed before returning.
 */
int
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
			 bool rm_vlan_promisc)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *list_itr, *tmp;
	struct list_head vsi_list_head;
	struct list_head *vlan_head;
	struct mutex *vlan_lock; /* Lock to protect filter rule list */
	u16 vlan_id;
	int status;

	INIT_LIST_HEAD(&vsi_list_head);
	vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
	mutex_lock(vlan_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
					  &vsi_list_head);
	mutex_unlock(vlan_lock);
	if (status)
		goto free_fltr_list;

	list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
		/* Avoid enabling or disabling VLAN zero twice when in double
		 * VLAN mode
		 */
		if (ice_is_dvm_ena(hw) &&
		    list_itr->fltr_info.l_data.vlan.tpid == 0)
			continue;

		vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
		if (rm_vlan_promisc)
			status = ice_clear_vsi_promisc(hw, vsi_handle,
						       promisc_mask, vlan_id);
		else
			status = ice_set_vsi_promisc(hw, vsi_handle,
						     promisc_mask, vlan_id);
		/* -EEXIST means the rule is already present; keep going */
		if (status && status != -EEXIST)
			break;
	}

free_fltr_list:
	list_for_each_entry_safe(list_itr, tmp, &vsi_list_head, list_entry) {
		list_del(&list_itr->list_entry);
		devm_kfree(ice_hw_to_dev(hw), list_itr);
	}
	return status;
}
4355
/**
 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @lkup: switch rule filter lookup type
 *
 * Copies all filters of type @lkup used by the VSI (under the recipe's
 * rule lock) into a temporary list, dispatches to the matching removal
 * routine, then frees the copies. Removal errors are not propagated;
 * this is a best-effort teardown helper.
 */
static void
ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
			 enum ice_sw_lkup_type lkup)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *fm_entry;
	struct list_head remove_list_head;
	struct list_head *rule_head;
	struct ice_fltr_list_entry *tmp;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	int status;

	INIT_LIST_HEAD(&remove_list_head);
	rule_lock = &sw->recp_list[lkup].filt_rule_lock;
	rule_head = &sw->recp_list[lkup].filt_rules;
	mutex_lock(rule_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
					  &remove_list_head);
	mutex_unlock(rule_lock);
	if (status)
		goto free_fltr_list;

	switch (lkup) {
	case ICE_SW_LKUP_MAC:
		ice_remove_mac(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_VLAN:
		ice_remove_vlan(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_PROMISC:
	case ICE_SW_LKUP_PROMISC_VLAN:
		ice_remove_promisc(hw, lkup, &remove_list_head);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
	case ICE_SW_LKUP_ETHERTYPE:
	case ICE_SW_LKUP_ETHERTYPE_MAC:
	case ICE_SW_LKUP_DFLT:
	case ICE_SW_LKUP_LAST:
	default:
		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
		break;
	}

free_fltr_list:
	/* the copies are always ours to free, success or not */
	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
		list_del(&fm_entry->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
	}
}
4411
4412 /**
4413 * ice_remove_vsi_fltr - Remove all filters for a VSI
4414 * @hw: pointer to the hardware structure
4415 * @vsi_handle: VSI handle to remove filters from
4416 */
ice_remove_vsi_fltr(struct ice_hw * hw,u16 vsi_handle)4417 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4418 {
4419 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
4420 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
4421 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
4422 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
4423 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
4424 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
4425 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
4426 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
4427 }
4428
4429 /**
4430 * ice_alloc_res_cntr - allocating resource counter
4431 * @hw: pointer to the hardware structure
4432 * @type: type of resource
4433 * @alloc_shared: if set it is shared else dedicated
4434 * @num_items: number of entries requested for FD resource type
4435 * @counter_id: counter index returned by AQ call
4436 */
4437 int
ice_alloc_res_cntr(struct ice_hw * hw,u8 type,u8 alloc_shared,u16 num_items,u16 * counter_id)4438 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4439 u16 *counter_id)
4440 {
4441 struct ice_aqc_alloc_free_res_elem *buf;
4442 u16 buf_len;
4443 int status;
4444
4445 /* Allocate resource */
4446 buf_len = struct_size(buf, elem, 1);
4447 buf = kzalloc(buf_len, GFP_KERNEL);
4448 if (!buf)
4449 return -ENOMEM;
4450
4451 buf->num_elems = cpu_to_le16(num_items);
4452 buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
4453 ICE_AQC_RES_TYPE_M) | alloc_shared);
4454
4455 status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res);
4456 if (status)
4457 goto exit;
4458
4459 *counter_id = le16_to_cpu(buf->elem[0].e.sw_resp);
4460
4461 exit:
4462 kfree(buf);
4463 return status;
4464 }
4465
4466 /**
4467 * ice_free_res_cntr - free resource counter
4468 * @hw: pointer to the hardware structure
4469 * @type: type of resource
4470 * @alloc_shared: if set it is shared else dedicated
4471 * @num_items: number of entries to be freed for FD resource type
4472 * @counter_id: counter ID resource which needs to be freed
4473 */
4474 int
ice_free_res_cntr(struct ice_hw * hw,u8 type,u8 alloc_shared,u16 num_items,u16 counter_id)4475 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4476 u16 counter_id)
4477 {
4478 struct ice_aqc_alloc_free_res_elem *buf;
4479 u16 buf_len;
4480 int status;
4481
4482 /* Free resource */
4483 buf_len = struct_size(buf, elem, 1);
4484 buf = kzalloc(buf_len, GFP_KERNEL);
4485 if (!buf)
4486 return -ENOMEM;
4487
4488 buf->num_elems = cpu_to_le16(num_items);
4489 buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
4490 ICE_AQC_RES_TYPE_M) | alloc_shared);
4491 buf->elem[0].e.sw_resp = cpu_to_le16(counter_id);
4492
4493 status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_free_res);
4494 if (status)
4495 ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
4496
4497 kfree(buf);
4498 return status;
4499 }
4500
/* Build one ice_prot_ext table entry: the protocol type plus the byte
 * offsets of each 16-bit extraction word within that protocol's header.
 */
#define ICE_PROTOCOL_ENTRY(id, ...) { \
	.prot_type = id, \
	.offs = {__VA_ARGS__}, \
}
4505
4506 /**
4507 * ice_share_res - set a resource as shared or dedicated
4508 * @hw: hw struct of original owner of resource
4509 * @type: resource type
4510 * @shared: is the resource being set to shared
4511 * @res_id: resource id (descriptor)
4512 */
ice_share_res(struct ice_hw * hw,u16 type,u8 shared,u16 res_id)4513 int ice_share_res(struct ice_hw *hw, u16 type, u8 shared, u16 res_id)
4514 {
4515 struct ice_aqc_alloc_free_res_elem *buf;
4516 u16 buf_len;
4517 int status;
4518
4519 buf_len = struct_size(buf, elem, 1);
4520 buf = kzalloc(buf_len, GFP_KERNEL);
4521 if (!buf)
4522 return -ENOMEM;
4523
4524 buf->num_elems = cpu_to_le16(1);
4525 if (shared)
4526 buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
4527 ICE_AQC_RES_TYPE_M) |
4528 ICE_AQC_RES_TYPE_FLAG_SHARED);
4529 else
4530 buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
4531 ICE_AQC_RES_TYPE_M) &
4532 ~ICE_AQC_RES_TYPE_FLAG_SHARED);
4533
4534 buf->elem[0].e.sw_resp = cpu_to_le16(res_id);
4535 status = ice_aq_alloc_free_res(hw, buf, buf_len,
4536 ice_aqc_opc_share_res);
4537 if (status)
4538 ice_debug(hw, ICE_DBG_SW, "Could not set resource type %u id %u to %s\n",
4539 type, res_id, shared ? "SHARED" : "DEDICATED");
4540
4541 kfree(buf);
4542 return status;
4543 }
4544
/* This is mapping table entry that maps every word within a given protocol
 * structure to the real byte offset as per the specification of that
 * protocol header.
 * for example dst address is 3 words in ethertype header and corresponding
 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
 * matching entry describing its field. This needs to be updated if new
 * structure is added to that union.
 *
 * NOTE(review): code such as ice_fill_valid_words() indexes this table
 * directly with an ice_protocol_type value, so the entry order here must
 * stay aligned with that enum.
 */
static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
	ICE_PROTOCOL_ENTRY(ICE_MAC_OFOS, 0, 2, 4, 6, 8, 10, 12),
	ICE_PROTOCOL_ENTRY(ICE_MAC_IL, 0, 2, 4, 6, 8, 10, 12),
	ICE_PROTOCOL_ENTRY(ICE_ETYPE_OL, 0),
	ICE_PROTOCOL_ENTRY(ICE_ETYPE_IL, 0),
	ICE_PROTOCOL_ENTRY(ICE_VLAN_OFOS, 2, 0),
	ICE_PROTOCOL_ENTRY(ICE_IPV4_OFOS, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18),
	ICE_PROTOCOL_ENTRY(ICE_IPV4_IL, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18),
	ICE_PROTOCOL_ENTRY(ICE_IPV6_OFOS, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18,
			   20, 22, 24, 26, 28, 30, 32, 34, 36, 38),
	ICE_PROTOCOL_ENTRY(ICE_IPV6_IL, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20,
			   22, 24, 26, 28, 30, 32, 34, 36, 38),
	ICE_PROTOCOL_ENTRY(ICE_TCP_IL, 0, 2),
	ICE_PROTOCOL_ENTRY(ICE_UDP_OF, 0, 2),
	ICE_PROTOCOL_ENTRY(ICE_UDP_ILOS, 0, 2),
	ICE_PROTOCOL_ENTRY(ICE_VXLAN, 8, 10, 12, 14),
	ICE_PROTOCOL_ENTRY(ICE_GENEVE, 8, 10, 12, 14),
	ICE_PROTOCOL_ENTRY(ICE_NVGRE, 0, 2, 4, 6),
	ICE_PROTOCOL_ENTRY(ICE_GTP, 8, 10, 12, 14, 16, 18, 20, 22),
	ICE_PROTOCOL_ENTRY(ICE_GTP_NO_PAY, 8, 10, 12, 14),
	ICE_PROTOCOL_ENTRY(ICE_PPPOE, 0, 2, 4, 6),
	ICE_PROTOCOL_ENTRY(ICE_L2TPV3, 0, 2, 4, 6, 8, 10),
	ICE_PROTOCOL_ENTRY(ICE_VLAN_EX, 2, 0),
	ICE_PROTOCOL_ENTRY(ICE_VLAN_IN, 2, 0),
	ICE_PROTOCOL_ENTRY(ICE_HW_METADATA,
			   ICE_SOURCE_PORT_MDID_OFFSET,
			   ICE_PTYPE_MDID_OFFSET,
			   ICE_PACKET_LENGTH_MDID_OFFSET,
			   ICE_SOURCE_VSI_MDID_OFFSET,
			   ICE_PKT_VLAN_MDID_OFFSET,
			   ICE_PKT_TUNNEL_MDID_OFFSET,
			   ICE_PKT_TCP_MDID_OFFSET,
			   ICE_PKT_ERROR_MDID_OFFSET),
};
4588
/* Map each software protocol type to the HW protocol ID used in field
 * vectors. Deliberately NOT const: ice_change_proto_id_to_dvm() rewrites
 * the ICE_VLAN_OFOS entry at runtime when double VLAN mode is enabled.
 */
static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		ICE_MAC_OFOS_HW },
	{ ICE_MAC_IL,		ICE_MAC_IL_HW },
	{ ICE_ETYPE_OL,		ICE_ETYPE_OL_HW },
	{ ICE_ETYPE_IL,		ICE_ETYPE_IL_HW },
	{ ICE_VLAN_OFOS,	ICE_VLAN_OL_HW },
	{ ICE_IPV4_OFOS,	ICE_IPV4_OFOS_HW },
	{ ICE_IPV4_IL,		ICE_IPV4_IL_HW },
	{ ICE_IPV6_OFOS,	ICE_IPV6_OFOS_HW },
	{ ICE_IPV6_IL,		ICE_IPV6_IL_HW },
	{ ICE_TCP_IL,		ICE_TCP_IL_HW },
	{ ICE_UDP_OF,		ICE_UDP_OF_HW },
	{ ICE_UDP_ILOS,		ICE_UDP_ILOS_HW },
	{ ICE_VXLAN,		ICE_UDP_OF_HW },
	{ ICE_GENEVE,		ICE_UDP_OF_HW },
	{ ICE_NVGRE,		ICE_GRE_OF_HW },
	{ ICE_GTP,		ICE_UDP_OF_HW },
	{ ICE_GTP_NO_PAY,	ICE_UDP_ILOS_HW },
	{ ICE_PPPOE,		ICE_PPPOE_HW },
	{ ICE_L2TPV3,		ICE_L2TPV3_HW },
	{ ICE_VLAN_EX,		ICE_VLAN_OF_HW },
	{ ICE_VLAN_IN,		ICE_VLAN_OL_HW },
	{ ICE_HW_METADATA,	ICE_META_DATA_ID_HW },
};
4613
/**
 * ice_find_recp - find a recipe
 * @hw: pointer to the hardware structure
 * @lkup_exts: extension sequence to match
 * @rinfo: information regarding the rule e.g. priority and action info
 *
 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
 *
 * A recipe matches when it extracts exactly the same set of
 * (protocol, offset, mask) words as @lkup_exts (order-insensitive) and
 * agrees on tunnel type and the pass-L2 flags in @rinfo.
 */
static u16
ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
	      const struct ice_adv_rule_info *rinfo)
{
	bool refresh_required = true;
	struct ice_sw_recipe *recp;
	u8 i;

	/* Walk through existing recipes to find a match */
	recp = hw->switch_info->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		/* If recipe was not created for this ID, in SW bookkeeping,
		 * check if FW has an entry for this recipe. If the FW has an
		 * entry update it in our SW bookkeeping and continue with the
		 * matching.
		 */
		if (!recp[i].recp_created)
			if (ice_get_recp_frm_fw(hw,
						hw->switch_info->recp_list, i,
						&refresh_required))
				continue;

		/* Skip inverse action recipes */
		if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
		    ICE_AQ_RECIPE_ACT_INV_ACT)
			continue;

		/* if number of words we are looking for match */
		if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
			struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
			struct ice_fv_word *be = lkup_exts->fv_words;
			u16 *cr = recp[i].lkup_exts.field_mask;
			u16 *de = lkup_exts->field_mask;
			bool found = true;
			u8 pe, qr;

			/* ar, cr, and qr are related to the recipe words, while
			 * be, de, and pe are related to the lookup words
			 */
			for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
				/* search the recipe's words for this lookup
				 * word; word order may differ between the two
				 */
				for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
				     qr++) {
					if (ar[qr].off == be[pe].off &&
					    ar[qr].prot_id == be[pe].prot_id &&
					    cr[qr] == de[pe])
						/* Found the "pe"th word in the
						 * given recipe
						 */
						break;
				}
				/* After walking through all the words in the
				 * "i"th recipe if "p"th word was not found then
				 * this recipe is not what we are looking for.
				 * So break out from this loop and try the next
				 * recipe
				 */
				if (qr >= recp[i].lkup_exts.n_val_words) {
					found = false;
					break;
				}
			}
			/* If for "i"th recipe the found was never set to false
			 * then it means we found our match
			 * Also tun type and *_pass_l2 of recipe needs to be
			 * checked
			 */
			if (found && recp[i].tun_type == rinfo->tun_type &&
			    recp[i].need_pass_l2 == rinfo->need_pass_l2 &&
			    recp[i].allow_pass_l2 == rinfo->allow_pass_l2)
				return i; /* Return the recipe ID */
		}
	}
	return ICE_MAX_NUM_RECIPES;
}
4696
4697 /**
4698 * ice_change_proto_id_to_dvm - change proto id in prot_id_tbl
4699 *
4700 * As protocol id for outer vlan is different in dvm and svm, if dvm is
4701 * supported protocol array record for outer vlan has to be modified to
4702 * reflect the value proper for DVM.
4703 */
ice_change_proto_id_to_dvm(void)4704 void ice_change_proto_id_to_dvm(void)
4705 {
4706 u8 i;
4707
4708 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
4709 if (ice_prot_id_tbl[i].type == ICE_VLAN_OFOS &&
4710 ice_prot_id_tbl[i].protocol_id != ICE_VLAN_OF_HW)
4711 ice_prot_id_tbl[i].protocol_id = ICE_VLAN_OF_HW;
4712 }
4713
4714 /**
4715 * ice_prot_type_to_id - get protocol ID from protocol type
4716 * @type: protocol type
4717 * @id: pointer to variable that will receive the ID
4718 *
4719 * Returns true if found, false otherwise
4720 */
ice_prot_type_to_id(enum ice_protocol_type type,u8 * id)4721 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
4722 {
4723 u8 i;
4724
4725 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
4726 if (ice_prot_id_tbl[i].type == type) {
4727 *id = ice_prot_id_tbl[i].protocol_id;
4728 return true;
4729 }
4730 return false;
4731 }
4732
/**
 * ice_fill_valid_words - count valid words
 * @rule: advanced rule with lookup information
 * @lkup_exts: byte offset extractions of the words that are valid
 *
 * calculate valid words in a lookup rule using mask value
 *
 * Appends one (prot_id, offset, mask) extraction word to @lkup_exts for
 * every non-zero 16-bit word in the rule's mask. Returns the number of
 * words added, or 0 if the protocol type is unknown or space ran out.
 */
static u8
ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
		     struct ice_prot_lkup_ext *lkup_exts)
{
	u8 j, word, prot_id, ret_val;

	/* unknown protocol type -> nothing to extract */
	if (!ice_prot_type_to_id(rule->type, &prot_id))
		return 0;

	word = lkup_exts->n_val_words;

	for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
		if (((u16 *)&rule->m_u)[j] &&
		    rule->type < ARRAY_SIZE(ice_prot_ext)) {
			/* No more space to accommodate */
			if (word >= ICE_MAX_CHAIN_WORDS)
				return 0;
			lkup_exts->fv_words[word].off =
				ice_prot_ext[rule->type].offs[j];
			/* NOTE(review): indexes ice_prot_id_tbl by rule->type
			 * rather than using the prot_id looked up above;
			 * this assumes table position matches the enum value
			 * - confirm both stay in sync when adding protocols
			 */
			lkup_exts->fv_words[word].prot_id =
				ice_prot_id_tbl[rule->type].protocol_id;
			lkup_exts->field_mask[word] =
				be16_to_cpu(((__force __be16 *)&rule->m_u)[j]);
			word++;
		}

	ret_val = word - lkup_exts->n_val_words;
	lkup_exts->n_val_words = word;

	return ret_val;
}
4771
/**
 * ice_create_first_fit_recp_def - Create a recipe grouping
 * @hw: pointer to the hardware structure
 * @lkup_exts: an array of protocol header extractions
 * @rg_list: pointer to a list that stores new recipe groups
 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
 *
 * Using first fit algorithm, take all the words that are still not done
 * and start grouping them in 4-word groups. Each group makes up one
 * recipe.
 *
 * Group entries are devm-allocated and linked onto @rg_list; the caller
 * owns their cleanup, including on the -ENOMEM error path.
 */
static int
ice_create_first_fit_recp_def(struct ice_hw *hw,
			      struct ice_prot_lkup_ext *lkup_exts,
			      struct list_head *rg_list,
			      u8 *recp_cnt)
{
	struct ice_pref_recipe_group *grp = NULL;
	u8 j;

	*recp_cnt = 0;

	/* Walk through every word in the rule to check if it is not done. If so
	 * then this word needs to be part of a new recipe.
	 */
	for (j = 0; j < lkup_exts->n_val_words; j++)
		if (!test_bit(j, lkup_exts->done)) {
			/* start a new group when none is open or the current
			 * one is full (ICE_NUM_WORDS_RECIPE words)
			 */
			if (!grp ||
			    grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
				struct ice_recp_grp_entry *entry;

				entry = devm_kzalloc(ice_hw_to_dev(hw),
						     sizeof(*entry),
						     GFP_KERNEL);
				if (!entry)
					return -ENOMEM;
				list_add(&entry->l_entry, rg_list);
				grp = &entry->r_group;
				(*recp_cnt)++;
			}

			grp->pairs[grp->n_val_pairs].prot_id =
				lkup_exts->fv_words[j].prot_id;
			grp->pairs[grp->n_val_pairs].off =
				lkup_exts->fv_words[j].off;
			grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
			grp->n_val_pairs++;
		}

	return 0;
}
4823
/**
 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
 * @hw: pointer to the hardware structure
 * @fv_list: field vector with the extraction sequence information
 * @rg_list: recipe groupings with protocol-offset pairs
 *
 * Helper function to fill in the field vector indices for protocol-offset
 * pairs. These indexes are then ultimately programmed into a recipe.
 *
 * Only the FIRST field vector on @fv_list is consulted. Returns -EINVAL
 * if any requested (protocol, offset) pair is absent from that vector.
 */
static int
ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list,
		       struct list_head *rg_list)
{
	struct ice_sw_fv_list_entry *fv;
	struct ice_recp_grp_entry *rg;
	struct ice_fv_word *fv_ext;

	/* nothing to resolve against; treated as success */
	if (list_empty(fv_list))
		return 0;

	fv = list_first_entry(fv_list, struct ice_sw_fv_list_entry,
			      list_entry);
	fv_ext = fv->fv_ptr->ew;

	list_for_each_entry(rg, rg_list, l_entry) {
		u8 i;

		for (i = 0; i < rg->r_group.n_val_pairs; i++) {
			struct ice_fv_word *pr;
			bool found = false;
			u16 mask;
			u8 j;

			pr = &rg->r_group.pairs[i];
			mask = rg->r_group.mask[i];

			/* linear scan of the extraction words for a match */
			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
				if (fv_ext[j].prot_id == pr->prot_id &&
				    fv_ext[j].off == pr->off) {
					found = true;

					/* Store index of field vector */
					rg->fv_idx[i] = j;
					rg->fv_mask[i] = mask;
					break;
				}

			/* Protocol/offset could not be found, caller gave an
			 * invalid pair
			 */
			if (!found)
				return -EINVAL;
		}
	}

	return 0;
}
4881
4882 /**
4883 * ice_find_free_recp_res_idx - find free result indexes for recipe
4884 * @hw: pointer to hardware structure
4885 * @profiles: bitmap of profiles that will be associated with the new recipe
4886 * @free_idx: pointer to variable to receive the free index bitmap
4887 *
4888 * The algorithm used here is:
4889 * 1. When creating a new recipe, create a set P which contains all
4890 * Profiles that will be associated with our new recipe
4891 *
4892 * 2. For each Profile p in set P:
4893 * a. Add all recipes associated with Profile p into set R
4894 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
4895 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
4896 * i. Or just assume they all have the same possible indexes:
4897 * 44, 45, 46, 47
4898 * i.e., PossibleIndexes = 0x0000F00000000000
4899 *
4900 * 3. For each Recipe r in set R:
4901 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
4902 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
4903 *
4904 * FreeIndexes will contain the bits indicating the indexes free for use,
4905 * then the code needs to update the recipe[r].used_result_idx_bits to
4906 * indicate which indexes were selected for use by this recipe.
4907 */
4908 static u16
ice_find_free_recp_res_idx(struct ice_hw * hw,const unsigned long * profiles,unsigned long * free_idx)4909 ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
4910 unsigned long *free_idx)
4911 {
4912 DECLARE_BITMAP(possible_idx, ICE_MAX_FV_WORDS);
4913 DECLARE_BITMAP(recipes, ICE_MAX_NUM_RECIPES);
4914 DECLARE_BITMAP(used_idx, ICE_MAX_FV_WORDS);
4915 u16 bit;
4916
4917 bitmap_zero(recipes, ICE_MAX_NUM_RECIPES);
4918 bitmap_zero(used_idx, ICE_MAX_FV_WORDS);
4919
4920 bitmap_fill(possible_idx, ICE_MAX_FV_WORDS);
4921
4922 /* For each profile we are going to associate the recipe with, add the
4923 * recipes that are associated with that profile. This will give us
4924 * the set of recipes that our recipe may collide with. Also, determine
4925 * what possible result indexes are usable given this set of profiles.
4926 */
4927 for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
4928 bitmap_or(recipes, recipes, profile_to_recipe[bit],
4929 ICE_MAX_NUM_RECIPES);
4930 bitmap_and(possible_idx, possible_idx,
4931 hw->switch_info->prof_res_bm[bit],
4932 ICE_MAX_FV_WORDS);
4933 }
4934
4935 /* For each recipe that our new recipe may collide with, determine
4936 * which indexes have been used.
4937 */
4938 for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
4939 bitmap_or(used_idx, used_idx,
4940 hw->switch_info->recp_list[bit].res_idxs,
4941 ICE_MAX_FV_WORDS);
4942
4943 bitmap_xor(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
4944
4945 /* return number of free indexes */
4946 return (u16)bitmap_weight(free_idx, ICE_MAX_FV_WORDS);
4947 }
4948
/**
 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
 * @hw: pointer to hardware structure
 * @rm: recipe management list entry
 * @profiles: bitmap of profiles that will be associated.
 *
 * Allocates a hardware recipe for every group on @rm->rg_list, fills in the
 * lookup indexes/masks from the groups, chains multiple recipes together with
 * an extra root recipe when needed, programs the result via the admin queue
 * and mirrors it into the driver book keeping (hw->switch_info->recp_list).
 *
 * Return: 0 on success; -ENOSPC, -ENOMEM, -EINVAL, -EIO or an AQ error code
 * on failure.
 */
static int
ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
		  unsigned long *profiles)
{
	DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_content *content;
	struct ice_aqc_recipe_data_elem *tmp;
	struct ice_aqc_recipe_data_elem *buf;
	struct ice_recp_grp_entry *entry;
	u16 free_res_idx;
	u16 recipe_count;
	u8 chain_idx;
	u8 recps = 0;
	int status;

	/* When more than one recipe are required, another recipe is needed to
	 * chain them together. Matching a tunnel metadata ID takes up one of
	 * the match fields in the chaining recipe reducing the number of
	 * chained recipes by one.
	 */
	/* check number of free result indices */
	bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS);
	free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);

	ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
		  free_res_idx, rm->n_grp_count);

	if (rm->n_grp_count > 1) {
		if (rm->n_grp_count > free_res_idx)
			return -ENOSPC;

		/* one extra recipe to chain the others together */
		rm->n_grp_count++;
	}

	if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
		return -ENOSPC;

	/* tmp receives the recipes read back from HW; tmp[0] is reused below
	 * as a template for each new recipe buffer entry.
	 */
	tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf),
			   GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err_mem;
	}

	bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
	recipe_count = ICE_MAX_NUM_RECIPES;
	status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
				   NULL);
	if (status || recipe_count == 0)
		goto err_unroll;

	/* Allocate the recipe resources, and configure them according to the
	 * match fields from protocol headers and extracted field vectors.
	 */
	chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
	list_for_each_entry(entry, &rm->rg_list, l_entry) {
		u8 i;

		status = ice_alloc_recipe(hw, &entry->rid);
		if (status)
			goto err_unroll;

		content = &buf[recps].content;

		/* Clear the result index of the located recipe, as this will be
		 * updated, if needed, later in the recipe creation process.
		 */
		tmp[0].content.result_indx = 0;

		buf[recps] = tmp[0];
		buf[recps].recipe_indx = (u8)entry->rid;
		/* if the recipe is a non-root recipe RID should be programmed
		 * as 0 for the rules to be applied correctly.
		 */
		content->rid = 0;
		memset(&content->lkup_indx, 0,
		       sizeof(content->lkup_indx));

		/* All recipes use look-up index 0 to match switch ID. */
		content->lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
		content->mask[0] = cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
		/* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
		 * to be 0
		 */
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
			content->lkup_indx[i] = 0x80;
			content->mask[i] = 0;
		}

		/* Program this group's field vector indexes and masks into
		 * lookup slots 1..n (slot 0 is the switch ID above).
		 */
		for (i = 0; i < entry->r_group.n_val_pairs; i++) {
			content->lkup_indx[i + 1] = entry->fv_idx[i];
			content->mask[i + 1] = cpu_to_le16(entry->fv_mask[i]);
		}

		if (rm->n_grp_count > 1) {
			/* Checks to see if there really is a valid result index
			 * that can be used.
			 */
			if (chain_idx >= ICE_MAX_FV_WORDS) {
				ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
				status = -ENOSPC;
				goto err_unroll;
			}

			/* Non-root recipe publishes its match outcome in a
			 * result word that the chaining recipe matches on.
			 */
			entry->chain_idx = chain_idx;
			content->result_indx =
				ICE_AQ_RECIPE_RESULT_EN |
				((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
				 ICE_AQ_RECIPE_RESULT_DATA_M);
			clear_bit(chain_idx, result_idx_bm);
			chain_idx = find_first_bit(result_idx_bm,
						   ICE_MAX_FV_WORDS);
		}

		/* fill recipe dependencies */
		bitmap_zero((unsigned long *)buf[recps].recipe_bitmap,
			    ICE_MAX_NUM_RECIPES);
		set_bit(buf[recps].recipe_indx,
			(unsigned long *)buf[recps].recipe_bitmap);
		content->act_ctrl_fwd_priority = rm->priority;

		if (rm->need_pass_l2)
			content->act_ctrl |= ICE_AQ_RECIPE_ACT_NEED_PASS_L2;

		if (rm->allow_pass_l2)
			content->act_ctrl |= ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2;
		recps++;
	}

	if (rm->n_grp_count == 1) {
		/* Single recipe: it is its own root, no chaining needed. */
		rm->root_rid = buf[0].recipe_indx;
		set_bit(buf[0].recipe_indx, rm->r_bitmap);
		buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
		if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
			memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
			       sizeof(buf[0].recipe_bitmap));
		} else {
			status = -EINVAL;
			goto err_unroll;
		}
		/* Applicable only for ROOT_RECIPE, set the fwd_priority for
		 * the recipe which is getting created if specified
		 * by user. Usually any advanced switch filter, which results
		 * into new extraction sequence, ended up creating a new recipe
		 * of type ROOT and usually recipes are associated with profiles
		 * Switch rule referring newly created recipe, needs to have
		 * either/or 'fwd' or 'join' priority, otherwise switch rule
		 * evaluation will not happen correctly. In other words, if
		 * switch rule to be evaluated on priority basis, then recipe
		 * needs to have priority, otherwise it will be evaluated last.
		 */
		buf[0].content.act_ctrl_fwd_priority = rm->priority;
	} else {
		struct ice_recp_grp_entry *last_chain_entry;
		u16 rid, i;

		/* Allocate the last recipe that will chain the outcomes of the
		 * other recipes together
		 */
		status = ice_alloc_recipe(hw, &rid);
		if (status)
			goto err_unroll;

		content = &buf[recps].content;

		buf[recps].recipe_indx = (u8)rid;
		content->rid = (u8)rid;
		content->rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
		/* the new entry created should also be part of rg_list to
		 * make sure we have complete recipe
		 */
		last_chain_entry = devm_kzalloc(ice_hw_to_dev(hw),
						sizeof(*last_chain_entry),
						GFP_KERNEL);
		if (!last_chain_entry) {
			status = -ENOMEM;
			goto err_unroll;
		}
		last_chain_entry->rid = rid;
		memset(&content->lkup_indx, 0, sizeof(content->lkup_indx));
		/* All recipes use look-up index 0 to match switch ID. */
		content->lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
		content->mask[0] = cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
			content->lkup_indx[i] = ICE_AQ_RECIPE_LKUP_IGNORE;
			content->mask[i] = 0;
		}

		i = 1;
		/* update r_bitmap with the recp that is used for chaining */
		set_bit(rid, rm->r_bitmap);
		/* this is the recipe that chains all the other recipes so it
		 * should not have a chaining ID to indicate the same
		 */
		last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
		/* The chaining recipe matches on the result words published
		 * by every non-root recipe created above.
		 */
		list_for_each_entry(entry, &rm->rg_list, l_entry) {
			last_chain_entry->fv_idx[i] = entry->chain_idx;
			content->lkup_indx[i] = entry->chain_idx;
			content->mask[i++] = cpu_to_le16(0xFFFF);
			set_bit(entry->rid, rm->r_bitmap);
		}
		list_add(&last_chain_entry->l_entry, &rm->rg_list);
		if (sizeof(buf[recps].recipe_bitmap) >=
		    sizeof(rm->r_bitmap)) {
			memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
			       sizeof(buf[recps].recipe_bitmap));
		} else {
			status = -EINVAL;
			goto err_unroll;
		}
		content->act_ctrl_fwd_priority = rm->priority;

		recps++;
		rm->root_rid = (u8)rid;
	}
	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
	if (status)
		goto err_unroll;

	status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
	ice_release_change_lock(hw);
	if (status)
		goto err_unroll;

	/* Every recipe that just got created add it to the recipe
	 * book keeping list
	 */
	list_for_each_entry(entry, &rm->rg_list, l_entry) {
		struct ice_switch_info *sw = hw->switch_info;
		bool is_root, idx_found = false;
		struct ice_sw_recipe *recp;
		u16 idx, buf_idx = 0;

		/* find buffer index for copying some data */
		for (idx = 0; idx < rm->n_grp_count; idx++)
			if (buf[idx].recipe_indx == entry->rid) {
				buf_idx = idx;
				idx_found = true;
			}

		if (!idx_found) {
			status = -EIO;
			goto err_unroll;
		}

		recp = &sw->recp_list[entry->rid];
		is_root = (rm->root_rid == entry->rid);
		recp->is_root = is_root;

		recp->root_rid = entry->rid;
		recp->big_recp = (is_root && rm->n_grp_count > 1);

		memcpy(&recp->ext_words, entry->r_group.pairs,
		       entry->r_group.n_val_pairs * sizeof(struct ice_fv_word));

		memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
		       sizeof(recp->r_bitmap));

		/* Copy non-result fv index values and masks to recipe. This
		 * call will also update the result recipe bitmask.
		 */
		ice_collect_result_idx(&buf[buf_idx], recp);

		/* for non-root recipes, also copy to the root, this allows
		 * easier matching of a complete chained recipe
		 */
		if (!is_root)
			ice_collect_result_idx(&buf[buf_idx],
					       &sw->recp_list[rm->root_rid]);

		recp->n_ext_words = entry->r_group.n_val_pairs;
		recp->chain_idx = entry->chain_idx;
		recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
		recp->n_grp_count = rm->n_grp_count;
		recp->tun_type = rm->tun_type;
		recp->need_pass_l2 = rm->need_pass_l2;
		recp->allow_pass_l2 = rm->allow_pass_l2;
		recp->recp_created = true;
	}
	/* on success, ownership of buf transfers to rm->root_buf */
	rm->root_buf = buf;
	kfree(tmp);
	return status;

err_unroll:
err_mem:
	kfree(tmp);
	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}
5248
5249 /**
5250 * ice_create_recipe_group - creates recipe group
5251 * @hw: pointer to hardware structure
5252 * @rm: recipe management list entry
5253 * @lkup_exts: lookup elements
5254 */
5255 static int
ice_create_recipe_group(struct ice_hw * hw,struct ice_sw_recipe * rm,struct ice_prot_lkup_ext * lkup_exts)5256 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5257 struct ice_prot_lkup_ext *lkup_exts)
5258 {
5259 u8 recp_count = 0;
5260 int status;
5261
5262 rm->n_grp_count = 0;
5263
5264 /* Create recipes for words that are marked not done by packing them
5265 * as best fit.
5266 */
5267 status = ice_create_first_fit_recp_def(hw, lkup_exts,
5268 &rm->rg_list, &recp_count);
5269 if (!status) {
5270 rm->n_grp_count += recp_count;
5271 rm->n_ext_words = lkup_exts->n_val_words;
5272 memcpy(&rm->ext_words, lkup_exts->fv_words,
5273 sizeof(rm->ext_words));
5274 memcpy(rm->word_masks, lkup_exts->field_mask,
5275 sizeof(rm->word_masks));
5276 }
5277
5278 return status;
5279 }
5280
5281 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
5282 * @hw: pointer to hardware structure
5283 * @rinfo: other information regarding the rule e.g. priority and action info
5284 * @bm: pointer to memory for returning the bitmap of field vectors
5285 */
5286 static void
ice_get_compat_fv_bitmap(struct ice_hw * hw,struct ice_adv_rule_info * rinfo,unsigned long * bm)5287 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
5288 unsigned long *bm)
5289 {
5290 enum ice_prof_type prof_type;
5291
5292 bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
5293
5294 switch (rinfo->tun_type) {
5295 case ICE_NON_TUN:
5296 prof_type = ICE_PROF_NON_TUN;
5297 break;
5298 case ICE_ALL_TUNNELS:
5299 prof_type = ICE_PROF_TUN_ALL;
5300 break;
5301 case ICE_SW_TUN_GENEVE:
5302 case ICE_SW_TUN_VXLAN:
5303 prof_type = ICE_PROF_TUN_UDP;
5304 break;
5305 case ICE_SW_TUN_NVGRE:
5306 prof_type = ICE_PROF_TUN_GRE;
5307 break;
5308 case ICE_SW_TUN_GTPU:
5309 prof_type = ICE_PROF_TUN_GTPU;
5310 break;
5311 case ICE_SW_TUN_GTPC:
5312 prof_type = ICE_PROF_TUN_GTPC;
5313 break;
5314 case ICE_SW_TUN_AND_NON_TUN:
5315 default:
5316 prof_type = ICE_PROF_ALL;
5317 break;
5318 }
5319
5320 ice_get_sw_fv_bitmap(hw, prof_type, bm);
5321 }
5322
/**
 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
 * @hw: pointer to hardware structure
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *	structure per protocol header
 * @lkups_cnt: number of protocols
 * @rinfo: other information regarding the rule e.g. priority and action info
 * @rid: return the recipe ID of the recipe created
 *
 * Builds the extraction word list from @lkups, reuses an existing recipe if
 * one already matches, otherwise creates a new recipe (possibly chained) and
 * associates it with all compatible profiles.
 *
 * Return: 0 on success (with *@rid set), negative error code otherwise.
 */
static int
ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		   u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
{
	DECLARE_BITMAP(fv_bitmap, ICE_MAX_NUM_PROFILES);
	DECLARE_BITMAP(profiles, ICE_MAX_NUM_PROFILES);
	struct ice_prot_lkup_ext *lkup_exts;
	struct ice_recp_grp_entry *r_entry;
	struct ice_sw_fv_list_entry *fvit;
	struct ice_recp_grp_entry *r_tmp;
	struct ice_sw_fv_list_entry *tmp;
	struct ice_sw_recipe *rm;
	int status = 0;
	u8 i;

	if (!lkups_cnt)
		return -EINVAL;

	lkup_exts = kzalloc(sizeof(*lkup_exts), GFP_KERNEL);
	if (!lkup_exts)
		return -ENOMEM;

	/* Determine the number of words to be matched and if it exceeds a
	 * recipe's restrictions
	 */
	for (i = 0; i < lkups_cnt; i++) {
		u16 count;

		if (lkups[i].type >= ICE_PROTOCOL_LAST) {
			status = -EIO;
			goto err_free_lkup_exts;
		}

		/* a lookup that contributes no valid words is an error */
		count = ice_fill_valid_words(&lkups[i], lkup_exts);
		if (!count) {
			status = -EIO;
			goto err_free_lkup_exts;
		}
	}

	rm = kzalloc(sizeof(*rm), GFP_KERNEL);
	if (!rm) {
		status = -ENOMEM;
		goto err_free_lkup_exts;
	}

	/* Get field vectors that contain fields extracted from all the protocol
	 * headers being programmed.
	 */
	INIT_LIST_HEAD(&rm->fv_list);
	INIT_LIST_HEAD(&rm->rg_list);

	/* Get bitmap of field vectors (profiles) that are compatible with the
	 * rule request; only these will be searched in the subsequent call to
	 * ice_get_sw_fv_list.
	 */
	ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);

	status = ice_get_sw_fv_list(hw, lkup_exts, fv_bitmap, &rm->fv_list);
	if (status)
		goto err_unroll;

	/* Group match words into recipes using preferred recipe grouping
	 * criteria.
	 */
	status = ice_create_recipe_group(hw, rm, lkup_exts);
	if (status)
		goto err_unroll;

	/* set the recipe priority if specified */
	rm->priority = (u8)rinfo->priority;

	rm->need_pass_l2 = rinfo->need_pass_l2;
	rm->allow_pass_l2 = rinfo->allow_pass_l2;

	/* Find offsets from the field vector. Pick the first one for all the
	 * recipes.
	 */
	status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
	if (status)
		goto err_unroll;

	/* get bitmap of all profiles the recipe will be associated with */
	bitmap_zero(profiles, ICE_MAX_NUM_PROFILES);
	list_for_each_entry(fvit, &rm->fv_list, list_entry) {
		ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
		set_bit((u16)fvit->profile_id, profiles);
	}

	/* Look for a recipe which matches our requested fv / mask list */
	*rid = ice_find_recp(hw, lkup_exts, rinfo);
	if (*rid < ICE_MAX_NUM_RECIPES)
		/* Success if found a recipe that match the existing criteria.
		 * Note: status is still 0 here, so the shared cleanup path
		 * below returns success.
		 */
		goto err_unroll;

	rm->tun_type = rinfo->tun_type;
	/* Recipe we need does not exist, add a recipe */
	status = ice_add_sw_recipe(hw, rm, profiles);
	if (status)
		goto err_unroll;

	/* Associate all the recipes created with all the profiles in the
	 * common field vector.
	 */
	list_for_each_entry(fvit, &rm->fv_list, list_entry) {
		DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
		u64 recp_assoc;
		u16 j;

		/* read-modify-write the profile's recipe association mask */
		status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
						      &recp_assoc, NULL);
		if (status)
			goto err_unroll;

		bitmap_from_arr64(r_bitmap, &recp_assoc, ICE_MAX_NUM_RECIPES);
		bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap,
			  ICE_MAX_NUM_RECIPES);
		status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
		if (status)
			goto err_unroll;

		bitmap_to_arr64(&recp_assoc, r_bitmap, ICE_MAX_NUM_RECIPES);
		status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
						      recp_assoc, NULL);
		ice_release_change_lock(hw);

		if (status)
			goto err_unroll;

		/* Update profile to recipe bitmap array */
		bitmap_copy(profile_to_recipe[fvit->profile_id], r_bitmap,
			    ICE_MAX_NUM_RECIPES);

		/* Update recipe to profile bitmap array */
		for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
			set_bit((u16)fvit->profile_id, recipe_to_profile[j]);
	}

	*rid = rm->root_rid;
	memcpy(&hw->switch_info->recp_list[*rid].lkup_exts, lkup_exts,
	       sizeof(*lkup_exts));
	/* fall through: the cleanup below runs on success as well, freeing
	 * the temporary group/fv lists that are no longer needed
	 */
err_unroll:
	list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) {
		list_del(&r_entry->l_entry);
		devm_kfree(ice_hw_to_dev(hw), r_entry);
	}

	list_for_each_entry_safe(fvit, tmp, &rm->fv_list, list_entry) {
		list_del(&fvit->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fvit);
	}

	devm_kfree(ice_hw_to_dev(hw), rm->root_buf);
	kfree(rm);

err_free_lkup_exts:
	kfree(lkup_exts);

	return status;
}
5492
/**
 * ice_dummy_packet_add_vlan - insert VLAN header to dummy pkt
 *
 * @dummy_pkt: dummy packet profile pattern to which VLAN tag(s) will be added
 * @num_vlan: number of VLAN tags (1 for single-tag, 2 for QinQ)
 *
 * Builds a new heap-allocated profile whose packet template and offset table
 * are copies of @dummy_pkt with @num_vlan VLAN header(s) spliced in right
 * after the Ethernet addresses. The returned profile is marked
 * ICE_PKT_KMALLOC so the caller knows to free it (and its offsets/pkt).
 *
 * Return: new profile on success, ERR_PTR(-EINVAL) for an unsupported
 * @num_vlan, ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct ice_dummy_pkt_profile *
ice_dummy_packet_add_vlan(const struct ice_dummy_pkt_profile *dummy_pkt,
			  u32 num_vlan)
{
	struct ice_dummy_pkt_profile *profile;
	struct ice_dummy_pkt_offsets *offsets;
	u32 buf_len, off, etype_off, i;
	u8 *pkt;

	if (num_vlan < 1 || num_vlan > 2)
		return ERR_PTR(-EINVAL);

	/* number of bytes the splice shifts every later header by */
	off = num_vlan * VLAN_HLEN;

	/* offset table grows by one entry per inserted VLAN header */
	buf_len = array_size(num_vlan, sizeof(ice_dummy_vlan_packet_offsets)) +
		  dummy_pkt->offsets_len;
	offsets = kzalloc(buf_len, GFP_KERNEL);
	if (!offsets)
		return ERR_PTR(-ENOMEM);

	/* entry 0 (outer MAC) is unchanged; VLAN entries follow it */
	offsets[0] = dummy_pkt->offsets[0];
	if (num_vlan == 2) {
		offsets[1] = ice_dummy_qinq_packet_offsets[0];
		offsets[2] = ice_dummy_qinq_packet_offsets[1];
	} else if (num_vlan == 1) {
		offsets[1] = ice_dummy_vlan_packet_offsets[0];
	}

	/* remaining entries keep their type but shift by the VLAN bytes */
	for (i = 1; dummy_pkt->offsets[i].type != ICE_PROTOCOL_LAST; i++) {
		offsets[i + num_vlan].type = dummy_pkt->offsets[i].type;
		offsets[i + num_vlan].offset =
			dummy_pkt->offsets[i].offset + off;
	}
	/* copy the ICE_PROTOCOL_LAST terminator entry */
	offsets[i + num_vlan] = dummy_pkt->offsets[i];

	/* VLAN headers are inserted where the EtherType used to start */
	etype_off = dummy_pkt->offsets[1].offset;

	buf_len = array_size(num_vlan, sizeof(ice_dummy_vlan_packet)) +
		  dummy_pkt->pkt_len;
	pkt = kzalloc(buf_len, GFP_KERNEL);
	if (!pkt) {
		kfree(offsets);
		return ERR_PTR(-ENOMEM);
	}

	/* splice: bytes before etype, VLAN header(s), rest of packet */
	memcpy(pkt, dummy_pkt->pkt, etype_off);
	memcpy(pkt + etype_off,
	       num_vlan == 2 ? ice_dummy_qinq_packet : ice_dummy_vlan_packet,
	       off);
	memcpy(pkt + etype_off + off, dummy_pkt->pkt + etype_off,
	       dummy_pkt->pkt_len - etype_off);

	profile = kzalloc(sizeof(*profile), GFP_KERNEL);
	if (!profile) {
		kfree(offsets);
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}

	/* profile takes ownership of offsets and pkt */
	profile->offsets = offsets;
	profile->pkt = pkt;
	profile->pkt_len = buf_len;
	profile->match |= ICE_PKT_KMALLOC;

	return profile;
}
5565
5566 /**
5567 * ice_find_dummy_packet - find dummy packet
5568 *
5569 * @lkups: lookup elements or match criteria for the advanced recipe, one
5570 * structure per protocol header
5571 * @lkups_cnt: number of protocols
5572 * @tun_type: tunnel type
5573 *
5574 * Returns the &ice_dummy_pkt_profile corresponding to these lookup params.
5575 */
5576 static const struct ice_dummy_pkt_profile *
ice_find_dummy_packet(struct ice_adv_lkup_elem * lkups,u16 lkups_cnt,enum ice_sw_tunnel_type tun_type)5577 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5578 enum ice_sw_tunnel_type tun_type)
5579 {
5580 const struct ice_dummy_pkt_profile *ret = ice_dummy_pkt_profiles;
5581 u32 match = 0, vlan_count = 0;
5582 u16 i;
5583
5584 switch (tun_type) {
5585 case ICE_SW_TUN_GTPC:
5586 match |= ICE_PKT_TUN_GTPC;
5587 break;
5588 case ICE_SW_TUN_GTPU:
5589 match |= ICE_PKT_TUN_GTPU;
5590 break;
5591 case ICE_SW_TUN_NVGRE:
5592 match |= ICE_PKT_TUN_NVGRE;
5593 break;
5594 case ICE_SW_TUN_GENEVE:
5595 case ICE_SW_TUN_VXLAN:
5596 match |= ICE_PKT_TUN_UDP;
5597 break;
5598 default:
5599 break;
5600 }
5601
5602 for (i = 0; i < lkups_cnt; i++) {
5603 if (lkups[i].type == ICE_UDP_ILOS)
5604 match |= ICE_PKT_INNER_UDP;
5605 else if (lkups[i].type == ICE_TCP_IL)
5606 match |= ICE_PKT_INNER_TCP;
5607 else if (lkups[i].type == ICE_IPV6_OFOS)
5608 match |= ICE_PKT_OUTER_IPV6;
5609 else if (lkups[i].type == ICE_VLAN_OFOS ||
5610 lkups[i].type == ICE_VLAN_EX)
5611 vlan_count++;
5612 else if (lkups[i].type == ICE_VLAN_IN)
5613 vlan_count++;
5614 else if (lkups[i].type == ICE_ETYPE_OL &&
5615 lkups[i].h_u.ethertype.ethtype_id ==
5616 cpu_to_be16(ICE_IPV6_ETHER_ID) &&
5617 lkups[i].m_u.ethertype.ethtype_id ==
5618 cpu_to_be16(0xFFFF))
5619 match |= ICE_PKT_OUTER_IPV6;
5620 else if (lkups[i].type == ICE_ETYPE_IL &&
5621 lkups[i].h_u.ethertype.ethtype_id ==
5622 cpu_to_be16(ICE_IPV6_ETHER_ID) &&
5623 lkups[i].m_u.ethertype.ethtype_id ==
5624 cpu_to_be16(0xFFFF))
5625 match |= ICE_PKT_INNER_IPV6;
5626 else if (lkups[i].type == ICE_IPV6_IL)
5627 match |= ICE_PKT_INNER_IPV6;
5628 else if (lkups[i].type == ICE_GTP_NO_PAY)
5629 match |= ICE_PKT_GTP_NOPAY;
5630 else if (lkups[i].type == ICE_PPPOE) {
5631 match |= ICE_PKT_PPPOE;
5632 if (lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
5633 htons(PPP_IPV6))
5634 match |= ICE_PKT_OUTER_IPV6;
5635 } else if (lkups[i].type == ICE_L2TPV3)
5636 match |= ICE_PKT_L2TPV3;
5637 }
5638
5639 while (ret->match && (match & ret->match) != ret->match)
5640 ret++;
5641
5642 if (vlan_count != 0)
5643 ret = ice_dummy_packet_add_vlan(ret, vlan_count);
5644
5645 return ret;
5646 }
5647
/**
 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
 *
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *	   structure per protocol header
 * @lkups_cnt: number of protocols
 * @s_rule: stores rule information from the match criteria
 * @profile: dummy packet profile (the template, its size and header offsets)
 *
 * Copies the profile's packet template into @s_rule->hdr_data, then for each
 * lookup element overwrites only the bits selected by the element's mask at
 * the header offset recorded in the profile's offset table.
 *
 * Return: 0 on success, -EINVAL if a lookup type is unknown or absent from
 * the profile's offsets, -EIO if a header length is not word-aligned.
 */
static int
ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
			  struct ice_sw_rule_lkup_rx_tx *s_rule,
			  const struct ice_dummy_pkt_profile *profile)
{
	u8 *pkt;
	u16 i;

	/* Start with a packet with a pre-defined/dummy content. Then, fill
	 * in the header values to be looked up or matched.
	 */
	pkt = s_rule->hdr_data;

	memcpy(pkt, profile->pkt, profile->pkt_len);

	for (i = 0; i < lkups_cnt; i++) {
		const struct ice_dummy_pkt_offsets *offsets = profile->offsets;
		enum ice_protocol_type type;
		u16 offset = 0, len = 0, j;
		bool found = false;

		/* find the start of this layer; it should be found since this
		 * was already checked when search for the dummy packet
		 */
		type = lkups[i].type;
		/* metadata isn't present in the packet */
		if (type == ICE_HW_METADATA)
			continue;

		for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
			if (type == offsets[j].type) {
				offset = offsets[j].offset;
				found = true;
				break;
			}
		}
		/* this should never happen in a correct calling sequence */
		if (!found)
			return -EINVAL;

		/* map the lookup type to the byte length of its header */
		switch (lkups[i].type) {
		case ICE_MAC_OFOS:
		case ICE_MAC_IL:
			len = sizeof(struct ice_ether_hdr);
			break;
		case ICE_ETYPE_OL:
		case ICE_ETYPE_IL:
			len = sizeof(struct ice_ethtype_hdr);
			break;
		case ICE_VLAN_OFOS:
		case ICE_VLAN_EX:
		case ICE_VLAN_IN:
			len = sizeof(struct ice_vlan_hdr);
			break;
		case ICE_IPV4_OFOS:
		case ICE_IPV4_IL:
			len = sizeof(struct ice_ipv4_hdr);
			break;
		case ICE_IPV6_OFOS:
		case ICE_IPV6_IL:
			len = sizeof(struct ice_ipv6_hdr);
			break;
		case ICE_TCP_IL:
		case ICE_UDP_OF:
		case ICE_UDP_ILOS:
			len = sizeof(struct ice_l4_hdr);
			break;
		case ICE_SCTP_IL:
			len = sizeof(struct ice_sctp_hdr);
			break;
		case ICE_NVGRE:
			len = sizeof(struct ice_nvgre_hdr);
			break;
		case ICE_VXLAN:
		case ICE_GENEVE:
			len = sizeof(struct ice_udp_tnl_hdr);
			break;
		case ICE_GTP_NO_PAY:
		case ICE_GTP:
			len = sizeof(struct ice_udp_gtp_hdr);
			break;
		case ICE_PPPOE:
			len = sizeof(struct ice_pppoe_hdr);
			break;
		case ICE_L2TPV3:
			len = sizeof(struct ice_l2tpv3_sess_hdr);
			break;
		default:
			return -EINVAL;
		}

		/* the length should be a word multiple */
		if (len % ICE_BYTES_PER_WORD)
			return -EIO;

		/* We have the offset to the header start, the length, the
		 * caller's header values and mask. Use this information to
		 * copy the data into the dummy packet appropriately based on
		 * the mask. Note that we need to only write the bits as
		 * indicated by the mask to make sure we don't improperly write
		 * over any significant packet data.
		 */
		for (j = 0; j < len / sizeof(u16); j++) {
			u16 *ptr = (u16 *)(pkt + offset);
			u16 mask = lkups[i].m_raw[j];

			/* zero mask means this word is not matched on */
			if (!mask)
				continue;

			ptr[j] = (ptr[j] & ~mask) | (lkups[i].h_raw[j] & mask);
		}
	}

	s_rule->hdr_len = cpu_to_le16(profile->pkt_len);

	return 0;
}
5774
5775 /**
5776 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
5777 * @hw: pointer to the hardware structure
5778 * @tun_type: tunnel type
5779 * @pkt: dummy packet to fill in
5780 * @offsets: offset info for the dummy packet
5781 */
5782 static int
ice_fill_adv_packet_tun(struct ice_hw * hw,enum ice_sw_tunnel_type tun_type,u8 * pkt,const struct ice_dummy_pkt_offsets * offsets)5783 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
5784 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
5785 {
5786 u16 open_port, i;
5787
5788 switch (tun_type) {
5789 case ICE_SW_TUN_VXLAN:
5790 if (!ice_get_open_tunnel_port(hw, &open_port, TNL_VXLAN))
5791 return -EIO;
5792 break;
5793 case ICE_SW_TUN_GENEVE:
5794 if (!ice_get_open_tunnel_port(hw, &open_port, TNL_GENEVE))
5795 return -EIO;
5796 break;
5797 default:
5798 /* Nothing needs to be done for this tunnel type */
5799 return 0;
5800 }
5801
5802 /* Find the outer UDP protocol header and insert the port number */
5803 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
5804 if (offsets[i].type == ICE_UDP_OF) {
5805 struct ice_l4_hdr *hdr;
5806 u16 offset;
5807
5808 offset = offsets[i].offset;
5809 hdr = (struct ice_l4_hdr *)&pkt[offset];
5810 hdr->dst_port = cpu_to_be16(open_port);
5811
5812 return 0;
5813 }
5814 }
5815
5816 return -EIO;
5817 }
5818
5819 /**
5820 * ice_fill_adv_packet_vlan - fill dummy packet with VLAN tag type
5821 * @hw: pointer to hw structure
5822 * @vlan_type: VLAN tag type
5823 * @pkt: dummy packet to fill in
5824 * @offsets: offset info for the dummy packet
5825 */
5826 static int
ice_fill_adv_packet_vlan(struct ice_hw * hw,u16 vlan_type,u8 * pkt,const struct ice_dummy_pkt_offsets * offsets)5827 ice_fill_adv_packet_vlan(struct ice_hw *hw, u16 vlan_type, u8 *pkt,
5828 const struct ice_dummy_pkt_offsets *offsets)
5829 {
5830 u16 i;
5831
5832 /* Check if there is something to do */
5833 if (!vlan_type || !ice_is_dvm_ena(hw))
5834 return 0;
5835
5836 /* Find VLAN header and insert VLAN TPID */
5837 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
5838 if (offsets[i].type == ICE_VLAN_OFOS ||
5839 offsets[i].type == ICE_VLAN_EX) {
5840 struct ice_vlan_hdr *hdr;
5841 u16 offset;
5842
5843 offset = offsets[i].offset;
5844 hdr = (struct ice_vlan_hdr *)&pkt[offset];
5845 hdr->type = cpu_to_be16(vlan_type);
5846
5847 return 0;
5848 }
5849 }
5850
5851 return -EIO;
5852 }
5853
ice_rules_equal(const struct ice_adv_rule_info * first,const struct ice_adv_rule_info * second)5854 static bool ice_rules_equal(const struct ice_adv_rule_info *first,
5855 const struct ice_adv_rule_info *second)
5856 {
5857 return first->sw_act.flag == second->sw_act.flag &&
5858 first->tun_type == second->tun_type &&
5859 first->vlan_type == second->vlan_type &&
5860 first->src_vsi == second->src_vsi &&
5861 first->need_pass_l2 == second->need_pass_l2 &&
5862 first->allow_pass_l2 == second->allow_pass_l2;
5863 }
5864
5865 /**
5866 * ice_find_adv_rule_entry - Search a rule entry
5867 * @hw: pointer to the hardware structure
5868 * @lkups: lookup elements or match criteria for the advanced recipe, one
5869 * structure per protocol header
5870 * @lkups_cnt: number of protocols
5871 * @recp_id: recipe ID for which we are finding the rule
5872 * @rinfo: other information regarding the rule e.g. priority and action info
5873 *
5874 * Helper function to search for a given advance rule entry
5875 * Returns pointer to entry storing the rule if found
5876 */
5877 static struct ice_adv_fltr_mgmt_list_entry *
ice_find_adv_rule_entry(struct ice_hw * hw,struct ice_adv_lkup_elem * lkups,u16 lkups_cnt,u16 recp_id,struct ice_adv_rule_info * rinfo)5878 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5879 u16 lkups_cnt, u16 recp_id,
5880 struct ice_adv_rule_info *rinfo)
5881 {
5882 struct ice_adv_fltr_mgmt_list_entry *list_itr;
5883 struct ice_switch_info *sw = hw->switch_info;
5884 int i;
5885
5886 list_for_each_entry(list_itr, &sw->recp_list[recp_id].filt_rules,
5887 list_entry) {
5888 bool lkups_matched = true;
5889
5890 if (lkups_cnt != list_itr->lkups_cnt)
5891 continue;
5892 for (i = 0; i < list_itr->lkups_cnt; i++)
5893 if (memcmp(&list_itr->lkups[i], &lkups[i],
5894 sizeof(*lkups))) {
5895 lkups_matched = false;
5896 break;
5897 }
5898 if (ice_rules_equal(rinfo, &list_itr->rule_info) &&
5899 lkups_matched)
5900 return list_itr;
5901 }
5902 return NULL;
5903 }
5904
/**
 * ice_adv_add_update_vsi_list
 * @hw: pointer to the hardware structure
 * @m_entry: pointer to current adv filter management list entry
 * @cur_fltr: filter information from the book keeping entry
 * @new_fltr: filter information with the new VSI to be added
 *
 * Call AQ command to add or update previously created VSI list with new VSI.
 *
 * Helper function to do book keeping associated with adding filter information
 * The algorithm to do the booking keeping is described below :
 * When a VSI needs to subscribe to a given advanced filter
 *	if only one VSI has been added till now
 *		Allocate a new VSI list and add two VSIs
 *		to this list using switch rule command
 *		Update the previously created switch rule with the
 *		newly created VSI list ID
 *	if a VSI list was previously created
 *		Add the new VSI to the previously created VSI list set
 *		using the update switch rule command
 */
static int
ice_adv_add_update_vsi_list(struct ice_hw *hw,
			    struct ice_adv_fltr_mgmt_list_entry *m_entry,
			    struct ice_adv_rule_info *cur_fltr,
			    struct ice_adv_rule_info *new_fltr)
{
	u16 vsi_list_id = 0;
	int status;

	/* Queue-based and drop actions cannot be shared through a VSI list */
	if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
	    cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
	    cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
		return -EOPNOTSUPP;

	/* A new queue action cannot be merged into an existing VSI forward */
	if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
		return -EOPNOTSUPP;

	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		/* Only one entry existed in the mapping and it was not already
		 * a part of a VSI list. So, create a VSI list with the old and
		 * new VSIs.
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];

		/* A rule already exists with the new VSI being added */
		if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
		    new_fltr->sw_act.fwd_id.hw_vsi_id)
			return -EEXIST;

		vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
		vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id,
						  ICE_SW_LKUP_LAST);
		if (status)
			return status;

		memset(&tmp_fltr, 0, sizeof(tmp_fltr));
		tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;

		/* Update the previous switch rule of "forward to VSI" to
		 * "fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			return status;

		/* bookkeeping: the rule now forwards to the new list */
		cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);
	} else {
		u16 vsi_handle = new_fltr->sw_act.vsi_handle;

		if (!m_entry->vsi_list_info)
			return -EIO;

		/* A rule already exists with the new VSI being added */
		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
			return 0;

		/* Update the previously created VSI list set with
		 * the new VSI ID passed in
		 */
		vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;

		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
						  vsi_list_id, false,
						  ice_aqc_opc_update_sw_rules,
						  ICE_SW_LKUP_LAST);
		/* update VSI list mapping info with new VSI ID */
		if (!status)
			set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
	}
	if (!status)
		m_entry->vsi_count++;
	return status;
}
6013
/**
 * ice_rule_add_tunnel_metadata - match on tunnel HW metadata flags
 * @lkup: lookup element to fill in
 *
 * Marks @lkup as a HW metadata lookup and ORs ICE_PKT_TUNNEL_MASK into
 * the mask of packet-flags word MDID21.
 */
void ice_rule_add_tunnel_metadata(struct ice_adv_lkup_elem *lkup)
{
	lkup->type = ICE_HW_METADATA;
	lkup->m_u.metadata.flags[ICE_PKT_FLAGS_MDID21] |=
		cpu_to_be16(ICE_PKT_TUNNEL_MASK);
}
6020
/**
 * ice_rule_add_direction_metadata - match on packet-direction metadata
 * @lkup: lookup element to fill in
 *
 * Marks @lkup as a HW metadata lookup and ORs ICE_PKT_FROM_NETWORK into
 * the mask of packet-flags word MDID20.
 */
void ice_rule_add_direction_metadata(struct ice_adv_lkup_elem *lkup)
{
	lkup->type = ICE_HW_METADATA;
	lkup->m_u.metadata.flags[ICE_PKT_FLAGS_MDID20] |=
		cpu_to_be16(ICE_PKT_FROM_NETWORK);
}
6027
/**
 * ice_rule_add_vlan_metadata - match on VLAN HW metadata flags
 * @lkup: lookup element to fill in
 *
 * Marks @lkup as a HW metadata lookup and ORs ICE_PKT_VLAN_MASK into
 * the mask of packet-flags word MDID20.
 */
void ice_rule_add_vlan_metadata(struct ice_adv_lkup_elem *lkup)
{
	lkup->type = ICE_HW_METADATA;
	lkup->m_u.metadata.flags[ICE_PKT_FLAGS_MDID20] |=
		cpu_to_be16(ICE_PKT_VLAN_MASK);
}
6034
/**
 * ice_rule_add_src_vsi_metadata - match on source VSI metadata
 * @lkup: lookup element to fill in
 *
 * Marks @lkup as a HW metadata lookup and sets the source-VSI mask so
 * the rule matches on the packet's source VSI field.
 */
void ice_rule_add_src_vsi_metadata(struct ice_adv_lkup_elem *lkup)
{
	lkup->type = ICE_HW_METADATA;
	lkup->m_u.metadata.source_vsi = cpu_to_be16(ICE_MDID_SOURCE_VSI_MASK);
}
6040
/**
 * ice_add_adv_rule - helper function to create an advanced switch rule
 * @hw: pointer to the hardware structure
 * @lkups: information on the words that needs to be looked up. All words
 *	together makes one recipe
 * @lkups_cnt: num of entries in the lkups array
 * @rinfo: other information related to the rule that needs to be programmed
 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
 *	ignored is case of error.
 *
 * This function can program only 1 rule at a time. The lkups is used to
 * describe the all the words that forms the "lookup" portion of the recipe.
 * These words can span multiple protocols. Callers to this function need to
 * pass in a list of protocol headers with lookup information along and mask
 * that determines which words are valid from the given protocol header.
 * rinfo describes other information related to this rule such as forwarding
 * IDs, priority of this rule, etc.
 */
int
ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
		 struct ice_rule_query_data *added_entry)
{
	struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
	struct ice_sw_rule_lkup_rx_tx *s_rule = NULL;
	const struct ice_dummy_pkt_profile *profile;
	u16 rid = 0, i, rule_buf_sz, vsi_handle;
	struct list_head *rule_head;
	struct ice_switch_info *sw;
	u16 word_cnt;
	u32 act = 0;
	int status;
	u8 q_rgn;

	/* Initialize profile to result index bitmap */
	if (!hw->switch_info->prof_res_bm_init) {
		hw->switch_info->prof_res_bm_init = 1;
		ice_init_prof_result_bm(hw);
	}

	if (!lkups_cnt)
		return -EINVAL;

	/* get # of words we need to match */
	word_cnt = 0;
	for (i = 0; i < lkups_cnt; i++) {
		u16 j;

		/* a word counts only when its mask has at least one bit set */
		for (j = 0; j < ARRAY_SIZE(lkups->m_raw); j++)
			if (lkups[i].m_raw[j])
				word_cnt++;
	}

	if (!word_cnt)
		return -EINVAL;

	if (word_cnt > ICE_MAX_CHAIN_WORDS)
		return -ENOSPC;

	/* locate a dummy packet */
	profile = ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type);
	if (IS_ERR(profile))
		return PTR_ERR(profile);

	/* only these filter actions are supported for advanced rules */
	if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
	      rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
	      rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
	      rinfo->sw_act.fltr_act == ICE_DROP_PACKET ||
	      rinfo->sw_act.fltr_act == ICE_NOP)) {
		status = -EIO;
		goto free_pkt_profile;
	}

	vsi_handle = rinfo->sw_act.vsi_handle;
	if (!ice_is_vsi_valid(hw, vsi_handle)) {
		status = -EINVAL;
		goto free_pkt_profile;
	}

	/* resolve the driver VSI handle to the HW VSI number */
	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
	    rinfo->sw_act.fltr_act == ICE_NOP)
		rinfo->sw_act.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, vsi_handle);

	if (rinfo->src_vsi)
		rinfo->sw_act.src = ice_get_hw_vsi_num(hw, rinfo->src_vsi);
	else
		rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);

	status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
	if (status)
		goto free_pkt_profile;
	m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
	if (m_entry) {
		/* we have to add VSI to VSI_LIST and increment vsi_count.
		 * Also Update VSI list so that we can change forwarding rule
		 * if the rule already exists, we will check if it exists with
		 * same vsi_id, if not then add it to the VSI list if it already
		 * exists if not then create a VSI list and add the existing VSI
		 * ID and the new VSI ID to the list
		 * We will add that VSI to the list
		 */
		status = ice_adv_add_update_vsi_list(hw, m_entry,
						     &m_entry->rule_info,
						     rinfo);
		if (added_entry) {
			added_entry->rid = rid;
			added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
			added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
		}
		goto free_pkt_profile;
	}
	rule_buf_sz = ICE_SW_RULE_RX_TX_HDR_SIZE(s_rule, profile->pkt_len);
	s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
	if (!s_rule) {
		status = -ENOMEM;
		goto free_pkt_profile;
	}
	/* default to LAN + loopback enable unless the caller supplied
	 * explicit action flags
	 */
	if (!rinfo->flags_info.act_valid) {
		act |= ICE_SINGLE_ACT_LAN_ENABLE;
		act |= ICE_SINGLE_ACT_LB_ENABLE;
	} else {
		act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE |
						ICE_SINGLE_ACT_LB_ENABLE);
	}

	switch (rinfo->sw_act.fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
			ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
		       ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_FWD_TO_QGRP:
		/* queue region is encoded as log2 of the group size */
		q_rgn = rinfo->sw_act.qgrp_size > 0 ?
			(u8)ilog2(rinfo->sw_act.qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
		       ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
		       ICE_SINGLE_ACT_Q_REGION_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
		       ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_NOP:
		/* NOP keeps the VSI ID but clears the valid bit */
		act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M,
				  rinfo->sw_act.fwd_id.hw_vsi_id);
		act &= ~ICE_SINGLE_ACT_VALID_BIT;
		break;
	default:
		status = -EIO;
		goto err_ice_add_adv_rule;
	}

	/* If there is no matching criteria for direction there
	 * is only one difference between Rx and Tx:
	 * - get switch id base on VSI number from source field (Tx)
	 * - get switch id base on port number (Rx)
	 *
	 * If matching on direction metadata is chose rule direction is
	 * extracted from type value set here.
	 */
	if (rinfo->sw_act.flag & ICE_FLTR_TX) {
		s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
		s_rule->src = cpu_to_le16(rinfo->sw_act.src);
	} else {
		s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
		s_rule->src = cpu_to_le16(hw->port_info->lport);
	}

	s_rule->recipe_id = cpu_to_le16(rid);
	s_rule->act = cpu_to_le32(act);

	status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, profile);
	if (status)
		goto err_ice_add_adv_rule;

	/* patch the tunnel port and VLAN TPID into the dummy packet */
	status = ice_fill_adv_packet_tun(hw, rinfo->tun_type, s_rule->hdr_data,
					 profile->offsets);
	if (status)
		goto err_ice_add_adv_rule;

	status = ice_fill_adv_packet_vlan(hw, rinfo->vlan_type,
					  s_rule->hdr_data,
					  profile->offsets);
	if (status)
		goto err_ice_add_adv_rule;

	status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
				 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
				 NULL);
	if (status)
		goto err_ice_add_adv_rule;
	/* NOTE(review): from here on, an allocation failure frees the
	 * bookkeeping entry but does not remove the rule just added to HW -
	 * confirm callers tolerate this.
	 */
	adv_fltr = devm_kzalloc(ice_hw_to_dev(hw),
				sizeof(struct ice_adv_fltr_mgmt_list_entry),
				GFP_KERNEL);
	if (!adv_fltr) {
		status = -ENOMEM;
		goto err_ice_add_adv_rule;
	}

	adv_fltr->lkups = devm_kmemdup(ice_hw_to_dev(hw), lkups,
				       lkups_cnt * sizeof(*lkups), GFP_KERNEL);
	if (!adv_fltr->lkups) {
		status = -ENOMEM;
		goto err_ice_add_adv_rule;
	}

	adv_fltr->lkups_cnt = lkups_cnt;
	adv_fltr->rule_info = *rinfo;
	adv_fltr->rule_info.fltr_rule_id = le16_to_cpu(s_rule->index);
	sw = hw->switch_info;
	sw->recp_list[rid].adv_rule = true;
	rule_head = &sw->recp_list[rid].filt_rules;

	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
		adv_fltr->vsi_count = 1;

	/* Add rule entry to book keeping list */
	list_add(&adv_fltr->list_entry, rule_head);
	if (added_entry) {
		added_entry->rid = rid;
		added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
		added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
	}
err_ice_add_adv_rule:
	if (status && adv_fltr) {
		devm_kfree(ice_hw_to_dev(hw), adv_fltr->lkups);
		devm_kfree(ice_hw_to_dev(hw), adv_fltr);
	}

	kfree(s_rule);

free_pkt_profile:
	/* dynamically built profiles are flagged ICE_PKT_KMALLOC and are
	 * owned (and freed) by this function
	 */
	if (profile->match & ICE_PKT_KMALLOC) {
		kfree(profile->offsets);
		kfree(profile->pkt);
		kfree(profile);
	}

	return status;
}
6289
6290 /**
6291 * ice_replay_vsi_fltr - Replay filters for requested VSI
6292 * @hw: pointer to the hardware structure
6293 * @vsi_handle: driver VSI handle
6294 * @recp_id: Recipe ID for which rules need to be replayed
6295 * @list_head: list for which filters need to be replayed
6296 *
6297 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
6298 * It is required to pass valid VSI handle.
6299 */
6300 static int
ice_replay_vsi_fltr(struct ice_hw * hw,u16 vsi_handle,u8 recp_id,struct list_head * list_head)6301 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
6302 struct list_head *list_head)
6303 {
6304 struct ice_fltr_mgmt_list_entry *itr;
6305 int status = 0;
6306 u16 hw_vsi_id;
6307
6308 if (list_empty(list_head))
6309 return status;
6310 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
6311
6312 list_for_each_entry(itr, list_head, list_entry) {
6313 struct ice_fltr_list_entry f_entry;
6314
6315 f_entry.fltr_info = itr->fltr_info;
6316 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
6317 itr->fltr_info.vsi_handle == vsi_handle) {
6318 /* update the src in case it is VSI num */
6319 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6320 f_entry.fltr_info.src = hw_vsi_id;
6321 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6322 if (status)
6323 goto end;
6324 continue;
6325 }
6326 if (!itr->vsi_list_info ||
6327 !test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
6328 continue;
6329 f_entry.fltr_info.vsi_handle = vsi_handle;
6330 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6331 /* update the src in case it is VSI num */
6332 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6333 f_entry.fltr_info.src = hw_vsi_id;
6334 if (recp_id == ICE_SW_LKUP_VLAN)
6335 status = ice_add_vlan_internal(hw, &f_entry);
6336 else
6337 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6338 if (status)
6339 goto end;
6340 }
6341 end:
6342 return status;
6343 }
6344
/**
 * ice_adv_rem_update_vsi_list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry for which the VSI list management needs to
 *	be done
 *
 * Removes @vsi_handle from the rule's VSI list; when only one subscriber
 * remains, converts the rule back to a plain forward-to-VSI and deletes
 * the now-unneeded VSI list.
 */
static int
ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			    struct ice_adv_fltr_mgmt_list_entry *fm_list)
{
	struct ice_vsi_list_map_info *vsi_list_info;
	enum ice_sw_lkup_type lkup_type;
	u16 vsi_list_id;
	int status;

	/* only VSI-list rules with at least one subscriber can be updated */
	if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return -EINVAL;

	/* A rule with the VSI being removed does not exist */
	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
		return -ENOENT;

	lkup_type = ICE_SW_LKUP_LAST;
	vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	/* bookkeeping: drop the VSI from the local map as well */
	fm_list->vsi_count--;
	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
	vsi_list_info = fm_list->vsi_list_info;
	if (fm_list->vsi_count == 1) {
		struct ice_fltr_info tmp_fltr;
		u16 rem_vsi_handle;

		/* find the single remaining subscriber */
		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
						ICE_MAX_VSI);
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return -EIO;

		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		memset(&tmp_fltr, 0, sizeof(tmp_fltr));
		tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
		tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
		fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;

		/* Update the previous switch rule of "MAC forward to VSI" to
		 * "MAC fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr.fwd_id.hw_vsi_id, status);
			return status;
		}
		fm_list->vsi_list_info->ref_cnt--;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
			return status;
		}

		list_del(&vsi_list_info->list_entry);
		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}
6434
/**
 * ice_rem_adv_rule - removes existing advanced switch rule
 * @hw: pointer to the hardware structure
 * @lkups: information on the words that needs to be looked up. All words
 *	together makes one recipe
 * @lkups_cnt: num of entries in the lkups array
 * @rinfo: Its the pointer to the rule information for the rule
 *
 * This function can be used to remove 1 rule at a time. The lkups is
 * used to describe all the words that forms the "lookup" portion of the
 * rule. These words can span multiple protocols. Callers to this function
 * need to pass in a list of protocol headers with lookup information along
 * and mask that determines which words are valid from the given protocol
 * header. rinfo describes other information related to this rule such as
 * forwarding IDs, priority of this rule, etc.
 */
static int
ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
{
	struct ice_adv_fltr_mgmt_list_entry *list_elem;
	struct ice_prot_lkup_ext lkup_exts;
	bool remove_rule = false;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	u16 i, rid, vsi_handle;
	int status = 0;

	/* re-derive the recipe from the lookups to locate the rule */
	memset(&lkup_exts, 0, sizeof(lkup_exts));
	for (i = 0; i < lkups_cnt; i++) {
		u16 count;

		if (lkups[i].type >= ICE_PROTOCOL_LAST)
			return -EIO;

		count = ice_fill_valid_words(&lkups[i], &lkup_exts);
		if (!count)
			return -EIO;
	}

	rid = ice_find_recp(hw, &lkup_exts, rinfo);
	/* If did not find a recipe that match the existing criteria */
	if (rid == ICE_MAX_NUM_RECIPES)
		return -EINVAL;

	rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
	/* NOTE(review): the entry lookup happens outside rule_lock - confirm
	 * callers serialize add/remove for the same recipe.
	 */
	list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
	/* the rule is already removed */
	if (!list_elem)
		return 0;
	mutex_lock(rule_lock);
	if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
		remove_rule = true;
	} else if (list_elem->vsi_count > 1) {
		/* other subscribers remain: just drop this VSI from the list */
		remove_rule = false;
		vsi_handle = rinfo->sw_act.vsi_handle;
		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
	} else {
		vsi_handle = rinfo->sw_act.vsi_handle;
		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
		if (status) {
			mutex_unlock(rule_lock);
			return status;
		}
		if (list_elem->vsi_count == 0)
			remove_rule = true;
	}
	mutex_unlock(rule_lock);
	if (remove_rule) {
		struct ice_sw_rule_lkup_rx_tx *s_rule;
		u16 rule_buf_sz;

		rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule);
		s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
		if (!s_rule)
			return -ENOMEM;
		s_rule->act = 0;
		s_rule->index = cpu_to_le16(list_elem->rule_info.fltr_rule_id);
		s_rule->hdr_len = 0;
		status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
					 rule_buf_sz, 1,
					 ice_aqc_opc_remove_sw_rules, NULL);
		/* -ENOENT means the rule was already gone from HW; still
		 * remove our bookkeeping entry in that case
		 */
		if (!status || status == -ENOENT) {
			struct ice_switch_info *sw = hw->switch_info;

			mutex_lock(rule_lock);
			list_del(&list_elem->list_entry);
			devm_kfree(ice_hw_to_dev(hw), list_elem->lkups);
			devm_kfree(ice_hw_to_dev(hw), list_elem);
			mutex_unlock(rule_lock);
			if (list_empty(&sw->recp_list[rid].filt_rules))
				sw->recp_list[rid].adv_rule = false;
		}
		kfree(s_rule);
	}
	return status;
}
6531
6532 /**
6533 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
6534 * @hw: pointer to the hardware structure
6535 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
6536 *
6537 * This function is used to remove 1 rule at a time. The removal is based on
6538 * the remove_entry parameter. This function will remove rule for a given
6539 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
6540 */
6541 int
ice_rem_adv_rule_by_id(struct ice_hw * hw,struct ice_rule_query_data * remove_entry)6542 ice_rem_adv_rule_by_id(struct ice_hw *hw,
6543 struct ice_rule_query_data *remove_entry)
6544 {
6545 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6546 struct list_head *list_head;
6547 struct ice_adv_rule_info rinfo;
6548 struct ice_switch_info *sw;
6549
6550 sw = hw->switch_info;
6551 if (!sw->recp_list[remove_entry->rid].recp_created)
6552 return -EINVAL;
6553 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
6554 list_for_each_entry(list_itr, list_head, list_entry) {
6555 if (list_itr->rule_info.fltr_rule_id ==
6556 remove_entry->rule_id) {
6557 rinfo = list_itr->rule_info;
6558 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
6559 return ice_rem_adv_rule(hw, list_itr->lkups,
6560 list_itr->lkups_cnt, &rinfo);
6561 }
6562 }
6563 /* either list is empty or unable to find rule */
6564 return -ENOENT;
6565 }
6566
6567 /**
6568 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
6569 * @hw: pointer to the hardware structure
6570 * @vsi_handle: driver VSI handle
6571 * @list_head: list for which filters need to be replayed
6572 *
6573 * Replay the advanced rule for the given VSI.
6574 */
6575 static int
ice_replay_vsi_adv_rule(struct ice_hw * hw,u16 vsi_handle,struct list_head * list_head)6576 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
6577 struct list_head *list_head)
6578 {
6579 struct ice_rule_query_data added_entry = { 0 };
6580 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
6581 int status = 0;
6582
6583 if (list_empty(list_head))
6584 return status;
6585 list_for_each_entry(adv_fltr, list_head, list_entry) {
6586 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
6587 u16 lk_cnt = adv_fltr->lkups_cnt;
6588
6589 if (vsi_handle != rinfo->sw_act.vsi_handle)
6590 continue;
6591 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
6592 &added_entry);
6593 if (status)
6594 break;
6595 }
6596 return status;
6597 }
6598
6599 /**
6600 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
6601 * @hw: pointer to the hardware structure
6602 * @vsi_handle: driver VSI handle
6603 *
6604 * Replays filters for requested VSI via vsi_handle.
6605 */
ice_replay_vsi_all_fltr(struct ice_hw * hw,u16 vsi_handle)6606 int ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
6607 {
6608 struct ice_switch_info *sw = hw->switch_info;
6609 int status;
6610 u8 i;
6611
6612 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6613 struct list_head *head;
6614
6615 head = &sw->recp_list[i].filt_replay_rules;
6616 if (!sw->recp_list[i].adv_rule)
6617 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
6618 else
6619 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
6620 if (status)
6621 return status;
6622 }
6623 return status;
6624 }
6625
6626 /**
6627 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
6628 * @hw: pointer to the HW struct
6629 *
6630 * Deletes the filter replay rules.
6631 */
ice_rm_all_sw_replay_rule_info(struct ice_hw * hw)6632 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
6633 {
6634 struct ice_switch_info *sw = hw->switch_info;
6635 u8 i;
6636
6637 if (!sw)
6638 return;
6639
6640 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6641 if (!list_empty(&sw->recp_list[i].filt_replay_rules)) {
6642 struct list_head *l_head;
6643
6644 l_head = &sw->recp_list[i].filt_replay_rules;
6645 if (!sw->recp_list[i].adv_rule)
6646 ice_rem_sw_rule_info(hw, l_head);
6647 else
6648 ice_rem_adv_rule_info(hw, l_head);
6649 }
6650 }
6651 }
6652