1 /*
2 * QEMU rocker switch emulation - OF-DPA flow processing support
3 *
4 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17 #include "qemu/osdep.h"
18 #include "net/eth.h"
19 #include "qapi/error.h"
20 #include "qapi/qapi-commands-rocker.h"
21 #include "qemu/iov.h"
22 #include "qemu/timer.h"
23
24 #include "rocker.h"
25 #include "rocker_hw.h"
26 #include "rocker_fp.h"
27 #include "rocker_tlv.h"
28 #include "rocker_world.h"
29 #include "rocker_desc.h"
30 #include "rocker_of_dpa.h"
31
/* All-zeros MAC: wildcard/"unset" sentinel in key compares and rewrites. */
static const MACAddr zero_mac = { .a = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } };
/* All-ones MAC: an exact-match mask for a learned station address. */
static const MACAddr ff_mac = { .a = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } };

/* Per-world OF-DPA state: flow and group tables are GLib hash tables
 * keyed by flow cookie and group id respectively.
 */
typedef struct of_dpa {
    World *world;
    GHashTable *flow_tbl;      /* OfDpaFlow entries, keyed by cookie */
    GHashTable *group_tbl;     /* OfDpaGroup entries, keyed by group id */
    unsigned int flow_tbl_max_size;   /* NOTE(review): capacity limits; */
    unsigned int group_tbl_max_size;  /* enforcement not visible in this chunk */
} OfDpa;
42
/* flow_key stolen mostly from OVS
 *
 * Note: fields that compare with network packet header fields
 * are stored in network order (BE) to avoid per-packet field
 * byte-swaps.
 */

typedef struct of_dpa_flow_key {
    uint32_t in_pport;         /* ingress port */
    uint32_t tunnel_id;        /* overlay tunnel id */
    uint32_t tbl_id;           /* table id */
    struct {
        __be16 vlan_id;        /* 0 if no VLAN */
        MACAddr src;           /* ethernet source address */
        MACAddr dst;           /* ethernet destination address */
        __be16 type;           /* ethernet frame type */
    } eth;
    struct {
        uint8_t proto;         /* IP protocol or ARP opcode */
        uint8_t tos;           /* IP ToS */
        uint8_t ttl;           /* IP TTL/hop limit */
        uint8_t frag;          /* one of FRAG_TYPE_* */
    } ip;
    union {
        struct {
            struct {
                __be32 src;    /* IP source address */
                __be32 dst;    /* IP destination address */
            } addr;
            union {
                struct {
                    __be16 src;   /* TCP/UDP/SCTP source port */
                    __be16 dst;   /* TCP/UDP/SCTP destination port */
                    __be16 flags; /* TCP flags */
                } tp;
                struct {
                    MACAddr sha;  /* ARP source hardware address */
                    MACAddr tha;  /* ARP target hardware address */
                } arp;
            };
        } ipv4;
        struct {
            struct {
                Ipv6Addr src;  /* IPv6 source address */
                Ipv6Addr dst;  /* IPv6 destination address */
            } addr;
            __be32 label;      /* IPv6 flow label */
            struct {
                __be16 src;    /* TCP/UDP/SCTP source port */
                __be16 dst;    /* TCP/UDP/SCTP destination port */
                __be16 flags;  /* TCP flags */
            } tp;
            struct {
                Ipv6Addr target; /* ND target address */
                MACAddr sll;     /* ND source link layer address */
                MACAddr tll;     /* ND target link layer address */
            } nd;
        } ipv6;
    };
    /* Number of significant uint64_t words of this key; keys, masks
     * and packet values are compared word-by-word up to 'width'
     * (see _of_dpa_flow_match). */
    int width;
} OfDpaFlowKey;

/* Width of key which includes field 'f' in u64s, rounded up */
#define FLOW_KEY_WIDTH(f) \
    DIV_ROUND_UP(offsetof(OfDpaFlowKey, f) + sizeof_field(OfDpaFlowKey, f), \
                 sizeof(uint64_t))
109
/* A flow's actions.  "apply" actions take effect while the packet
 * traverses the table; "write" actions are merged into the packet's
 * action set and executed at egress (of_dpa_eg).
 */
typedef struct of_dpa_flow_action {
    uint32_t goto_tbl;              /* next pipeline table; 0 = none */
    struct {
        uint32_t group_id;          /* egress group (ROCKER_GROUP_NONE if unset) */
        uint32_t tun_log_lport;     /* tunnel logical port */
        __be16 vlan_id;
    } write;
    struct {
        __be16 new_vlan_id;         /* VLAN tag to insert; 0 = none */
        uint32_t out_pport;
        uint8_t copy_to_cpu;        /* also deliver a copy to the CPU port */
        __be16 vlan_id;
    } apply;
} OfDpaFlowAction;

/* One installed flow table entry, hashed by 'cookie'. */
typedef struct of_dpa_flow {
    uint32_t lpm;                   /* longest-prefix-match weight (tie breaker) */
    uint32_t priority;              /* higher wins among matching flows */
    uint32_t hardtime;              /* NOTE(review): aging fields; enforcement
                                     * not visible in this chunk */
    uint32_t idletime;
    uint64_t cookie;                /* driver-assigned 64-bit flow handle */
    OfDpaFlowKey key;
    OfDpaFlowKey mask;              /* 1-bits select key bits that must match */
    OfDpaFlowAction action;
    struct {
        uint64_t hits;              /* lookup hit count */
        int64_t install_time;       /* seconds, virtual clock */
        int64_t refresh_time;       /* last MAC-learning notification time */
        uint64_t rx_pkts;
        uint64_t tx_pkts;
    } stats;
} OfDpaFlow;

/* Pointers into the parsed packet (or into the context's spare headers
 * after VLAN insertion); NULL when the corresponding header is absent.
 */
typedef struct of_dpa_flow_pkt_fields {
    uint32_t tunnel_id;
    struct eth_header *ethhdr;
    __be16 *h_proto;                /* effective ethertype (after any VLAN tag) */
    struct vlan_header *vlanhdr;
    struct ip_header *ipv4hdr;
    struct ip6_header *ipv6hdr;
    Ipv6Addr *ipv6_src_addr;
    Ipv6Addr *ipv6_dst_addr;
} OfDpaFlowPktFields;

/* Per-packet pipeline state.  fc->iov is the caller's iovec prefixed
 * with two header vectors (iov[0] eth, iov[1] vlan) so headers can be
 * rewritten/stripped/inserted without copying packet data.
 */
typedef struct of_dpa_flow_context {
    uint32_t in_pport;                  /* ingress port */
    uint32_t tunnel_id;
    struct iovec *iov;
    int iovcnt;
    struct eth_header ethhdr_rewrite;   /* scratch for rewritten eth header */
    struct vlan_header vlanhdr_rewrite; /* scratch for rewritten vlan header */
    struct vlan_header vlanhdr;         /* scratch for an inserted vlan tag */
    OfDpa *of_dpa;
    OfDpaFlowPktFields fields;
    OfDpaFlowAction action_set;         /* accumulated "write" actions */
} OfDpaFlowContext;

/* Lookup request/result: 'value' is the key built from the packet,
 * 'best' is filled in with the winning flow (or NULL).
 */
typedef struct of_dpa_flow_match {
    OfDpaFlowKey value;
    OfDpaFlow *best;
} OfDpaFlowMatch;
171
/* One group table entry; the active union member is selected by
 * ROCKER_GROUP_TYPE_GET(id) (see of_dpa_eg).
 */
typedef struct of_dpa_group {
    uint32_t id;
    union {
        struct {
            uint32_t out_pport;     /* egress port; 0 = deliver to CPU */
            uint8_t pop_vlan;       /* strip VLAN tag on output */
        } l2_interface;
        struct {
            uint32_t group_id;      /* chained L2 interface group */
            MACAddr src_mac;        /* all-zeros = leave unchanged */
            MACAddr dst_mac;        /* all-zeros = leave unchanged */
            __be16 vlan_id;
        } l2_rewrite;
        struct {
            uint16_t group_count;
            uint32_t *group_ids;    /* member groups to replicate to */
        } l2_flood;
        struct {
            uint32_t group_id;      /* chained L2 interface group */
            MACAddr src_mac;
            MACAddr dst_mac;
            __be16 vlan_id;
            uint8_t ttl_check;      /* XXX not implemented at output time */
        } l3_unicast;
    };
} OfDpaGroup;
198
/* Convert a network-order IPv4 netmask into its CIDR prefix length,
 * e.g. 255.255.255.0 -> 24.  An all-zeros mask yields 0.
 */
static int of_dpa_mask2prefix(__be32 mask)
{
    uint32_t host_mask = ntohl(mask);   /* hoisted: loop-invariant */
    int count = 32;
    int i;

    for (i = 0; i < 32; i++) {
        /* (2U << i) - 1 selects the low (i + 1) bits; if none are set,
         * the prefix is at least one bit shorter.  The shift must be
         * unsigned: "2 << 31" overflows a signed int, which is
         * undefined behavior (CERT INT34-C).  2U << 31 wraps to 0, so
         * the i == 31 step correctly tests all 32 bits.
         */
        if (!(host_mask & ((2U << i) - 1))) {
            count--;
        }
    }

    return count;
}
212
#if defined(DEBUG_ROCKER)
/* Pretty-print a flow key (and optional mask) to the debug log.
 * Masked fields print as "value/mask"; exact-match masks are elided.
 * Debug-only: buf is sized generously but sprintf is unbounded.
 */
static void of_dpa_flow_key_dump(OfDpaFlowKey *key, OfDpaFlowKey *mask)
{
    char buf[512], *b = buf, *mac;

    b += sprintf(b, " tbl %2d", key->tbl_id);

    if (key->in_pport || (mask && mask->in_pport)) {
        b += sprintf(b, " in_pport %2d", key->in_pport);
        if (mask && mask->in_pport != 0xffffffff) {
            /* fix: print the mask after '/', not the key again */
            b += sprintf(b, "/0x%08x", mask->in_pport);
        }
    }

    if (key->tunnel_id || (mask && mask->tunnel_id)) {
        b += sprintf(b, " tun %8d", key->tunnel_id);
        if (mask && mask->tunnel_id != 0xffffffff) {
            /* fix: print the mask after '/', not the key again */
            b += sprintf(b, "/0x%08x", mask->tunnel_id);
        }
    }

    if (key->eth.vlan_id || (mask && mask->eth.vlan_id)) {
        b += sprintf(b, " vlan %4d", ntohs(key->eth.vlan_id));
        if (mask && mask->eth.vlan_id != 0xffff) {
            /* fix: print the mask after '/', not the key again */
            b += sprintf(b, "/0x%04x", ntohs(mask->eth.vlan_id));
        }
    }

    if (memcmp(key->eth.src.a, zero_mac.a, ETH_ALEN) ||
        (mask && memcmp(mask->eth.src.a, zero_mac.a, ETH_ALEN))) {
        mac = qemu_mac_strdup_printf(key->eth.src.a);
        b += sprintf(b, " src %s", mac);
        g_free(mac);
        if (mask && memcmp(mask->eth.src.a, ff_mac.a, ETH_ALEN)) {
            mac = qemu_mac_strdup_printf(mask->eth.src.a);
            b += sprintf(b, "/%s", mac);
            g_free(mac);
        }
    }

    if (memcmp(key->eth.dst.a, zero_mac.a, ETH_ALEN) ||
        (mask && memcmp(mask->eth.dst.a, zero_mac.a, ETH_ALEN))) {
        mac = qemu_mac_strdup_printf(key->eth.dst.a);
        b += sprintf(b, " dst %s", mac);
        g_free(mac);
        if (mask && memcmp(mask->eth.dst.a, ff_mac.a, ETH_ALEN)) {
            mac = qemu_mac_strdup_printf(mask->eth.dst.a);
            b += sprintf(b, "/%s", mac);
            g_free(mac);
        }
    }

    if (key->eth.type || (mask && mask->eth.type)) {
        b += sprintf(b, " type 0x%04x", ntohs(key->eth.type));
        if (mask && mask->eth.type != 0xffff) {
            b += sprintf(b, "/0x%04x", ntohs(mask->eth.type));
        }
        switch (ntohs(key->eth.type)) {
        case 0x0800:    /* IPv4 */
        case 0x86dd:    /* IPv6 */
            if (key->ip.proto || (mask && mask->ip.proto)) {
                b += sprintf(b, " ip proto %2d", key->ip.proto);
                if (mask && mask->ip.proto != 0xff) {
                    b += sprintf(b, "/0x%02x", mask->ip.proto);
                }
            }
            if (key->ip.tos || (mask && mask->ip.tos)) {
                b += sprintf(b, " ip tos %2d", key->ip.tos);
                if (mask && mask->ip.tos != 0xff) {
                    b += sprintf(b, "/0x%02x", mask->ip.tos);
                }
            }
            break;
        }
        switch (ntohs(key->eth.type)) {
        case 0x0800:    /* IPv4 */
            if (key->ipv4.addr.dst || (mask && mask->ipv4.addr.dst)) {
                b += sprintf(b, " dst %s",
                    inet_ntoa(*(struct in_addr *)&key->ipv4.addr.dst));
                if (mask) {
                    b += sprintf(b, "/%d",
                                 of_dpa_mask2prefix(mask->ipv4.addr.dst));
                }
            }
            break;
        }
    }

    DPRINTF("%s\n", buf);
}
#else
#define of_dpa_flow_key_dump(k, m)
#endif
306
_of_dpa_flow_match(void * key,void * value,void * user_data)307 static void _of_dpa_flow_match(void *key, void *value, void *user_data)
308 {
309 OfDpaFlow *flow = value;
310 OfDpaFlowMatch *match = user_data;
311 uint64_t *k = (uint64_t *)&flow->key;
312 uint64_t *m = (uint64_t *)&flow->mask;
313 uint64_t *v = (uint64_t *)&match->value;
314 int i;
315
316 if (flow->key.tbl_id == match->value.tbl_id) {
317 of_dpa_flow_key_dump(&flow->key, &flow->mask);
318 }
319
320 if (flow->key.width > match->value.width) {
321 return;
322 }
323
324 for (i = 0; i < flow->key.width; i++, k++, m++, v++) {
325 if ((~*k & *m & *v) | (*k & *m & ~*v)) {
326 return;
327 }
328 }
329
330 DPRINTF("match\n");
331
332 if (!match->best ||
333 flow->priority > match->best->priority ||
334 flow->lpm > match->best->lpm) {
335 match->best = flow;
336 }
337 }
338
/* Search the flow table for the best entry matching match->value.
 * Returns the winning flow or NULL (also left in match->best).
 */
static OfDpaFlow *of_dpa_flow_match(OfDpa *of_dpa, OfDpaFlowMatch *match)
{
    DPRINTF("\nnew search\n");
    of_dpa_flow_key_dump(&match->value, NULL);

    g_hash_table_foreach(of_dpa->flow_tbl, _of_dpa_flow_match, match);

    return match->best;
}
348
/* Look up a flow by its driver-assigned 64-bit cookie. */
static OfDpaFlow *of_dpa_flow_find(OfDpa *of_dpa, uint64_t cookie)
{
    return g_hash_table_lookup(of_dpa->flow_tbl, &cookie);
}
353
/* Insert a flow, keyed by a pointer to its embedded cookie (the key
 * stays valid for exactly as long as the entry does).  Always ROCKER_OK.
 */
static int of_dpa_flow_add(OfDpa *of_dpa, OfDpaFlow *flow)
{
    g_hash_table_insert(of_dpa->flow_tbl, &flow->cookie, flow);

    return ROCKER_OK;
}
360
/* Remove a flow from the table.  NOTE(review): whether the entry is
 * freed here depends on the destroy notifier installed when flow_tbl
 * was created (not visible in this chunk) — confirm ownership.
 */
static void of_dpa_flow_del(OfDpa *of_dpa, OfDpaFlow *flow)
{
    g_hash_table_remove(of_dpa->flow_tbl, &flow->cookie);
}
365
of_dpa_flow_alloc(uint64_t cookie)366 static OfDpaFlow *of_dpa_flow_alloc(uint64_t cookie)
367 {
368 OfDpaFlow *flow;
369 int64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) / 1000;
370
371 flow = g_new0(OfDpaFlow, 1);
372
373 flow->cookie = cookie;
374 flow->mask.tbl_id = 0xffffffff;
375
376 flow->stats.install_time = flow->stats.refresh_time = now;
377
378 return flow;
379 }
380
/* Point iov[0]/iov[1] back at the packet's original ethernet and vlan
 * headers, undoing any prior rewrite/strip/insert.  iov[1] becomes
 * zero-length when the packet carries no VLAN tag.
 */
static void of_dpa_flow_pkt_hdr_reset(OfDpaFlowContext *fc)
{
    OfDpaFlowPktFields *fields = &fc->fields;

    fc->iov[0].iov_base = fields->ethhdr;
    fc->iov[0].iov_len = sizeof(struct eth_header);
    fc->iov[1].iov_base = fields->vlanhdr;
    fc->iov[1].iov_len = fields->vlanhdr ? sizeof(struct vlan_header) : 0;
}
390
/* Locate the packet's header fields and build fc->iov: a copy of the
 * caller's iovec with two new leading vectors for the ethernet and
 * vlan headers, so a VLAN tag can later be inserted, stripped or
 * rewritten without copying packet data.
 * NOTE(review): assumes the whole eth/vlan/ip header chain lives in
 * iov[0] — confirm callers guarantee this.
 */
static void of_dpa_flow_pkt_parse(OfDpaFlowContext *fc,
                                  const struct iovec *iov, int iovcnt)
{
    OfDpaFlowPktFields *fields = &fc->fields;
    size_t sofar = 0;   /* bytes of iov[0] required so far */
    int i;

    sofar += sizeof(struct eth_header);
    if (iov->iov_len < sofar) {
        DPRINTF("flow_pkt_parse underrun on eth_header\n");
        return;
    }

    fields->ethhdr = iov->iov_base;
    fields->h_proto = &fields->ethhdr->h_proto;

    /* 802.1Q tagged: the effective ethertype follows the vlan header */
    if (ntohs(*fields->h_proto) == ETH_P_VLAN) {
        sofar += sizeof(struct vlan_header);
        if (iov->iov_len < sofar) {
            DPRINTF("flow_pkt_parse underrun on vlan_header\n");
            return;
        }
        fields->vlanhdr = (struct vlan_header *)(fields->ethhdr + 1);
        fields->h_proto = &fields->vlanhdr->h_proto;
    }

    switch (ntohs(*fields->h_proto)) {
    case ETH_P_IP:
        sofar += sizeof(struct ip_header);
        if (iov->iov_len < sofar) {
            DPRINTF("flow_pkt_parse underrun on ip_header\n");
            return;
        }
        fields->ipv4hdr = (struct ip_header *)(fields->h_proto + 1);
        break;
    case ETH_P_IPV6:
        sofar += sizeof(struct ip6_header);
        if (iov->iov_len < sofar) {
            DPRINTF("flow_pkt_parse underrun on ip6_header\n");
            return;
        }
        fields->ipv6hdr = (struct ip6_header *)(fields->h_proto + 1);
        break;
    }

    /* To facilitate (potential) VLAN tag insertion, Make a
     * copy of the iov and insert two new vectors at the
     * beginning for eth hdr and vlan hdr. No data is copied,
     * just the vectors.
     */

    of_dpa_flow_pkt_hdr_reset(fc);

    /* iov[2] covers whatever is left of the first source vector */
    fc->iov[2].iov_base = fields->h_proto + 1;
    fc->iov[2].iov_len = iov->iov_len - fc->iov[0].iov_len - fc->iov[1].iov_len;

    for (i = 1; i < iovcnt; i++) {
        fc->iov[i + 2] = iov[i];
    }

    fc->iovcnt = iovcnt + 2;
}
453
/* Insert an 802.1Q tag without copying packet data: the tag is built
 * in the context's spare vlanhdr and iov[1] is pointed at it, while
 * the original ethertype moves into the new vlan header.  No-op (with
 * a debug message) if the packet already carries a tag.
 */
static void of_dpa_flow_pkt_insert_vlan(OfDpaFlowContext *fc, __be16 vlan_id)
{
    OfDpaFlowPktFields *fields = &fc->fields;
    uint16_t h_proto = fields->ethhdr->h_proto;  /* original ethertype (BE) */

    if (fields->vlanhdr) {
        DPRINTF("flow_pkt_insert_vlan packet already has vlan\n");
        return;
    }

    /* note: rewrites the ethertype in the original packet buffer */
    fields->ethhdr->h_proto = htons(ETH_P_VLAN);
    fields->vlanhdr = &fc->vlanhdr;
    fields->vlanhdr->h_tci = vlan_id;
    fields->vlanhdr->h_proto = h_proto;
    fields->h_proto = &fields->vlanhdr->h_proto;

    fc->iov[1].iov_base = fields->vlanhdr;
    fc->iov[1].iov_len = sizeof(struct vlan_header);
}
473
/* Logically remove the VLAN tag without copying data: iov[0] is
 * shortened to end just before the outer ethertype, and iov[1] is
 * repointed at the inner (2-byte) ethertype, so the 4-byte TPID+TCI
 * simply falls out of the gather list.  No-op on untagged packets.
 */
static void of_dpa_flow_pkt_strip_vlan(OfDpaFlowContext *fc)
{
    OfDpaFlowPktFields *fields = &fc->fields;

    if (!fields->vlanhdr) {
        return;
    }

    fc->iov[0].iov_len -= sizeof(fields->ethhdr->h_proto);
    fc->iov[1].iov_base = fields->h_proto;
    fc->iov[1].iov_len = sizeof(fields->ethhdr->h_proto);
}
486
/* Redirect iov[0]/iov[1] at rewritten copies of the eth/vlan headers.
 * An all-zeros src/dst MAC means "leave that address unchanged"; the
 * VLAN TCI is rewritten only when the packet has a tag and vlan_id is
 * non-zero.  Packet bytes are never modified; only the iovecs move.
 */
static void of_dpa_flow_pkt_hdr_rewrite(OfDpaFlowContext *fc,
                                        uint8_t *src_mac, uint8_t *dst_mac,
                                        __be16 vlan_id)
{
    OfDpaFlowPktFields *fields = &fc->fields;

    if (src_mac || dst_mac) {
        memcpy(&fc->ethhdr_rewrite, fields->ethhdr, sizeof(struct eth_header));
        if (src_mac && memcmp(src_mac, zero_mac.a, ETH_ALEN)) {
            memcpy(fc->ethhdr_rewrite.h_source, src_mac, ETH_ALEN);
        }
        if (dst_mac && memcmp(dst_mac, zero_mac.a, ETH_ALEN)) {
            memcpy(fc->ethhdr_rewrite.h_dest, dst_mac, ETH_ALEN);
        }
        fc->iov[0].iov_base = &fc->ethhdr_rewrite;
    }

    if (vlan_id && fields->vlanhdr) {
        /* NOTE(review): this copies fc->vlanhdr (the context's spare),
         * which only holds the tag when it was inserted by
         * of_dpa_flow_pkt_insert_vlan; for a tag parsed off the wire,
         * fields->vlanhdr points into packet data instead — confirm
         * *fields->vlanhdr wasn't intended here. */
        fc->vlanhdr_rewrite = fc->vlanhdr;
        fc->vlanhdr_rewrite.h_tci = vlan_id;
        fc->iov[1].iov_base = &fc->vlanhdr_rewrite;
    }
}
510
511 static void of_dpa_flow_ig_tbl(OfDpaFlowContext *fc, uint32_t tbl_id);
512
/* Build the ingress-port table lookup key: match on in_pport only. */
static void of_dpa_ig_port_build_match(OfDpaFlowContext *fc,
                                       OfDpaFlowMatch *match)
{
    OfDpaFlowKey *v = &match->value;

    v->in_pport = fc->in_pport;
    v->tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
    v->width = FLOW_KEY_WIDTH(tbl_id);
}
520
/* Ingress-port table miss handler. */
static void of_dpa_ig_port_miss(OfDpaFlowContext *fc)
{
    uint32_t port;

    /* The default on miss is for packets from physical ports
     * to go to the VLAN Flow Table. There is no default rule
     * for packets from logical ports, which are dropped on miss.
     */
    if (!fp_port_from_pport(fc->in_pport, &port)) {
        return;
    }

    of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_VLAN);
}
534
/* Build the VLAN table lookup key: in_pport plus the 802.1Q TCI
 * (left zero for untagged frames).
 */
static void of_dpa_vlan_build_match(OfDpaFlowContext *fc,
                                    OfDpaFlowMatch *match)
{
    OfDpaFlowKey *v = &match->value;

    v->tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
    v->in_pport = fc->in_pport;
    if (fc->fields.vlanhdr) {
        v->eth.vlan_id = fc->fields.vlanhdr->h_tci;
    }
    v->width = FLOW_KEY_WIDTH(eth.vlan_id);
}
545
/* VLAN table apply action: push a tag when the flow asks for one. */
static void of_dpa_vlan_insert(OfDpaFlowContext *fc,
                               OfDpaFlow *flow)
{
    __be16 new_id = flow->action.apply.new_vlan_id;

    if (new_id) {
        of_dpa_flow_pkt_insert_vlan(fc, new_id);
    }
}
553
/* Build the termination-MAC lookup key: port, ethertype, vlan and
 * destination MAC.
 */
static void of_dpa_term_mac_build_match(OfDpaFlowContext *fc,
                                        OfDpaFlowMatch *match)
{
    OfDpaFlowKey *v = &match->value;

    v->tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
    v->in_pport = fc->in_pport;
    v->eth.type = *fc->fields.h_proto;
    v->eth.vlan_id = fc->fields.vlanhdr->h_tci;
    memcpy(v->eth.dst.a, fc->fields.ethhdr->h_dest, sizeof(v->eth.dst.a));
    v->width = FLOW_KEY_WIDTH(eth.type);
}
565
/* Termination-MAC miss: continue with L2 bridging. */
static void of_dpa_term_mac_miss(OfDpaFlowContext *fc)
{
    of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_BRIDGING);
}
570
/* Record the flow's apply actions in the packet's action set: the
 * copy-to-CPU flag and the flow's matched vlan id (used by of_dpa_eg
 * to select the CPU-bound L2 interface group).
 */
static void of_dpa_apply_actions(OfDpaFlowContext *fc,
                                 OfDpaFlow *flow)
{
    fc->action_set.apply.copy_to_cpu = flow->action.apply.copy_to_cpu;
    fc->action_set.apply.vlan_id = flow->key.eth.vlan_id;
}
577
/* Build the bridging lookup key: destination MAC qualified by either
 * the VLAN id (tagged frames) or the overlay tunnel id.
 */
static void of_dpa_bridging_build_match(OfDpaFlowContext *fc,
                                        OfDpaFlowMatch *match)
{
    OfDpaFlowKey *v = &match->value;

    v->tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
    if (fc->fields.vlanhdr) {
        v->eth.vlan_id = fc->fields.vlanhdr->h_tci;
    } else if (fc->tunnel_id) {
        v->tunnel_id = fc->tunnel_id;
    }
    memcpy(v->eth.dst.a, fc->fields.ethhdr->h_dest, sizeof(v->eth.dst.a));
    v->width = FLOW_KEY_WIDTH(eth.dst);
}
591
/* MAC learning: look up the packet's src_mac/vlan in the bridge table
 * and notify the driver of a new or refreshed binding, rate-limited
 * to one notification per refresh_delay per exact-match flow.
 * dst_flow (the hit flow, NULL on miss) is currently unused.
 */
static void of_dpa_bridging_learn(OfDpaFlowContext *fc,
                                  OfDpaFlow *dst_flow)
{
    OfDpaFlowMatch match = { { 0, }, };
    OfDpaFlow *flow;
    uint8_t *addr;
    uint16_t vlan_id;
    int64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) / 1000;  /* seconds */
    int64_t refresh_delay = 1;  /* min seconds between notifications */

    /* Do a lookup in bridge table by src_mac/vlan */

    addr = fc->fields.ethhdr->h_source;
    vlan_id = fc->fields.vlanhdr->h_tci;

    match.value.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
    match.value.eth.vlan_id = vlan_id;
    memcpy(match.value.eth.dst.a, addr, sizeof(match.value.eth.dst.a));
    match.value.width = FLOW_KEY_WIDTH(eth.dst);

    flow = of_dpa_flow_match(fc->of_dpa, &match);
    if (flow) {
        /* only exact-match (ff:ff:ff:ff:ff:ff mask) entries count as
         * learned stations */
        if (!memcmp(flow->mask.eth.dst.a, ff_mac.a,
                    sizeof(flow->mask.eth.dst.a))) {
            /* src_mac/vlan already learned; if in_port and out_port
             * don't match, the end station has moved and the port
             * needs updating */
            /* XXX implement the in_port/out_port check */
            if (now - flow->stats.refresh_time < refresh_delay) {
                return;
            }
            flow->stats.refresh_time = now;
        }
    }

    /* Let driver know about mac/vlan. This may be a new mac/vlan
     * or a refresh of existing mac/vlan that's been hit after the
     * refresh_delay.
     */

    rocker_event_mac_vlan_seen(world_rocker(fc->of_dpa->world),
                               fc->in_pport, addr, vlan_id);
}
635
/* Bridging miss: still learn the source MAC, then fall through to the
 * ACL policy table.
 */
static void of_dpa_bridging_miss(OfDpaFlowContext *fc)
{
    of_dpa_bridging_learn(fc, NULL);
    of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_ACL_POLICY);
}
641
/* Merge the bridging flow's write actions into the action set. */
static void of_dpa_bridging_action_write(OfDpaFlowContext *fc,
                                         OfDpaFlow *flow)
{
    uint32_t group_id = flow->action.write.group_id;

    if (group_id != ROCKER_GROUP_NONE) {
        fc->action_set.write.group_id = group_id;
    }
    fc->action_set.write.tun_log_lport = flow->action.write.tun_log_lport;
}
650
/* Build the unicast-routing lookup key: ethertype plus the IPv4 or
 * IPv6 destination address (whichever header was parsed).
 */
static void of_dpa_unicast_routing_build_match(OfDpaFlowContext *fc,
                                               OfDpaFlowMatch *match)
{
    OfDpaFlowKey *v = &match->value;

    v->tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
    v->eth.type = *fc->fields.h_proto;
    if (fc->fields.ipv4hdr) {
        v->ipv4.addr.dst = fc->fields.ipv4hdr->ip_dst;
    }
    if (fc->fields.ipv6_dst_addr) {
        memcpy(&v->ipv6.addr.dst, fc->fields.ipv6_dst_addr,
               sizeof(v->ipv6.addr.dst));
    }
    v->width = FLOW_KEY_WIDTH(ipv6.addr.dst);
}
665
/* Unicast-routing miss: fall through to the ACL policy table. */
static void of_dpa_unicast_routing_miss(OfDpaFlowContext *fc)
{
    of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_ACL_POLICY);
}
670
/* Merge the unicast-routing flow's write group into the action set. */
static void of_dpa_unicast_routing_action_write(OfDpaFlowContext *fc,
                                                OfDpaFlow *flow)
{
    uint32_t group_id = flow->action.write.group_id;

    if (group_id != ROCKER_GROUP_NONE) {
        fc->action_set.write.group_id = group_id;
    }
}
678
679 static void
of_dpa_multicast_routing_build_match(OfDpaFlowContext * fc,OfDpaFlowMatch * match)680 of_dpa_multicast_routing_build_match(OfDpaFlowContext *fc,
681 OfDpaFlowMatch *match)
682 {
683 match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
684 match->value.eth.type = *fc->fields.h_proto;
685 match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
686 if (fc->fields.ipv4hdr) {
687 match->value.ipv4.addr.src = fc->fields.ipv4hdr->ip_src;
688 match->value.ipv4.addr.dst = fc->fields.ipv4hdr->ip_dst;
689 }
690 if (fc->fields.ipv6_src_addr) {
691 memcpy(&match->value.ipv6.addr.src, fc->fields.ipv6_src_addr,
692 sizeof(match->value.ipv6.addr.src));
693 }
694 if (fc->fields.ipv6_dst_addr) {
695 memcpy(&match->value.ipv6.addr.dst, fc->fields.ipv6_dst_addr,
696 sizeof(match->value.ipv6.addr.dst));
697 }
698 match->value.width = FLOW_KEY_WIDTH(ipv6.addr.dst);
699 }
700
/* Multicast-routing miss: fall through to the ACL policy table. */
static void of_dpa_multicast_routing_miss(OfDpaFlowContext *fc)
{
    of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_ACL_POLICY);
}
705
706 static void
of_dpa_multicast_routing_action_write(OfDpaFlowContext * fc,OfDpaFlow * flow)707 of_dpa_multicast_routing_action_write(OfDpaFlowContext *fc,
708 OfDpaFlow *flow)
709 {
710 if (flow->action.write.group_id != ROCKER_GROUP_NONE) {
711 fc->action_set.write.group_id = flow->action.write.group_id;
712 }
713 fc->action_set.write.vlan_id = flow->action.write.vlan_id;
714 }
715
/* Build the ACL policy lookup key: port, both MACs, ethertype and
 * vlan; extended with IP proto/ToS when an IP header was parsed.
 */
static void of_dpa_acl_build_match(OfDpaFlowContext *fc,
                                   OfDpaFlowMatch *match)
{
    OfDpaFlowKey *v = &match->value;

    v->tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
    v->in_pport = fc->in_pport;
    memcpy(v->eth.src.a, fc->fields.ethhdr->h_source, sizeof(v->eth.src.a));
    memcpy(v->eth.dst.a, fc->fields.ethhdr->h_dest, sizeof(v->eth.dst.a));
    v->eth.type = *fc->fields.h_proto;
    v->eth.vlan_id = fc->fields.vlanhdr->h_tci;
    v->width = FLOW_KEY_WIDTH(eth.type);

    if (fc->fields.ipv4hdr) {
        v->ip.proto = fc->fields.ipv4hdr->ip_p;
        v->ip.tos = fc->fields.ipv4hdr->ip_tos;
        v->width = FLOW_KEY_WIDTH(ip.tos);
    } else if (fc->fields.ipv6hdr) {
        v->ip.proto = fc->fields.ipv6hdr->ip6_ctlun.ip6_un1.ip6_un1_nxt;
        v->ip.tos = 0; /* XXX what goes here? */
        v->width = FLOW_KEY_WIDTH(ip.tos);
    }
}
739
740 static void of_dpa_eg(OfDpaFlowContext *fc);
/* ACL hit: the pipeline ends here — run egress processing.
 * dst_flow is unused.
 */
static void of_dpa_acl_hit(OfDpaFlowContext *fc,
                           OfDpaFlow *dst_flow)
{
    of_dpa_eg(fc);
}
746
/* Merge the ACL flow's write group into the action set. */
static void of_dpa_acl_action_write(OfDpaFlowContext *fc,
                                    OfDpaFlow *flow)
{
    uint32_t group_id = flow->action.write.group_id;

    if (group_id != ROCKER_GROUP_NONE) {
        fc->action_set.write.group_id = group_id;
    }
}
754
/* Default hit_no_goto handler: intentionally do nothing, which drops
 * the packet (no egress is ever invoked for it).
 */
static void of_dpa_drop(OfDpaFlowContext *fc)
{
    /* drop packet */
}
759
/* Look up a group table entry by group id; NULL if absent. */
static OfDpaGroup *of_dpa_group_find(OfDpa *of_dpa,
                                     uint32_t group_id)
{
    return g_hash_table_lookup(of_dpa->group_tbl, &group_id);
}
765
/* Insert a group, keyed by a pointer to its embedded id (the key stays
 * valid as long as the entry does).  Always returns 0.
 */
static int of_dpa_group_add(OfDpa *of_dpa, OfDpaGroup *group)
{
    g_hash_table_insert(of_dpa->group_tbl, &group->id, group);

    return 0;
}
772
#if 0
/* Disabled: group modify is unimplemented (XXX below). */
static int of_dpa_group_mod(OfDpa *of_dpa, OfDpaGroup *group)
{
    OfDpaGroup *old_group = of_dpa_group_find(of_dpa, group->id);

    if (!old_group) {
        return -ENOENT;
    }

    /* XXX */

    return 0;
}
#endif
787
/* Remove a group from the group table.  Always returns 0.
 * NOTE(review): freeing depends on the table's destroy notifier,
 * which is set up outside this chunk.
 */
static int of_dpa_group_del(OfDpa *of_dpa, OfDpaGroup *group)
{
    g_hash_table_remove(of_dpa->group_tbl, &group->id);

    return 0;
}
794
#if 0
/* Disabled: group statistics retrieval is unimplemented (XXX below). */
static int of_dpa_group_get_stats(OfDpa *of_dpa, uint32_t id)
{
    OfDpaGroup *group = of_dpa_group_find(of_dpa, id);

    if (!group) {
        return -ENOENT;
    }

    /* XXX get/return stats */

    return 0;
}
#endif
809
of_dpa_group_alloc(uint32_t id)810 static OfDpaGroup *of_dpa_group_alloc(uint32_t id)
811 {
812 OfDpaGroup *group = g_new0(OfDpaGroup, 1);
813
814 group->id = id;
815
816 return group;
817 }
818
/* Transmit the packet out an L2 interface group: optionally pop the
 * VLAN tag, then either deliver to the CPU (out_pport 0) or egress
 * the physical port.
 */
static void of_dpa_output_l2_interface(OfDpaFlowContext *fc,
                                       OfDpaGroup *group)
{
    uint8_t copy_to_cpu = fc->action_set.apply.copy_to_cpu;
    uint32_t out_pport = group->l2_interface.out_pport;

    if (group->l2_interface.pop_vlan) {
        of_dpa_flow_pkt_strip_vlan(fc);
    }

    /* Note: By default, and as per the OpenFlow 1.3.1
     * specification, a packet cannot be forwarded back
     * to the IN_PORT from which it came in. An action
     * bucket that specifies the particular packet's
     * egress port is not evaluated.
     */

    if (out_pport == 0) {
        rx_produce(fc->of_dpa->world, fc->in_pport, fc->iov, fc->iovcnt,
                   copy_to_cpu);
    } else if (out_pport != fc->in_pport) {
        rocker_port_eg(world_rocker(fc->of_dpa->world), out_pport,
                       fc->iov, fc->iovcnt);
    }
}
844
/* L2 rewrite group: rewrite the eth/vlan headers, then hand off to
 * the chained L2 interface group.  A dangling chain id drops the pkt.
 */
static void of_dpa_output_l2_rewrite(OfDpaFlowContext *fc,
                                     OfDpaGroup *group)
{
    OfDpaGroup *l2_group;

    l2_group = of_dpa_group_find(fc->of_dpa, group->l2_rewrite.group_id);
    if (!l2_group) {
        return;
    }

    of_dpa_flow_pkt_hdr_rewrite(fc, group->l2_rewrite.src_mac.a,
                                group->l2_rewrite.dst_mac.a,
                                group->l2_rewrite.vlan_id);
    of_dpa_output_l2_interface(fc, l2_group);
}
860
/* L2 flood/mcast group: replicate the packet to every member group,
 * resetting the header iovecs before each copy so one member's
 * rewrite or strip doesn't leak into the next.
 */
static void of_dpa_output_l2_flood(OfDpaFlowContext *fc,
                                   OfDpaGroup *group)
{
    int i;

    for (i = 0; i < group->l2_flood.group_count; i++) {
        OfDpaGroup *member;

        of_dpa_flow_pkt_hdr_reset(fc);
        member = of_dpa_group_find(fc->of_dpa, group->l2_flood.group_ids[i]);
        if (!member) {
            continue;   /* skip dangling member ids */
        }

        switch (ROCKER_GROUP_TYPE_GET(member->id)) {
        case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
            of_dpa_output_l2_interface(fc, member);
            break;
        case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
            of_dpa_output_l2_rewrite(fc, member);
            break;
        }
    }
}
883
/* L3 unicast group: rewrite MACs/vlan for the next hop and hand off
 * to the chained L2 interface group.  TTL checking is still missing.
 */
static void of_dpa_output_l3_unicast(OfDpaFlowContext *fc, OfDpaGroup *group)
{
    OfDpaGroup *l2_group;

    l2_group = of_dpa_group_find(fc->of_dpa, group->l3_unicast.group_id);
    if (!l2_group) {
        return;
    }

    of_dpa_flow_pkt_hdr_rewrite(fc, group->l3_unicast.src_mac.a,
                                group->l3_unicast.dst_mac.a,
                                group->l3_unicast.vlan_id);
    /* XXX need ttl_check */
    of_dpa_output_l2_interface(fc, l2_group);
}
899
/* Egress: execute the packet's accumulated action set.  Optionally
 * deliver a CPU copy first (via the vlan's port-0 L2 interface
 * group), then dispatch on the written group's type.  Packets with
 * no write group are dropped.
 */
static void of_dpa_eg(OfDpaFlowContext *fc)
{
    OfDpaFlowAction *set = &fc->action_set;
    OfDpaGroup *group;
    uint32_t group_id;

    /* send a copy of pkt to CPU (controller)? */

    if (set->apply.copy_to_cpu) {
        group_id = ROCKER_GROUP_L2_INTERFACE(set->apply.vlan_id, 0);
        group = of_dpa_group_find(fc->of_dpa, group_id);
        if (group) {
            of_dpa_output_l2_interface(fc, group);
            /* undo any header changes before normal egress below */
            of_dpa_flow_pkt_hdr_reset(fc);
        }
    }

    /* process group write actions */

    if (!set->write.group_id) {
        return;
    }

    group = of_dpa_group_find(fc->of_dpa, set->write.group_id);
    if (!group) {
        return;
    }

    switch (ROCKER_GROUP_TYPE_GET(group->id)) {
    case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
        of_dpa_output_l2_interface(fc, group);
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
        of_dpa_output_l2_rewrite(fc, group);
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
    case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
        of_dpa_output_l2_flood(fc, group);
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
        of_dpa_output_l3_unicast(fc, group);
        break;
    }
}
944
/* Per-table pipeline callbacks; any may be NULL, in which case
 * of_dpa_flow_ig_tbl() skips that step.
 */
typedef struct of_dpa_flow_tbl_ops {
    void (*build_match)(OfDpaFlowContext *fc, OfDpaFlowMatch *match); /* fill key */
    void (*hit)(OfDpaFlowContext *fc, OfDpaFlow *flow);   /* on table hit */
    void (*miss)(OfDpaFlowContext *fc);                   /* on table miss */
    void (*hit_no_goto)(OfDpaFlowContext *fc);            /* hit with no goto_tbl */
    void (*action_apply)(OfDpaFlowContext *fc, OfDpaFlow *flow);  /* apply actions */
    void (*action_write)(OfDpaFlowContext *fc, OfDpaFlow *flow);  /* write actions */
} OfDpaFlowTblOps;
953
/* Table-id-indexed dispatch for the OF-DPA pipeline (see the OF-DPA
 * table flow: ingress port -> vlan -> term mac -> bridging/routing ->
 * ACL -> egress).
 */
static OfDpaFlowTblOps of_dpa_tbl_ops[] = {
    [ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT] = {
        .build_match = of_dpa_ig_port_build_match,
        .miss = of_dpa_ig_port_miss,
        .hit_no_goto = of_dpa_drop,
    },
    [ROCKER_OF_DPA_TABLE_ID_VLAN] = {
        .build_match = of_dpa_vlan_build_match,
        .hit_no_goto = of_dpa_drop,
        .action_apply = of_dpa_vlan_insert,
    },
    [ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC] = {
        .build_match = of_dpa_term_mac_build_match,
        .miss = of_dpa_term_mac_miss,
        .hit_no_goto = of_dpa_drop,
        .action_apply = of_dpa_apply_actions,
    },
    [ROCKER_OF_DPA_TABLE_ID_BRIDGING] = {
        .build_match = of_dpa_bridging_build_match,
        .hit = of_dpa_bridging_learn,
        .miss = of_dpa_bridging_miss,
        .hit_no_goto = of_dpa_drop,
        .action_apply = of_dpa_apply_actions,
        .action_write = of_dpa_bridging_action_write,
    },
    [ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING] = {
        .build_match = of_dpa_unicast_routing_build_match,
        .miss = of_dpa_unicast_routing_miss,
        .hit_no_goto = of_dpa_drop,
        .action_write = of_dpa_unicast_routing_action_write,
    },
    [ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING] = {
        .build_match = of_dpa_multicast_routing_build_match,
        .miss = of_dpa_multicast_routing_miss,
        .hit_no_goto = of_dpa_drop,
        .action_write = of_dpa_multicast_routing_action_write,
    },
    /* ACL is the last table: a hit (or even a miss) proceeds to egress */
    [ROCKER_OF_DPA_TABLE_ID_ACL_POLICY] = {
        .build_match = of_dpa_acl_build_match,
        .hit = of_dpa_acl_hit,
        .miss = of_dpa_eg,
        .action_apply = of_dpa_apply_actions,
        .action_write = of_dpa_acl_action_write,
    },
};
999
/* Run the packet through one pipeline table: build the lookup key,
 * find the best-matching flow, run its apply/write actions and the
 * table's hit callback, then recurse into the goto table (or invoke
 * hit_no_goto).  A miss runs the table's miss handler; with no miss
 * handler the packet is dropped.
 */
static void of_dpa_flow_ig_tbl(OfDpaFlowContext *fc, uint32_t tbl_id)
{
    OfDpaFlowTblOps *ops = &of_dpa_tbl_ops[tbl_id];
    OfDpaFlowMatch match = { { 0, }, };
    OfDpaFlow *flow;

    if (ops->build_match) {
        ops->build_match(fc, &match);
    } else {
        return;   /* unknown table: drop */
    }

    flow = of_dpa_flow_match(fc->of_dpa, &match);
    if (!flow) {
        if (ops->miss) {
            ops->miss(fc);
        }
        return;
    }

    flow->stats.hits++;

    if (ops->action_apply) {
        ops->action_apply(fc, flow);
    }

    if (ops->action_write) {
        ops->action_write(fc, flow);
    }

    if (ops->hit) {
        ops->hit(fc, flow);
    }

    if (flow->action.goto_tbl) {
        of_dpa_flow_ig_tbl(fc, flow->action.goto_tbl);
    } else if (ops->hit_no_goto) {
        ops->hit_no_goto(fc);
    }

    /* drop packet */
}
1042
/* World ingress entry point: process one packet received on physical
 * port @pport.
 *
 * A working iovec array with two spare slots in front of the caller's
 * vector is allocated (freed automatically on return via g_autofree);
 * the spare slots are presumably used by the pipeline when it inserts
 * headers — confirm in of_dpa_flow_pkt_parse.  The packet is parsed
 * and then walked through the flow tables starting at the
 * ingress-port table.
 *
 * Returns the full packet length; the device always consumes the
 * entire frame regardless of the pipeline's verdict.
 */
static ssize_t of_dpa_ig(World *world, uint32_t pport,
                         const struct iovec *iov, int iovcnt)
{
    g_autofree struct iovec *iov_copy = g_new(struct iovec, iovcnt + 2);
    OfDpaFlowContext fc = {
        .of_dpa = world_private(world),
        .in_pport = pport,
        .iov = iov_copy,
        .iovcnt = iovcnt + 2,
    };

    of_dpa_flow_pkt_parse(&fc, iov, iovcnt);
    of_dpa_flow_ig_tbl(&fc, ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT);

    return iov_size(iov, iovcnt);
}
1059
/* in_pport values with this bit set are treated as overlay tunnel
 * logical ports rather than front-panel ports */
#define ROCKER_TUNNEL_LPORT 0x00010000
1061
/* Parse and validate an ingress-port table flow add/mod.
 *
 * Required TLVs: IN_PPORT and GOTO_TABLE_ID.  Front-panel ports must
 * go to the VLAN table; overlay tunnel ports must go to the bridging
 * table.  Returns ROCKER_OK or -ROCKER_EINVAL.
 */
static int of_dpa_cmd_add_ig_port(OfDpaFlow *flow, RockerTlv **flow_tlvs)
{
    OfDpaFlowKey *key = &flow->key;
    OfDpaFlowKey *mask = &flow->mask;
    OfDpaFlowAction *action = &flow->action;
    RockerTlv *pport_tlv = flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT];
    RockerTlv *goto_tlv = flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID];
    bool overlay_tunnel;

    if (!pport_tlv || !goto_tlv) {
        return -ROCKER_EINVAL;
    }

    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
    key->width = FLOW_KEY_WIDTH(tbl_id);

    key->in_pport = rocker_tlv_get_le32(pport_tlv);
    if (flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]) {
        mask->in_pport =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]);
    }

    overlay_tunnel = (key->in_pport & ROCKER_TUNNEL_LPORT) != 0;

    action->goto_tbl = rocker_tlv_get_le16(goto_tlv);

    /* tunnel traffic bridges directly; everything else goes through
     * the VLAN table first */
    if (overlay_tunnel) {
        if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_BRIDGING) {
            return -ROCKER_EINVAL;
        }
    } else if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_VLAN) {
        return -ROCKER_EINVAL;
    }

    return ROCKER_OK;
}
1098
/* Parse and validate a VLAN table flow add/mod.
 *
 * Requires IN_PPORT (must be a front-panel port) and VLAN_ID.  An
 * entry with vlan_id == 0 matches untagged frames and must also
 * carry NEW_VLAN_ID in 1..4095 so the switch can tag them.  The only
 * legal goto target is the termination-MAC table.
 */
static int of_dpa_cmd_add_vlan(OfDpaFlow *flow, RockerTlv **flow_tlvs)
{
    OfDpaFlowKey *key = &flow->key;
    OfDpaFlowKey *mask = &flow->mask;
    OfDpaFlowAction *action = &flow->action;
    uint32_t port;
    bool untagged;

    if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
        DPRINTF("Must give in_pport and vlan_id to install VLAN tbl entry\n");
        return -ROCKER_EINVAL;
    }

    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
    key->width = FLOW_KEY_WIDTH(eth.vlan_id);

    key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]);
    if (!fp_port_from_pport(key->in_pport, &port)) {
        DPRINTF("in_pport (%d) not a front-panel port\n", key->in_pport);
        return -ROCKER_EINVAL;
    }
    mask->in_pport = 0xffffffff;

    key->eth.vlan_id = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
    if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]) {
        mask->eth.vlan_id =
            rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]);
    }

    /* vlan_id == 0 means the entry matches untagged frames;
     * otherwise it's a filtering entry */
    untagged = !key->eth.vlan_id;

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
        action->goto_tbl =
            rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
        if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC) {
            DPRINTF("Goto tbl (%d) must be TERM_MAC\n", action->goto_tbl);
            return -ROCKER_EINVAL;
        }
    }

    if (untagged) {
        uint16_t vid;

        if (!flow_tlvs[ROCKER_TLV_OF_DPA_NEW_VLAN_ID]) {
            DPRINTF("Must specify new vlan_id if untagged\n");
            return -ROCKER_EINVAL;
        }
        action->apply.new_vlan_id =
            rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_NEW_VLAN_ID]);
        vid = ntohs(action->apply.new_vlan_id);
        if (vid < 1 || vid > 4095) {
            DPRINTF("New vlan_id (%d) must be between 1 and 4095\n",
                    ntohs(action->apply.new_vlan_id));
            return -ROCKER_EINVAL;
        }
    }

    return ROCKER_OK;
}
1162
/* Parse and validate a termination-MAC table flow add/mod.
 *
 * Required TLVs: in_pport + mask, ethertype, dst MAC + mask, and
 * vlan_id + mask.  Only IPv4 (0x0800) and IPv6 (0x86dd) ethertypes
 * are accepted.  The dst MAC must be either unicast or exactly one
 * of the two well-known multicast wildcard pairs (IPv4 01:00:5e with
 * a /25 MAC mask, IPv6 33:33 with a /16 MAC mask).  If a goto table
 * is given, unicast entries must go to unicast routing and multicast
 * entries to multicast routing.  Returns ROCKER_OK or -ROCKER_EINVAL.
 */
static int of_dpa_cmd_add_term_mac(OfDpaFlow *flow, RockerTlv **flow_tlvs)
{
    OfDpaFlowKey *key = &flow->key;
    OfDpaFlowKey *mask = &flow->mask;
    OfDpaFlowAction *action = &flow->action;
    const MACAddr ipv4_mcast = { .a = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 } };
    const MACAddr ipv4_mask = { .a = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 } };
    const MACAddr ipv6_mcast = { .a = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 } };
    const MACAddr ipv6_mask = { .a = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } };
    uint32_t port;
    bool unicast = false;
    bool multicast = false;

    if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]) {
        return -ROCKER_EINVAL;
    }

    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
    key->width = FLOW_KEY_WIDTH(eth.type);

    /* ingress port must be a real front-panel port */
    key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]);
    if (!fp_port_from_pport(key->in_pport, &port)) {
        return -ROCKER_EINVAL;
    }
    mask->in_pport =
        rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]);

    /* ethertype is matched exactly and limited to IPv4/IPv6 */
    key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]);
    if (key->eth.type != htons(0x0800) && key->eth.type != htons(0x86dd)) {
        return -ROCKER_EINVAL;
    }
    mask->eth.type = htons(0xffff);

    memcpy(key->eth.dst.a,
           rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
           sizeof(key->eth.dst.a));
    memcpy(mask->eth.dst.a,
           rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]),
           sizeof(mask->eth.dst.a));

    /* I/G bit clear => unicast destination */
    if ((key->eth.dst.a[0] & 0x01) == 0x00) {
        unicast = true;
    }

    /* only two wildcard rules are acceptable for IPv4 and IPv6 multicast */
    if (memcmp(key->eth.dst.a, ipv4_mcast.a, sizeof(key->eth.dst.a)) == 0 &&
        memcmp(mask->eth.dst.a, ipv4_mask.a, sizeof(mask->eth.dst.a)) == 0) {
        multicast = true;
    }
    if (memcmp(key->eth.dst.a, ipv6_mcast.a, sizeof(key->eth.dst.a)) == 0 &&
        memcmp(mask->eth.dst.a, ipv6_mask.a, sizeof(mask->eth.dst.a)) == 0) {
        multicast = true;
    }

    if (!unicast && !multicast) {
        return -ROCKER_EINVAL;
    }

    key->eth.vlan_id = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
    mask->eth.vlan_id =
        rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]);

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
        action->goto_tbl =
            rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);

        /* goto must be a routing table matching the cast type */
        if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING &&
            action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING) {
            return -ROCKER_EINVAL;
        }

        if (unicast &&
            action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING) {
            return -ROCKER_EINVAL;
        }

        if (multicast &&
            action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING) {
            return -ROCKER_EINVAL;
        }
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]) {
        action->apply.copy_to_cpu =
            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]);
    }

    return ROCKER_OK;
}
1258
/* Parse and validate a bridging table flow add/mod.
 *
 * The bridging mode is derived from which TLVs are present: VLAN vs
 * tunnel (mutually exclusive), and unicast/multicast/default from the
 * dst MAC and its mask.  The optional write group must match the mode
 * (L2 interface for VLAN ucast, L2 mcast for VLAN mcast, L2 flood for
 * VLAN default, L2 overlay for tunnel mcast/default).  A tunnel
 * logical port is only valid in tunnel-unicast mode.  Goto, when
 * present, must be the ACL policy table.
 *
 * Fix vs original: corrected "Briding" typo in the goto-table debug
 * message.
 */
static int of_dpa_cmd_add_bridging(OfDpaFlow *flow, RockerTlv **flow_tlvs)
{
    OfDpaFlowKey *key = &flow->key;
    OfDpaFlowKey *mask = &flow->mask;
    OfDpaFlowAction *action = &flow->action;
    bool unicast = false;
    bool dst_mac = false;
    bool dst_mac_mask = false;
    enum {
        BRIDGING_MODE_UNKNOWN,
        BRIDGING_MODE_VLAN_UCAST,
        BRIDGING_MODE_VLAN_MCAST,
        BRIDGING_MODE_VLAN_DFLT,
        BRIDGING_MODE_TUNNEL_UCAST,
        BRIDGING_MODE_TUNNEL_MCAST,
        BRIDGING_MODE_TUNNEL_DFLT,
    } mode = BRIDGING_MODE_UNKNOWN;

    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;

    if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
        key->eth.vlan_id =
            rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
        mask->eth.vlan_id = 0xffff;
        key->width = FLOW_KEY_WIDTH(eth.vlan_id);
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_ID]) {
        key->tunnel_id =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_ID]);
        mask->tunnel_id = 0xffffffff;
        key->width = FLOW_KEY_WIDTH(tunnel_id);
    }

    /* can't do VLAN bridging and tunnel bridging at same time */
    if (key->eth.vlan_id && key->tunnel_id) {
        DPRINTF("can't do VLAN bridging and tunnel bridging at same time\n");
        return -ROCKER_EINVAL;
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
        memcpy(key->eth.dst.a,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
               sizeof(key->eth.dst.a));
        key->width = FLOW_KEY_WIDTH(eth.dst);
        dst_mac = true;
        unicast = (key->eth.dst.a[0] & 0x01) == 0x00;
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]) {
        memcpy(mask->eth.dst.a,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]),
               sizeof(mask->eth.dst.a));
        key->width = FLOW_KEY_WIDTH(eth.dst);
        dst_mac_mask = true;
    } else if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
        /* no mask given: match the dst MAC exactly */
        memcpy(mask->eth.dst.a, ff_mac.a, sizeof(mask->eth.dst.a));
    }

    /* exact dst MAC => ucast/mcast entry; masked or absent dst MAC
     * => default (flood) entry */
    if (key->eth.vlan_id) {
        if (dst_mac && !dst_mac_mask) {
            mode = unicast ? BRIDGING_MODE_VLAN_UCAST :
                             BRIDGING_MODE_VLAN_MCAST;
        } else if ((dst_mac && dst_mac_mask) || !dst_mac) {
            mode = BRIDGING_MODE_VLAN_DFLT;
        }
    } else if (key->tunnel_id) {
        if (dst_mac && !dst_mac_mask) {
            mode = unicast ? BRIDGING_MODE_TUNNEL_UCAST :
                             BRIDGING_MODE_TUNNEL_MCAST;
        } else if ((dst_mac && dst_mac_mask) || !dst_mac) {
            mode = BRIDGING_MODE_TUNNEL_DFLT;
        }
    }

    if (mode == BRIDGING_MODE_UNKNOWN) {
        DPRINTF("Unknown bridging mode\n");
        return -ROCKER_EINVAL;
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
        action->goto_tbl =
            rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
        if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_ACL_POLICY) {
            DPRINTF("Bridging goto tbl must be ACL policy\n");
            return -ROCKER_EINVAL;
        }
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
        action->write.group_id =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
        /* the write group's type must agree with the bridging mode */
        switch (mode) {
        case BRIDGING_MODE_VLAN_UCAST:
            if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
                ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) {
                DPRINTF("Bridging mode vlan ucast needs L2 "
                        "interface group (0x%08x)\n",
                        action->write.group_id);
                return -ROCKER_EINVAL;
            }
            break;
        case BRIDGING_MODE_VLAN_MCAST:
            if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
                ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST) {
                DPRINTF("Bridging mode vlan mcast needs L2 "
                        "mcast group (0x%08x)\n",
                        action->write.group_id);
                return -ROCKER_EINVAL;
            }
            break;
        case BRIDGING_MODE_VLAN_DFLT:
            if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
                ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD) {
                DPRINTF("Bridging mode vlan dflt needs L2 "
                        "flood group (0x%08x)\n",
                        action->write.group_id);
                return -ROCKER_EINVAL;
            }
            break;
        case BRIDGING_MODE_TUNNEL_MCAST:
            if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
                ROCKER_OF_DPA_GROUP_TYPE_L2_OVERLAY) {
                DPRINTF("Bridging mode tunnel mcast needs L2 "
                        "overlay group (0x%08x)\n",
                        action->write.group_id);
                return -ROCKER_EINVAL;
            }
            break;
        case BRIDGING_MODE_TUNNEL_DFLT:
            if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
                ROCKER_OF_DPA_GROUP_TYPE_L2_OVERLAY) {
                DPRINTF("Bridging mode tunnel dflt needs L2 "
                        "overlay group (0x%08x)\n",
                        action->write.group_id);
                return -ROCKER_EINVAL;
            }
            break;
        default:
            return -ROCKER_EINVAL;
        }
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_LPORT]) {
        action->write.tun_log_lport =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_LPORT]);
        if (mode != BRIDGING_MODE_TUNNEL_UCAST) {
            DPRINTF("Have tunnel logical port but not "
                    "in bridging tunnel mode\n");
            return -ROCKER_EINVAL;
        }
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]) {
        action->apply.copy_to_cpu =
            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]);
    }

    return ROCKER_OK;
}
1419
/* Parse and validate a unicast-routing table flow add/mod.
 *
 * Requires an IPv4 or IPv6 ethertype and the corresponding dst IP,
 * which must not be multicast.  For IPv4 the LPM prefix length is
 * derived from the dst mask (default: host route).  Goto, if given,
 * must be the ACL table; the write group must be an L2 interface,
 * L3 ucast or L3 ECMP group.
 */
static int of_dpa_cmd_add_unicast_routing(OfDpaFlow *flow,
                                          RockerTlv **flow_tlvs)
{
    OfDpaFlowKey *key = &flow->key;
    OfDpaFlowKey *mask = &flow->mask;
    OfDpaFlowAction *action = &flow->action;
    uint8_t type;

    if (!flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]) {
        return -ROCKER_EINVAL;
    }

    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
    key->width = FLOW_KEY_WIDTH(ipv6.addr.dst);

    key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]);
    mask->eth.type = htons(0xffff);

    switch (ntohs(key->eth.type)) {
    case 0x0800:                                    /* IPv4 */
        if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]) {
            return -ROCKER_EINVAL;
        }
        key->ipv4.addr.dst =
            rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]);
        if (ipv4_addr_is_multicast(key->ipv4.addr.dst)) {
            return -ROCKER_EINVAL;
        }
        /* default to a host route; narrow it if a mask is given */
        flow->lpm = of_dpa_mask2prefix(htonl(0xffffffff));
        if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP_MASK]) {
            mask->ipv4.addr.dst =
                rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP_MASK]);
            flow->lpm = of_dpa_mask2prefix(mask->ipv4.addr.dst);
        }
        break;
    case 0x86dd:                                    /* IPv6 */
        if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]) {
            return -ROCKER_EINVAL;
        }
        memcpy(&key->ipv6.addr.dst,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]),
               sizeof(key->ipv6.addr.dst));
        if (ipv6_addr_is_multicast(&key->ipv6.addr.dst)) {
            return -ROCKER_EINVAL;
        }
        if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6_MASK]) {
            memcpy(&mask->ipv6.addr.dst,
                   rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6_MASK]),
                   sizeof(mask->ipv6.addr.dst));
        }
        break;
    default:
        return -ROCKER_EINVAL;
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
        action->goto_tbl =
            rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
        if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_ACL_POLICY) {
            return -ROCKER_EINVAL;
        }
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
        action->write.group_id =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
        type = ROCKER_GROUP_TYPE_GET(action->write.group_id);
        if (type != ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE &&
            type != ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST &&
            type != ROCKER_OF_DPA_GROUP_TYPE_L3_ECMP) {
            return -ROCKER_EINVAL;
        }
    }

    return ROCKER_OK;
}
1511
/* Parse and validate a multicast-routing table flow add/mod.
 *
 * Requires an IPv4 or IPv6 ethertype and a VLAN id.  The dst IP is
 * mandatory and must be multicast.  A src IP is optional, but if the
 * src mask is supplied without a src address it must be all-zero.
 * Goto, if given, must be the ACL table; the write group must be an
 * L3 mcast group and inherits the entry's VLAN.
 *
 * Fix vs original: the IPv6 "mask without address" check used '&&',
 * so it only rejected a mask whose four 32-bit words were ALL
 * non-zero; any partially non-zero mask slipped through.  It now
 * uses '||', matching the IPv4 branch's semantics (reject any
 * non-zero mask when no src address is given).
 */
static int of_dpa_cmd_add_multicast_routing(OfDpaFlow *flow,
                                            RockerTlv **flow_tlvs)
{
    OfDpaFlowKey *key = &flow->key;
    OfDpaFlowKey *mask = &flow->mask;
    OfDpaFlowAction *action = &flow->action;
    enum {
        MULTICAST_ROUTING_MODE_UNKNOWN,
        MULTICAST_ROUTING_MODE_IPV4,
        MULTICAST_ROUTING_MODE_IPV6,
    } mode = MULTICAST_ROUTING_MODE_UNKNOWN;

    if (!flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
        return -ROCKER_EINVAL;
    }

    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
    key->width = FLOW_KEY_WIDTH(ipv6.addr.dst);

    key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]);
    switch (ntohs(key->eth.type)) {
    case 0x0800:
        mode = MULTICAST_ROUTING_MODE_IPV4;
        break;
    case 0x86dd:
        mode = MULTICAST_ROUTING_MODE_IPV6;
        break;
    default:
        return -ROCKER_EINVAL;
    }

    key->eth.vlan_id = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);

    switch (mode) {
    case MULTICAST_ROUTING_MODE_IPV4:

        if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP]) {
            key->ipv4.addr.src =
                rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP]);
        }

        if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP_MASK]) {
            mask->ipv4.addr.src =
                rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP_MASK]);
        }

        /* a src mask without a src address is meaningless */
        if (!flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP]) {
            if (mask->ipv4.addr.src != 0) {
                return -ROCKER_EINVAL;
            }
        }

        if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]) {
            return -ROCKER_EINVAL;
        }

        key->ipv4.addr.dst =
            rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]);
        if (!ipv4_addr_is_multicast(key->ipv4.addr.dst)) {
            return -ROCKER_EINVAL;
        }

        break;

    case MULTICAST_ROUTING_MODE_IPV6:

        if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6]) {
            memcpy(&key->ipv6.addr.src,
                   rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6]),
                   sizeof(key->ipv6.addr.src));
        }

        if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6_MASK]) {
            memcpy(&mask->ipv6.addr.src,
                   rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6_MASK]),
                   sizeof(mask->ipv6.addr.src));
        }

        /* without a src address, reject ANY non-zero mask word
         * (was '&&', which required all four words non-zero) */
        if (!flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6]) {
            if (mask->ipv6.addr.src.addr32[0] != 0 ||
                mask->ipv6.addr.src.addr32[1] != 0 ||
                mask->ipv6.addr.src.addr32[2] != 0 ||
                mask->ipv6.addr.src.addr32[3] != 0) {
                return -ROCKER_EINVAL;
            }
        }

        if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]) {
            return -ROCKER_EINVAL;
        }

        memcpy(&key->ipv6.addr.dst,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]),
               sizeof(key->ipv6.addr.dst));
        if (!ipv6_addr_is_multicast(&key->ipv6.addr.dst)) {
            return -ROCKER_EINVAL;
        }

        break;

    default:
        return -ROCKER_EINVAL;
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
        action->goto_tbl =
            rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
        if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_ACL_POLICY) {
            return -ROCKER_EINVAL;
        }
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
        action->write.group_id =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
        if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
            ROCKER_OF_DPA_GROUP_TYPE_L3_MCAST) {
            return -ROCKER_EINVAL;
        }
        action->write.vlan_id = key->eth.vlan_id;
    }

    return ROCKER_OK;
}
1637
/* Fill in the IP-specific part of an ACL flow key: protocol and
 * ToS (DSCP in the low 6 bits, ECN shifted into the top 2).
 */
static void of_dpa_cmd_add_acl_ip(OfDpaFlowKey *key, OfDpaFlowKey *mask,
                                  RockerTlv **flow_tlvs)
{
    RockerTlv *tlv;

    key->width = FLOW_KEY_WIDTH(ip.tos);

    key->ip.proto = 0;
    key->ip.tos = 0;
    mask->ip.proto = 0;
    mask->ip.tos = 0;

    tlv = flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO];
    if (tlv) {
        key->ip.proto = rocker_tlv_get_u8(tlv);
    }
    tlv = flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO_MASK];
    if (tlv) {
        mask->ip.proto = rocker_tlv_get_u8(tlv);
    }
    tlv = flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP];
    if (tlv) {
        key->ip.tos = rocker_tlv_get_u8(tlv);
    }
    tlv = flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP_MASK];
    if (tlv) {
        mask->ip.tos = rocker_tlv_get_u8(tlv);
    }
    tlv = flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN];
    if (tlv) {
        key->ip.tos |= rocker_tlv_get_u8(tlv) << 6;
    }
    tlv = flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN_MASK];
    if (tlv) {
        mask->ip.tos |= rocker_tlv_get_u8(tlv) << 6;
    }
}
1673
/* Parse and validate an ACL policy table flow add/mod.
 *
 * Requires in_pport and ethertype.  VLAN and tunnel matches are
 * mutually exclusive, and only the VLAN-based ACL modes are
 * currently supported — tenant (tunnel) modes are rejected.
 */
static int of_dpa_cmd_add_acl(OfDpaFlow *flow, RockerTlv **flow_tlvs)
{
    OfDpaFlowKey *key = &flow->key;
    OfDpaFlowKey *mask = &flow->mask;
    OfDpaFlowAction *action = &flow->action;
    enum {
        ACL_MODE_UNKNOWN,
        ACL_MODE_IPV4_VLAN,
        ACL_MODE_IPV6_VLAN,
        ACL_MODE_IPV4_TENANT,
        ACL_MODE_IPV6_TENANT,
        ACL_MODE_NON_IP_VLAN,
        ACL_MODE_NON_IP_TENANT,
        ACL_MODE_ANY_VLAN,
        ACL_MODE_ANY_TENANT,
    } mode = ACL_MODE_UNKNOWN;
    bool tagged;

    if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]) {
        return -ROCKER_EINVAL;
    }

    /* VLAN-based and tunnel-based ACLs are mutually exclusive */
    if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID] &&
        flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_ID]) {
        return -ROCKER_EINVAL;
    }

    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
    key->width = FLOW_KEY_WIDTH(eth.type);

    key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]);
    if (flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]) {
        mask->in_pport =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]);
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]) {
        memcpy(key->eth.src.a,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]),
               sizeof(key->eth.src.a));
    }
    if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC_MASK]) {
        memcpy(mask->eth.src.a,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC_MASK]),
               sizeof(mask->eth.src.a));
    }
    if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
        memcpy(key->eth.dst.a,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
               sizeof(key->eth.dst.a));
    }
    if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]) {
        memcpy(mask->eth.dst.a,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]),
               sizeof(mask->eth.dst.a));
    }

    /* ethertype 0 is a wildcard; anything else is matched exactly */
    key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]);
    if (key->eth.type) {
        mask->eth.type = 0xffff;
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
        key->eth.vlan_id =
            rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
    }
    if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]) {
        mask->eth.vlan_id =
            rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]);
    }

    tagged = key->eth.vlan_id != 0;

    switch (ntohs(key->eth.type)) {
    case 0x0000:
        mode = tagged ? ACL_MODE_ANY_VLAN : ACL_MODE_ANY_TENANT;
        break;
    case 0x0800:
        mode = tagged ? ACL_MODE_IPV4_VLAN : ACL_MODE_IPV4_TENANT;
        break;
    case 0x86dd:
        mode = tagged ? ACL_MODE_IPV6_VLAN : ACL_MODE_IPV6_TENANT;
        break;
    default:
        mode = tagged ? ACL_MODE_NON_IP_VLAN : ACL_MODE_NON_IP_TENANT;
        break;
    }

    /* XXX only supporting VLAN modes for now */
    if (mode != ACL_MODE_IPV4_VLAN &&
        mode != ACL_MODE_IPV6_VLAN &&
        mode != ACL_MODE_NON_IP_VLAN &&
        mode != ACL_MODE_ANY_VLAN) {
        return -ROCKER_EINVAL;
    }

    /* IPv4/IPv6 frames also match on IP protocol and ToS */
    if (ntohs(key->eth.type) == 0x0800 || ntohs(key->eth.type) == 0x86dd) {
        of_dpa_cmd_add_acl_ip(key, mask, flow_tlvs);
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
        action->write.group_id =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]) {
        action->apply.copy_to_cpu =
            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]);
    }

    return ROCKER_OK;
}
1792
/* Table-specific parsing shared by flow add and flow mod.
 *
 * Mandatory TLVs: table id, priority and hard timeout.  An idle
 * timeout is rejected for the ingress-port, VLAN and termination-MAC
 * tables.  Table ids with no parser fall through and return
 * ROCKER_OK unchanged, as in the original.
 */
static int of_dpa_cmd_flow_add_mod(OfDpa *of_dpa, OfDpaFlow *flow,
                                   RockerTlv **flow_tlvs)
{
    enum rocker_of_dpa_table_id tbl;
    int err = ROCKER_OK;

    if (!flow_tlvs[ROCKER_TLV_OF_DPA_TABLE_ID] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_PRIORITY] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_HARDTIME]) {
        return -ROCKER_EINVAL;
    }

    tbl = rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_TABLE_ID]);
    flow->priority = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_PRIORITY]);
    flow->hardtime = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_HARDTIME]);

    if (flow_tlvs[ROCKER_TLV_OF_DPA_IDLETIME]) {
        switch (tbl) {
        case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
        case ROCKER_OF_DPA_TABLE_ID_VLAN:
        case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
            return -ROCKER_EINVAL;
        default:
            break;
        }
        flow->idletime =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IDLETIME]);
    }

    switch (tbl) {
    case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
        err = of_dpa_cmd_add_ig_port(flow, flow_tlvs);
        break;
    case ROCKER_OF_DPA_TABLE_ID_VLAN:
        err = of_dpa_cmd_add_vlan(flow, flow_tlvs);
        break;
    case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
        err = of_dpa_cmd_add_term_mac(flow, flow_tlvs);
        break;
    case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
        err = of_dpa_cmd_add_bridging(flow, flow_tlvs);
        break;
    case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
        err = of_dpa_cmd_add_unicast_routing(flow, flow_tlvs);
        break;
    case ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING:
        err = of_dpa_cmd_add_multicast_routing(flow, flow_tlvs);
        break;
    case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
        err = of_dpa_cmd_add_acl(flow, flow_tlvs);
        break;
    }

    return err;
}
1845
/* Create a new flow identified by @cookie.
 *
 * Fails with -ROCKER_EEXIST if the cookie is already in use.  The
 * freshly allocated flow is freed again if parsing fails; on success
 * ownership passes to the flow table.
 */
static int of_dpa_cmd_flow_add(OfDpa *of_dpa, uint64_t cookie,
                               RockerTlv **flow_tlvs)
{
    OfDpaFlow *flow;
    int err;

    if (of_dpa_flow_find(of_dpa, cookie)) {
        return -ROCKER_EEXIST;
    }

    flow = of_dpa_flow_alloc(cookie);

    err = of_dpa_cmd_flow_add_mod(of_dpa, flow, flow_tlvs);
    if (err) {
        g_free(flow);
        return err;
    }

    return of_dpa_flow_add(of_dpa, flow);
}
1866
/* Re-parse an existing flow identified by @cookie in place;
 * -ROCKER_ENOENT if no such flow exists.
 */
static int of_dpa_cmd_flow_mod(OfDpa *of_dpa, uint64_t cookie,
                               RockerTlv **flow_tlvs)
{
    OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie);

    return flow ? of_dpa_cmd_flow_add_mod(of_dpa, flow, flow_tlvs)
                : -ROCKER_ENOENT;
}
1878
/* Remove the flow identified by @cookie; -ROCKER_ENOENT if absent. */
static int of_dpa_cmd_flow_del(OfDpa *of_dpa, uint64_t cookie)
{
    OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie);

    if (!flow) {
        return -ROCKER_ENOENT;
    }
    of_dpa_flow_del(of_dpa, flow);
    return ROCKER_OK;
}
1891
/* Fill the descriptor buffer with statistics for the flow identified
 * by @cookie.
 *
 * Emits three TLVs: duration in seconds since install, rx packet
 * count and tx packet count.  Returns -ROCKER_ENOENT if the flow
 * does not exist, -ROCKER_EMSGSIZE if the descriptor buffer is too
 * small, otherwise the result of desc_set_buf().
 */
static int of_dpa_cmd_flow_get_stats(OfDpa *of_dpa, uint64_t cookie,
                                     struct desc_info *info, char *buf)
{
    OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie);
    size_t tlv_size;
    int64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) / 1000; /* seconds */
    int pos;

    if (!flow) {
        return -ROCKER_ENOENT;
    }

    /* size the three TLVs before writing to avoid overrunning buf */
    tlv_size = rocker_tlv_total_size(sizeof(uint32_t)) + /* duration */
        rocker_tlv_total_size(sizeof(uint64_t)) + /* rx_pkts */
        rocker_tlv_total_size(sizeof(uint64_t)); /* tx_pkts */

    if (tlv_size > desc_buf_size(info)) {
        return -ROCKER_EMSGSIZE;
    }

    pos = 0;
    rocker_tlv_put_le32(buf, &pos, ROCKER_TLV_OF_DPA_FLOW_STAT_DURATION,
                        (int32_t)(now - flow->stats.install_time));
    rocker_tlv_put_le64(buf, &pos, ROCKER_TLV_OF_DPA_FLOW_STAT_RX_PKTS,
                        flow->stats.rx_pkts);
    rocker_tlv_put_le64(buf, &pos, ROCKER_TLV_OF_DPA_FLOW_STAT_TX_PKTS,
                        flow->stats.tx_pkts);

    return desc_set_buf(info, tlv_size);
}
1922
/* Dispatch one flow-table command (add/mod/del/get-stats).
 *
 * Every flow command must carry a 64-bit cookie TLV identifying the
 * flow entry.  Unknown commands return -ROCKER_ENOTSUP.
 */
static int of_dpa_flow_cmd(OfDpa *of_dpa, struct desc_info *info,
                           char *buf, uint16_t cmd,
                           RockerTlv **flow_tlvs)
{
    RockerTlv *cookie_tlv = flow_tlvs[ROCKER_TLV_OF_DPA_COOKIE];
    uint64_t cookie;

    if (!cookie_tlv) {
        return -ROCKER_EINVAL;
    }

    cookie = rocker_tlv_get_le64(cookie_tlv);

    switch (cmd) {
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD:
        return of_dpa_cmd_flow_add(of_dpa, cookie, flow_tlvs);
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD:
        return of_dpa_cmd_flow_mod(of_dpa, cookie, flow_tlvs);
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL:
        return of_dpa_cmd_flow_del(of_dpa, cookie);
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS:
        return of_dpa_cmd_flow_get_stats(of_dpa, cookie, info, buf);
    default:
        return -ROCKER_ENOTSUP;
    }
}
1948
/* Parse an L2 interface group: an egress physical port plus a flag
 * saying whether to pop the VLAN tag on the way out.
 */
static int of_dpa_cmd_add_l2_interface(OfDpaGroup *group,
                                       RockerTlv **group_tlvs)
{
    RockerTlv *pport_tlv = group_tlvs[ROCKER_TLV_OF_DPA_OUT_PPORT];
    RockerTlv *pop_tlv = group_tlvs[ROCKER_TLV_OF_DPA_POP_VLAN];

    if (!pport_tlv || !pop_tlv) {
        return -ROCKER_EINVAL;
    }

    group->l2_interface.out_pport = rocker_tlv_get_le32(pport_tlv);
    group->l2_interface.pop_vlan = rocker_tlv_get_u8(pop_tlv);

    return ROCKER_OK;
}
1964
/* Parse an L2 rewrite group.
 *
 * Must reference an existing L2 interface group via GROUP_ID_LOWER.
 * Optionally rewrites src/dst MAC; an optional VLAN rewrite must
 * keep the same VLAN the referenced interface group carries in its id.
 */
static int of_dpa_cmd_add_l2_rewrite(OfDpa *of_dpa, OfDpaGroup *group,
                                     RockerTlv **group_tlvs)
{
    OfDpaGroup *base;

    if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER]) {
        return -ROCKER_EINVAL;
    }

    group->l2_rewrite.group_id =
        rocker_tlv_get_le32(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER]);

    base = of_dpa_group_find(of_dpa, group->l2_rewrite.group_id);
    if (!base ||
        ROCKER_GROUP_TYPE_GET(base->id) !=
        ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) {
        DPRINTF("l2 rewrite group needs a valid l2 interface group\n");
        return -ROCKER_EINVAL;
    }

    if (group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]) {
        memcpy(group->l2_rewrite.src_mac.a,
               rocker_tlv_data(group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]),
               sizeof(group->l2_rewrite.src_mac.a));
    }

    if (group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
        memcpy(group->l2_rewrite.dst_mac.a,
               rocker_tlv_data(group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
               sizeof(group->l2_rewrite.dst_mac.a));
    }

    if (group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
        group->l2_rewrite.vlan_id =
            rocker_tlv_get_u16(group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
        if (ROCKER_GROUP_VLAN_GET(base->id) !=
            (ntohs(group->l2_rewrite.vlan_id) & VLAN_VID_MASK)) {
            DPRINTF("Set VLAN ID must be same as L2 interface group\n");
            return -ROCKER_EINVAL;
        }
    }

    return ROCKER_OK;
}
2009
/* Parse an L2 flood group: a list of member group ids a packet is
 * replicated to.
 *
 * Validates that every referenced L2 interface group carries the
 * same VLAN as the flood group itself.  On any error the partially
 * built member list is released and the group reset to an empty,
 * consistent state.
 *
 * Fixes vs original:
 *  - the error path freed group_ids without clearing the pointer,
 *    leaving a dangling reference that the unconditional g_free()
 *    on the next add/mod of this group would double-free;
 *  - a guest-supplied group_count larger than the number of nested
 *    GROUP_ID TLVs left NULL slots in tlvs[] that were passed to
 *    rocker_tlv_get_le32(); such requests are now rejected.
 */
static int of_dpa_cmd_add_l2_flood(OfDpa *of_dpa, OfDpaGroup *group,
                                   RockerTlv **group_tlvs)
{
    OfDpaGroup *l2_group;
    RockerTlv **tlvs;
    int err;
    int i;

    if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_COUNT] ||
        !group_tlvs[ROCKER_TLV_OF_DPA_GROUP_IDS]) {
        return -ROCKER_EINVAL;
    }

    group->l2_flood.group_count =
        rocker_tlv_get_le16(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_COUNT]);

    tlvs = g_new0(RockerTlv *, group->l2_flood.group_count + 1);

    g_free(group->l2_flood.group_ids);
    group->l2_flood.group_ids =
        g_new0(uint32_t, group->l2_flood.group_count);

    rocker_tlv_parse_nested(tlvs, group->l2_flood.group_count,
                            group_tlvs[ROCKER_TLV_OF_DPA_GROUP_IDS]);

    for (i = 0; i < group->l2_flood.group_count; i++) {
        /* slots with no matching nested TLV are left NULL by
         * g_new0; reject rather than dereference them */
        if (!tlvs[i + 1]) {
            err = -ROCKER_EINVAL;
            goto err_out;
        }
        group->l2_flood.group_ids[i] = rocker_tlv_get_le32(tlvs[i + 1]);
    }

    /* All of the L2 interface groups referenced by the L2 flood
     * must have same VLAN
     */

    for (i = 0; i < group->l2_flood.group_count; i++) {
        l2_group = of_dpa_group_find(of_dpa, group->l2_flood.group_ids[i]);
        if (!l2_group) {
            continue;
        }
        if ((ROCKER_GROUP_TYPE_GET(l2_group->id) ==
             ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) &&
            (ROCKER_GROUP_VLAN_GET(l2_group->id) !=
             ROCKER_GROUP_VLAN_GET(group->id))) {
            DPRINTF("l2 interface group 0x%08x VLAN doesn't match l2 "
                    "flood group 0x%08x\n",
                    group->l2_flood.group_ids[i], group->id);
            err = -ROCKER_EINVAL;
            goto err_out;
        }
    }

    g_free(tlvs);
    return ROCKER_OK;

err_out:
    /* reset to an empty member list; clearing the pointer prevents
     * a dangling reference (and possible double free) later */
    group->l2_flood.group_count = 0;
    g_free(group->l2_flood.group_ids);
    group->l2_flood.group_ids = NULL;
    g_free(tlvs);

    return err;
}
2070
/* Populate an L3 unicast group from command TLVs.
 *
 * Only the chained group id is mandatory; source/destination MAC,
 * VLAN id and TTL-check are optional.  Returns ROCKER_OK, or
 * -ROCKER_EINVAL when the group id TLV is missing.
 */
static int of_dpa_cmd_add_l3_unicast(OfDpaGroup *group, RockerTlv **group_tlvs)
{
    RockerTlv *tlv = group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER];

    if (!tlv) {
        return -ROCKER_EINVAL;
    }
    group->l3_unicast.group_id = rocker_tlv_get_le32(tlv);

    tlv = group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC];
    if (tlv) {
        memcpy(group->l3_unicast.src_mac.a, rocker_tlv_data(tlv),
               sizeof(group->l3_unicast.src_mac.a));
    }

    tlv = group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC];
    if (tlv) {
        memcpy(group->l3_unicast.dst_mac.a, rocker_tlv_data(tlv),
               sizeof(group->l3_unicast.dst_mac.a));
    }

    tlv = group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID];
    if (tlv) {
        group->l3_unicast.vlan_id = rocker_tlv_get_u16(tlv);
    }

    tlv = group_tlvs[ROCKER_TLV_OF_DPA_TTL_CHECK];
    if (tlv) {
        group->l3_unicast.ttl_check = rocker_tlv_get_u8(tlv);
    }

    return ROCKER_OK;
}
2104
/* Dispatch a group add/mod to the handler for the group's type, which
 * is encoded in the upper bits of the 32-bit group id.
 * Returns the handler's result, or -ROCKER_ENOTSUP for unknown types.
 */
static int of_dpa_cmd_group_do(OfDpa *of_dpa, uint32_t group_id,
                               OfDpaGroup *group, RockerTlv **group_tlvs)
{
    switch (ROCKER_GROUP_TYPE_GET(group_id)) {
    case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
        return of_dpa_cmd_add_l2_interface(group, group_tlvs);
    case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
        return of_dpa_cmd_add_l2_rewrite(of_dpa, group, group_tlvs);
    case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
        /* Treat L2 multicast group same as a L2 flood group */
    case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
        return of_dpa_cmd_add_l2_flood(of_dpa, group, group_tlvs);
    case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
        return of_dpa_cmd_add_l3_unicast(group, group_tlvs);
    default:
        return -ROCKER_ENOTSUP;
    }
}
2125
/* Create a new group: refuse duplicates, populate the group from the
 * TLVs, then insert it into the group table.
 * Returns ROCKER_OK, -ROCKER_EEXIST, or the populate/insert error.
 */
static int of_dpa_cmd_group_add(OfDpa *of_dpa, uint32_t group_id,
                                RockerTlv **group_tlvs)
{
    OfDpaGroup *group;
    int ret;

    if (of_dpa_group_find(of_dpa, group_id)) {
        return -ROCKER_EEXIST;
    }

    group = of_dpa_group_alloc(group_id);

    ret = of_dpa_cmd_group_do(of_dpa, group_id, group, group_tlvs);
    if (!ret) {
        ret = of_dpa_group_add(of_dpa, group);
    }
    if (ret) {
        /* NOTE(review): for flood groups that populated successfully but
         * failed to insert, this frees the group struct but presumably
         * not l2_flood.group_ids — confirm whether that can leak. */
        g_free(group);
        return ret;
    }

    return ROCKER_OK;
}
2154
/* Modify an existing group in place; -ROCKER_ENOENT if it doesn't exist. */
static int of_dpa_cmd_group_mod(OfDpa *of_dpa, uint32_t group_id,
                                RockerTlv **group_tlvs)
{
    OfDpaGroup *found = of_dpa_group_find(of_dpa, group_id);

    return found ? of_dpa_cmd_group_do(of_dpa, group_id, found, group_tlvs)
                 : -ROCKER_ENOENT;
}
2166
/* Delete a group by id; -ROCKER_ENOENT if it doesn't exist. */
static int of_dpa_cmd_group_del(OfDpa *of_dpa, uint32_t group_id)
{
    OfDpaGroup *found = of_dpa_group_find(of_dpa, group_id);

    return found ? of_dpa_group_del(of_dpa, found) : -ROCKER_ENOENT;
}
2177
/* Group statistics retrieval is not implemented; always reports
 * -ROCKER_ENOTSUP back to the command dispatcher.
 */
static int of_dpa_cmd_group_get_stats(OfDpa *of_dpa, uint32_t group_id,
                                      struct desc_info *info, char *buf)
{
    return -ROCKER_ENOTSUP;
}
2183
/* Route a group command to the add/mod/del/get-stats handler.
 *
 * The group id TLV is mandatory for every group command.  Returns the
 * handler's result, -ROCKER_EINVAL when the id is missing, or
 * -ROCKER_ENOTSUP for unknown commands.
 */
static int of_dpa_group_cmd(OfDpa *of_dpa, struct desc_info *info,
                            char *buf, uint16_t cmd, RockerTlv **group_tlvs)
{
    RockerTlv *id_tlv = group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID];
    uint32_t group_id;

    if (!id_tlv) {
        return -ROCKER_EINVAL;
    }

    group_id = rocker_tlv_get_le32(id_tlv);

    switch (cmd) {
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD:
        return of_dpa_cmd_group_add(of_dpa, group_id, group_tlvs);
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD:
        return of_dpa_cmd_group_mod(of_dpa, group_id, group_tlvs);
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL:
        return of_dpa_cmd_group_del(of_dpa, group_id);
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS:
        return of_dpa_cmd_group_get_stats(of_dpa, group_id, info, buf);
    default:
        return -ROCKER_ENOTSUP;
    }
}
2208
/* World cmd hook: parse the nested OF-DPA TLVs once, then route flow
 * commands and group commands to their respective dispatchers.
 * Returns the dispatcher's result, or -ROCKER_ENOTSUP for unknown cmds.
 */
static int of_dpa_cmd(World *world, struct desc_info *info,
                      char *buf, uint16_t cmd, RockerTlv *cmd_info_tlv)
{
    RockerTlv *tlvs[ROCKER_TLV_OF_DPA_MAX + 1];
    OfDpa *of_dpa = world_private(world);

    rocker_tlv_parse_nested(tlvs, ROCKER_TLV_OF_DPA_MAX, cmd_info_tlv);

    switch (cmd) {
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS:
        return of_dpa_flow_cmd(of_dpa, info, buf, cmd, tlvs);
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS:
        return of_dpa_group_cmd(of_dpa, info, buf, cmd, tlvs);
    default:
        return -ROCKER_ENOTSUP;
    }
}
2232
rocker_int64_equal(gconstpointer v1,gconstpointer v2)2233 static gboolean rocker_int64_equal(gconstpointer v1, gconstpointer v2)
2234 {
2235 return *((const uint64_t *)v1) == *((const uint64_t *)v2);
2236 }
2237
rocker_int64_hash(gconstpointer v)2238 static guint rocker_int64_hash(gconstpointer v)
2239 {
2240 return (guint)*(const uint64_t *)v;
2241 }
2242
/* World init hook: set up the OF-DPA private state and create the flow
 * and group hash tables.
 *
 * Both tables own their values (g_free destroy notify); keys point into
 * the values, so no key destroy is registered.  g_hash_table_new_full()
 * aborts on allocation failure rather than returning NULL, so the old
 * -ENOMEM branches were unreachable and have been dropped.
 *
 * Always returns 0.
 */
static int of_dpa_init(World *world)
{
    OfDpa *of_dpa = world_private(world);

    of_dpa->world = world;

    /* flows are keyed by their 64-bit cookie */
    of_dpa->flow_tbl = g_hash_table_new_full(rocker_int64_hash,
                                             rocker_int64_equal,
                                             NULL, g_free);

    /* groups are keyed by their 32-bit group id */
    of_dpa->group_tbl = g_hash_table_new_full(g_int_hash, g_int_equal,
                                              NULL, g_free);

    /* XXX hardcode some artificial table max values */
    of_dpa->flow_tbl_max_size = 100;
    of_dpa->group_tbl_max_size = 100;

    return 0;
}
2272
/* World uninit hook: tear down both hash tables; the g_free value
 * destroy functions registered at init time release the entries.
 */
static void of_dpa_uninit(World *world)
{
    OfDpa *ctx = world_private(world);

    g_hash_table_destroy(ctx->group_tbl);
    g_hash_table_destroy(ctx->flow_tbl);
}
2280
/* Accumulator passed to of_dpa_flow_fill() while iterating the flow
 * table for the QMP flow query.
 */
struct of_dpa_flow_fill_context {
    RockerOfDpaFlowList *list;   /* result list, prepended per flow */
    uint32_t tbl_id;             /* table filter; -1 matches all tables */
};
2285
/* GHashTable foreach callback: convert one internal flow entry into a
 * QMP RockerOfDpaFlow and prepend it to the context's result list.
 *
 * A key field is reported only when the key or its mask is non-zero,
 * and the corresponding mask is reported only when it is not the
 * trivial all-ones (exact match) mask.
 */
static void of_dpa_flow_fill(void *cookie, void *value, void *user_data)
{
    struct of_dpa_flow *flow = value;
    struct of_dpa_flow_key *key = &flow->key;
    struct of_dpa_flow_key *mask = &flow->mask;
    struct of_dpa_flow_fill_context *flow_context = user_data;
    RockerOfDpaFlow *nflow;
    RockerOfDpaFlowKey *nkey;
    RockerOfDpaFlowMask *nmask;
    RockerOfDpaFlowAction *naction;

    /* tbl_id == -1 means "no table filter" */
    if (flow_context->tbl_id != -1 &&
        flow_context->tbl_id != key->tbl_id) {
        return;
    }

    nflow = g_malloc0(sizeof(*nflow));
    nkey = nflow->key = g_malloc0(sizeof(*nkey));
    nmask = nflow->mask = g_malloc0(sizeof(*nmask));
    naction = nflow->action = g_malloc0(sizeof(*naction));

    nflow->cookie = flow->cookie;
    nflow->hits = flow->stats.hits;
    nkey->priority = flow->priority;
    nkey->tbl_id = key->tbl_id;

    if (key->in_pport || mask->in_pport) {
        nkey->has_in_pport = true;
        nkey->in_pport = key->in_pport;
    }

    if (nkey->has_in_pport && mask->in_pport != 0xffffffff) {
        nmask->has_in_pport = true;
        nmask->in_pport = mask->in_pport;
    }

    /* VLAN ids are stored in network byte order; QMP wants host order */
    if (key->eth.vlan_id || mask->eth.vlan_id) {
        nkey->has_vlan_id = true;
        nkey->vlan_id = ntohs(key->eth.vlan_id);
    }

    if (nkey->has_vlan_id && mask->eth.vlan_id != 0xffff) {
        nmask->has_vlan_id = true;
        nmask->vlan_id = ntohs(mask->eth.vlan_id);
    }

    if (key->tunnel_id || mask->tunnel_id) {
        nkey->has_tunnel_id = true;
        nkey->tunnel_id = key->tunnel_id;
    }

    if (nkey->has_tunnel_id && mask->tunnel_id != 0xffffffff) {
        nmask->has_tunnel_id = true;
        nmask->tunnel_id = mask->tunnel_id;
    }

    /* MACs are reported as strings; omitted entirely when all-zero */
    if (memcmp(key->eth.src.a, zero_mac.a, ETH_ALEN) ||
        memcmp(mask->eth.src.a, zero_mac.a, ETH_ALEN)) {
        nkey->eth_src = qemu_mac_strdup_printf(key->eth.src.a);
    }

    if (nkey->eth_src && memcmp(mask->eth.src.a, ff_mac.a, ETH_ALEN)) {
        nmask->eth_src = qemu_mac_strdup_printf(mask->eth.src.a);
    }

    if (memcmp(key->eth.dst.a, zero_mac.a, ETH_ALEN) ||
        memcmp(mask->eth.dst.a, zero_mac.a, ETH_ALEN)) {
        nkey->eth_dst = qemu_mac_strdup_printf(key->eth.dst.a);
    }

    if (nkey->eth_dst && memcmp(mask->eth.dst.a, ff_mac.a, ETH_ALEN)) {
        nmask->eth_dst = qemu_mac_strdup_printf(mask->eth.dst.a);
    }

    if (key->eth.type) {

        nkey->has_eth_type = true;
        nkey->eth_type = ntohs(key->eth.type);

        /* IP protocol/ToS apply to IPv4 (0x0800) and IPv6 (0x86dd) */
        switch (ntohs(key->eth.type)) {
        case 0x0800:
        case 0x86dd:
            if (key->ip.proto || mask->ip.proto) {
                nkey->has_ip_proto = true;
                nkey->ip_proto = key->ip.proto;
            }
            if (nkey->has_ip_proto && mask->ip.proto != 0xff) {
                nmask->has_ip_proto = true;
                nmask->ip_proto = mask->ip.proto;
            }
            if (key->ip.tos || mask->ip.tos) {
                nkey->has_ip_tos = true;
                nkey->ip_tos = key->ip.tos;
            }
            if (nkey->has_ip_tos && mask->ip.tos != 0xff) {
                nmask->has_ip_tos = true;
                nmask->ip_tos = mask->ip.tos;
            }
            break;
        }

        /* IPv4 only: report the destination as "addr/prefixlen" */
        switch (ntohs(key->eth.type)) {
        case 0x0800:
            if (key->ipv4.addr.dst || mask->ipv4.addr.dst) {
                char *dst = inet_ntoa(*(struct in_addr *)&key->ipv4.addr.dst);
                int dst_len = of_dpa_mask2prefix(mask->ipv4.addr.dst);
                nkey->ip_dst = g_strdup_printf("%s/%d", dst, dst_len);
            }
            break;
        }
    }

    if (flow->action.goto_tbl) {
        naction->has_goto_tbl = true;
        naction->goto_tbl = flow->action.goto_tbl;
    }

    if (flow->action.write.group_id) {
        naction->has_group_id = true;
        naction->group_id = flow->action.write.group_id;
    }

    if (flow->action.apply.new_vlan_id) {
        naction->has_new_vlan_id = true;
        naction->new_vlan_id = flow->action.apply.new_vlan_id;
    }

    QAPI_LIST_PREPEND(flow_context->list, nflow);
}
2415
qmp_query_rocker_of_dpa_flows(const char * name,bool has_tbl_id,uint32_t tbl_id,Error ** errp)2416 RockerOfDpaFlowList *qmp_query_rocker_of_dpa_flows(const char *name,
2417 bool has_tbl_id,
2418 uint32_t tbl_id,
2419 Error **errp)
2420 {
2421 struct rocker *r;
2422 struct world *w;
2423 struct of_dpa *of_dpa;
2424 struct of_dpa_flow_fill_context fill_context = {
2425 .list = NULL,
2426 .tbl_id = tbl_id,
2427 };
2428
2429 r = rocker_find(name);
2430 if (!r) {
2431 error_setg(errp, "rocker %s not found", name);
2432 return NULL;
2433 }
2434
2435 w = rocker_get_world(r, ROCKER_WORLD_TYPE_OF_DPA);
2436 if (!w) {
2437 error_setg(errp, "rocker %s doesn't have OF-DPA world", name);
2438 return NULL;
2439 }
2440
2441 of_dpa = world_private(w);
2442
2443 g_hash_table_foreach(of_dpa->flow_tbl, of_dpa_flow_fill, &fill_context);
2444
2445 return fill_context.list;
2446 }
2447
/* Accumulator passed to of_dpa_group_fill() while iterating the group
 * table for the QMP group query.
 */
struct of_dpa_group_fill_context {
    RockerOfDpaGroupList *list;   /* result list, prepended per group */
    uint8_t type;                 /* type filter; 9 matches all types */
};
2452
/* GHashTable foreach callback: convert one internal group entry into a
 * QMP RockerOfDpaGroup and prepend it to the context's result list.
 * Which optional fields are populated depends on the group type
 * decoded from the group id.
 */
static void of_dpa_group_fill(void *key, void *value, void *user_data)
{
    struct of_dpa_group *group = value;
    struct of_dpa_group_fill_context *flow_context = user_data;
    RockerOfDpaGroup *ngroup;
    int i;

    /* type 9 is the "match any type" sentinel — presumably no valid
     * group type encodes as 9; confirm against rocker_hw.h */
    if (flow_context->type != 9 &&
        flow_context->type != ROCKER_GROUP_TYPE_GET(group->id)) {
        return;
    }

    ngroup = g_malloc0(sizeof(*ngroup));

    ngroup->id = group->id;

    ngroup->type = ROCKER_GROUP_TYPE_GET(group->id);

    switch (ngroup->type) {
    case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
        /* VLAN and port are packed into the group id itself */
        ngroup->has_vlan_id = true;
        ngroup->vlan_id = ROCKER_GROUP_VLAN_GET(group->id);
        ngroup->has_pport = true;
        ngroup->pport = ROCKER_GROUP_PORT_GET(group->id);
        ngroup->has_out_pport = true;
        ngroup->out_pport = group->l2_interface.out_pport;
        ngroup->has_pop_vlan = true;
        ngroup->pop_vlan = group->l2_interface.pop_vlan;
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
        ngroup->has_index = true;
        ngroup->index = ROCKER_GROUP_INDEX_LONG_GET(group->id);
        ngroup->has_group_id = true;
        ngroup->group_id = group->l2_rewrite.group_id;
        if (group->l2_rewrite.vlan_id) {
            ngroup->has_set_vlan_id = true;
            ngroup->set_vlan_id = ntohs(group->l2_rewrite.vlan_id);
        }
        /* MACs are reported only when non-zero */
        if (memcmp(group->l2_rewrite.src_mac.a, zero_mac.a, ETH_ALEN)) {
            ngroup->set_eth_src =
                qemu_mac_strdup_printf(group->l2_rewrite.src_mac.a);
        }
        if (memcmp(group->l2_rewrite.dst_mac.a, zero_mac.a, ETH_ALEN)) {
            ngroup->set_eth_dst =
                qemu_mac_strdup_printf(group->l2_rewrite.dst_mac.a);
        }
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
    case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
        ngroup->has_vlan_id = true;
        ngroup->vlan_id = ROCKER_GROUP_VLAN_GET(group->id);
        ngroup->has_index = true;
        ngroup->index = ROCKER_GROUP_INDEX_GET(group->id);
        /* member group ids are prepended, so they come out reversed */
        for (i = 0; i < group->l2_flood.group_count; i++) {
            ngroup->has_group_ids = true;
            QAPI_LIST_PREPEND(ngroup->group_ids, group->l2_flood.group_ids[i]);
        }
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
        ngroup->has_index = true;
        ngroup->index = ROCKER_GROUP_INDEX_LONG_GET(group->id);
        ngroup->has_group_id = true;
        ngroup->group_id = group->l3_unicast.group_id;
        if (group->l3_unicast.vlan_id) {
            ngroup->has_set_vlan_id = true;
            ngroup->set_vlan_id = ntohs(group->l3_unicast.vlan_id);
        }
        if (memcmp(group->l3_unicast.src_mac.a, zero_mac.a, ETH_ALEN)) {
            ngroup->set_eth_src =
                qemu_mac_strdup_printf(group->l3_unicast.src_mac.a);
        }
        if (memcmp(group->l3_unicast.dst_mac.a, zero_mac.a, ETH_ALEN)) {
            ngroup->set_eth_dst =
                qemu_mac_strdup_printf(group->l3_unicast.dst_mac.a);
        }
        if (group->l3_unicast.ttl_check) {
            ngroup->has_ttl_check = true;
            ngroup->ttl_check = group->l3_unicast.ttl_check;
        }
        break;
    }

    QAPI_LIST_PREPEND(flow_context->list, ngroup);
}
2537
qmp_query_rocker_of_dpa_groups(const char * name,bool has_type,uint8_t type,Error ** errp)2538 RockerOfDpaGroupList *qmp_query_rocker_of_dpa_groups(const char *name,
2539 bool has_type,
2540 uint8_t type,
2541 Error **errp)
2542 {
2543 struct rocker *r;
2544 struct world *w;
2545 struct of_dpa *of_dpa;
2546 struct of_dpa_group_fill_context fill_context = {
2547 .list = NULL,
2548 .type = type,
2549 };
2550
2551 r = rocker_find(name);
2552 if (!r) {
2553 error_setg(errp, "rocker %s not found", name);
2554 return NULL;
2555 }
2556
2557 w = rocker_get_world(r, ROCKER_WORLD_TYPE_OF_DPA);
2558 if (!w) {
2559 error_setg(errp, "rocker %s doesn't have OF-DPA world", name);
2560 return NULL;
2561 }
2562
2563 of_dpa = world_private(w);
2564
2565 g_hash_table_foreach(of_dpa->group_tbl, of_dpa_group_fill, &fill_context);
2566
2567 return fill_context.list;
2568 }
2569
/* OF-DPA world hooks registered with the rocker world framework. */
static WorldOps of_dpa_ops = {
    .name = "ofdpa",
    .init = of_dpa_init,      /* create flow/group hash tables */
    .uninit = of_dpa_uninit,  /* destroy them */
    .ig = of_dpa_ig,          /* ingress handler (defined earlier) */
    .cmd = of_dpa_cmd,        /* descriptor command dispatch */
};
2577
of_dpa_world_alloc(Rocker * r)2578 World *of_dpa_world_alloc(Rocker *r)
2579 {
2580 return world_alloc(r, sizeof(OfDpa), ROCKER_WORLD_TYPE_OF_DPA, &of_dpa_ops);
2581 }
2582