xref: /openbmc/qemu/hw/net/rocker/rocker_of_dpa.c (revision 10500ce2)
1 /*
2  * QEMU rocker switch emulation - OF-DPA flow processing support
3  *
4  * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14  * GNU General Public License for more details.
15  */
16 
17 #include "net/eth.h"
18 #include "qemu/iov.h"
19 #include "qemu/timer.h"
20 #include "qmp-commands.h"
21 
22 #include "rocker.h"
23 #include "rocker_hw.h"
24 #include "rocker_fp.h"
25 #include "rocker_tlv.h"
26 #include "rocker_world.h"
27 #include "rocker_desc.h"
28 #include "rocker_of_dpa.h"
29 
/* Convenience constants for MAC comparisons: all-zeros is treated as
 * "unset"/wildcard, all-ones as an exact-match mask (see the users in
 * of_dpa_flow_key_dump and of_dpa_flow_pkt_hdr_rewrite). */
static const MACAddr zero_mac = { .a = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } };
static const MACAddr ff_mac =   { .a = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } };
32 
/* Per-world OF-DPA state.  Flows are kept in a hash table keyed by
 * the 64-bit flow cookie, groups in a hash table keyed by group id. */
typedef struct of_dpa {
    World *world;
    GHashTable *flow_tbl;                /* cookie -> OfDpaFlow */
    GHashTable *group_tbl;               /* group id -> OfDpaGroup */
    unsigned int flow_tbl_max_size;
    unsigned int group_tbl_max_size;
} OfDpa;
40 
/* flow_key stolen mostly from OVS
 *
 * Note: fields that compare with network packet header fields
 * are stored in network order (BE) to avoid per-packet field
 * byte-swaps.
 *
 * The key is compared 64 bits at a time (see _of_dpa_flow_match),
 * so 'width' records how much of the struct is significant, in
 * uint64_t units (computed with FLOW_KEY_WIDTH).
 */

typedef struct of_dpa_flow_key {
    uint32_t in_pport;               /* ingress port */
    uint32_t tunnel_id;              /* overlay tunnel id */
    uint32_t tbl_id;                 /* table id */
    struct {
        __be16 vlan_id;              /* 0 if no VLAN */
        MACAddr src;                 /* ethernet source address */
        MACAddr dst;                 /* ethernet destination address */
        __be16 type;                 /* ethernet frame type */
    } eth;
    struct {
        uint8_t proto;               /* IP protocol or ARP opcode */
        uint8_t tos;                 /* IP ToS */
        uint8_t ttl;                 /* IP TTL/hop limit */
        uint8_t frag;                /* one of FRAG_TYPE_* */
    } ip;
    union {
        struct {
            struct {
                __be32 src;          /* IP source address */
                __be32 dst;          /* IP destination address */
            } addr;
            union {
                struct {
                    __be16 src;      /* TCP/UDP/SCTP source port */
                    __be16 dst;      /* TCP/UDP/SCTP destination port */
                    __be16 flags;    /* TCP flags */
                } tp;
                struct {
                    MACAddr sha;     /* ARP source hardware address */
                    MACAddr tha;     /* ARP target hardware address */
                } arp;
            };
        } ipv4;
        struct {
            struct {
                Ipv6Addr src;       /* IPv6 source address */
                Ipv6Addr dst;       /* IPv6 destination address */
            } addr;
            __be32 label;            /* IPv6 flow label */
            struct {
                __be16 src;          /* TCP/UDP/SCTP source port */
                __be16 dst;          /* TCP/UDP/SCTP destination port */
                __be16 flags;        /* TCP flags */
            } tp;
            struct {
                Ipv6Addr target;    /* ND target address */
                MACAddr sll;         /* ND source link layer address */
                MACAddr tll;         /* ND target link layer address */
            } nd;
        } ipv6;
    };
    int width;                       /* how many uint64_t's in key? */
} OfDpaFlowKey;
102 
/* Width of key which includes field 'f' in u64s, rounded up.
 * Used to set OfDpaFlowKey.width so matching only scans the part of
 * the key a given table actually populates. */
#define FLOW_KEY_WIDTH(f) \
    ((offsetof(OfDpaFlowKey, f) + \
      sizeof(((OfDpaFlowKey *)0)->f) + \
      sizeof(uint64_t) - 1) / sizeof(uint64_t))
108 
/* Actions attached to a flow entry: 'goto_tbl' chains to the next
 * table, 'write' actions accumulate into the packet's action set and
 * take effect at egress, 'apply' actions take effect immediately. */
typedef struct of_dpa_flow_action {
    uint32_t goto_tbl;
    struct {
        uint32_t group_id;
        uint32_t tun_log_lport;
        __be16 vlan_id;
    } write;
    struct {
        __be16 new_vlan_id;
        uint32_t out_pport;
        uint8_t copy_to_cpu;
        __be16 vlan_id;
    } apply;
} OfDpaFlowAction;
123 
/* One installed flow entry: match key/mask, actions and counters.
 * 'lpm' is the longest-prefix-match length used as a tie-breaker in
 * _of_dpa_flow_match; 'cookie' is the hash-table key. */
typedef struct of_dpa_flow {
    uint32_t lpm;
    uint32_t priority;
    uint32_t hardtime;
    uint32_t idletime;
    uint64_t cookie;
    OfDpaFlowKey key;
    OfDpaFlowKey mask;
    OfDpaFlowAction action;
    struct {
        uint64_t hits;
        int64_t install_time;        /* seconds, QEMU_CLOCK_VIRTUAL */
        int64_t refresh_time;        /* seconds, QEMU_CLOCK_VIRTUAL */
        uint64_t rx_pkts;
        uint64_t tx_pkts;
    } stats;
} OfDpaFlow;
141 
/* Pointers into the current packet's headers, filled in by
 * of_dpa_flow_pkt_parse.  A NULL pointer means the corresponding
 * header is absent from the packet. */
typedef struct of_dpa_flow_pkt_fields {
    uint32_t tunnel_id;
    struct eth_header *ethhdr;
    __be16 *h_proto;                 /* innermost ethertype (after VLAN) */
    struct vlan_header *vlanhdr;
    struct ip_header *ipv4hdr;
    struct ip6_header *ipv6hdr;
    Ipv6Addr *ipv6_src_addr;
    Ipv6Addr *ipv6_dst_addr;
} OfDpaFlowPktFields;
152 
/* Per-packet processing context carried through the table pipeline.
 * The *_rewrite scratch headers allow header modification without
 * touching the original packet data; iov[0]/iov[1] are reserved for
 * the (possibly rewritten) eth and vlan headers. */
typedef struct of_dpa_flow_context {
    uint32_t in_pport;
    uint32_t tunnel_id;
    struct iovec *iov;
    int iovcnt;
    struct eth_header ethhdr_rewrite;
    struct vlan_header vlanhdr_rewrite;
    struct vlan_header vlanhdr;      /* scratch for inserted VLAN tag */
    OfDpa *of_dpa;
    OfDpaFlowPktFields fields;
    OfDpaFlowAction action_set;      /* accumulated 'write' actions */
} OfDpaFlowContext;
165 
/* Search state for a table lookup: 'value' is the key built from the
 * packet, 'best' is the highest-precedence flow found so far. */
typedef struct of_dpa_flow_match {
    OfDpaFlowKey value;
    OfDpaFlow *best;
} OfDpaFlowMatch;
170 
/* One group-table entry.  The group type is encoded in 'id' (see
 * ROCKER_GROUP_TYPE_GET); the union member in use is determined by
 * that type. */
typedef struct of_dpa_group {
    uint32_t id;
    union {
        struct {
            uint32_t out_pport;
            uint8_t pop_vlan;
        } l2_interface;
        struct {
            uint32_t group_id;       /* chained L2 interface group */
            MACAddr src_mac;
            MACAddr dst_mac;
            __be16 vlan_id;
        } l2_rewrite;
        struct {
            uint16_t group_count;
            uint32_t *group_ids;
        } l2_flood;
        struct {
            uint32_t group_id;       /* chained L2 interface group */
            MACAddr src_mac;
            MACAddr dst_mac;
            __be16 vlan_id;
            uint8_t ttl_check;
        } l3_unicast;
    };
} OfDpaGroup;
197 
198 static int of_dpa_mask2prefix(__be32 mask)
199 {
200     int i;
201     int count = 32;
202 
203     for (i = 0; i < 32; i++) {
204         if (!(ntohl(mask) & ((2 << i) - 1))) {
205             count--;
206         }
207     }
208 
209     return count;
210 }
211 
#if defined(DEBUG_ROCKER)
/* Debug helper: render a flow key (and, for installed flows, its
 * mask) into a one-line summary via DPRINTF.  A field is printed when
 * it is non-zero in either key or mask; the "/..." suffix shows the
 * mask whenever it is not an exact match.
 *
 * Fixed: the non-exact-mask branches for in_pport, tunnel_id and
 * vlan_id previously printed the key value again instead of the mask.
 */
static void of_dpa_flow_key_dump(OfDpaFlowKey *key, OfDpaFlowKey *mask)
{
    char buf[512], *b = buf, *mac;

    b += sprintf(b, " tbl %2d", key->tbl_id);

    if (key->in_pport || (mask && mask->in_pport)) {
        b += sprintf(b, " in_pport %2d", key->in_pport);
        if (mask && mask->in_pport != 0xffffffff) {
            b += sprintf(b, "/0x%08x", mask->in_pport);
        }
    }

    if (key->tunnel_id || (mask && mask->tunnel_id)) {
        b += sprintf(b, " tun %8d", key->tunnel_id);
        if (mask && mask->tunnel_id != 0xffffffff) {
            b += sprintf(b, "/0x%08x", mask->tunnel_id);
        }
    }

    if (key->eth.vlan_id || (mask && mask->eth.vlan_id)) {
        b += sprintf(b, " vlan %4d", ntohs(key->eth.vlan_id));
        if (mask && mask->eth.vlan_id != 0xffff) {
            b += sprintf(b, "/0x%04x", ntohs(mask->eth.vlan_id));
        }
    }

    if (memcmp(key->eth.src.a, zero_mac.a, ETH_ALEN) ||
        (mask && memcmp(mask->eth.src.a, zero_mac.a, ETH_ALEN))) {
        mac = qemu_mac_strdup_printf(key->eth.src.a);
        b += sprintf(b, " src %s", mac);
        g_free(mac);
        if (mask && memcmp(mask->eth.src.a, ff_mac.a, ETH_ALEN)) {
            mac = qemu_mac_strdup_printf(mask->eth.src.a);
            b += sprintf(b, "/%s", mac);
            g_free(mac);
        }
    }

    if (memcmp(key->eth.dst.a, zero_mac.a, ETH_ALEN) ||
        (mask && memcmp(mask->eth.dst.a, zero_mac.a, ETH_ALEN))) {
        mac = qemu_mac_strdup_printf(key->eth.dst.a);
        b += sprintf(b, " dst %s", mac);
        g_free(mac);
        if (mask && memcmp(mask->eth.dst.a, ff_mac.a, ETH_ALEN)) {
            mac = qemu_mac_strdup_printf(mask->eth.dst.a);
            b += sprintf(b, "/%s", mac);
            g_free(mac);
        }
    }

    if (key->eth.type || (mask && mask->eth.type)) {
        b += sprintf(b, " type 0x%04x", ntohs(key->eth.type));
        if (mask && mask->eth.type != 0xffff) {
            b += sprintf(b, "/0x%04x", ntohs(mask->eth.type));
        }
        /* L3 fields are only meaningful for IPv4 (0x0800) and
         * IPv6 (0x86dd) ethertypes. */
        switch (ntohs(key->eth.type)) {
        case 0x0800:
        case 0x86dd:
            if (key->ip.proto || (mask && mask->ip.proto)) {
                b += sprintf(b, " ip proto %2d", key->ip.proto);
                if (mask && mask->ip.proto != 0xff) {
                    b += sprintf(b, "/0x%02x", mask->ip.proto);
                }
            }
            if (key->ip.tos || (mask && mask->ip.tos)) {
                b += sprintf(b, " ip tos %2d", key->ip.tos);
                if (mask && mask->ip.tos != 0xff) {
                    b += sprintf(b, "/0x%02x", mask->ip.tos);
                }
            }
            break;
        }
        switch (ntohs(key->eth.type)) {
        case 0x0800:
            if (key->ipv4.addr.dst || (mask && mask->ipv4.addr.dst)) {
                b += sprintf(b, " dst %s",
                    inet_ntoa(*(struct in_addr *)&key->ipv4.addr.dst));
                if (mask) {
                    b += sprintf(b, "/%d",
                                 of_dpa_mask2prefix(mask->ipv4.addr.dst));
                }
            }
            break;
        }
    }

    DPRINTF("%s\n", buf);
}
#else
#define of_dpa_flow_key_dump(k, m)
#endif
305 
/* GHashTable foreach callback: test one installed flow against the
 * search key in user_data and track the best hit in match->best.
 *
 * Key, mask and search value are compared as arrays of uint64_t
 * (key.width words), relying on OfDpaFlowKey being scannable 64 bits
 * at a time (see FLOW_KEY_WIDTH).
 */
static void _of_dpa_flow_match(void *key, void *value, void *user_data)
{
    OfDpaFlow *flow = value;
    OfDpaFlowMatch *match = user_data;
    uint64_t *k = (uint64_t *)&flow->key;
    uint64_t *m = (uint64_t *)&flow->mask;
    uint64_t *v = (uint64_t *)&match->value;
    int i;

    if (flow->key.tbl_id == match->value.tbl_id) {
        of_dpa_flow_key_dump(&flow->key, &flow->mask);
    }

    /* A flow whose key is wider than the search key cannot match. */
    if (flow->key.width > match->value.width) {
        return;
    }

    /* Mismatch if any bit covered by the mask differs between the
     * flow's key and the search value. */
    for (i = 0; i < flow->key.width; i++, k++, m++, v++) {
        if ((~*k & *m & *v) | (*k & *m & ~*v)) {
            return;
        }
    }

    DPRINTF("match\n");

    /* Prefer higher priority.  NOTE(review): a flow with a longer
     * prefix (lpm) also replaces the current best even when its
     * priority is lower -- confirm this precedence is intended. */
    if (!match->best ||
        flow->priority > match->best->priority ||
        flow->lpm > match->best->lpm) {
        match->best = flow;
    }
}
337 
338 static OfDpaFlow *of_dpa_flow_match(OfDpa *of_dpa, OfDpaFlowMatch *match)
339 {
340     DPRINTF("\nnew search\n");
341     of_dpa_flow_key_dump(&match->value, NULL);
342 
343     g_hash_table_foreach(of_dpa->flow_tbl, _of_dpa_flow_match, match);
344 
345     return match->best;
346 }
347 
/* Exact lookup of a flow entry by its 64-bit cookie. */
static OfDpaFlow *of_dpa_flow_find(OfDpa *of_dpa, uint64_t cookie)
{
    return g_hash_table_lookup(of_dpa->flow_tbl, &cookie);
}
352 
/* Insert a flow into the table; the key points at the cookie stored
 * inside the flow itself, so it stays valid as long as the flow. */
static int of_dpa_flow_add(OfDpa *of_dpa, OfDpaFlow *flow)
{
    g_hash_table_insert(of_dpa->flow_tbl, &flow->cookie, flow);

    return ROCKER_OK;
}
359 
/* Remove a flow from the table; the table's destroy functions (set at
 * table creation, outside this view) handle freeing. */
static void of_dpa_flow_del(OfDpa *of_dpa, OfDpaFlow *flow)
{
    g_hash_table_remove(of_dpa->flow_tbl, &flow->cookie);
}
364 
365 static OfDpaFlow *of_dpa_flow_alloc(uint64_t cookie)
366 {
367     OfDpaFlow *flow;
368     int64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) / 1000;
369 
370     flow = g_malloc0(sizeof(OfDpaFlow));
371     if (!flow) {
372         return NULL;
373     }
374 
375     flow->cookie = cookie;
376     flow->mask.tbl_id = 0xffffffff;
377 
378     flow->stats.install_time = flow->stats.refresh_time = now;
379 
380     return flow;
381 }
382 
/* Point iov[0]/iov[1] back at the packet's original eth and vlan
 * headers, undoing any rewrite/strip done during processing.  iov[1]
 * gets zero length when the packet carries no VLAN tag. */
static void of_dpa_flow_pkt_hdr_reset(OfDpaFlowContext *fc)
{
    OfDpaFlowPktFields *fields = &fc->fields;

    fc->iov[0].iov_base = fields->ethhdr;
    fc->iov[0].iov_len = sizeof(struct eth_header);
    fc->iov[1].iov_base = fields->vlanhdr;
    fc->iov[1].iov_len = fields->vlanhdr ? sizeof(struct vlan_header) : 0;
}
392 
/* Parse the L2/L3 headers of the incoming packet and build fc's view
 * of it: fc->fields points into the packet data, and fc->iov becomes
 * a copy of the caller's iov shifted by two slots so that iov[0]
 * (eth) and iov[1] (vlan) can later be redirected to rewrite buffers
 * or a freshly inserted tag without copying payload.
 *
 * Note: all underrun checks test iov->iov_len, i.e. the eth, vlan and
 * IP headers are assumed to live contiguously in the first vector.
 * NOTE(review): fc->iov must have room for iovcnt + 2 entries -- the
 * caller is responsible, nothing is checked here.
 */
static void of_dpa_flow_pkt_parse(OfDpaFlowContext *fc,
                                  const struct iovec *iov, int iovcnt)
{
    OfDpaFlowPktFields *fields = &fc->fields;
    size_t sofar = 0;
    int i;

    sofar += sizeof(struct eth_header);
    if (iov->iov_len < sofar) {
        DPRINTF("flow_pkt_parse underrun on eth_header\n");
        return;
    }

    fields->ethhdr = iov->iov_base;
    fields->h_proto = &fields->ethhdr->h_proto;

    /* If the frame is 802.1Q tagged, the effective ethertype is the
     * one inside the VLAN header. */
    if (ntohs(*fields->h_proto) == ETH_P_VLAN) {
        sofar += sizeof(struct vlan_header);
        if (iov->iov_len < sofar) {
            DPRINTF("flow_pkt_parse underrun on vlan_header\n");
            return;
        }
        fields->vlanhdr = (struct vlan_header *)(fields->ethhdr + 1);
        fields->h_proto = &fields->vlanhdr->h_proto;
    }

    switch (ntohs(*fields->h_proto)) {
    case ETH_P_IP:
        sofar += sizeof(struct ip_header);
        if (iov->iov_len < sofar) {
            DPRINTF("flow_pkt_parse underrun on ip_header\n");
            return;
        }
        fields->ipv4hdr = (struct ip_header *)(fields->h_proto + 1);
        break;
    case ETH_P_IPV6:
        sofar += sizeof(struct ip6_header);
        if (iov->iov_len < sofar) {
            DPRINTF("flow_pkt_parse underrun on ip6_header\n");
            return;
        }
        fields->ipv6hdr = (struct ip6_header *)(fields->h_proto + 1);
        break;
    }

    /* To facilitate (potential) VLAN tag insertion, Make a
     * copy of the iov and insert two new vectors at the
     * beginning for eth hdr and vlan hdr.  No data is copied,
     * just the vectors.
     */

    of_dpa_flow_pkt_hdr_reset(fc);

    /* iov[2] covers whatever remains of the first caller vector after
     * the eth (+ optional vlan) headers. */
    fc->iov[2].iov_base = fields->h_proto + 1;
    fc->iov[2].iov_len = iov->iov_len - fc->iov[0].iov_len - fc->iov[1].iov_len;

    for (i = 1; i < iovcnt; i++) {
        fc->iov[i+2] = iov[i];
    }

    fc->iovcnt = iovcnt + 2;
}
455 
/* Logically insert a VLAN tag (network-order vlan_id) into the
 * packet: the outer ethertype becomes ETH_P_VLAN and iov[1] is
 * pointed at the scratch vlan header in fc, which carries the tag and
 * the original ethertype.  No packet bytes are moved; note that the
 * original eth header's h_proto in the packet buffer IS overwritten. */
static void of_dpa_flow_pkt_insert_vlan(OfDpaFlowContext *fc, __be16 vlan_id)
{
    OfDpaFlowPktFields *fields = &fc->fields;
    uint16_t h_proto = fields->ethhdr->h_proto;

    if (fields->vlanhdr) {
        DPRINTF("flow_pkt_insert_vlan packet already has vlan\n");
        return;
    }

    fields->ethhdr->h_proto = htons(ETH_P_VLAN);
    fields->vlanhdr = &fc->vlanhdr;
    fields->vlanhdr->h_tci = vlan_id;
    fields->vlanhdr->h_proto = h_proto;
    fields->h_proto = &fields->vlanhdr->h_proto;

    fc->iov[1].iov_base = fields->vlanhdr;
    fc->iov[1].iov_len = sizeof(struct vlan_header);
}
475 
/* Logically strip the VLAN tag without moving packet bytes: iov[0] is
 * shortened to end just before the outer ethertype, and iov[1] is
 * pointed at the inner ethertype (2 bytes).  The emitted sequence is
 * thus dst/src MACs + inner ethertype, i.e. an untagged frame. */
static void of_dpa_flow_pkt_strip_vlan(OfDpaFlowContext *fc)
{
    OfDpaFlowPktFields *fields = &fc->fields;

    if (!fields->vlanhdr) {
        return;
    }

    fc->iov[0].iov_len -= sizeof(fields->ethhdr->h_proto);
    fc->iov[1].iov_base = fields->h_proto;
    fc->iov[1].iov_len = sizeof(fields->ethhdr->h_proto);
}
488 
/* Redirect iov[0]/iov[1] to scratch copies carrying rewritten MAC
 * addresses and/or VLAN id.  A zero (all-zeros) MAC means "leave this
 * address unchanged"; vlan_id == 0 or an untagged packet leaves the
 * vlan header alone.  The original packet bytes are not modified. */
static void of_dpa_flow_pkt_hdr_rewrite(OfDpaFlowContext *fc,
                                        uint8_t *src_mac, uint8_t *dst_mac,
                                        __be16 vlan_id)
{
    OfDpaFlowPktFields *fields = &fc->fields;

    if (src_mac || dst_mac) {
        memcpy(&fc->ethhdr_rewrite, fields->ethhdr, sizeof(struct eth_header));
        if (src_mac && memcmp(src_mac, zero_mac.a, ETH_ALEN)) {
            memcpy(fc->ethhdr_rewrite.h_source, src_mac, ETH_ALEN);
        }
        if (dst_mac && memcmp(dst_mac, zero_mac.a, ETH_ALEN)) {
            memcpy(fc->ethhdr_rewrite.h_dest, dst_mac, ETH_ALEN);
        }
        fc->iov[0].iov_base = &fc->ethhdr_rewrite;
    }

    if (vlan_id && fields->vlanhdr) {
        /* NOTE(review): this copies fc->vlanhdr (the context scratch
         * header, only populated by of_dpa_flow_pkt_insert_vlan), not
         * *fields->vlanhdr.  For a packet that arrived already tagged,
         * fields->vlanhdr points into the packet and fc->vlanhdr may
         * be stale -- confirm the copy source is intended. */
        fc->vlanhdr_rewrite = fc->vlanhdr;
        fc->vlanhdr_rewrite.h_tci = vlan_id;
        fc->iov[1].iov_base = &fc->vlanhdr_rewrite;
    }
}
512 
513 static void of_dpa_flow_ig_tbl(OfDpaFlowContext *fc, uint32_t tbl_id);
514 
515 static void of_dpa_ig_port_build_match(OfDpaFlowContext *fc,
516                                        OfDpaFlowMatch *match)
517 {
518     match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
519     match->value.in_pport = fc->in_pport;
520     match->value.width = FLOW_KEY_WIDTH(tbl_id);
521 }
522 
523 static void of_dpa_ig_port_miss(OfDpaFlowContext *fc)
524 {
525     uint32_t port;
526 
527     /* The default on miss is for packets from physical ports
528      * to go to the VLAN Flow Table. There is no default rule
529      * for packets from logical ports, which are dropped on miss.
530      */
531 
532     if (fp_port_from_pport(fc->in_pport, &port)) {
533         of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_VLAN);
534     }
535 }
536 
537 static void of_dpa_vlan_build_match(OfDpaFlowContext *fc,
538                                     OfDpaFlowMatch *match)
539 {
540     match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
541     match->value.in_pport = fc->in_pport;
542     if (fc->fields.vlanhdr) {
543         match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
544     }
545     match->value.width = FLOW_KEY_WIDTH(eth.vlan_id);
546 }
547 
548 static void of_dpa_vlan_insert(OfDpaFlowContext *fc,
549                                OfDpaFlow *flow)
550 {
551     if (flow->action.apply.new_vlan_id) {
552         of_dpa_flow_pkt_insert_vlan(fc, flow->action.apply.new_vlan_id);
553     }
554 }
555 
/* Build the termination-MAC table search key: ingress port,
 * ethertype, VLAN id and destination MAC.
 * NOTE(review): fc->fields.vlanhdr is dereferenced unconditionally
 * here -- presumably this table is only reached after the VLAN table
 * has ensured a tag is present; confirm against the pipeline. */
static void of_dpa_term_mac_build_match(OfDpaFlowContext *fc,
                                        OfDpaFlowMatch *match)
{
    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
    match->value.in_pport = fc->in_pport;
    match->value.eth.type = *fc->fields.h_proto;
    match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
    memcpy(match->value.eth.dst.a, fc->fields.ethhdr->h_dest,
           sizeof(match->value.eth.dst.a));
    match->value.width = FLOW_KEY_WIDTH(eth.type);
}
567 
/* Termination-MAC table miss: packet is not routed, continue with L2
 * bridging. */
static void of_dpa_term_mac_miss(OfDpaFlowContext *fc)
{
    of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_BRIDGING);
}
572 
/* Common apply-action handler: record copy-to-cpu and the matched
 * VLAN id into the accumulated action set for use at egress. */
static void of_dpa_apply_actions(OfDpaFlowContext *fc,
                                 OfDpaFlow *flow)
{
    fc->action_set.apply.copy_to_cpu = flow->action.apply.copy_to_cpu;
    fc->action_set.apply.vlan_id = flow->key.eth.vlan_id;
}
579 
580 static void of_dpa_bridging_build_match(OfDpaFlowContext *fc,
581                                         OfDpaFlowMatch *match)
582 {
583     match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
584     if (fc->fields.vlanhdr) {
585         match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
586     } else if (fc->tunnel_id) {
587         match->value.tunnel_id = fc->tunnel_id;
588     }
589     memcpy(match->value.eth.dst.a, fc->fields.ethhdr->h_dest,
590            sizeof(match->value.eth.dst.a));
591     match->value.width = FLOW_KEY_WIDTH(eth.dst);
592 }
593 
/* Bridging table hit handler: learn (or refresh) the packet's
 * src_mac/vlan by telling the driver a mac/vlan was seen.  dst_flow
 * (the flow that was hit) is unused here.
 * NOTE(review): fc->fields.vlanhdr is dereferenced unconditionally --
 * confirm untagged frames cannot reach this point. */
static void of_dpa_bridging_learn(OfDpaFlowContext *fc,
                                  OfDpaFlow *dst_flow)
{
    OfDpaFlowMatch match = { { 0, }, };
    OfDpaFlow *flow;
    uint8_t *addr;
    uint16_t vlan_id;
    int64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) / 1000;
    int64_t refresh_delay = 1;       /* seconds between refresh events */

    /* Do a lookup in bridge table by src_mac/vlan */

    addr = fc->fields.ethhdr->h_source;
    vlan_id = fc->fields.vlanhdr->h_tci;

    match.value.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
    match.value.eth.vlan_id = vlan_id;
    memcpy(match.value.eth.dst.a, addr, sizeof(match.value.eth.dst.a));
    match.value.width = FLOW_KEY_WIDTH(eth.dst);

    flow = of_dpa_flow_match(fc->of_dpa, &match);
    if (flow) {
        /* Only exact-MAC entries count as "already learned". */
        if (!memcmp(flow->mask.eth.dst.a, ff_mac.a,
                    sizeof(flow->mask.eth.dst.a))) {
            /* src_mac/vlan already learned; if in_port and out_port
             * don't match, the end station has moved and the port
             * needs updating */
            /* XXX implement the in_port/out_port check */
            if (now - flow->stats.refresh_time < refresh_delay) {
                return;
            }
            flow->stats.refresh_time = now;
        }
    }

    /* Let driver know about mac/vlan.  This may be a new mac/vlan
     * or a refresh of existing mac/vlan that's been hit after the
     * refresh_delay.
     */

    rocker_event_mac_vlan_seen(world_rocker(fc->of_dpa->world),
                               fc->in_pport, addr, vlan_id);
}
637 
/* Bridging table miss: still learn the source mac/vlan, then continue
 * with the ACL policy table. */
static void of_dpa_bridging_miss(OfDpaFlowContext *fc)
{
    of_dpa_bridging_learn(fc, NULL);
    of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_ACL_POLICY);
}
643 
644 static void of_dpa_bridging_action_write(OfDpaFlowContext *fc,
645                                          OfDpaFlow *flow)
646 {
647     if (flow->action.write.group_id != ROCKER_GROUP_NONE) {
648         fc->action_set.write.group_id = flow->action.write.group_id;
649     }
650     fc->action_set.write.tun_log_lport = flow->action.write.tun_log_lport;
651 }
652 
653 static void of_dpa_unicast_routing_build_match(OfDpaFlowContext *fc,
654                                                OfDpaFlowMatch *match)
655 {
656     match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
657     match->value.eth.type = *fc->fields.h_proto;
658     if (fc->fields.ipv4hdr) {
659         match->value.ipv4.addr.dst = fc->fields.ipv4hdr->ip_dst;
660     }
661     if (fc->fields.ipv6_dst_addr) {
662         memcpy(&match->value.ipv6.addr.dst, fc->fields.ipv6_dst_addr,
663                sizeof(match->value.ipv6.addr.dst));
664     }
665     match->value.width = FLOW_KEY_WIDTH(ipv6.addr.dst);
666 }
667 
/* Unicast-routing table miss: continue with the ACL policy table. */
static void of_dpa_unicast_routing_miss(OfDpaFlowContext *fc)
{
    of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_ACL_POLICY);
}
672 
673 static void of_dpa_unicast_routing_action_write(OfDpaFlowContext *fc,
674                                                 OfDpaFlow *flow)
675 {
676     if (flow->action.write.group_id != ROCKER_GROUP_NONE) {
677         fc->action_set.write.group_id = flow->action.write.group_id;
678     }
679 }
680 
/* Build the multicast-routing table search key: ethertype, VLAN id
 * and the IPv4/IPv6 source and destination addresses.
 * NOTE(review): fc->fields.vlanhdr is dereferenced unconditionally --
 * confirm untagged frames cannot reach this table. */
static void
of_dpa_multicast_routing_build_match(OfDpaFlowContext *fc,
                                     OfDpaFlowMatch *match)
{
    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
    match->value.eth.type = *fc->fields.h_proto;
    match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
    if (fc->fields.ipv4hdr) {
        match->value.ipv4.addr.src = fc->fields.ipv4hdr->ip_src;
        match->value.ipv4.addr.dst = fc->fields.ipv4hdr->ip_dst;
    }
    if (fc->fields.ipv6_src_addr) {
        memcpy(&match->value.ipv6.addr.src, fc->fields.ipv6_src_addr,
               sizeof(match->value.ipv6.addr.src));
    }
    if (fc->fields.ipv6_dst_addr) {
        memcpy(&match->value.ipv6.addr.dst, fc->fields.ipv6_dst_addr,
               sizeof(match->value.ipv6.addr.dst));
    }
    match->value.width = FLOW_KEY_WIDTH(ipv6.addr.dst);
}
702 
/* Multicast-routing table miss: continue with the ACL policy table. */
static void of_dpa_multicast_routing_miss(OfDpaFlowContext *fc)
{
    of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_ACL_POLICY);
}
707 
708 static void
709 of_dpa_multicast_routing_action_write(OfDpaFlowContext *fc,
710                                       OfDpaFlow *flow)
711 {
712     if (flow->action.write.group_id != ROCKER_GROUP_NONE) {
713         fc->action_set.write.group_id = flow->action.write.group_id;
714     }
715     fc->action_set.write.vlan_id = flow->action.write.vlan_id;
716 }
717 
/* Build the ACL policy table search key: L2 fields always, extended
 * with IP proto/ToS when the packet is IPv4 or IPv6 (the key width is
 * widened accordingly).
 * NOTE(review): fc->fields.vlanhdr is dereferenced unconditionally --
 * confirm untagged frames cannot reach this table. */
static void of_dpa_acl_build_match(OfDpaFlowContext *fc,
                                   OfDpaFlowMatch *match)
{
    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
    match->value.in_pport = fc->in_pport;
    memcpy(match->value.eth.src.a, fc->fields.ethhdr->h_source,
           sizeof(match->value.eth.src.a));
    memcpy(match->value.eth.dst.a, fc->fields.ethhdr->h_dest,
           sizeof(match->value.eth.dst.a));
    match->value.eth.type = *fc->fields.h_proto;
    match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
    match->value.width = FLOW_KEY_WIDTH(eth.type);
    if (fc->fields.ipv4hdr) {
        match->value.ip.proto = fc->fields.ipv4hdr->ip_p;
        match->value.ip.tos = fc->fields.ipv4hdr->ip_tos;
        match->value.width = FLOW_KEY_WIDTH(ip.tos);
    } else if (fc->fields.ipv6hdr) {
        match->value.ip.proto =
            fc->fields.ipv6hdr->ip6_ctlun.ip6_un1.ip6_un1_nxt;
        match->value.ip.tos = 0; /* XXX what goes here? */
        match->value.width = FLOW_KEY_WIDTH(ip.tos);
    }
}
741 
742 static void of_dpa_eg(OfDpaFlowContext *fc);
/* ACL table hit: end of the ingress pipeline, run egress processing.
 * dst_flow (the flow that was hit) is unused. */
static void of_dpa_acl_hit(OfDpaFlowContext *fc,
                           OfDpaFlow *dst_flow)
{
    of_dpa_eg(fc);
}
748 
749 static void of_dpa_acl_action_write(OfDpaFlowContext *fc,
750                                     OfDpaFlow *flow)
751 {
752     if (flow->action.write.group_id != ROCKER_GROUP_NONE) {
753         fc->action_set.write.group_id = flow->action.write.group_id;
754     }
755 }
756 
/* Intentional no-op: used as the hit_no_goto handler to drop the
 * packet (nothing is forwarded, the iov is simply abandoned). */
static void of_dpa_drop(OfDpaFlowContext *fc)
{
    /* drop packet */
}
761 
762 static OfDpaGroup *of_dpa_group_find(OfDpa *of_dpa,
763                                               uint32_t group_id)
764 {
765     return g_hash_table_lookup(of_dpa->group_tbl, &group_id);
766 }
767 
768 static int of_dpa_group_add(OfDpa *of_dpa, OfDpaGroup *group)
769 {
770     g_hash_table_insert(of_dpa->group_tbl, &group->id, group);
771 
772     return 0;
773 }
774 
#if 0
/* Unfinished group-modify support, compiled out: would replace the
 * contents of an existing group in place.  Returns -ENOENT when the
 * group id is unknown. */
static int of_dpa_group_mod(OfDpa *of_dpa, OfDpaGroup *group)
{
    OfDpaGroup *old_group = of_dpa_group_find(of_dpa, group->id);

    if (!old_group) {
        return -ENOENT;
    }

    /* XXX */

    return 0;
}
#endif
789 
790 static int of_dpa_group_del(OfDpa *of_dpa, OfDpaGroup *group)
791 {
792     g_hash_table_remove(of_dpa->group_tbl, &group->id);
793 
794     return 0;
795 }
796 
#if 0
/* Unfinished group-stats support, compiled out.  Returns -ENOENT when
 * the group id is unknown. */
static int of_dpa_group_get_stats(OfDpa *of_dpa, uint32_t id)
{
    OfDpaGroup *group = of_dpa_group_find(of_dpa, id);

    if (!group) {
        return -ENOENT;
    }

    /* XXX get/return stats */

    return 0;
}
#endif
811 
812 static OfDpaGroup *of_dpa_group_alloc(uint32_t id)
813 {
814     OfDpaGroup *group = g_malloc0(sizeof(OfDpaGroup));
815 
816     if (!group) {
817         return NULL;
818     }
819 
820     group->id = id;
821 
822     return group;
823 }
824 
/* Emit the packet on an L2 interface group: optionally pop the VLAN
 * tag, then send to the group's egress port.  Port 0 means "to CPU",
 * i.e. the packet is handed back up through rx_produce. */
static void of_dpa_output_l2_interface(OfDpaFlowContext *fc,
                                       OfDpaGroup *group)
{
    if (group->l2_interface.pop_vlan) {
        of_dpa_flow_pkt_strip_vlan(fc);
    }

    /* Note: By default, and as per the OpenFlow 1.3.1
     * specification, a packet cannot be forwarded back
     * to the IN_PORT from which it came in. An action
     * bucket that specifies the particular packet's
     * egress port is not evaluated.
     */

    if (group->l2_interface.out_pport == 0) {
        rx_produce(fc->of_dpa->world, fc->in_pport, fc->iov, fc->iovcnt);
    } else if (group->l2_interface.out_pport != fc->in_pport) {
        rocker_port_eg(world_rocker(fc->of_dpa->world),
                       group->l2_interface.out_pport,
                       fc->iov, fc->iovcnt);
    }
}
847 
848 static void of_dpa_output_l2_rewrite(OfDpaFlowContext *fc,
849                                      OfDpaGroup *group)
850 {
851     OfDpaGroup *l2_group =
852         of_dpa_group_find(fc->of_dpa, group->l2_rewrite.group_id);
853 
854     if (!l2_group) {
855         return;
856     }
857 
858     of_dpa_flow_pkt_hdr_rewrite(fc, group->l2_rewrite.src_mac.a,
859                          group->l2_rewrite.dst_mac.a,
860                          group->l2_rewrite.vlan_id);
861     of_dpa_output_l2_interface(fc, l2_group);
862 }
863 
/* Emit the packet on every member of an L2 flood/multicast group.
 * The packet headers are reset before each member so that a rewrite
 * or vlan-pop done for one member does not leak into the next. */
static void of_dpa_output_l2_flood(OfDpaFlowContext *fc,
                                   OfDpaGroup *group)
{
    OfDpaGroup *l2_group;
    int i;

    for (i = 0; i < group->l2_flood.group_count; i++) {
        of_dpa_flow_pkt_hdr_reset(fc);
        l2_group = of_dpa_group_find(fc->of_dpa, group->l2_flood.group_ids[i]);
        if (!l2_group) {
            continue;                /* skip dangling member ids */
        }
        switch (ROCKER_GROUP_TYPE_GET(l2_group->id)) {
        case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
            of_dpa_output_l2_interface(fc, l2_group);
            break;
        case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
            of_dpa_output_l2_rewrite(fc, l2_group);
            break;
        }
    }
}
886 
887 static void of_dpa_output_l3_unicast(OfDpaFlowContext *fc, OfDpaGroup *group)
888 {
889     OfDpaGroup *l2_group =
890         of_dpa_group_find(fc->of_dpa, group->l3_unicast.group_id);
891 
892     if (!l2_group) {
893         return;
894     }
895 
896     of_dpa_flow_pkt_hdr_rewrite(fc, group->l3_unicast.src_mac.a,
897                                 group->l3_unicast.dst_mac.a,
898                                 group->l3_unicast.vlan_id);
899     /* XXX need ttl_check */
900     of_dpa_output_l2_interface(fc, l2_group);
901 }
902 
/* Egress processing: execute the write-action set accumulated while
 * walking the tables.  Optionally copies the packet to the CPU first
 * (via the VLAN's port-0 L2 interface group), then dispatches on the
 * type of the recorded egress group. */
static void of_dpa_eg(OfDpaFlowContext *fc)
{
    OfDpaFlowAction *set = &fc->action_set;
    OfDpaGroup *group;
    uint32_t group_id;

    /* send a copy of pkt to CPU (controller)? */

    if (set->apply.copy_to_cpu) {
        group_id = ROCKER_GROUP_L2_INTERFACE(set->apply.vlan_id, 0);
        group = of_dpa_group_find(fc->of_dpa, group_id);
        if (group) {
            of_dpa_output_l2_interface(fc, group);
            /* restore headers so the normal egress below sends the
             * original (un-popped/un-rewritten) packet */
            of_dpa_flow_pkt_hdr_reset(fc);
        }
    }

    /* process group write actions */

    if (!set->write.group_id) {
        return;                      /* no egress group: drop */
    }

    group = of_dpa_group_find(fc->of_dpa, set->write.group_id);
    if (!group) {
        return;
    }

    switch (ROCKER_GROUP_TYPE_GET(group->id)) {
    case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
        of_dpa_output_l2_interface(fc, group);
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
        of_dpa_output_l2_rewrite(fc, group);
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
    case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
        of_dpa_output_l2_flood(fc, group);
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
        of_dpa_output_l3_unicast(fc, group);
        break;
    }
}
947 
/*
 * Per-table hooks for the ingress flow pipeline.  Every member is
 * optional; of_dpa_flow_ig_tbl() skips any hook that is NULL.
 */
typedef struct of_dpa_flow_tbl_ops {
    void (*build_match)(OfDpaFlowContext *fc, OfDpaFlowMatch *match); /* build lookup key */
    void (*hit)(OfDpaFlowContext *fc, OfDpaFlow *flow);    /* run after a flow matches */
    void (*miss)(OfDpaFlowContext *fc);                    /* run when no flow matches */
    void (*hit_no_goto)(OfDpaFlowContext *fc);             /* matched flow lacks a goto_tbl */
    void (*action_apply)(OfDpaFlowContext *fc, OfDpaFlow *flow);  /* per-flow apply actions */
    void (*action_write)(OfDpaFlowContext *fc, OfDpaFlow *flow);  /* per-flow write actions */
} OfDpaFlowTblOps;
956 
/*
 * OF-DPA pipeline dispatch table, indexed by OF-DPA table id.  Each
 * entry wires the table-specific match builder and hit/miss handlers
 * used by of_dpa_flow_ig_tbl().  Note the ACL table: a miss there
 * falls through to of_dpa_eg(), which performs final egress.
 */
static OfDpaFlowTblOps of_dpa_tbl_ops[] = {
    [ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT] = {
        .build_match = of_dpa_ig_port_build_match,
        .miss = of_dpa_ig_port_miss,
        .hit_no_goto = of_dpa_drop,
    },
    [ROCKER_OF_DPA_TABLE_ID_VLAN] = {
        .build_match = of_dpa_vlan_build_match,
        .hit_no_goto = of_dpa_drop,
        .action_apply = of_dpa_vlan_insert,
    },
    [ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC] = {
        .build_match = of_dpa_term_mac_build_match,
        .miss = of_dpa_term_mac_miss,
        .hit_no_goto = of_dpa_drop,
        .action_apply = of_dpa_apply_actions,
    },
    [ROCKER_OF_DPA_TABLE_ID_BRIDGING] = {
        .build_match = of_dpa_bridging_build_match,
        .hit = of_dpa_bridging_learn,
        .miss = of_dpa_bridging_miss,
        .hit_no_goto = of_dpa_drop,
        .action_apply = of_dpa_apply_actions,
        .action_write = of_dpa_bridging_action_write,
    },
    [ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING] = {
        .build_match = of_dpa_unicast_routing_build_match,
        .miss = of_dpa_unicast_routing_miss,
        .hit_no_goto = of_dpa_drop,
        .action_write = of_dpa_unicast_routing_action_write,
    },
    [ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING] = {
        .build_match = of_dpa_multicast_routing_build_match,
        .miss = of_dpa_multicast_routing_miss,
        .hit_no_goto = of_dpa_drop,
        .action_write = of_dpa_multicast_routing_action_write,
    },
    [ROCKER_OF_DPA_TABLE_ID_ACL_POLICY] = {
        .build_match = of_dpa_acl_build_match,
        .hit = of_dpa_acl_hit,
        .miss = of_dpa_eg,
        .action_apply = of_dpa_apply_actions,
        .action_write = of_dpa_acl_action_write,
    },
};
1002 
1003 static void of_dpa_flow_ig_tbl(OfDpaFlowContext *fc, uint32_t tbl_id)
1004 {
1005     OfDpaFlowTblOps *ops = &of_dpa_tbl_ops[tbl_id];
1006     OfDpaFlowMatch match = { { 0, }, };
1007     OfDpaFlow *flow;
1008 
1009     if (ops->build_match) {
1010         ops->build_match(fc, &match);
1011     } else {
1012         return;
1013     }
1014 
1015     flow = of_dpa_flow_match(fc->of_dpa, &match);
1016     if (!flow) {
1017         if (ops->miss) {
1018             ops->miss(fc);
1019         }
1020         return;
1021     }
1022 
1023     flow->stats.hits++;
1024 
1025     if (ops->action_apply) {
1026         ops->action_apply(fc, flow);
1027     }
1028 
1029     if (ops->action_write) {
1030         ops->action_write(fc, flow);
1031     }
1032 
1033     if (ops->hit) {
1034         ops->hit(fc, flow);
1035     }
1036 
1037     if (flow->action.goto_tbl) {
1038         of_dpa_flow_ig_tbl(fc, flow->action.goto_tbl);
1039     } else if (ops->hit_no_goto) {
1040         ops->hit_no_goto(fc);
1041     }
1042 
1043     /* drop packet */
1044 }
1045 
1046 static ssize_t of_dpa_ig(World *world, uint32_t pport,
1047                          const struct iovec *iov, int iovcnt)
1048 {
1049     struct iovec iov_copy[iovcnt + 2];
1050     OfDpaFlowContext fc = {
1051         .of_dpa = world_private(world),
1052         .in_pport = pport,
1053         .iov = iov_copy,
1054         .iovcnt = iovcnt + 2,
1055     };
1056 
1057     of_dpa_flow_pkt_parse(&fc, iov, iovcnt);
1058     of_dpa_flow_ig_tbl(&fc, ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT);
1059 
1060     return iov_size(iov, iovcnt);
1061 }
1062 
/* pports with this bit set are overlay tunnel logical ports */
#define ROCKER_TUNNEL_LPORT 0x00010000
1064 
1065 static int of_dpa_cmd_add_ig_port(OfDpaFlow *flow, RockerTlv **flow_tlvs)
1066 {
1067     OfDpaFlowKey *key = &flow->key;
1068     OfDpaFlowKey *mask = &flow->mask;
1069     OfDpaFlowAction *action = &flow->action;
1070     bool overlay_tunnel;
1071 
1072     if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] ||
1073         !flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
1074         return -ROCKER_EINVAL;
1075     }
1076 
1077     key->tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
1078     key->width = FLOW_KEY_WIDTH(tbl_id);
1079 
1080     key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]);
1081     if (flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]) {
1082         mask->in_pport =
1083             rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]);
1084     }
1085 
1086     overlay_tunnel = !!(key->in_pport & ROCKER_TUNNEL_LPORT);
1087 
1088     action->goto_tbl =
1089         rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
1090 
1091     if (!overlay_tunnel && action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_VLAN) {
1092         return -ROCKER_EINVAL;
1093     }
1094 
1095     if (overlay_tunnel && action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_BRIDGING) {
1096         return -ROCKER_EINVAL;
1097     }
1098 
1099     return ROCKER_OK;
1100 }
1101 
1102 static int of_dpa_cmd_add_vlan(OfDpaFlow *flow, RockerTlv **flow_tlvs)
1103 {
1104     OfDpaFlowKey *key = &flow->key;
1105     OfDpaFlowKey *mask = &flow->mask;
1106     OfDpaFlowAction *action = &flow->action;
1107     uint32_t port;
1108     bool untagged;
1109 
1110     if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] ||
1111         !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
1112         DPRINTF("Must give in_pport and vlan_id to install VLAN tbl entry\n");
1113         return -ROCKER_EINVAL;
1114     }
1115 
1116     key->tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
1117     key->width = FLOW_KEY_WIDTH(eth.vlan_id);
1118 
1119     key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]);
1120     if (!fp_port_from_pport(key->in_pport, &port)) {
1121         DPRINTF("in_pport (%d) not a front-panel port\n", key->in_pport);
1122         return -ROCKER_EINVAL;
1123     }
1124     mask->in_pport = 0xffffffff;
1125 
1126     key->eth.vlan_id = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
1127 
1128     if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]) {
1129         mask->eth.vlan_id =
1130             rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]);
1131     }
1132 
1133     if (key->eth.vlan_id) {
1134         untagged = false; /* filtering */
1135     } else {
1136         untagged = true;
1137     }
1138 
1139     if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
1140         action->goto_tbl =
1141             rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
1142         if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC) {
1143             DPRINTF("Goto tbl (%d) must be TERM_MAC\n", action->goto_tbl);
1144             return -ROCKER_EINVAL;
1145         }
1146     }
1147 
1148     if (untagged) {
1149         if (!flow_tlvs[ROCKER_TLV_OF_DPA_NEW_VLAN_ID]) {
1150             DPRINTF("Must specify new vlan_id if untagged\n");
1151             return -ROCKER_EINVAL;
1152         }
1153         action->apply.new_vlan_id =
1154             rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_NEW_VLAN_ID]);
1155         if (1 > ntohs(action->apply.new_vlan_id) ||
1156             ntohs(action->apply.new_vlan_id) > 4095) {
1157             DPRINTF("New vlan_id (%d) must be between 1 and 4095\n",
1158                     ntohs(action->apply.new_vlan_id));
1159             return -ROCKER_EINVAL;
1160         }
1161     }
1162 
1163     return ROCKER_OK;
1164 }
1165 
/*
 * Parse TLVs for a TERMINATION_MAC table entry.  Matches on ingress
 * port (masked), ethertype (IPv4 or IPv6 only), destination MAC
 * (masked) and VLAN (masked).  The destination MAC must be either
 * unicast or one of the two well-known multicast prefix wildcards.
 * Returns ROCKER_OK or -ROCKER_EINVAL.
 */
static int of_dpa_cmd_add_term_mac(OfDpaFlow *flow, RockerTlv **flow_tlvs)
{
    OfDpaFlowKey *key = &flow->key;
    OfDpaFlowKey *mask = &flow->mask;
    OfDpaFlowAction *action = &flow->action;
    /* canonical IPv4/IPv6 multicast MAC prefixes and their masks */
    const MACAddr ipv4_mcast = { .a = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 } };
    const MACAddr ipv4_mask =  { .a = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 } };
    const MACAddr ipv6_mcast = { .a = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 } };
    const MACAddr ipv6_mask =  { .a = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } };
    uint32_t port;
    bool unicast = false;
    bool multicast = false;

    /* all of these TLVs are mandatory for this table */
    if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]) {
        return -ROCKER_EINVAL;
    }

    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
    key->width = FLOW_KEY_WIDTH(eth.type);

    key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]);
    if (!fp_port_from_pport(key->in_pport, &port)) {
        return -ROCKER_EINVAL;
    }
    mask->in_pport =
        rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]);

    /* only IPv4 (0x0800) and IPv6 (0x86dd) can be terminated/routed */
    key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]);
    if (key->eth.type != htons(0x0800) && key->eth.type != htons(0x86dd)) {
        return -ROCKER_EINVAL;
    }
    mask->eth.type = htons(0xffff);

    memcpy(key->eth.dst.a,
           rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
           sizeof(key->eth.dst.a));
    memcpy(mask->eth.dst.a,
           rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]),
           sizeof(mask->eth.dst.a));

    /* I/G bit clear means individual (unicast) address */
    if ((key->eth.dst.a[0] & 0x01) == 0x00) {
        unicast = true;
    }

    /* only two wildcard rules are acceptable for IPv4 and IPv6 multicast */
    if (memcmp(key->eth.dst.a, ipv4_mcast.a, sizeof(key->eth.dst.a)) == 0 &&
        memcmp(mask->eth.dst.a, ipv4_mask.a, sizeof(mask->eth.dst.a)) == 0) {
        multicast = true;
    }
    if (memcmp(key->eth.dst.a, ipv6_mcast.a, sizeof(key->eth.dst.a)) == 0 &&
        memcmp(mask->eth.dst.a, ipv6_mask.a, sizeof(mask->eth.dst.a)) == 0) {
        multicast = true;
    }

    if (!unicast && !multicast) {
        return -ROCKER_EINVAL;
    }

    key->eth.vlan_id = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
    mask->eth.vlan_id =
        rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]);

    /* goto table, if given, must match the cast-ness of the dst MAC */
    if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
        action->goto_tbl =
            rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);

        if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING &&
            action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING) {
            return -ROCKER_EINVAL;
        }

        if (unicast &&
            action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING) {
            return -ROCKER_EINVAL;
        }

        if (multicast &&
            action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING) {
            return -ROCKER_EINVAL;
        }
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]) {
        action->apply.copy_to_cpu =
            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]);
    }

    return ROCKER_OK;
}
1261 
1262 static int of_dpa_cmd_add_bridging(OfDpaFlow *flow, RockerTlv **flow_tlvs)
1263 {
1264     OfDpaFlowKey *key = &flow->key;
1265     OfDpaFlowKey *mask = &flow->mask;
1266     OfDpaFlowAction *action = &flow->action;
1267     bool unicast = false;
1268     bool dst_mac = false;
1269     bool dst_mac_mask = false;
1270     enum {
1271         BRIDGING_MODE_UNKNOWN,
1272         BRIDGING_MODE_VLAN_UCAST,
1273         BRIDGING_MODE_VLAN_MCAST,
1274         BRIDGING_MODE_VLAN_DFLT,
1275         BRIDGING_MODE_TUNNEL_UCAST,
1276         BRIDGING_MODE_TUNNEL_MCAST,
1277         BRIDGING_MODE_TUNNEL_DFLT,
1278     } mode = BRIDGING_MODE_UNKNOWN;
1279 
1280     key->tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
1281 
1282     if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
1283         key->eth.vlan_id =
1284             rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
1285         mask->eth.vlan_id = 0xffff;
1286         key->width = FLOW_KEY_WIDTH(eth.vlan_id);
1287     }
1288 
1289     if (flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_ID]) {
1290         key->tunnel_id =
1291             rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_ID]);
1292         mask->tunnel_id = 0xffffffff;
1293         key->width = FLOW_KEY_WIDTH(tunnel_id);
1294     }
1295 
1296     /* can't do VLAN bridging and tunnel bridging at same time */
1297     if (key->eth.vlan_id && key->tunnel_id) {
1298         DPRINTF("can't do VLAN bridging and tunnel bridging at same time\n");
1299         return -ROCKER_EINVAL;
1300     }
1301 
1302     if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
1303         memcpy(key->eth.dst.a,
1304                rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
1305                sizeof(key->eth.dst.a));
1306         key->width = FLOW_KEY_WIDTH(eth.dst);
1307         dst_mac = true;
1308         unicast = (key->eth.dst.a[0] & 0x01) == 0x00;
1309     }
1310 
1311     if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]) {
1312         memcpy(mask->eth.dst.a,
1313                rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]),
1314                sizeof(mask->eth.dst.a));
1315         key->width = FLOW_KEY_WIDTH(eth.dst);
1316         dst_mac_mask = true;
1317     } else if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
1318         memcpy(mask->eth.dst.a, ff_mac.a, sizeof(mask->eth.dst.a));
1319     }
1320 
1321     if (key->eth.vlan_id) {
1322         if (dst_mac && !dst_mac_mask) {
1323             mode = unicast ? BRIDGING_MODE_VLAN_UCAST :
1324                              BRIDGING_MODE_VLAN_MCAST;
1325         } else if ((dst_mac && dst_mac_mask) || !dst_mac) {
1326             mode = BRIDGING_MODE_VLAN_DFLT;
1327         }
1328     } else if (key->tunnel_id) {
1329         if (dst_mac && !dst_mac_mask) {
1330             mode = unicast ? BRIDGING_MODE_TUNNEL_UCAST :
1331                              BRIDGING_MODE_TUNNEL_MCAST;
1332         } else if ((dst_mac && dst_mac_mask) || !dst_mac) {
1333             mode = BRIDGING_MODE_TUNNEL_DFLT;
1334         }
1335     }
1336 
1337     if (mode == BRIDGING_MODE_UNKNOWN) {
1338         DPRINTF("Unknown bridging mode\n");
1339         return -ROCKER_EINVAL;
1340     }
1341 
1342     if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
1343         action->goto_tbl =
1344             rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
1345         if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_ACL_POLICY) {
1346             DPRINTF("Briding goto tbl must be ACL policy\n");
1347             return -ROCKER_EINVAL;
1348         }
1349     }
1350 
1351     if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
1352         action->write.group_id =
1353             rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
1354         switch (mode) {
1355         case BRIDGING_MODE_VLAN_UCAST:
1356             if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
1357                 ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) {
1358                 DPRINTF("Bridging mode vlan ucast needs L2 "
1359                         "interface group (0x%08x)\n",
1360                         action->write.group_id);
1361                 return -ROCKER_EINVAL;
1362             }
1363             break;
1364         case BRIDGING_MODE_VLAN_MCAST:
1365             if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
1366                 ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST) {
1367                 DPRINTF("Bridging mode vlan mcast needs L2 "
1368                         "mcast group (0x%08x)\n",
1369                         action->write.group_id);
1370                 return -ROCKER_EINVAL;
1371             }
1372             break;
1373         case BRIDGING_MODE_VLAN_DFLT:
1374             if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
1375                 ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD) {
1376                 DPRINTF("Bridging mode vlan dflt needs L2 "
1377                         "flood group (0x%08x)\n",
1378                         action->write.group_id);
1379                 return -ROCKER_EINVAL;
1380             }
1381             break;
1382         case BRIDGING_MODE_TUNNEL_MCAST:
1383             if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
1384                 ROCKER_OF_DPA_GROUP_TYPE_L2_OVERLAY) {
1385                 DPRINTF("Bridging mode tunnel mcast needs L2 "
1386                         "overlay group (0x%08x)\n",
1387                         action->write.group_id);
1388                 return -ROCKER_EINVAL;
1389             }
1390             break;
1391         case BRIDGING_MODE_TUNNEL_DFLT:
1392             if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
1393                 ROCKER_OF_DPA_GROUP_TYPE_L2_OVERLAY) {
1394                 DPRINTF("Bridging mode tunnel dflt needs L2 "
1395                         "overlay group (0x%08x)\n",
1396                         action->write.group_id);
1397                 return -ROCKER_EINVAL;
1398             }
1399             break;
1400         default:
1401             return -ROCKER_EINVAL;
1402         }
1403     }
1404 
1405     if (flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_LPORT]) {
1406         action->write.tun_log_lport =
1407             rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_LPORT]);
1408         if (mode != BRIDGING_MODE_TUNNEL_UCAST) {
1409             DPRINTF("Have tunnel logical port but not "
1410                     "in bridging tunnel mode\n");
1411             return -ROCKER_EINVAL;
1412         }
1413     }
1414 
1415     if (flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]) {
1416         action->apply.copy_to_cpu =
1417             rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]);
1418     }
1419 
1420     return ROCKER_OK;
1421 }
1422 
/*
 * Parse TLVs for a UNICAST_ROUTING table entry.  Matches on ethertype
 * (IPv4 or IPv6) and a possibly-masked, non-multicast destination
 * address.  The write action, if any, must reference an L2 interface,
 * L3 unicast or L3 ECMP group.
 * Returns ROCKER_OK or -ROCKER_EINVAL.
 */
static int of_dpa_cmd_add_unicast_routing(OfDpaFlow *flow,
                                          RockerTlv **flow_tlvs)
{
    OfDpaFlowKey *key = &flow->key;
    OfDpaFlowKey *mask = &flow->mask;
    OfDpaFlowAction *action = &flow->action;
    enum {
        UNICAST_ROUTING_MODE_UNKNOWN,
        UNICAST_ROUTING_MODE_IPV4,
        UNICAST_ROUTING_MODE_IPV6,
    } mode = UNICAST_ROUTING_MODE_UNKNOWN;
    uint8_t type;

    if (!flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]) {
        return -ROCKER_EINVAL;
    }

    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
    key->width = FLOW_KEY_WIDTH(ipv6.addr.dst);

    key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]);
    switch (ntohs(key->eth.type)) {
    case 0x0800:
        mode = UNICAST_ROUTING_MODE_IPV4;
        break;
    case 0x86dd:
        mode = UNICAST_ROUTING_MODE_IPV6;
        break;
    default:
        return -ROCKER_EINVAL;
    }
    mask->eth.type = htons(0xffff);

    switch (mode) {
    case UNICAST_ROUTING_MODE_IPV4:
        if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]) {
            return -ROCKER_EINVAL;
        }
        key->ipv4.addr.dst =
            rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]);
        if (ipv4_addr_is_multicast(key->ipv4.addr.dst)) {
            return -ROCKER_EINVAL;
        }
        /* longest-prefix length derived from the dst mask;
         * defaults to a full /32 host route */
        flow->lpm = of_dpa_mask2prefix(htonl(0xffffffff));
        if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP_MASK]) {
            mask->ipv4.addr.dst =
                rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP_MASK]);
            flow->lpm = of_dpa_mask2prefix(mask->ipv4.addr.dst);
        }
        break;
    case UNICAST_ROUTING_MODE_IPV6:
        if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]) {
            return -ROCKER_EINVAL;
        }
        memcpy(&key->ipv6.addr.dst,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]),
               sizeof(key->ipv6.addr.dst));
        if (ipv6_addr_is_multicast(&key->ipv6.addr.dst)) {
            return -ROCKER_EINVAL;
        }
        /* NOTE(review): flow->lpm is not derived from the IPv6 mask,
         * unlike the IPv4 path above — confirm this is intended */
        if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6_MASK]) {
            memcpy(&mask->ipv6.addr.dst,
                   rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6_MASK]),
                   sizeof(mask->ipv6.addr.dst));
        }
        break;
    default:
        return -ROCKER_EINVAL;
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
        action->goto_tbl =
            rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
        if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_ACL_POLICY) {
            return -ROCKER_EINVAL;
        }
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
        action->write.group_id =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
        type = ROCKER_GROUP_TYPE_GET(action->write.group_id);
        if (type != ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE &&
            type != ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST &&
            type != ROCKER_OF_DPA_GROUP_TYPE_L3_ECMP) {
            return -ROCKER_EINVAL;
        }
    }

    return ROCKER_OK;
}
1514 
1515 static int of_dpa_cmd_add_multicast_routing(OfDpaFlow *flow,
1516                                             RockerTlv **flow_tlvs)
1517 {
1518     OfDpaFlowKey *key = &flow->key;
1519     OfDpaFlowKey *mask = &flow->mask;
1520     OfDpaFlowAction *action = &flow->action;
1521     enum {
1522         MULTICAST_ROUTING_MODE_UNKNOWN,
1523         MULTICAST_ROUTING_MODE_IPV4,
1524         MULTICAST_ROUTING_MODE_IPV6,
1525     } mode = MULTICAST_ROUTING_MODE_UNKNOWN;
1526 
1527     if (!flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE] ||
1528         !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
1529         return -ROCKER_EINVAL;
1530     }
1531 
1532     key->tbl_id = ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
1533     key->width = FLOW_KEY_WIDTH(ipv6.addr.dst);
1534 
1535     key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]);
1536     switch (ntohs(key->eth.type)) {
1537     case 0x0800:
1538         mode = MULTICAST_ROUTING_MODE_IPV4;
1539         break;
1540     case 0x86dd:
1541         mode = MULTICAST_ROUTING_MODE_IPV6;
1542         break;
1543     default:
1544         return -ROCKER_EINVAL;
1545     }
1546 
1547     key->eth.vlan_id = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
1548 
1549     switch (mode) {
1550     case MULTICAST_ROUTING_MODE_IPV4:
1551 
1552         if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP]) {
1553             key->ipv4.addr.src =
1554                 rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP]);
1555         }
1556 
1557         if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP_MASK]) {
1558             mask->ipv4.addr.src =
1559                 rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP_MASK]);
1560         }
1561 
1562         if (!flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP]) {
1563             if (mask->ipv4.addr.src != 0) {
1564                 return -ROCKER_EINVAL;
1565             }
1566         }
1567 
1568         if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]) {
1569             return -ROCKER_EINVAL;
1570         }
1571 
1572         key->ipv4.addr.dst =
1573             rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]);
1574         if (!ipv4_addr_is_multicast(key->ipv4.addr.dst)) {
1575             return -ROCKER_EINVAL;
1576         }
1577 
1578         break;
1579 
1580     case MULTICAST_ROUTING_MODE_IPV6:
1581 
1582         if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6]) {
1583             memcpy(&key->ipv6.addr.src,
1584                    rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6]),
1585                    sizeof(key->ipv6.addr.src));
1586         }
1587 
1588         if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6_MASK]) {
1589             memcpy(&mask->ipv6.addr.src,
1590                    rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6_MASK]),
1591                    sizeof(mask->ipv6.addr.src));
1592         }
1593 
1594         if (!flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6]) {
1595             if (mask->ipv6.addr.src.addr32[0] != 0 &&
1596                 mask->ipv6.addr.src.addr32[1] != 0 &&
1597                 mask->ipv6.addr.src.addr32[2] != 0 &&
1598                 mask->ipv6.addr.src.addr32[3] != 0) {
1599                 return -ROCKER_EINVAL;
1600             }
1601         }
1602 
1603         if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]) {
1604             return -ROCKER_EINVAL;
1605         }
1606 
1607         memcpy(&key->ipv6.addr.dst,
1608                rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]),
1609                sizeof(key->ipv6.addr.dst));
1610         if (!ipv6_addr_is_multicast(&key->ipv6.addr.dst)) {
1611             return -ROCKER_EINVAL;
1612         }
1613 
1614         break;
1615 
1616     default:
1617         return -ROCKER_EINVAL;
1618     }
1619 
1620     if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
1621         action->goto_tbl =
1622             rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
1623         if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_ACL_POLICY) {
1624             return -ROCKER_EINVAL;
1625         }
1626     }
1627 
1628     if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
1629         action->write.group_id =
1630             rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
1631         if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
1632             ROCKER_OF_DPA_GROUP_TYPE_L3_MCAST) {
1633             return -ROCKER_EINVAL;
1634         }
1635         action->write.vlan_id = key->eth.vlan_id;
1636     }
1637 
1638     return ROCKER_OK;
1639 }
1640 
1641 static int of_dpa_cmd_add_acl_ip(OfDpaFlowKey *key, OfDpaFlowKey *mask,
1642                                  RockerTlv **flow_tlvs)
1643 {
1644     key->width = FLOW_KEY_WIDTH(ip.tos);
1645 
1646     key->ip.proto = 0;
1647     key->ip.tos = 0;
1648     mask->ip.proto = 0;
1649     mask->ip.tos = 0;
1650 
1651     if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO]) {
1652         key->ip.proto =
1653             rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO]);
1654     }
1655     if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO_MASK]) {
1656         mask->ip.proto =
1657             rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO_MASK]);
1658     }
1659     if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP]) {
1660         key->ip.tos =
1661             rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP]);
1662     }
1663     if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP_MASK]) {
1664         mask->ip.tos =
1665             rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP_MASK]);
1666     }
1667     if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN]) {
1668         key->ip.tos |=
1669             rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN]) << 6;
1670     }
1671     if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN_MASK]) {
1672         mask->ip.tos |=
1673             rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN_MASK]) << 6;
1674     }
1675 
1676     return ROCKER_OK;
1677 }
1678 
/*
 * Parse TLVs for an ACL_POLICY table entry.  Matches on ingress port,
 * source/destination MACs, ethertype and VLAN/tunnel, classifying the
 * entry into a VLAN- or tenant-scoped mode by ethertype; only the
 * VLAN modes are currently supported.  IPv4/IPv6 entries additionally
 * take the IP-header fields via of_dpa_cmd_add_acl_ip().
 * Returns ROCKER_OK or -ROCKER_EINVAL.
 */
static int of_dpa_cmd_add_acl(OfDpaFlow *flow, RockerTlv **flow_tlvs)
{
    OfDpaFlowKey *key = &flow->key;
    OfDpaFlowKey *mask = &flow->mask;
    OfDpaFlowAction *action = &flow->action;
    enum {
        ACL_MODE_UNKNOWN,
        ACL_MODE_IPV4_VLAN,
        ACL_MODE_IPV6_VLAN,
        ACL_MODE_IPV4_TENANT,
        ACL_MODE_IPV6_TENANT,
        ACL_MODE_NON_IP_VLAN,
        ACL_MODE_NON_IP_TENANT,
        ACL_MODE_ANY_VLAN,
        ACL_MODE_ANY_TENANT,
    } mode = ACL_MODE_UNKNOWN;
    int err = ROCKER_OK;

    if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]) {
        return -ROCKER_EINVAL;
    }

    /* VLAN and tunnel scoping are mutually exclusive */
    if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID] &&
        flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_ID]) {
        return -ROCKER_EINVAL;
    }

    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
    key->width = FLOW_KEY_WIDTH(eth.type);

    key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]);
    if (flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]) {
        mask->in_pport =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]);
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]) {
        memcpy(key->eth.src.a,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]),
               sizeof(key->eth.src.a));
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC_MASK]) {
        memcpy(mask->eth.src.a,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC_MASK]),
               sizeof(mask->eth.src.a));
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
        memcpy(key->eth.dst.a,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
               sizeof(key->eth.dst.a));
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]) {
        memcpy(mask->eth.dst.a,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]),
               sizeof(mask->eth.dst.a));
    }

    /* ethertype 0 acts as a wildcard (no type match) */
    key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]);
    if (key->eth.type) {
        mask->eth.type = 0xffff;
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
        key->eth.vlan_id =
            rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]) {
        mask->eth.vlan_id =
            rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]);
    }

    /* classify by ethertype and whether a VLAN id is present */
    switch (ntohs(key->eth.type)) {
    case 0x0000:
        mode = (key->eth.vlan_id) ? ACL_MODE_ANY_VLAN : ACL_MODE_ANY_TENANT;
        break;
    case 0x0800:
        mode = (key->eth.vlan_id) ? ACL_MODE_IPV4_VLAN : ACL_MODE_IPV4_TENANT;
        break;
    case 0x86dd:
        mode = (key->eth.vlan_id) ? ACL_MODE_IPV6_VLAN : ACL_MODE_IPV6_TENANT;
        break;
    default:
        mode = (key->eth.vlan_id) ? ACL_MODE_NON_IP_VLAN :
                                    ACL_MODE_NON_IP_TENANT;
        break;
    }

    /* XXX only supporting VLAN modes for now */
    if (mode != ACL_MODE_IPV4_VLAN &&
        mode != ACL_MODE_IPV6_VLAN &&
        mode != ACL_MODE_NON_IP_VLAN &&
        mode != ACL_MODE_ANY_VLAN) {
        return -ROCKER_EINVAL;
    }

    /* IP entries also carry proto/DSCP/ECN match fields */
    switch (ntohs(key->eth.type)) {
    case 0x0800:
    case 0x86dd:
        err = of_dpa_cmd_add_acl_ip(key, mask, flow_tlvs);
        break;
    }

    if (err) {
        return err;
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
        action->write.group_id =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]) {
        action->apply.copy_to_cpu =
            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]);
    }

    return ROCKER_OK;
}
1802 
1803 static int of_dpa_cmd_flow_add_mod(OfDpa *of_dpa, OfDpaFlow *flow,
1804                                    RockerTlv **flow_tlvs)
1805 {
1806     enum rocker_of_dpa_table_id tbl;
1807     int err = ROCKER_OK;
1808 
1809     if (!flow_tlvs[ROCKER_TLV_OF_DPA_TABLE_ID] ||
1810         !flow_tlvs[ROCKER_TLV_OF_DPA_PRIORITY] ||
1811         !flow_tlvs[ROCKER_TLV_OF_DPA_HARDTIME]) {
1812         return -ROCKER_EINVAL;
1813     }
1814 
1815     tbl = rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_TABLE_ID]);
1816     flow->priority = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_PRIORITY]);
1817     flow->hardtime = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_HARDTIME]);
1818 
1819     if (flow_tlvs[ROCKER_TLV_OF_DPA_IDLETIME]) {
1820         if (tbl == ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT ||
1821             tbl == ROCKER_OF_DPA_TABLE_ID_VLAN ||
1822             tbl == ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC) {
1823             return -ROCKER_EINVAL;
1824         }
1825         flow->idletime =
1826             rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IDLETIME]);
1827     }
1828 
1829     switch (tbl) {
1830     case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
1831         err = of_dpa_cmd_add_ig_port(flow, flow_tlvs);
1832         break;
1833     case ROCKER_OF_DPA_TABLE_ID_VLAN:
1834         err = of_dpa_cmd_add_vlan(flow, flow_tlvs);
1835         break;
1836     case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
1837         err = of_dpa_cmd_add_term_mac(flow, flow_tlvs);
1838         break;
1839     case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
1840         err = of_dpa_cmd_add_bridging(flow, flow_tlvs);
1841         break;
1842     case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
1843         err = of_dpa_cmd_add_unicast_routing(flow, flow_tlvs);
1844         break;
1845     case ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING:
1846         err = of_dpa_cmd_add_multicast_routing(flow, flow_tlvs);
1847         break;
1848     case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
1849         err = of_dpa_cmd_add_acl(flow, flow_tlvs);
1850         break;
1851     }
1852 
1853     return err;
1854 }
1855 
1856 static int of_dpa_cmd_flow_add(OfDpa *of_dpa, uint64_t cookie,
1857                                RockerTlv **flow_tlvs)
1858 {
1859     OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie);
1860     int err = ROCKER_OK;
1861 
1862     if (flow) {
1863         return -ROCKER_EEXIST;
1864     }
1865 
1866     flow = of_dpa_flow_alloc(cookie);
1867     if (!flow) {
1868         return -ROCKER_ENOMEM;
1869     }
1870 
1871     err = of_dpa_cmd_flow_add_mod(of_dpa, flow, flow_tlvs);
1872     if (err) {
1873         g_free(flow);
1874         return err;
1875     }
1876 
1877     return of_dpa_flow_add(of_dpa, flow);
1878 }
1879 
1880 static int of_dpa_cmd_flow_mod(OfDpa *of_dpa, uint64_t cookie,
1881                                RockerTlv **flow_tlvs)
1882 {
1883     OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie);
1884 
1885     if (!flow) {
1886         return -ROCKER_ENOENT;
1887     }
1888 
1889     return of_dpa_cmd_flow_add_mod(of_dpa, flow, flow_tlvs);
1890 }
1891 
1892 static int of_dpa_cmd_flow_del(OfDpa *of_dpa, uint64_t cookie)
1893 {
1894     OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie);
1895 
1896     if (!flow) {
1897         return -ROCKER_ENOENT;
1898     }
1899 
1900     of_dpa_flow_del(of_dpa, flow);
1901 
1902     return ROCKER_OK;
1903 }
1904 
1905 static int of_dpa_cmd_flow_get_stats(OfDpa *of_dpa, uint64_t cookie,
1906                                      struct desc_info *info, char *buf)
1907 {
1908     OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie);
1909     size_t tlv_size;
1910     int64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) / 1000;
1911     int pos;
1912 
1913     if (!flow) {
1914         return -ROCKER_ENOENT;
1915     }
1916 
1917     tlv_size = rocker_tlv_total_size(sizeof(uint32_t)) +  /* duration */
1918                rocker_tlv_total_size(sizeof(uint64_t)) +  /* rx_pkts */
1919                rocker_tlv_total_size(sizeof(uint64_t));   /* tx_ptks */
1920 
1921     if (tlv_size > desc_buf_size(info)) {
1922         return -ROCKER_EMSGSIZE;
1923     }
1924 
1925     pos = 0;
1926     rocker_tlv_put_le32(buf, &pos, ROCKER_TLV_OF_DPA_FLOW_STAT_DURATION,
1927                         (int32_t)(now - flow->stats.install_time));
1928     rocker_tlv_put_le64(buf, &pos, ROCKER_TLV_OF_DPA_FLOW_STAT_RX_PKTS,
1929                         flow->stats.rx_pkts);
1930     rocker_tlv_put_le64(buf, &pos, ROCKER_TLV_OF_DPA_FLOW_STAT_TX_PKTS,
1931                         flow->stats.tx_pkts);
1932 
1933     return desc_set_buf(info, tlv_size);
1934 }
1935 
1936 static int of_dpa_flow_cmd(OfDpa *of_dpa, struct desc_info *info,
1937                            char *buf, uint16_t cmd,
1938                            RockerTlv **flow_tlvs)
1939 {
1940     uint64_t cookie;
1941 
1942     if (!flow_tlvs[ROCKER_TLV_OF_DPA_COOKIE]) {
1943         return -ROCKER_EINVAL;
1944     }
1945 
1946     cookie = rocker_tlv_get_le64(flow_tlvs[ROCKER_TLV_OF_DPA_COOKIE]);
1947 
1948     switch (cmd) {
1949     case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD:
1950         return of_dpa_cmd_flow_add(of_dpa, cookie, flow_tlvs);
1951     case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD:
1952         return of_dpa_cmd_flow_mod(of_dpa, cookie, flow_tlvs);
1953     case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL:
1954         return of_dpa_cmd_flow_del(of_dpa, cookie);
1955     case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS:
1956         return of_dpa_cmd_flow_get_stats(of_dpa, cookie, info, buf);
1957     }
1958 
1959     return -ROCKER_ENOTSUP;
1960 }
1961 
1962 static int of_dpa_cmd_add_l2_interface(OfDpaGroup *group,
1963                                        RockerTlv **group_tlvs)
1964 {
1965     if (!group_tlvs[ROCKER_TLV_OF_DPA_OUT_PPORT] ||
1966         !group_tlvs[ROCKER_TLV_OF_DPA_POP_VLAN]) {
1967         return -ROCKER_EINVAL;
1968     }
1969 
1970     group->l2_interface.out_pport =
1971         rocker_tlv_get_le32(group_tlvs[ROCKER_TLV_OF_DPA_OUT_PPORT]);
1972     group->l2_interface.pop_vlan =
1973         rocker_tlv_get_u8(group_tlvs[ROCKER_TLV_OF_DPA_POP_VLAN]);
1974 
1975     return ROCKER_OK;
1976 }
1977 
1978 static int of_dpa_cmd_add_l2_rewrite(OfDpa *of_dpa, OfDpaGroup *group,
1979                                      RockerTlv **group_tlvs)
1980 {
1981     OfDpaGroup *l2_interface_group;
1982 
1983     if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER]) {
1984         return -ROCKER_EINVAL;
1985     }
1986 
1987     group->l2_rewrite.group_id =
1988         rocker_tlv_get_le32(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER]);
1989 
1990     l2_interface_group = of_dpa_group_find(of_dpa, group->l2_rewrite.group_id);
1991     if (!l2_interface_group ||
1992         ROCKER_GROUP_TYPE_GET(l2_interface_group->id) !=
1993                               ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) {
1994         DPRINTF("l2 rewrite group needs a valid l2 interface group\n");
1995         return -ROCKER_EINVAL;
1996     }
1997 
1998     if (group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]) {
1999         memcpy(group->l2_rewrite.src_mac.a,
2000                rocker_tlv_data(group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]),
2001                sizeof(group->l2_rewrite.src_mac.a));
2002     }
2003 
2004     if (group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
2005         memcpy(group->l2_rewrite.dst_mac.a,
2006                rocker_tlv_data(group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
2007                sizeof(group->l2_rewrite.dst_mac.a));
2008     }
2009 
2010     if (group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
2011         group->l2_rewrite.vlan_id =
2012             rocker_tlv_get_u16(group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
2013         if (ROCKER_GROUP_VLAN_GET(l2_interface_group->id) !=
2014             (ntohs(group->l2_rewrite.vlan_id) & VLAN_VID_MASK)) {
2015             DPRINTF("Set VLAN ID must be same as L2 interface group\n");
2016             return -ROCKER_EINVAL;
2017         }
2018     }
2019 
2020     return ROCKER_OK;
2021 }
2022 
2023 static int of_dpa_cmd_add_l2_flood(OfDpa *of_dpa, OfDpaGroup *group,
2024                                    RockerTlv **group_tlvs)
2025 {
2026     OfDpaGroup *l2_group;
2027     RockerTlv **tlvs;
2028     int err;
2029     int i;
2030 
2031     if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_COUNT] ||
2032         !group_tlvs[ROCKER_TLV_OF_DPA_GROUP_IDS]) {
2033         return -ROCKER_EINVAL;
2034     }
2035 
2036     group->l2_flood.group_count =
2037         rocker_tlv_get_le16(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_COUNT]);
2038 
2039     tlvs = g_malloc0((group->l2_flood.group_count + 1) *
2040                      sizeof(RockerTlv *));
2041     if (!tlvs) {
2042         return -ROCKER_ENOMEM;
2043     }
2044 
2045     g_free(group->l2_flood.group_ids);
2046     group->l2_flood.group_ids =
2047         g_malloc0(group->l2_flood.group_count * sizeof(uint32_t));
2048     if (!group->l2_flood.group_ids) {
2049         err = -ROCKER_ENOMEM;
2050         goto err_out;
2051     }
2052 
2053     rocker_tlv_parse_nested(tlvs, group->l2_flood.group_count,
2054                             group_tlvs[ROCKER_TLV_OF_DPA_GROUP_IDS]);
2055 
2056     for (i = 0; i < group->l2_flood.group_count; i++) {
2057         group->l2_flood.group_ids[i] = rocker_tlv_get_le32(tlvs[i + 1]);
2058     }
2059 
2060     /* All of the L2 interface groups referenced by the L2 flood
2061      * must have same VLAN
2062      */
2063 
2064     for (i = 0; i < group->l2_flood.group_count; i++) {
2065         l2_group = of_dpa_group_find(of_dpa, group->l2_flood.group_ids[i]);
2066         if (!l2_group) {
2067             continue;
2068         }
2069         if ((ROCKER_GROUP_TYPE_GET(l2_group->id) ==
2070              ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) &&
2071             (ROCKER_GROUP_VLAN_GET(l2_group->id) !=
2072              ROCKER_GROUP_VLAN_GET(group->id))) {
2073             DPRINTF("l2 interface group 0x%08x VLAN doesn't match l2 "
2074                     "flood group 0x%08x\n",
2075                     group->l2_flood.group_ids[i], group->id);
2076             err = -ROCKER_EINVAL;
2077             goto err_out;
2078         }
2079     }
2080 
2081     g_free(tlvs);
2082     return ROCKER_OK;
2083 
2084 err_out:
2085     group->l2_flood.group_count = 0;
2086     g_free(group->l2_flood.group_ids);
2087     g_free(tlvs);
2088 
2089     return err;
2090 }
2091 
2092 static int of_dpa_cmd_add_l3_unicast(OfDpaGroup *group, RockerTlv **group_tlvs)
2093 {
2094     if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER]) {
2095         return -ROCKER_EINVAL;
2096     }
2097 
2098     group->l3_unicast.group_id =
2099         rocker_tlv_get_le32(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER]);
2100 
2101     if (group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]) {
2102         memcpy(group->l3_unicast.src_mac.a,
2103                rocker_tlv_data(group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]),
2104                sizeof(group->l3_unicast.src_mac.a));
2105     }
2106 
2107     if (group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
2108         memcpy(group->l3_unicast.dst_mac.a,
2109                rocker_tlv_data(group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
2110                sizeof(group->l3_unicast.dst_mac.a));
2111     }
2112 
2113     if (group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
2114         group->l3_unicast.vlan_id =
2115             rocker_tlv_get_u16(group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
2116     }
2117 
2118     if (group_tlvs[ROCKER_TLV_OF_DPA_TTL_CHECK]) {
2119         group->l3_unicast.ttl_check =
2120             rocker_tlv_get_u8(group_tlvs[ROCKER_TLV_OF_DPA_TTL_CHECK]);
2121     }
2122 
2123     return ROCKER_OK;
2124 }
2125 
2126 static int of_dpa_cmd_group_do(OfDpa *of_dpa, uint32_t group_id,
2127                                OfDpaGroup *group, RockerTlv **group_tlvs)
2128 {
2129     uint8_t type = ROCKER_GROUP_TYPE_GET(group_id);
2130 
2131     switch (type) {
2132     case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
2133         return of_dpa_cmd_add_l2_interface(group, group_tlvs);
2134     case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
2135         return of_dpa_cmd_add_l2_rewrite(of_dpa, group, group_tlvs);
2136     case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2137     /* Treat L2 multicast group same as a L2 flood group */
2138     case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2139         return of_dpa_cmd_add_l2_flood(of_dpa, group, group_tlvs);
2140     case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
2141         return of_dpa_cmd_add_l3_unicast(group, group_tlvs);
2142     }
2143 
2144     return -ROCKER_ENOTSUP;
2145 }
2146 
2147 static int of_dpa_cmd_group_add(OfDpa *of_dpa, uint32_t group_id,
2148                                 RockerTlv **group_tlvs)
2149 {
2150     OfDpaGroup *group = of_dpa_group_find(of_dpa, group_id);
2151     int err;
2152 
2153     if (group) {
2154         return -ROCKER_EEXIST;
2155     }
2156 
2157     group = of_dpa_group_alloc(group_id);
2158     if (!group) {
2159         return -ROCKER_ENOMEM;
2160     }
2161 
2162     err = of_dpa_cmd_group_do(of_dpa, group_id, group, group_tlvs);
2163     if (err) {
2164         goto err_cmd_add;
2165     }
2166 
2167     err = of_dpa_group_add(of_dpa, group);
2168     if (err) {
2169         goto err_cmd_add;
2170     }
2171 
2172     return ROCKER_OK;
2173 
2174 err_cmd_add:
2175     g_free(group);
2176     return err;
2177 }
2178 
2179 static int of_dpa_cmd_group_mod(OfDpa *of_dpa, uint32_t group_id,
2180                                 RockerTlv **group_tlvs)
2181 {
2182     OfDpaGroup *group = of_dpa_group_find(of_dpa, group_id);
2183 
2184     if (!group) {
2185         return -ROCKER_ENOENT;
2186     }
2187 
2188     return of_dpa_cmd_group_do(of_dpa, group_id, group, group_tlvs);
2189 }
2190 
2191 static int of_dpa_cmd_group_del(OfDpa *of_dpa, uint32_t group_id)
2192 {
2193     OfDpaGroup *group = of_dpa_group_find(of_dpa, group_id);
2194 
2195     if (!group) {
2196         return -ROCKER_ENOENT;
2197     }
2198 
2199     return of_dpa_group_del(of_dpa, group);
2200 }
2201 
/* Group statistics readback is not implemented; always tell the
 * driver the operation is unsupported.
 */
static int of_dpa_cmd_group_get_stats(OfDpa *of_dpa, uint32_t group_id,
                                      struct desc_info *info, char *buf)
{
    return -ROCKER_ENOTSUP;
}
2207 
2208 static int of_dpa_group_cmd(OfDpa *of_dpa, struct desc_info *info,
2209                             char *buf, uint16_t cmd, RockerTlv **group_tlvs)
2210 {
2211     uint32_t group_id;
2212 
2213     if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
2214         return -ROCKER_EINVAL;
2215     }
2216 
2217     group_id = rocker_tlv_get_le32(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
2218 
2219     switch (cmd) {
2220     case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD:
2221         return of_dpa_cmd_group_add(of_dpa, group_id, group_tlvs);
2222     case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD:
2223         return of_dpa_cmd_group_mod(of_dpa, group_id, group_tlvs);
2224     case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL:
2225         return of_dpa_cmd_group_del(of_dpa, group_id);
2226     case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS:
2227         return of_dpa_cmd_group_get_stats(of_dpa, group_id, info, buf);
2228     }
2229 
2230     return -ROCKER_ENOTSUP;
2231 }
2232 
2233 static int of_dpa_cmd(World *world, struct desc_info *info,
2234                       char *buf, uint16_t cmd, RockerTlv *cmd_info_tlv)
2235 {
2236     OfDpa *of_dpa = world_private(world);
2237     RockerTlv *tlvs[ROCKER_TLV_OF_DPA_MAX + 1];
2238 
2239     rocker_tlv_parse_nested(tlvs, ROCKER_TLV_OF_DPA_MAX, cmd_info_tlv);
2240 
2241     switch (cmd) {
2242     case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD:
2243     case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD:
2244     case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL:
2245     case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS:
2246         return of_dpa_flow_cmd(of_dpa, info, buf, cmd, tlvs);
2247     case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD:
2248     case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD:
2249     case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL:
2250     case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS:
2251         return of_dpa_group_cmd(of_dpa, info, buf, cmd, tlvs);
2252     }
2253 
2254     return -ROCKER_ENOTSUP;
2255 }
2256 
2257 static gboolean rocker_int64_equal(gconstpointer v1, gconstpointer v2)
2258 {
2259     return *((const uint64_t *)v1) == *((const uint64_t *)v2);
2260 }
2261 
/* GHashTable hash callback for 64-bit cookie keys: truncate to guint.
 * Keys differing only in the high 32 bits collide and are then
 * disambiguated by rocker_int64_equal().
 */
static guint rocker_int64_hash(gconstpointer v)
{
    return (guint)*(const uint64_t *)v;
}
2266 
2267 static int of_dpa_init(World *world)
2268 {
2269     OfDpa *of_dpa = world_private(world);
2270 
2271     of_dpa->world = world;
2272 
2273     of_dpa->flow_tbl = g_hash_table_new_full(rocker_int64_hash,
2274                                              rocker_int64_equal,
2275                                              NULL, g_free);
2276     if (!of_dpa->flow_tbl) {
2277         return -ENOMEM;
2278     }
2279 
2280     of_dpa->group_tbl = g_hash_table_new_full(g_int_hash, g_int_equal,
2281                                               NULL, g_free);
2282     if (!of_dpa->group_tbl) {
2283         goto err_group_tbl;
2284     }
2285 
2286     /* XXX hardcode some artificial table max values */
2287     of_dpa->flow_tbl_max_size = 100;
2288     of_dpa->group_tbl_max_size = 100;
2289 
2290     return 0;
2291 
2292 err_group_tbl:
2293     g_hash_table_destroy(of_dpa->flow_tbl);
2294     return -ENOMEM;
2295 }
2296 
/* World teardown: destroying the hash tables also frees every stored
 * flow and group via their g_free value-destroy functions.
 */
static void of_dpa_uninit(World *world)
{
    OfDpa *of_dpa = world_private(world);

    g_hash_table_destroy(of_dpa->group_tbl);
    g_hash_table_destroy(of_dpa->flow_tbl);
}
2304 
/* Per-query state threaded through of_dpa_flow_fill() by
 * g_hash_table_foreach().
 */
struct of_dpa_flow_fill_context {
    RockerOfDpaFlowList *list;  /* accumulated QMP result list */
    uint32_t tbl_id;            /* table filter; (uint32_t)-1 matches all */
};
2309 
/* g_hash_table_foreach() callback: convert one internal flow entry
 * into a RockerOfDpaFlow QMP object and prepend it to the result list.
 *
 * Convention used throughout: a key field is reported when it or its
 * mask is non-zero; the mask is additionally reported only when it is
 * not the all-ones "exact match" value.  MAC/IP fields are rendered as
 * strings for QMP.
 */
static void of_dpa_flow_fill(void *cookie, void *value, void *user_data)
{
    struct of_dpa_flow *flow = value;
    struct of_dpa_flow_key *key = &flow->key;
    struct of_dpa_flow_key *mask = &flow->mask;
    struct of_dpa_flow_fill_context *flow_context = user_data;
    RockerOfDpaFlowList *new;
    RockerOfDpaFlow *nflow;
    RockerOfDpaFlowKey *nkey;
    RockerOfDpaFlowMask *nmask;
    RockerOfDpaFlowAction *naction;

    /* tbl_id == -1 means "no table filter" */
    if (flow_context->tbl_id != -1 &&
        flow_context->tbl_id != key->tbl_id) {
        return;
    }

    new = g_malloc0(sizeof(*new));
    nflow = new->value = g_malloc0(sizeof(*nflow));
    nkey = nflow->key = g_malloc0(sizeof(*nkey));
    nmask = nflow->mask = g_malloc0(sizeof(*nmask));
    naction = nflow->action = g_malloc0(sizeof(*naction));

    nflow->cookie = flow->cookie;
    nflow->hits = flow->stats.hits;
    nkey->priority = flow->priority;
    nkey->tbl_id = key->tbl_id;

    if (key->in_pport || mask->in_pport) {
        nkey->has_in_pport = true;
        nkey->in_pport = key->in_pport;
    }

    if (nkey->has_in_pport && mask->in_pport != 0xffffffff) {
        nmask->has_in_pport = true;
        nmask->in_pport = mask->in_pport;
    }

    /* VLAN ids are stored in network order; report in host order */
    if (key->eth.vlan_id || mask->eth.vlan_id) {
        nkey->has_vlan_id = true;
        nkey->vlan_id = ntohs(key->eth.vlan_id);
    }

    if (nkey->has_vlan_id && mask->eth.vlan_id != 0xffff) {
        nmask->has_vlan_id = true;
        nmask->vlan_id = ntohs(mask->eth.vlan_id);
    }

    if (key->tunnel_id || mask->tunnel_id) {
        nkey->has_tunnel_id = true;
        nkey->tunnel_id = key->tunnel_id;
    }

    if (nkey->has_tunnel_id && mask->tunnel_id != 0xffffffff) {
        nmask->has_tunnel_id = true;
        nmask->tunnel_id = mask->tunnel_id;
    }

    if (memcmp(key->eth.src.a, zero_mac.a, ETH_ALEN) ||
        memcmp(mask->eth.src.a, zero_mac.a, ETH_ALEN)) {
        nkey->has_eth_src = true;
        nkey->eth_src = qemu_mac_strdup_printf(key->eth.src.a);
    }

    if (nkey->has_eth_src && memcmp(mask->eth.src.a, ff_mac.a, ETH_ALEN)) {
        nmask->has_eth_src = true;
        nmask->eth_src = qemu_mac_strdup_printf(mask->eth.src.a);
    }

    if (memcmp(key->eth.dst.a, zero_mac.a, ETH_ALEN) ||
        memcmp(mask->eth.dst.a, zero_mac.a, ETH_ALEN)) {
        nkey->has_eth_dst = true;
        nkey->eth_dst = qemu_mac_strdup_printf(key->eth.dst.a);
    }

    if (nkey->has_eth_dst && memcmp(mask->eth.dst.a, ff_mac.a, ETH_ALEN)) {
        nmask->has_eth_dst = true;
        nmask->eth_dst = qemu_mac_strdup_printf(mask->eth.dst.a);
    }

    if (key->eth.type) {

        nkey->has_eth_type = true;
        nkey->eth_type = ntohs(key->eth.type);

        /* IP proto/tos fields exist for both IPv4 (0x0800) and
         * IPv6 (0x86dd) ethertypes
         */
        switch (ntohs(key->eth.type)) {
        case 0x0800:
        case 0x86dd:
            if (key->ip.proto || mask->ip.proto) {
                nkey->has_ip_proto = true;
                nkey->ip_proto = key->ip.proto;
            }
            if (nkey->has_ip_proto && mask->ip.proto != 0xff) {
                nmask->has_ip_proto = true;
                nmask->ip_proto = mask->ip.proto;
            }
            if (key->ip.tos || mask->ip.tos) {
                nkey->has_ip_tos = true;
                nkey->ip_tos = key->ip.tos;
            }
            if (nkey->has_ip_tos && mask->ip.tos != 0xff) {
                nmask->has_ip_tos = true;
                nmask->ip_tos = mask->ip.tos;
            }
            break;
        }

        /* IPv4 destination is reported in CIDR "addr/prefix" form */
        switch (ntohs(key->eth.type)) {
        case 0x0800:
            if (key->ipv4.addr.dst || mask->ipv4.addr.dst) {
                char *dst = inet_ntoa(*(struct in_addr *)&key->ipv4.addr.dst);
                int dst_len = of_dpa_mask2prefix(mask->ipv4.addr.dst);
                nkey->has_ip_dst = true;
                nkey->ip_dst = g_strdup_printf("%s/%d", dst, dst_len);
            }
            break;
        }
    }

    if (flow->action.goto_tbl) {
        naction->has_goto_tbl = true;
        naction->goto_tbl = flow->action.goto_tbl;
    }

    if (flow->action.write.group_id) {
        naction->has_group_id = true;
        naction->group_id = flow->action.write.group_id;
    }

    if (flow->action.apply.new_vlan_id) {
        naction->has_new_vlan_id = true;
        naction->new_vlan_id = flow->action.apply.new_vlan_id;
    }

    /* prepend to the result list */
    new->next = flow_context->list;
    flow_context->list = new;
}
2447 
2448 RockerOfDpaFlowList *qmp_query_rocker_of_dpa_flows(const char *name,
2449                                                    bool has_tbl_id,
2450                                                    uint32_t tbl_id,
2451                                                    Error **errp)
2452 {
2453     struct rocker *r;
2454     struct world *w;
2455     struct of_dpa *of_dpa;
2456     struct of_dpa_flow_fill_context fill_context = {
2457         .list = NULL,
2458         .tbl_id = tbl_id,
2459     };
2460 
2461     r = rocker_find(name);
2462     if (!r) {
2463         error_set(errp, ERROR_CLASS_GENERIC_ERROR,
2464                   "rocker %s not found", name);
2465         return NULL;
2466     }
2467 
2468     w = rocker_get_world(r, ROCKER_WORLD_TYPE_OF_DPA);
2469     if (!w) {
2470         error_set(errp, ERROR_CLASS_GENERIC_ERROR,
2471                   "rocker %s doesn't have OF-DPA world", name);
2472         return NULL;
2473     }
2474 
2475     of_dpa = world_private(w);
2476 
2477     g_hash_table_foreach(of_dpa->flow_tbl, of_dpa_flow_fill, &fill_context);
2478 
2479     return fill_context.list;
2480 }
2481 
/* Per-query state threaded through of_dpa_group_fill() by
 * g_hash_table_foreach().
 */
struct of_dpa_group_fill_context {
    RockerOfDpaGroupList *list;  /* accumulated QMP result list */
    uint8_t type;                /* group type filter; 9 matches all types */
};
2486 
2487 static void of_dpa_group_fill(void *key, void *value, void *user_data)
2488 {
2489     struct of_dpa_group *group = value;
2490     struct of_dpa_group_fill_context *flow_context = user_data;
2491     RockerOfDpaGroupList *new;
2492     RockerOfDpaGroup *ngroup;
2493     struct uint32List *id;
2494     int i;
2495 
2496     if (flow_context->type != 9 &&
2497         flow_context->type != ROCKER_GROUP_TYPE_GET(group->id)) {
2498         return;
2499     }
2500 
2501     new = g_malloc0(sizeof(*new));
2502     ngroup = new->value = g_malloc0(sizeof(*ngroup));
2503 
2504     ngroup->id = group->id;
2505 
2506     ngroup->type = ROCKER_GROUP_TYPE_GET(group->id);
2507 
2508     switch (ngroup->type) {
2509     case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
2510         ngroup->has_vlan_id = true;
2511         ngroup->vlan_id = ROCKER_GROUP_VLAN_GET(group->id);
2512         ngroup->has_pport = true;
2513         ngroup->pport = ROCKER_GROUP_PORT_GET(group->id);
2514         ngroup->has_out_pport = true;
2515         ngroup->out_pport = group->l2_interface.out_pport;
2516         ngroup->has_pop_vlan = true;
2517         ngroup->pop_vlan = group->l2_interface.pop_vlan;
2518         break;
2519     case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
2520         ngroup->has_index = true;
2521         ngroup->index = ROCKER_GROUP_INDEX_LONG_GET(group->id);
2522         ngroup->has_group_id = true;
2523         ngroup->group_id = group->l2_rewrite.group_id;
2524         if (group->l2_rewrite.vlan_id) {
2525             ngroup->has_set_vlan_id = true;
2526             ngroup->set_vlan_id = ntohs(group->l2_rewrite.vlan_id);
2527         }
2528         break;
2529         if (memcmp(group->l2_rewrite.src_mac.a, zero_mac.a, ETH_ALEN)) {
2530             ngroup->has_set_eth_src = true;
2531             ngroup->set_eth_src =
2532                 qemu_mac_strdup_printf(group->l2_rewrite.src_mac.a);
2533         }
2534         if (memcmp(group->l2_rewrite.dst_mac.a, zero_mac.a, ETH_ALEN)) {
2535             ngroup->has_set_eth_dst = true;
2536             ngroup->set_eth_dst =
2537                 qemu_mac_strdup_printf(group->l2_rewrite.dst_mac.a);
2538         }
2539     case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2540     case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2541         ngroup->has_vlan_id = true;
2542         ngroup->vlan_id = ROCKER_GROUP_VLAN_GET(group->id);
2543         ngroup->has_index = true;
2544         ngroup->index = ROCKER_GROUP_INDEX_GET(group->id);
2545         for (i = 0; i < group->l2_flood.group_count; i++) {
2546             ngroup->has_group_ids = true;
2547             id = g_malloc0(sizeof(*id));
2548             id->value = group->l2_flood.group_ids[i];
2549             id->next = ngroup->group_ids;
2550             ngroup->group_ids = id;
2551         }
2552         break;
2553     case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
2554         ngroup->has_index = true;
2555         ngroup->index = ROCKER_GROUP_INDEX_LONG_GET(group->id);
2556         ngroup->has_group_id = true;
2557         ngroup->group_id = group->l3_unicast.group_id;
2558         if (group->l3_unicast.vlan_id) {
2559             ngroup->has_set_vlan_id = true;
2560             ngroup->set_vlan_id = ntohs(group->l3_unicast.vlan_id);
2561         }
2562         if (memcmp(group->l3_unicast.src_mac.a, zero_mac.a, ETH_ALEN)) {
2563             ngroup->has_set_eth_src = true;
2564             ngroup->set_eth_src =
2565                 qemu_mac_strdup_printf(group->l3_unicast.src_mac.a);
2566         }
2567         if (memcmp(group->l3_unicast.dst_mac.a, zero_mac.a, ETH_ALEN)) {
2568             ngroup->has_set_eth_dst = true;
2569             ngroup->set_eth_dst =
2570                 qemu_mac_strdup_printf(group->l3_unicast.dst_mac.a);
2571         }
2572         if (group->l3_unicast.ttl_check) {
2573             ngroup->has_ttl_check = true;
2574             ngroup->ttl_check = group->l3_unicast.ttl_check;
2575         }
2576         break;
2577     }
2578 
2579     new->next = flow_context->list;
2580     flow_context->list = new;
2581 }
2582 
2583 RockerOfDpaGroupList *qmp_query_rocker_of_dpa_groups(const char *name,
2584                                                      bool has_type,
2585                                                      uint8_t type,
2586                                                      Error **errp)
2587 {
2588     struct rocker *r;
2589     struct world *w;
2590     struct of_dpa *of_dpa;
2591     struct of_dpa_group_fill_context fill_context = {
2592         .list = NULL,
2593         .type = type,
2594     };
2595 
2596     r = rocker_find(name);
2597     if (!r) {
2598         error_set(errp, ERROR_CLASS_GENERIC_ERROR,
2599                   "rocker %s not found", name);
2600         return NULL;
2601     }
2602 
2603     w = rocker_get_world(r, ROCKER_WORLD_TYPE_OF_DPA);
2604     if (!w) {
2605         error_set(errp, ERROR_CLASS_GENERIC_ERROR,
2606                   "rocker %s doesn't have OF-DPA world", name);
2607         return NULL;
2608     }
2609 
2610     of_dpa = world_private(w);
2611 
2612     g_hash_table_foreach(of_dpa->group_tbl, of_dpa_group_fill, &fill_context);
2613 
2614     return fill_context.list;
2615 }
2616 
/* Callback table wiring the OF-DPA implementation into the generic
 * rocker world framework.
 */
static WorldOps of_dpa_ops = {
    .init = of_dpa_init,
    .uninit = of_dpa_uninit,
    .ig = of_dpa_ig,
    .cmd = of_dpa_cmd,
};
2623 
/* Allocate the OF-DPA world for rocker switch @r.  The world framework
 * reserves sizeof(OfDpa) bytes of private state (retrieved later via
 * world_private()) and uses of_dpa_ops for init/uninit/ingress/cmd.
 */
World *of_dpa_world_alloc(Rocker *r)
{
    return world_alloc(r, sizeof(OfDpa), ROCKER_WORLD_TYPE_OF_DPA, &of_dpa_ops);
}
2628