xref: /openbmc/qemu/hw/net/rocker/rocker_of_dpa.c (revision 9c4218e9)
1 /*
2  * QEMU rocker switch emulation - OF-DPA flow processing support
3  *
4  * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14  * GNU General Public License for more details.
15  */
16 
17 #include "qemu/osdep.h"
18 #include "net/eth.h"
19 #include "qemu/iov.h"
20 #include "qemu/timer.h"
21 #include "qmp-commands.h"
22 
23 #include "rocker.h"
24 #include "rocker_hw.h"
25 #include "rocker_fp.h"
26 #include "rocker_tlv.h"
27 #include "rocker_world.h"
28 #include "rocker_desc.h"
29 #include "rocker_of_dpa.h"
30 
/* Convenience constants: the all-zeros MAC is used as a "field not set"
 * sentinel, the all-ones MAC as an "exact match" mask, in the flow-key
 * comparisons below.
 */
static const MACAddr zero_mac = { .a = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } };
static const MACAddr ff_mac =   { .a = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } };
33 
/* Per-world OF-DPA state.  Flows are kept in a hash table keyed by the
 * 64-bit flow cookie; groups in a hash table keyed by the 32-bit group id.
 */
typedef struct of_dpa {
    World *world;                        /* owning rocker world */
    GHashTable *flow_tbl;                /* cookie -> OfDpaFlow */
    GHashTable *group_tbl;               /* group id -> OfDpaGroup */
    unsigned int flow_tbl_max_size;      /* table size limit */
    unsigned int group_tbl_max_size;     /* table size limit */
} OfDpa;
41 
42 /* flow_key stolen mostly from OVS
43  *
44  * Note: fields that compare with network packet header fields
45  * are stored in network order (BE) to avoid per-packet field
46  * byte-swaps.
47  */
48 
/* Flow lookup key.  Matched against packets as an array of uint64_t words
 * under a per-flow mask (see _of_dpa_flow_match), so 'width' records how
 * many u64 words of the key are significant for a given table.
 */
typedef struct of_dpa_flow_key {
    uint32_t in_pport;               /* ingress port */
    uint32_t tunnel_id;              /* overlay tunnel id */
    uint32_t tbl_id;                 /* table id */
    struct {
        __be16 vlan_id;              /* 0 if no VLAN */
        MACAddr src;                 /* ethernet source address */
        MACAddr dst;                 /* ethernet destination address */
        __be16 type;                 /* ethernet frame type */
    } eth;
    struct {
        uint8_t proto;               /* IP protocol or ARP opcode */
        uint8_t tos;                 /* IP ToS */
        uint8_t ttl;                 /* IP TTL/hop limit */
        uint8_t frag;                /* one of FRAG_TYPE_* */
    } ip;
    union {                          /* L3/L4 fields, by eth.type */
        struct {
            struct {
                __be32 src;          /* IP source address */
                __be32 dst;          /* IP destination address */
            } addr;
            union {
                struct {
                    __be16 src;      /* TCP/UDP/SCTP source port */
                    __be16 dst;      /* TCP/UDP/SCTP destination port */
                    __be16 flags;    /* TCP flags */
                } tp;
                struct {
                    MACAddr sha;     /* ARP source hardware address */
                    MACAddr tha;     /* ARP target hardware address */
                } arp;
            };
        } ipv4;
        struct {
            struct {
                Ipv6Addr src;       /* IPv6 source address */
                Ipv6Addr dst;       /* IPv6 destination address */
            } addr;
            __be32 label;            /* IPv6 flow label */
            struct {
                __be16 src;          /* TCP/UDP/SCTP source port */
                __be16 dst;          /* TCP/UDP/SCTP destination port */
                __be16 flags;        /* TCP flags */
            } tp;
            struct {
                Ipv6Addr target;    /* ND target address */
                MACAddr sll;         /* ND source link layer address */
                MACAddr tll;         /* ND target link layer address */
            } nd;
        } ipv6;
    };
    int width;                       /* how many uint64_t's in key? */
} OfDpaFlowKey;
103 
/* Width of key which includes field 'f' in u64s, rounded up.  Used by the
 * build_match functions to limit the masked compare to the prefix of the
 * key that a given table actually populates.
 */
#define FLOW_KEY_WIDTH(f) \
    ((offsetof(OfDpaFlowKey, f) + \
      sizeof(((OfDpaFlowKey *)0)->f) + \
      sizeof(uint64_t) - 1) / sizeof(uint64_t))
109 
/* Actions attached to a flow.  'write' fields accumulate in the packet's
 * action set and take effect at egress; 'apply' fields take effect
 * immediately when the flow is hit (OpenFlow apply-actions vs
 * write-actions semantics).
 */
typedef struct of_dpa_flow_action {
    uint32_t goto_tbl;               /* next table to jump to on hit */
    struct {
        uint32_t group_id;
        uint32_t tun_log_lport;
        __be16 vlan_id;
    } write;
    struct {
        __be16 new_vlan_id;
        uint32_t out_pport;
        uint8_t copy_to_cpu;
        __be16 vlan_id;
    } apply;
} OfDpaFlowAction;
124 
/* One installed flow entry: match key/mask, actions, and counters.
 * 'cookie' is the hash key in OfDpa.flow_tbl and must stay stable while
 * the flow is in the table.
 */
typedef struct of_dpa_flow {
    uint32_t lpm;                    /* longest-prefix-match tiebreaker */
    uint32_t priority;               /* higher wins among matches */
    uint32_t hardtime;
    uint32_t idletime;
    uint64_t cookie;                 /* unique id, hash-table key */
    OfDpaFlowKey key;
    OfDpaFlowKey mask;               /* 1-bits in mask = significant key bits */
    OfDpaFlowAction action;
    struct {
        uint64_t hits;
        int64_t install_time;        /* seconds of QEMU_CLOCK_VIRTUAL */
        int64_t refresh_time;        /* seconds of QEMU_CLOCK_VIRTUAL */
        uint64_t rx_pkts;
        uint64_t tx_pkts;
    } stats;
} OfDpaFlow;
142 
/* Pointers into the current packet's headers, filled in by
 * of_dpa_flow_pkt_parse.  NULL pointers mean the header is absent.
 */
typedef struct of_dpa_flow_pkt_fields {
    uint32_t tunnel_id;
    struct eth_header *ethhdr;       /* outer Ethernet header */
    __be16 *h_proto;                 /* innermost ethertype (after VLAN) */
    struct vlan_header *vlanhdr;     /* NULL if untagged */
    struct ip_header *ipv4hdr;
    struct ip6_header *ipv6hdr;
    Ipv6Addr *ipv6_src_addr;
    Ipv6Addr *ipv6_dst_addr;
} OfDpaFlowPktFields;
153 
/* Per-packet processing context carried through the flow-table pipeline.
 * The *_rewrite headers are scratch copies used when a group action
 * rewrites MACs or the VLAN tag without touching the original packet.
 */
typedef struct of_dpa_flow_context {
    uint32_t in_pport;               /* ingress port of this packet */
    uint32_t tunnel_id;
    struct iovec *iov;               /* packet data; iov[0]=eth, iov[1]=vlan */
    int iovcnt;
    struct eth_header ethhdr_rewrite;   /* scratch for MAC rewrite */
    struct vlan_header vlanhdr_rewrite; /* scratch for VLAN rewrite */
    struct vlan_header vlanhdr;         /* scratch for VLAN insertion */
    OfDpa *of_dpa;
    OfDpaFlowPktFields fields;
    OfDpaFlowAction action_set;      /* accumulated write-actions */
} OfDpaFlowContext;
166 
/* Search state for a flow-table lookup: the packet-derived key to match
 * and the best (highest priority/lpm) flow found so far.
 */
typedef struct of_dpa_flow_match {
    OfDpaFlowKey value;              /* key built from the packet */
    OfDpaFlow *best;                 /* best matching flow, NULL if none */
} OfDpaFlowMatch;
171 
/* Group-table entry.  Which union member is valid is encoded in the
 * group id's type bits (ROCKER_GROUP_TYPE_GET(id)).
 */
typedef struct of_dpa_group {
    uint32_t id;                     /* hash-table key; encodes group type */
    union {
        struct {
            uint32_t out_pport;
            uint8_t pop_vlan;        /* strip VLAN tag on egress */
        } l2_interface;
        struct {
            uint32_t group_id;       /* chained L2 interface group */
            MACAddr src_mac;
            MACAddr dst_mac;
            __be16 vlan_id;
        } l2_rewrite;
        struct {
            uint16_t group_count;
            uint32_t *group_ids;     /* member groups to flood to */
        } l2_flood;
        struct {
            uint32_t group_id;       /* chained L2 interface group */
            MACAddr src_mac;
            MACAddr dst_mac;
            __be16 vlan_id;
            uint8_t ttl_check;
        } l3_unicast;
    };
} OfDpaGroup;
198 
199 static int of_dpa_mask2prefix(__be32 mask)
200 {
201     int i;
202     int count = 32;
203 
204     for (i = 0; i < 32; i++) {
205         if (!(ntohl(mask) & ((2 << i) - 1))) {
206             count--;
207         }
208     }
209 
210     return count;
211 }
212 
#if defined(DEBUG_ROCKER)
/* Dump a flow key (and optionally its mask) to the debug log.  For each
 * populated field, print "value" or "value/mask" when the mask is not an
 * exact match.
 *
 * Fix: the "/mask" suffixes for in_pport, tunnel_id and vlan previously
 * printed the KEY field again instead of the MASK field (copy/paste
 * error; compare the eth.type case below, which was already correct).
 */
static void of_dpa_flow_key_dump(OfDpaFlowKey *key, OfDpaFlowKey *mask)
{
    char buf[512], *b = buf, *mac;

    b += sprintf(b, " tbl %2d", key->tbl_id);

    if (key->in_pport || (mask && mask->in_pport)) {
        b += sprintf(b, " in_pport %2d", key->in_pport);
        if (mask && mask->in_pport != 0xffffffff) {
            b += sprintf(b, "/0x%08x", mask->in_pport);
        }
    }

    if (key->tunnel_id || (mask && mask->tunnel_id)) {
        b += sprintf(b, " tun %8d", key->tunnel_id);
        if (mask && mask->tunnel_id != 0xffffffff) {
            b += sprintf(b, "/0x%08x", mask->tunnel_id);
        }
    }

    if (key->eth.vlan_id || (mask && mask->eth.vlan_id)) {
        b += sprintf(b, " vlan %4d", ntohs(key->eth.vlan_id));
        if (mask && mask->eth.vlan_id != 0xffff) {
            b += sprintf(b, "/0x%04x", ntohs(mask->eth.vlan_id));
        }
    }

    if (memcmp(key->eth.src.a, zero_mac.a, ETH_ALEN) ||
        (mask && memcmp(mask->eth.src.a, zero_mac.a, ETH_ALEN))) {
        mac = qemu_mac_strdup_printf(key->eth.src.a);
        b += sprintf(b, " src %s", mac);
        g_free(mac);
        if (mask && memcmp(mask->eth.src.a, ff_mac.a, ETH_ALEN)) {
            mac = qemu_mac_strdup_printf(mask->eth.src.a);
            b += sprintf(b, "/%s", mac);
            g_free(mac);
        }
    }

    if (memcmp(key->eth.dst.a, zero_mac.a, ETH_ALEN) ||
        (mask && memcmp(mask->eth.dst.a, zero_mac.a, ETH_ALEN))) {
        mac = qemu_mac_strdup_printf(key->eth.dst.a);
        b += sprintf(b, " dst %s", mac);
        g_free(mac);
        if (mask && memcmp(mask->eth.dst.a, ff_mac.a, ETH_ALEN)) {
            mac = qemu_mac_strdup_printf(mask->eth.dst.a);
            b += sprintf(b, "/%s", mac);
            g_free(mac);
        }
    }

    if (key->eth.type || (mask && mask->eth.type)) {
        b += sprintf(b, " type 0x%04x", ntohs(key->eth.type));
        if (mask && mask->eth.type != 0xffff) {
            b += sprintf(b, "/0x%04x", ntohs(mask->eth.type));
        }
        switch (ntohs(key->eth.type)) {
        case 0x0800:                 /* IPv4 */
        case 0x86dd:                 /* IPv6 */
            if (key->ip.proto || (mask && mask->ip.proto)) {
                b += sprintf(b, " ip proto %2d", key->ip.proto);
                if (mask && mask->ip.proto != 0xff) {
                    b += sprintf(b, "/0x%02x", mask->ip.proto);
                }
            }
            if (key->ip.tos || (mask && mask->ip.tos)) {
                b += sprintf(b, " ip tos %2d", key->ip.tos);
                if (mask && mask->ip.tos != 0xff) {
                    b += sprintf(b, "/0x%02x", mask->ip.tos);
                }
            }
            break;
        }
        switch (ntohs(key->eth.type)) {
        case 0x0800:                 /* IPv4 */
            if (key->ipv4.addr.dst || (mask && mask->ipv4.addr.dst)) {
                b += sprintf(b, " dst %s",
                    inet_ntoa(*(struct in_addr *)&key->ipv4.addr.dst));
                if (mask) {
                    b += sprintf(b, "/%d",
                                 of_dpa_mask2prefix(mask->ipv4.addr.dst));
                }
            }
            break;
        }
    }

    DPRINTF("%s\n", buf);
}
#else
#define of_dpa_flow_key_dump(k, m)
#endif
306 
/* GHashTable foreach callback: test one flow against the search key in
 * 'user_data' and remember the best hit.  The key/mask/value structs are
 * compared as arrays of uint64_t words, 'width' words long.
 */
static void _of_dpa_flow_match(void *key, void *value, void *user_data)
{
    OfDpaFlow *flow = value;
    OfDpaFlowMatch *match = user_data;
    uint64_t *k = (uint64_t *)&flow->key;
    uint64_t *m = (uint64_t *)&flow->mask;
    uint64_t *v = (uint64_t *)&match->value;
    int i;

    /* debug-dump candidate flows from the table being searched */
    if (flow->key.tbl_id == match->value.tbl_id) {
        of_dpa_flow_key_dump(&flow->key, &flow->mask);
    }

    /* a flow can't match if it requires more key words than were built */
    if (flow->key.width > match->value.width) {
        return;
    }

    for (i = 0; i < flow->key.width; i++, k++, m++, v++) {
        /* nonzero iff some mask-selected bit differs between the flow
         * key and the packet value, i.e. ((*k ^ *v) & *m) != 0 */
        if ((~*k & *m & *v) | (*k & *m & ~*v)) {
            return;
        }
    }

    DPRINTF("match\n");

    /* keep the highest-priority match; lpm breaks priority ties */
    if (!match->best ||
        flow->priority > match->best->priority ||
        flow->lpm > match->best->lpm) {
        match->best = flow;
    }
}
338 
339 static OfDpaFlow *of_dpa_flow_match(OfDpa *of_dpa, OfDpaFlowMatch *match)
340 {
341     DPRINTF("\nnew search\n");
342     of_dpa_flow_key_dump(&match->value, NULL);
343 
344     g_hash_table_foreach(of_dpa->flow_tbl, _of_dpa_flow_match, match);
345 
346     return match->best;
347 }
348 
349 static OfDpaFlow *of_dpa_flow_find(OfDpa *of_dpa, uint64_t cookie)
350 {
351     return g_hash_table_lookup(of_dpa->flow_tbl, &cookie);
352 }
353 
354 static int of_dpa_flow_add(OfDpa *of_dpa, OfDpaFlow *flow)
355 {
356     g_hash_table_insert(of_dpa->flow_tbl, &flow->cookie, flow);
357 
358     return ROCKER_OK;
359 }
360 
/* Remove a flow from the table; the table's value-destroy function (set
 * at table creation, outside this chunk) is responsible for freeing it.
 */
static void of_dpa_flow_del(OfDpa *of_dpa, OfDpaFlow *flow)
{
    g_hash_table_remove(of_dpa->flow_tbl, &flow->cookie);
}
365 
366 static OfDpaFlow *of_dpa_flow_alloc(uint64_t cookie)
367 {
368     OfDpaFlow *flow;
369     int64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) / 1000;
370 
371     flow = g_new0(OfDpaFlow, 1);
372     if (!flow) {
373         return NULL;
374     }
375 
376     flow->cookie = cookie;
377     flow->mask.tbl_id = 0xffffffff;
378 
379     flow->stats.install_time = flow->stats.refresh_time = now;
380 
381     return flow;
382 }
383 
/* Point the first two iovecs back at the packet's own eth/vlan headers,
 * undoing any rewrite/strip a previous group action applied.  iov[1] is
 * zero-length when the packet is untagged.
 */
static void of_dpa_flow_pkt_hdr_reset(OfDpaFlowContext *fc)
{
    OfDpaFlowPktFields *fields = &fc->fields;

    fc->iov[0].iov_base = fields->ethhdr;
    fc->iov[0].iov_len = sizeof(struct eth_header);
    fc->iov[1].iov_base = fields->vlanhdr;
    fc->iov[1].iov_len = fields->vlanhdr ? sizeof(struct vlan_header) : 0;
}
393 
/* Parse the packet headers out of iov[0] and build fc->iov as a copy of
 * the input iov with two extra leading vectors (eth hdr, vlan hdr) so a
 * VLAN tag can later be inserted/stripped by retargeting vectors only.
 *
 * NOTE(review): only iov[0] is parsed, so headers are assumed to be
 * contiguous in the first vector; fc->iov must have room for iovcnt + 2
 * entries — confirm against the caller that sets up fc->iov.
 */
static void of_dpa_flow_pkt_parse(OfDpaFlowContext *fc,
                                  const struct iovec *iov, int iovcnt)
{
    OfDpaFlowPktFields *fields = &fc->fields;
    size_t sofar = 0;                /* bytes of header consumed so far */
    int i;

    sofar += sizeof(struct eth_header);
    if (iov->iov_len < sofar) {
        DPRINTF("flow_pkt_parse underrun on eth_header\n");
        return;
    }

    fields->ethhdr = iov->iov_base;
    fields->h_proto = &fields->ethhdr->h_proto;

    /* optional 802.1Q tag: h_proto moves to the inner ethertype */
    if (ntohs(*fields->h_proto) == ETH_P_VLAN) {
        sofar += sizeof(struct vlan_header);
        if (iov->iov_len < sofar) {
            DPRINTF("flow_pkt_parse underrun on vlan_header\n");
            return;
        }
        fields->vlanhdr = (struct vlan_header *)(fields->ethhdr + 1);
        fields->h_proto = &fields->vlanhdr->h_proto;
    }

    switch (ntohs(*fields->h_proto)) {
    case ETH_P_IP:
        sofar += sizeof(struct ip_header);
        if (iov->iov_len < sofar) {
            DPRINTF("flow_pkt_parse underrun on ip_header\n");
            return;
        }
        /* L3 header starts right after the innermost ethertype */
        fields->ipv4hdr = (struct ip_header *)(fields->h_proto + 1);
        break;
    case ETH_P_IPV6:
        sofar += sizeof(struct ip6_header);
        if (iov->iov_len < sofar) {
            DPRINTF("flow_pkt_parse underrun on ip6_header\n");
            return;
        }
        fields->ipv6hdr = (struct ip6_header *)(fields->h_proto + 1);
        break;
    }

    /* To facilitate (potential) VLAN tag insertion, Make a
     * copy of the iov and insert two new vectors at the
     * beginning for eth hdr and vlan hdr.  No data is copied,
     * just the vectors.
     */

    of_dpa_flow_pkt_hdr_reset(fc);

    /* iov[2] = remainder of the first input vector after the headers */
    fc->iov[2].iov_base = fields->h_proto + 1;
    fc->iov[2].iov_len = iov->iov_len - fc->iov[0].iov_len - fc->iov[1].iov_len;

    for (i = 1; i < iovcnt; i++) {
        fc->iov[i+2] = iov[i];
    }

    fc->iovcnt = iovcnt + 2;
}
456 
/* Insert an 802.1Q tag (network-order vlan_id) into the packet without
 * copying payload: the eth header's ethertype becomes ETH_P_VLAN, the
 * original ethertype moves into the scratch vlan header in fc, and
 * iov[1] is pointed at that scratch header.  No-op if already tagged.
 */
static void of_dpa_flow_pkt_insert_vlan(OfDpaFlowContext *fc, __be16 vlan_id)
{
    OfDpaFlowPktFields *fields = &fc->fields;
    /* save the original ethertype before overwriting it below */
    uint16_t h_proto = fields->ethhdr->h_proto;

    if (fields->vlanhdr) {
        DPRINTF("flow_pkt_insert_vlan packet already has vlan\n");
        return;
    }

    fields->ethhdr->h_proto = htons(ETH_P_VLAN);
    fields->vlanhdr = &fc->vlanhdr;
    fields->vlanhdr->h_tci = vlan_id;
    fields->vlanhdr->h_proto = h_proto;
    fields->h_proto = &fields->vlanhdr->h_proto;

    fc->iov[1].iov_base = fields->vlanhdr;
    fc->iov[1].iov_len = sizeof(struct vlan_header);
}
476 
/* Strip the VLAN tag by retargeting iovecs only: shrink iov[0] so it no
 * longer covers the outer ethertype, then point iov[1] at the inner
 * ethertype.  Net effect on the wire: dst MAC + src MAC + inner type,
 * with the 4-byte tag skipped.  No-op on untagged packets.
 */
static void of_dpa_flow_pkt_strip_vlan(OfDpaFlowContext *fc)
{
    OfDpaFlowPktFields *fields = &fc->fields;

    if (!fields->vlanhdr) {
        return;
    }

    fc->iov[0].iov_len -= sizeof(fields->ethhdr->h_proto);
    fc->iov[1].iov_base = fields->h_proto;
    fc->iov[1].iov_len = sizeof(fields->ethhdr->h_proto);
}
489 
/* Rewrite source/dest MAC and/or VLAN id by copying the headers into the
 * scratch *_rewrite buffers and pointing iov[0]/iov[1] at them; the
 * original packet bytes are never modified.  All-zero MACs mean "leave
 * that address unchanged"; vlan_id 0 means "leave the tag unchanged".
 */
static void of_dpa_flow_pkt_hdr_rewrite(OfDpaFlowContext *fc,
                                        uint8_t *src_mac, uint8_t *dst_mac,
                                        __be16 vlan_id)
{
    OfDpaFlowPktFields *fields = &fc->fields;

    if (src_mac || dst_mac) {
        memcpy(&fc->ethhdr_rewrite, fields->ethhdr, sizeof(struct eth_header));
        if (src_mac && memcmp(src_mac, zero_mac.a, ETH_ALEN)) {
            memcpy(fc->ethhdr_rewrite.h_source, src_mac, ETH_ALEN);
        }
        if (dst_mac && memcmp(dst_mac, zero_mac.a, ETH_ALEN)) {
            memcpy(fc->ethhdr_rewrite.h_dest, dst_mac, ETH_ALEN);
        }
        fc->iov[0].iov_base = &fc->ethhdr_rewrite;
    }

    if (vlan_id && fields->vlanhdr) {
        /* NOTE(review): this copies fc->vlanhdr (the insertion scratch
         * header), not *fields->vlanhdr; if the tag came from the packet
         * itself rather than of_dpa_flow_pkt_insert_vlan, the copied
         * h_proto may be stale — confirm intent. */
        fc->vlanhdr_rewrite = fc->vlanhdr;
        fc->vlanhdr_rewrite.h_tci = vlan_id;
        fc->iov[1].iov_base = &fc->vlanhdr_rewrite;
    }
}
513 
514 static void of_dpa_flow_ig_tbl(OfDpaFlowContext *fc, uint32_t tbl_id);
515 
516 static void of_dpa_ig_port_build_match(OfDpaFlowContext *fc,
517                                        OfDpaFlowMatch *match)
518 {
519     match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
520     match->value.in_pport = fc->in_pport;
521     match->value.width = FLOW_KEY_WIDTH(tbl_id);
522 }
523 
/* Miss handler for the Ingress Port table. */
static void of_dpa_ig_port_miss(OfDpaFlowContext *fc)
{
    uint32_t port;

    /* The default on miss is for packets from physical ports
     * to go to the VLAN Flow Table. There is no default rule
     * for packets from logical ports, which are dropped on miss.
     */

    /* fp_port_from_pport succeeds only for physical ports */
    if (fp_port_from_pport(fc->in_pport, &port)) {
        of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_VLAN);
    }
}
537 
538 static void of_dpa_vlan_build_match(OfDpaFlowContext *fc,
539                                     OfDpaFlowMatch *match)
540 {
541     match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
542     match->value.in_pport = fc->in_pport;
543     if (fc->fields.vlanhdr) {
544         match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
545     }
546     match->value.width = FLOW_KEY_WIDTH(eth.vlan_id);
547 }
548 
549 static void of_dpa_vlan_insert(OfDpaFlowContext *fc,
550                                OfDpaFlow *flow)
551 {
552     if (flow->action.apply.new_vlan_id) {
553         of_dpa_flow_pkt_insert_vlan(fc, flow->action.apply.new_vlan_id);
554     }
555 }
556 
/* Build the lookup key for the Termination MAC table: ingress port,
 * ethertype, VLAN id and destination MAC.
 *
 * NOTE(review): fields.vlanhdr is dereferenced unconditionally here —
 * presumably the VLAN table has always tagged the packet by this stage;
 * confirm against of_dpa_flow_ig_tbl's pipeline ordering.
 */
static void of_dpa_term_mac_build_match(OfDpaFlowContext *fc,
                                        OfDpaFlowMatch *match)
{
    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
    match->value.in_pport = fc->in_pport;
    match->value.eth.type = *fc->fields.h_proto;
    match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
    memcpy(match->value.eth.dst.a, fc->fields.ethhdr->h_dest,
           sizeof(match->value.eth.dst.a));
    match->value.width = FLOW_KEY_WIDTH(eth.type);
}
568 
/* Termination MAC miss: not routed, so continue with L2 bridging. */
static void of_dpa_term_mac_miss(OfDpaFlowContext *fc)
{
    of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_BRIDGING);
}
573 
/* Fold a flow's apply-actions into the packet's action set: copy-to-cpu
 * flag, and the flow key's VLAN id (used later to pick the CPU group).
 */
static void of_dpa_apply_actions(OfDpaFlowContext *fc,
                                 OfDpaFlow *flow)
{
    fc->action_set.apply.copy_to_cpu = flow->action.apply.copy_to_cpu;
    fc->action_set.apply.vlan_id = flow->key.eth.vlan_id;
}
580 
/* Build the lookup key for the Bridging table: destination MAC plus
 * either the VLAN id (tagged traffic) or the overlay tunnel id.
 */
static void of_dpa_bridging_build_match(OfDpaFlowContext *fc,
                                        OfDpaFlowMatch *match)
{
    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
    if (fc->fields.vlanhdr) {
        match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
    } else if (fc->tunnel_id) {
        match->value.tunnel_id = fc->tunnel_id;
    }
    memcpy(match->value.eth.dst.a, fc->fields.ethhdr->h_dest,
           sizeof(match->value.eth.dst.a));
    match->value.width = FLOW_KEY_WIDTH(eth.dst);
}
594 
/* MAC learning: on any bridging-table hit (or miss), look the packet's
 * SOURCE mac/vlan up in the bridging table and notify the driver so it
 * can install/refresh the corresponding entry.  Notifications for an
 * already-learned address are rate-limited to one per refresh_delay.
 * 'dst_flow' (the flow that was hit, NULL on miss) is currently unused.
 */
static void of_dpa_bridging_learn(OfDpaFlowContext *fc,
                                  OfDpaFlow *dst_flow)
{
    OfDpaFlowMatch match = { { 0, }, };
    OfDpaFlow *flow;
    uint8_t *addr;
    uint16_t vlan_id;
    int64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) / 1000;
    int64_t refresh_delay = 1;       /* seconds between driver notifications */

    /* Do a lookup in bridge table by src_mac/vlan */

    addr = fc->fields.ethhdr->h_source;
    vlan_id = fc->fields.vlanhdr->h_tci;

    /* the source MAC goes into the key's DST slot because bridging-table
     * entries are keyed by destination MAC */
    match.value.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
    match.value.eth.vlan_id = vlan_id;
    memcpy(match.value.eth.dst.a, addr, sizeof(match.value.eth.dst.a));
    match.value.width = FLOW_KEY_WIDTH(eth.dst);

    flow = of_dpa_flow_match(fc->of_dpa, &match);
    if (flow) {
        /* only exact-MAC entries count as "learned" (not multicast/flood
         * entries with a partial mask) */
        if (!memcmp(flow->mask.eth.dst.a, ff_mac.a,
                    sizeof(flow->mask.eth.dst.a))) {
            /* src_mac/vlan already learned; if in_port and out_port
             * don't match, the end station has moved and the port
             * needs updating */
            /* XXX implement the in_port/out_port check */
            if (now - flow->stats.refresh_time < refresh_delay) {
                return;
            }
            flow->stats.refresh_time = now;
        }
    }

    /* Let driver know about mac/vlan.  This may be a new mac/vlan
     * or a refresh of existing mac/vlan that's been hit after the
     * refresh_delay.
     */

    rocker_event_mac_vlan_seen(world_rocker(fc->of_dpa->world),
                               fc->in_pport, addr, vlan_id);
}
638 
/* Bridging miss: still learn the source MAC, then fall through to the
 * ACL policy table.
 */
static void of_dpa_bridging_miss(OfDpaFlowContext *fc)
{
    of_dpa_bridging_learn(fc, NULL);
    of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_ACL_POLICY);
}
644 
645 static void of_dpa_bridging_action_write(OfDpaFlowContext *fc,
646                                          OfDpaFlow *flow)
647 {
648     if (flow->action.write.group_id != ROCKER_GROUP_NONE) {
649         fc->action_set.write.group_id = flow->action.write.group_id;
650     }
651     fc->action_set.write.tun_log_lport = flow->action.write.tun_log_lport;
652 }
653 
/* Build the lookup key for the Unicast Routing table: ethertype plus the
 * IPv4 or IPv6 destination address, whichever header is present.
 */
static void of_dpa_unicast_routing_build_match(OfDpaFlowContext *fc,
                                               OfDpaFlowMatch *match)
{
    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
    match->value.eth.type = *fc->fields.h_proto;
    if (fc->fields.ipv4hdr) {
        match->value.ipv4.addr.dst = fc->fields.ipv4hdr->ip_dst;
    }
    if (fc->fields.ipv6_dst_addr) {
        memcpy(&match->value.ipv6.addr.dst, fc->fields.ipv6_dst_addr,
               sizeof(match->value.ipv6.addr.dst));
    }
    /* widest of the two address layouts so either case is covered */
    match->value.width = FLOW_KEY_WIDTH(ipv6.addr.dst);
}
668 
/* Unicast routing miss: fall through to the ACL policy table. */
static void of_dpa_unicast_routing_miss(OfDpaFlowContext *fc)
{
    of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_ACL_POLICY);
}
673 
674 static void of_dpa_unicast_routing_action_write(OfDpaFlowContext *fc,
675                                                 OfDpaFlow *flow)
676 {
677     if (flow->action.write.group_id != ROCKER_GROUP_NONE) {
678         fc->action_set.write.group_id = flow->action.write.group_id;
679     }
680 }
681 
/* Build the lookup key for the Multicast Routing table: ethertype, VLAN
 * id, and the IPv4 or IPv6 source/destination addresses.
 *
 * NOTE(review): fields.vlanhdr is dereferenced unconditionally, as in
 * the termination-MAC case — confirm the pipeline guarantees a tag here.
 */
static void
of_dpa_multicast_routing_build_match(OfDpaFlowContext *fc,
                                     OfDpaFlowMatch *match)
{
    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
    match->value.eth.type = *fc->fields.h_proto;
    match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
    if (fc->fields.ipv4hdr) {
        match->value.ipv4.addr.src = fc->fields.ipv4hdr->ip_src;
        match->value.ipv4.addr.dst = fc->fields.ipv4hdr->ip_dst;
    }
    if (fc->fields.ipv6_src_addr) {
        memcpy(&match->value.ipv6.addr.src, fc->fields.ipv6_src_addr,
               sizeof(match->value.ipv6.addr.src));
    }
    if (fc->fields.ipv6_dst_addr) {
        memcpy(&match->value.ipv6.addr.dst, fc->fields.ipv6_dst_addr,
               sizeof(match->value.ipv6.addr.dst));
    }
    match->value.width = FLOW_KEY_WIDTH(ipv6.addr.dst);
}
703 
/* Multicast routing miss: fall through to the ACL policy table. */
static void of_dpa_multicast_routing_miss(OfDpaFlowContext *fc)
{
    of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_ACL_POLICY);
}
708 
709 static void
710 of_dpa_multicast_routing_action_write(OfDpaFlowContext *fc,
711                                       OfDpaFlow *flow)
712 {
713     if (flow->action.write.group_id != ROCKER_GROUP_NONE) {
714         fc->action_set.write.group_id = flow->action.write.group_id;
715     }
716     fc->action_set.write.vlan_id = flow->action.write.vlan_id;
717 }
718 
/* Build the lookup key for the ACL Policy table: port, both MACs,
 * ethertype and VLAN id; when an L3 header is present, extend the key
 * with IP protocol and ToS.
 */
static void of_dpa_acl_build_match(OfDpaFlowContext *fc,
                                   OfDpaFlowMatch *match)
{
    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
    match->value.in_pport = fc->in_pport;
    memcpy(match->value.eth.src.a, fc->fields.ethhdr->h_source,
           sizeof(match->value.eth.src.a));
    memcpy(match->value.eth.dst.a, fc->fields.ethhdr->h_dest,
           sizeof(match->value.eth.dst.a));
    match->value.eth.type = *fc->fields.h_proto;
    match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
    match->value.width = FLOW_KEY_WIDTH(eth.type);
    if (fc->fields.ipv4hdr) {
        match->value.ip.proto = fc->fields.ipv4hdr->ip_p;
        match->value.ip.tos = fc->fields.ipv4hdr->ip_tos;
        /* widen the key to include the IP fields just filled in */
        match->value.width = FLOW_KEY_WIDTH(ip.tos);
    } else if (fc->fields.ipv6hdr) {
        match->value.ip.proto =
            fc->fields.ipv6hdr->ip6_ctlun.ip6_un1.ip6_un1_nxt;
        match->value.ip.tos = 0; /* XXX what goes here? */
        match->value.width = FLOW_KEY_WIDTH(ip.tos);
    }
}
742 
743 static void of_dpa_eg(OfDpaFlowContext *fc);
/* ACL hit: the ACL table is last in the pipeline, so go straight to
 * egress processing.  'dst_flow' is unused.
 */
static void of_dpa_acl_hit(OfDpaFlowContext *fc,
                           OfDpaFlow *dst_flow)
{
    of_dpa_eg(fc);
}
749 
750 static void of_dpa_acl_action_write(OfDpaFlowContext *fc,
751                                     OfDpaFlow *flow)
752 {
753     if (flow->action.write.group_id != ROCKER_GROUP_NONE) {
754         fc->action_set.write.group_id = flow->action.write.group_id;
755     }
756 }
757 
/* Terminal action: drop the packet.  Intentionally empty — simply not
 * forwarding the iov anywhere is the drop.
 */
static void of_dpa_drop(OfDpaFlowContext *fc)
{
    /* drop packet */
}
762 
763 static OfDpaGroup *of_dpa_group_find(OfDpa *of_dpa,
764                                               uint32_t group_id)
765 {
766     return g_hash_table_lookup(of_dpa->group_tbl, &group_id);
767 }
768 
769 static int of_dpa_group_add(OfDpa *of_dpa, OfDpaGroup *group)
770 {
771     g_hash_table_insert(of_dpa->group_tbl, &group->id, group);
772 
773     return 0;
774 }
775 
#if 0
/* Modify an existing group in place.  Compiled out: the modification
 * logic is not implemented yet (see XXX below).
 */
static int of_dpa_group_mod(OfDpa *of_dpa, OfDpaGroup *group)
{
    OfDpaGroup *old_group = of_dpa_group_find(of_dpa, group->id);

    if (!old_group) {
        return -ENOENT;
    }

    /* XXX */

    return 0;
}
#endif
790 
791 static int of_dpa_group_del(OfDpa *of_dpa, OfDpaGroup *group)
792 {
793     g_hash_table_remove(of_dpa->group_tbl, &group->id);
794 
795     return 0;
796 }
797 
#if 0
/* Return statistics for a group.  Compiled out: stats retrieval is not
 * implemented yet (see XXX below).
 */
static int of_dpa_group_get_stats(OfDpa *of_dpa, uint32_t id)
{
    OfDpaGroup *group = of_dpa_group_find(of_dpa, id);

    if (!group) {
        return -ENOENT;
    }

    /* XXX get/return stats */

    return 0;
}
#endif
812 
813 static OfDpaGroup *of_dpa_group_alloc(uint32_t id)
814 {
815     OfDpaGroup *group = g_new0(OfDpaGroup, 1);
816 
817     if (!group) {
818         return NULL;
819     }
820 
821     group->id = id;
822 
823     return group;
824 }
825 
/* Egress via an L2 interface group: optionally pop the VLAN tag, then
 * either deliver to the CPU (out_pport 0) or transmit on the physical
 * port — unless that port is the one the packet arrived on.
 */
static void of_dpa_output_l2_interface(OfDpaFlowContext *fc,
                                       OfDpaGroup *group)
{
    uint8_t copy_to_cpu = fc->action_set.apply.copy_to_cpu;

    if (group->l2_interface.pop_vlan) {
        of_dpa_flow_pkt_strip_vlan(fc);
    }

    /* Note: By default, and as per the OpenFlow 1.3.1
     * specification, a packet cannot be forwarded back
     * to the IN_PORT from which it came in. An action
     * bucket that specifies the particular packet's
     * egress port is not evaluated.
     */

    if (group->l2_interface.out_pport == 0) {
        /* pport 0 is the CPU port: hand the packet to the host side */
        rx_produce(fc->of_dpa->world, fc->in_pport, fc->iov, fc->iovcnt,
                   copy_to_cpu);
    } else if (group->l2_interface.out_pport != fc->in_pport) {
        rocker_port_eg(world_rocker(fc->of_dpa->world),
                       group->l2_interface.out_pport,
                       fc->iov, fc->iovcnt);
    }
}
851 
852 static void of_dpa_output_l2_rewrite(OfDpaFlowContext *fc,
853                                      OfDpaGroup *group)
854 {
855     OfDpaGroup *l2_group =
856         of_dpa_group_find(fc->of_dpa, group->l2_rewrite.group_id);
857 
858     if (!l2_group) {
859         return;
860     }
861 
862     of_dpa_flow_pkt_hdr_rewrite(fc, group->l2_rewrite.src_mac.a,
863                          group->l2_rewrite.dst_mac.a,
864                          group->l2_rewrite.vlan_id);
865     of_dpa_output_l2_interface(fc, l2_group);
866 }
867 
/* Egress via an L2 flood/multicast group: send a copy through each
 * member group.  The packet headers are reset before each member since a
 * previous member may have rewritten or stripped them.
 */
static void of_dpa_output_l2_flood(OfDpaFlowContext *fc,
                                   OfDpaGroup *group)
{
    OfDpaGroup *l2_group;
    int i;

    for (i = 0; i < group->l2_flood.group_count; i++) {
        of_dpa_flow_pkt_hdr_reset(fc);
        l2_group = of_dpa_group_find(fc->of_dpa, group->l2_flood.group_ids[i]);
        if (!l2_group) {
            continue;                /* dangling member reference: skip */
        }
        switch (ROCKER_GROUP_TYPE_GET(l2_group->id)) {
        case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
            of_dpa_output_l2_interface(fc, l2_group);
            break;
        case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
            of_dpa_output_l2_rewrite(fc, l2_group);
            break;
        }
    }
}
890 
/* Egress via an L3 unicast group: rewrite MACs/VLAN for the next hop,
 * then chain to the referenced L2 interface group.  TTL decrement/check
 * is not implemented (see XXX).
 */
static void of_dpa_output_l3_unicast(OfDpaFlowContext *fc, OfDpaGroup *group)
{
    OfDpaGroup *l2_group =
        of_dpa_group_find(fc->of_dpa, group->l3_unicast.group_id);

    if (!l2_group) {
        return;
    }

    of_dpa_flow_pkt_hdr_rewrite(fc, group->l3_unicast.src_mac.a,
                                group->l3_unicast.dst_mac.a,
                                group->l3_unicast.vlan_id);
    /* XXX need ttl_check */
    of_dpa_output_l2_interface(fc, l2_group);
}
906 
/* Egress processing: optionally copy the packet to the CPU, then execute
 * the accumulated write-actions group, dispatching on its type.
 */
static void of_dpa_eg(OfDpaFlowContext *fc)
{
    OfDpaFlowAction *set = &fc->action_set;
    OfDpaGroup *group;
    uint32_t group_id;

    /* send a copy of pkt to CPU (controller)? */

    if (set->apply.copy_to_cpu) {
        /* CPU copies go via the L2 interface group for (vlan, pport 0) */
        group_id = ROCKER_GROUP_L2_INTERFACE(set->apply.vlan_id, 0);
        group = of_dpa_group_find(fc->of_dpa, group_id);
        if (group) {
            of_dpa_output_l2_interface(fc, group);
            /* restore headers the CPU copy may have stripped/rewritten */
            of_dpa_flow_pkt_hdr_reset(fc);
        }
    }

    /* process group write actions */

    if (!set->write.group_id) {
        return;
    }

    group = of_dpa_group_find(fc->of_dpa, set->write.group_id);
    if (!group) {
        return;                      /* group vanished: drop */
    }

    switch (ROCKER_GROUP_TYPE_GET(group->id)) {
    case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
        of_dpa_output_l2_interface(fc, group);
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
        of_dpa_output_l2_rewrite(fc, group);
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
    case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
        of_dpa_output_l2_flood(fc, group);
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
        of_dpa_output_l3_unicast(fc, group);
        break;
    }
}
951 
/* Per-table callback hooks for the OF-DPA pipeline.  A table supplies
 * only the hooks it needs; NULL hooks are skipped by
 * of_dpa_flow_ig_tbl().
 */
typedef struct of_dpa_flow_tbl_ops {
    void (*build_match)(OfDpaFlowContext *fc, OfDpaFlowMatch *match); /* fill match key from pkt */
    void (*hit)(OfDpaFlowContext *fc, OfDpaFlow *flow);               /* run after a flow matches */
    void (*miss)(OfDpaFlowContext *fc);                               /* run when no flow matches */
    void (*hit_no_goto)(OfDpaFlowContext *fc);                        /* hit, but flow has no goto_tbl */
    void (*action_apply)(OfDpaFlowContext *fc, OfDpaFlow *flow);      /* apply-actions instruction */
    void (*action_write)(OfDpaFlowContext *fc, OfDpaFlow *flow);      /* write-actions instruction */
} OfDpaFlowTblOps;
960 
/* Pipeline dispatch table, indexed by ROCKER_OF_DPA_TABLE_ID_*.
 * Note the ACL table's miss handler is of_dpa_eg(): a packet that
 * matches nothing in ACL still egresses with whatever actions were
 * accumulated by earlier tables.
 */
static OfDpaFlowTblOps of_dpa_tbl_ops[] = {
    [ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT] = {
        .build_match = of_dpa_ig_port_build_match,
        .miss = of_dpa_ig_port_miss,
        .hit_no_goto = of_dpa_drop,
    },
    [ROCKER_OF_DPA_TABLE_ID_VLAN] = {
        .build_match = of_dpa_vlan_build_match,
        .hit_no_goto = of_dpa_drop,
        .action_apply = of_dpa_vlan_insert,
    },
    [ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC] = {
        .build_match = of_dpa_term_mac_build_match,
        .miss = of_dpa_term_mac_miss,
        .hit_no_goto = of_dpa_drop,
        .action_apply = of_dpa_apply_actions,
    },
    [ROCKER_OF_DPA_TABLE_ID_BRIDGING] = {
        .build_match = of_dpa_bridging_build_match,
        .hit = of_dpa_bridging_learn,        /* MAC learning on hit */
        .miss = of_dpa_bridging_miss,
        .hit_no_goto = of_dpa_drop,
        .action_apply = of_dpa_apply_actions,
        .action_write = of_dpa_bridging_action_write,
    },
    [ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING] = {
        .build_match = of_dpa_unicast_routing_build_match,
        .miss = of_dpa_unicast_routing_miss,
        .hit_no_goto = of_dpa_drop,
        .action_write = of_dpa_unicast_routing_action_write,
    },
    [ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING] = {
        .build_match = of_dpa_multicast_routing_build_match,
        .miss = of_dpa_multicast_routing_miss,
        .hit_no_goto = of_dpa_drop,
        .action_write = of_dpa_multicast_routing_action_write,
    },
    [ROCKER_OF_DPA_TABLE_ID_ACL_POLICY] = {
        .build_match = of_dpa_acl_build_match,
        .hit = of_dpa_acl_hit,
        .miss = of_dpa_eg,                   /* ACL miss still egresses */
        .action_apply = of_dpa_apply_actions,
        .action_write = of_dpa_acl_action_write,
    },
};
1006 
1007 static void of_dpa_flow_ig_tbl(OfDpaFlowContext *fc, uint32_t tbl_id)
1008 {
1009     OfDpaFlowTblOps *ops = &of_dpa_tbl_ops[tbl_id];
1010     OfDpaFlowMatch match = { { 0, }, };
1011     OfDpaFlow *flow;
1012 
1013     if (ops->build_match) {
1014         ops->build_match(fc, &match);
1015     } else {
1016         return;
1017     }
1018 
1019     flow = of_dpa_flow_match(fc->of_dpa, &match);
1020     if (!flow) {
1021         if (ops->miss) {
1022             ops->miss(fc);
1023         }
1024         return;
1025     }
1026 
1027     flow->stats.hits++;
1028 
1029     if (ops->action_apply) {
1030         ops->action_apply(fc, flow);
1031     }
1032 
1033     if (ops->action_write) {
1034         ops->action_write(fc, flow);
1035     }
1036 
1037     if (ops->hit) {
1038         ops->hit(fc, flow);
1039     }
1040 
1041     if (flow->action.goto_tbl) {
1042         of_dpa_flow_ig_tbl(fc, flow->action.goto_tbl);
1043     } else if (ops->hit_no_goto) {
1044         ops->hit_no_goto(fc);
1045     }
1046 
1047     /* drop packet */
1048 }
1049 
/* World ingress entry point: parse the incoming packet and start it
 * through the OF-DPA pipeline at the ingress-port table.
 *
 * Returns the full input size; the switch never reports partial
 * consumption of the packet.
 */
static ssize_t of_dpa_ig(World *world, uint32_t pport,
                         const struct iovec *iov, int iovcnt)
{
    /* two extra iovec slots for headers the pipeline may insert;
     * NOTE(review): VLA sized by caller-supplied iovcnt — assumed
     * bounded by the net layer, confirm before reusing this path
     */
    struct iovec iov_copy[iovcnt + 2];
    OfDpaFlowContext fc = {
        .of_dpa = world_private(world),
        .in_pport = pport,
        .iov = iov_copy,
        .iovcnt = iovcnt + 2,
    };

    of_dpa_flow_pkt_parse(&fc, iov, iovcnt);
    of_dpa_flow_ig_tbl(&fc, ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT);

    return iov_size(iov, iovcnt);
}
1066 
1067 #define ROCKER_TUNNEL_LPORT 0x00010000
1068 
1069 static int of_dpa_cmd_add_ig_port(OfDpaFlow *flow, RockerTlv **flow_tlvs)
1070 {
1071     OfDpaFlowKey *key = &flow->key;
1072     OfDpaFlowKey *mask = &flow->mask;
1073     OfDpaFlowAction *action = &flow->action;
1074     bool overlay_tunnel;
1075 
1076     if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] ||
1077         !flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
1078         return -ROCKER_EINVAL;
1079     }
1080 
1081     key->tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
1082     key->width = FLOW_KEY_WIDTH(tbl_id);
1083 
1084     key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]);
1085     if (flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]) {
1086         mask->in_pport =
1087             rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]);
1088     }
1089 
1090     overlay_tunnel = !!(key->in_pport & ROCKER_TUNNEL_LPORT);
1091 
1092     action->goto_tbl =
1093         rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
1094 
1095     if (!overlay_tunnel && action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_VLAN) {
1096         return -ROCKER_EINVAL;
1097     }
1098 
1099     if (overlay_tunnel && action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_BRIDGING) {
1100         return -ROCKER_EINVAL;
1101     }
1102 
1103     return ROCKER_OK;
1104 }
1105 
1106 static int of_dpa_cmd_add_vlan(OfDpaFlow *flow, RockerTlv **flow_tlvs)
1107 {
1108     OfDpaFlowKey *key = &flow->key;
1109     OfDpaFlowKey *mask = &flow->mask;
1110     OfDpaFlowAction *action = &flow->action;
1111     uint32_t port;
1112     bool untagged;
1113 
1114     if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] ||
1115         !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
1116         DPRINTF("Must give in_pport and vlan_id to install VLAN tbl entry\n");
1117         return -ROCKER_EINVAL;
1118     }
1119 
1120     key->tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
1121     key->width = FLOW_KEY_WIDTH(eth.vlan_id);
1122 
1123     key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]);
1124     if (!fp_port_from_pport(key->in_pport, &port)) {
1125         DPRINTF("in_pport (%d) not a front-panel port\n", key->in_pport);
1126         return -ROCKER_EINVAL;
1127     }
1128     mask->in_pport = 0xffffffff;
1129 
1130     key->eth.vlan_id = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
1131 
1132     if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]) {
1133         mask->eth.vlan_id =
1134             rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]);
1135     }
1136 
1137     if (key->eth.vlan_id) {
1138         untagged = false; /* filtering */
1139     } else {
1140         untagged = true;
1141     }
1142 
1143     if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
1144         action->goto_tbl =
1145             rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
1146         if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC) {
1147             DPRINTF("Goto tbl (%d) must be TERM_MAC\n", action->goto_tbl);
1148             return -ROCKER_EINVAL;
1149         }
1150     }
1151 
1152     if (untagged) {
1153         if (!flow_tlvs[ROCKER_TLV_OF_DPA_NEW_VLAN_ID]) {
1154             DPRINTF("Must specify new vlan_id if untagged\n");
1155             return -ROCKER_EINVAL;
1156         }
1157         action->apply.new_vlan_id =
1158             rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_NEW_VLAN_ID]);
1159         if (1 > ntohs(action->apply.new_vlan_id) ||
1160             ntohs(action->apply.new_vlan_id) > 4095) {
1161             DPRINTF("New vlan_id (%d) must be between 1 and 4095\n",
1162                     ntohs(action->apply.new_vlan_id));
1163             return -ROCKER_EINVAL;
1164         }
1165     }
1166 
1167     return ROCKER_OK;
1168 }
1169 
/* Install/modify a flow in the termination-MAC table.
 *
 * Requires in_pport (+mask), ethertype (IPv4 0x0800 or IPv6 0x86dd),
 * dst MAC (+mask), and VLAN (+mask) TLVs.  The dst MAC must be either
 * a unicast address, or exactly one of the two well-known multicast
 * wildcard rules (01:00:5e::/25 for IPv4, 33:33::/16 for IPv6).
 * Unicast entries may only goto unicast routing; multicast entries
 * only multicast routing.  Returns ROCKER_OK or -ROCKER_EINVAL.
 */
static int of_dpa_cmd_add_term_mac(OfDpaFlow *flow, RockerTlv **flow_tlvs)
{
    OfDpaFlowKey *key = &flow->key;
    OfDpaFlowKey *mask = &flow->mask;
    OfDpaFlowAction *action = &flow->action;
    const MACAddr ipv4_mcast = { .a = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 } };
    const MACAddr ipv4_mask =  { .a = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 } };
    const MACAddr ipv6_mcast = { .a = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 } };
    const MACAddr ipv6_mask =  { .a = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } };
    uint32_t port;
    bool unicast = false;
    bool multicast = false;

    if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]) {
        return -ROCKER_EINVAL;
    }

    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
    key->width = FLOW_KEY_WIDTH(eth.type);

    key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]);
    if (!fp_port_from_pport(key->in_pport, &port)) {
        return -ROCKER_EINVAL;
    }
    mask->in_pport =
        rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]);

    /* only IPv4 (0x0800) and IPv6 (0x86dd) can be terminated/routed */
    key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]);
    if (key->eth.type != htons(0x0800) && key->eth.type != htons(0x86dd)) {
        return -ROCKER_EINVAL;
    }
    mask->eth.type = htons(0xffff);

    memcpy(key->eth.dst.a,
           rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
           sizeof(key->eth.dst.a));
    memcpy(mask->eth.dst.a,
           rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]),
           sizeof(mask->eth.dst.a));

    /* I/G bit clear => individual (unicast) address */
    if ((key->eth.dst.a[0] & 0x01) == 0x00) {
        unicast = true;
    }

    /* only two wildcard rules are acceptable for IPv4 and IPv6 multicast */
    if (memcmp(key->eth.dst.a, ipv4_mcast.a, sizeof(key->eth.dst.a)) == 0 &&
        memcmp(mask->eth.dst.a, ipv4_mask.a, sizeof(mask->eth.dst.a)) == 0) {
        multicast = true;
    }
    if (memcmp(key->eth.dst.a, ipv6_mcast.a, sizeof(key->eth.dst.a)) == 0 &&
        memcmp(mask->eth.dst.a, ipv6_mask.a, sizeof(mask->eth.dst.a)) == 0) {
        multicast = true;
    }

    if (!unicast && !multicast) {
        return -ROCKER_EINVAL;
    }

    key->eth.vlan_id = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
    mask->eth.vlan_id =
        rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]);

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
        action->goto_tbl =
            rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);

        if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING &&
            action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING) {
            return -ROCKER_EINVAL;
        }

        /* the goto table must agree with the address class matched */
        if (unicast &&
            action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING) {
            return -ROCKER_EINVAL;
        }

        if (multicast &&
            action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING) {
            return -ROCKER_EINVAL;
        }
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]) {
        action->apply.copy_to_cpu =
            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]);
    }

    return ROCKER_OK;
}
1265 
1266 static int of_dpa_cmd_add_bridging(OfDpaFlow *flow, RockerTlv **flow_tlvs)
1267 {
1268     OfDpaFlowKey *key = &flow->key;
1269     OfDpaFlowKey *mask = &flow->mask;
1270     OfDpaFlowAction *action = &flow->action;
1271     bool unicast = false;
1272     bool dst_mac = false;
1273     bool dst_mac_mask = false;
1274     enum {
1275         BRIDGING_MODE_UNKNOWN,
1276         BRIDGING_MODE_VLAN_UCAST,
1277         BRIDGING_MODE_VLAN_MCAST,
1278         BRIDGING_MODE_VLAN_DFLT,
1279         BRIDGING_MODE_TUNNEL_UCAST,
1280         BRIDGING_MODE_TUNNEL_MCAST,
1281         BRIDGING_MODE_TUNNEL_DFLT,
1282     } mode = BRIDGING_MODE_UNKNOWN;
1283 
1284     key->tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
1285 
1286     if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
1287         key->eth.vlan_id =
1288             rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
1289         mask->eth.vlan_id = 0xffff;
1290         key->width = FLOW_KEY_WIDTH(eth.vlan_id);
1291     }
1292 
1293     if (flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_ID]) {
1294         key->tunnel_id =
1295             rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_ID]);
1296         mask->tunnel_id = 0xffffffff;
1297         key->width = FLOW_KEY_WIDTH(tunnel_id);
1298     }
1299 
1300     /* can't do VLAN bridging and tunnel bridging at same time */
1301     if (key->eth.vlan_id && key->tunnel_id) {
1302         DPRINTF("can't do VLAN bridging and tunnel bridging at same time\n");
1303         return -ROCKER_EINVAL;
1304     }
1305 
1306     if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
1307         memcpy(key->eth.dst.a,
1308                rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
1309                sizeof(key->eth.dst.a));
1310         key->width = FLOW_KEY_WIDTH(eth.dst);
1311         dst_mac = true;
1312         unicast = (key->eth.dst.a[0] & 0x01) == 0x00;
1313     }
1314 
1315     if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]) {
1316         memcpy(mask->eth.dst.a,
1317                rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]),
1318                sizeof(mask->eth.dst.a));
1319         key->width = FLOW_KEY_WIDTH(eth.dst);
1320         dst_mac_mask = true;
1321     } else if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
1322         memcpy(mask->eth.dst.a, ff_mac.a, sizeof(mask->eth.dst.a));
1323     }
1324 
1325     if (key->eth.vlan_id) {
1326         if (dst_mac && !dst_mac_mask) {
1327             mode = unicast ? BRIDGING_MODE_VLAN_UCAST :
1328                              BRIDGING_MODE_VLAN_MCAST;
1329         } else if ((dst_mac && dst_mac_mask) || !dst_mac) {
1330             mode = BRIDGING_MODE_VLAN_DFLT;
1331         }
1332     } else if (key->tunnel_id) {
1333         if (dst_mac && !dst_mac_mask) {
1334             mode = unicast ? BRIDGING_MODE_TUNNEL_UCAST :
1335                              BRIDGING_MODE_TUNNEL_MCAST;
1336         } else if ((dst_mac && dst_mac_mask) || !dst_mac) {
1337             mode = BRIDGING_MODE_TUNNEL_DFLT;
1338         }
1339     }
1340 
1341     if (mode == BRIDGING_MODE_UNKNOWN) {
1342         DPRINTF("Unknown bridging mode\n");
1343         return -ROCKER_EINVAL;
1344     }
1345 
1346     if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
1347         action->goto_tbl =
1348             rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
1349         if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_ACL_POLICY) {
1350             DPRINTF("Briding goto tbl must be ACL policy\n");
1351             return -ROCKER_EINVAL;
1352         }
1353     }
1354 
1355     if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
1356         action->write.group_id =
1357             rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
1358         switch (mode) {
1359         case BRIDGING_MODE_VLAN_UCAST:
1360             if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
1361                 ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) {
1362                 DPRINTF("Bridging mode vlan ucast needs L2 "
1363                         "interface group (0x%08x)\n",
1364                         action->write.group_id);
1365                 return -ROCKER_EINVAL;
1366             }
1367             break;
1368         case BRIDGING_MODE_VLAN_MCAST:
1369             if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
1370                 ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST) {
1371                 DPRINTF("Bridging mode vlan mcast needs L2 "
1372                         "mcast group (0x%08x)\n",
1373                         action->write.group_id);
1374                 return -ROCKER_EINVAL;
1375             }
1376             break;
1377         case BRIDGING_MODE_VLAN_DFLT:
1378             if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
1379                 ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD) {
1380                 DPRINTF("Bridging mode vlan dflt needs L2 "
1381                         "flood group (0x%08x)\n",
1382                         action->write.group_id);
1383                 return -ROCKER_EINVAL;
1384             }
1385             break;
1386         case BRIDGING_MODE_TUNNEL_MCAST:
1387             if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
1388                 ROCKER_OF_DPA_GROUP_TYPE_L2_OVERLAY) {
1389                 DPRINTF("Bridging mode tunnel mcast needs L2 "
1390                         "overlay group (0x%08x)\n",
1391                         action->write.group_id);
1392                 return -ROCKER_EINVAL;
1393             }
1394             break;
1395         case BRIDGING_MODE_TUNNEL_DFLT:
1396             if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
1397                 ROCKER_OF_DPA_GROUP_TYPE_L2_OVERLAY) {
1398                 DPRINTF("Bridging mode tunnel dflt needs L2 "
1399                         "overlay group (0x%08x)\n",
1400                         action->write.group_id);
1401                 return -ROCKER_EINVAL;
1402             }
1403             break;
1404         default:
1405             return -ROCKER_EINVAL;
1406         }
1407     }
1408 
1409     if (flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_LPORT]) {
1410         action->write.tun_log_lport =
1411             rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_LPORT]);
1412         if (mode != BRIDGING_MODE_TUNNEL_UCAST) {
1413             DPRINTF("Have tunnel logical port but not "
1414                     "in bridging tunnel mode\n");
1415             return -ROCKER_EINVAL;
1416         }
1417     }
1418 
1419     if (flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]) {
1420         action->apply.copy_to_cpu =
1421             rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]);
1422     }
1423 
1424     return ROCKER_OK;
1425 }
1426 
/* Install/modify a flow in the unicast-routing table.
 *
 * Requires an ethertype TLV of IPv4 (0x0800) or IPv6 (0x86dd) and the
 * corresponding DST_IP/DST_IPV6 TLV; the destination must not be a
 * multicast address.  For IPv4 the optional dst mask sets the LPM
 * prefix length (default /32).  An optional goto must be the ACL
 * table; an optional write group must be an L2 interface, L3 unicast,
 * or L3 ECMP group.  Returns ROCKER_OK or -ROCKER_EINVAL.
 */
static int of_dpa_cmd_add_unicast_routing(OfDpaFlow *flow,
                                          RockerTlv **flow_tlvs)
{
    OfDpaFlowKey *key = &flow->key;
    OfDpaFlowKey *mask = &flow->mask;
    OfDpaFlowAction *action = &flow->action;
    enum {
        UNICAST_ROUTING_MODE_UNKNOWN,
        UNICAST_ROUTING_MODE_IPV4,
        UNICAST_ROUTING_MODE_IPV6,
    } mode = UNICAST_ROUTING_MODE_UNKNOWN;
    uint8_t type;

    if (!flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]) {
        return -ROCKER_EINVAL;
    }

    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
    key->width = FLOW_KEY_WIDTH(ipv6.addr.dst);

    key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]);
    switch (ntohs(key->eth.type)) {
    case 0x0800:
        mode = UNICAST_ROUTING_MODE_IPV4;
        break;
    case 0x86dd:
        mode = UNICAST_ROUTING_MODE_IPV6;
        break;
    default:
        return -ROCKER_EINVAL;
    }
    mask->eth.type = htons(0xffff);

    switch (mode) {
    case UNICAST_ROUTING_MODE_IPV4:
        if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]) {
            return -ROCKER_EINVAL;
        }
        key->ipv4.addr.dst =
            rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]);
        if (ipv4_addr_is_multicast(key->ipv4.addr.dst)) {
            return -ROCKER_EINVAL;
        }
        /* default to a host route (/32) unless a mask narrows it */
        flow->lpm = of_dpa_mask2prefix(htonl(0xffffffff));
        if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP_MASK]) {
            mask->ipv4.addr.dst =
                rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP_MASK]);
            flow->lpm = of_dpa_mask2prefix(mask->ipv4.addr.dst);
        }
        break;
    case UNICAST_ROUTING_MODE_IPV6:
        if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]) {
            return -ROCKER_EINVAL;
        }
        memcpy(&key->ipv6.addr.dst,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]),
               sizeof(key->ipv6.addr.dst));
        if (ipv6_addr_is_multicast(&key->ipv6.addr.dst)) {
            return -ROCKER_EINVAL;
        }
        /* NOTE(review): unlike IPv4, the IPv6 mask does not update
         * flow->lpm -- confirm whether that is intentional
         */
        if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6_MASK]) {
            memcpy(&mask->ipv6.addr.dst,
                   rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6_MASK]),
                   sizeof(mask->ipv6.addr.dst));
        }
        break;
    default:
        return -ROCKER_EINVAL;
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
        action->goto_tbl =
            rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
        if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_ACL_POLICY) {
            return -ROCKER_EINVAL;
        }
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
        action->write.group_id =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
        type = ROCKER_GROUP_TYPE_GET(action->write.group_id);
        if (type != ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE &&
            type != ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST &&
            type != ROCKER_OF_DPA_GROUP_TYPE_L3_ECMP) {
            return -ROCKER_EINVAL;
        }
    }

    return ROCKER_OK;
}
1518 
1519 static int of_dpa_cmd_add_multicast_routing(OfDpaFlow *flow,
1520                                             RockerTlv **flow_tlvs)
1521 {
1522     OfDpaFlowKey *key = &flow->key;
1523     OfDpaFlowKey *mask = &flow->mask;
1524     OfDpaFlowAction *action = &flow->action;
1525     enum {
1526         MULTICAST_ROUTING_MODE_UNKNOWN,
1527         MULTICAST_ROUTING_MODE_IPV4,
1528         MULTICAST_ROUTING_MODE_IPV6,
1529     } mode = MULTICAST_ROUTING_MODE_UNKNOWN;
1530 
1531     if (!flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE] ||
1532         !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
1533         return -ROCKER_EINVAL;
1534     }
1535 
1536     key->tbl_id = ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
1537     key->width = FLOW_KEY_WIDTH(ipv6.addr.dst);
1538 
1539     key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]);
1540     switch (ntohs(key->eth.type)) {
1541     case 0x0800:
1542         mode = MULTICAST_ROUTING_MODE_IPV4;
1543         break;
1544     case 0x86dd:
1545         mode = MULTICAST_ROUTING_MODE_IPV6;
1546         break;
1547     default:
1548         return -ROCKER_EINVAL;
1549     }
1550 
1551     key->eth.vlan_id = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
1552 
1553     switch (mode) {
1554     case MULTICAST_ROUTING_MODE_IPV4:
1555 
1556         if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP]) {
1557             key->ipv4.addr.src =
1558                 rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP]);
1559         }
1560 
1561         if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP_MASK]) {
1562             mask->ipv4.addr.src =
1563                 rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP_MASK]);
1564         }
1565 
1566         if (!flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP]) {
1567             if (mask->ipv4.addr.src != 0) {
1568                 return -ROCKER_EINVAL;
1569             }
1570         }
1571 
1572         if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]) {
1573             return -ROCKER_EINVAL;
1574         }
1575 
1576         key->ipv4.addr.dst =
1577             rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]);
1578         if (!ipv4_addr_is_multicast(key->ipv4.addr.dst)) {
1579             return -ROCKER_EINVAL;
1580         }
1581 
1582         break;
1583 
1584     case MULTICAST_ROUTING_MODE_IPV6:
1585 
1586         if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6]) {
1587             memcpy(&key->ipv6.addr.src,
1588                    rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6]),
1589                    sizeof(key->ipv6.addr.src));
1590         }
1591 
1592         if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6_MASK]) {
1593             memcpy(&mask->ipv6.addr.src,
1594                    rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6_MASK]),
1595                    sizeof(mask->ipv6.addr.src));
1596         }
1597 
1598         if (!flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6]) {
1599             if (mask->ipv6.addr.src.addr32[0] != 0 &&
1600                 mask->ipv6.addr.src.addr32[1] != 0 &&
1601                 mask->ipv6.addr.src.addr32[2] != 0 &&
1602                 mask->ipv6.addr.src.addr32[3] != 0) {
1603                 return -ROCKER_EINVAL;
1604             }
1605         }
1606 
1607         if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]) {
1608             return -ROCKER_EINVAL;
1609         }
1610 
1611         memcpy(&key->ipv6.addr.dst,
1612                rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]),
1613                sizeof(key->ipv6.addr.dst));
1614         if (!ipv6_addr_is_multicast(&key->ipv6.addr.dst)) {
1615             return -ROCKER_EINVAL;
1616         }
1617 
1618         break;
1619 
1620     default:
1621         return -ROCKER_EINVAL;
1622     }
1623 
1624     if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
1625         action->goto_tbl =
1626             rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
1627         if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_ACL_POLICY) {
1628             return -ROCKER_EINVAL;
1629         }
1630     }
1631 
1632     if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
1633         action->write.group_id =
1634             rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
1635         if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
1636             ROCKER_OF_DPA_GROUP_TYPE_L3_MCAST) {
1637             return -ROCKER_EINVAL;
1638         }
1639         action->write.vlan_id = key->eth.vlan_id;
1640     }
1641 
1642     return ROCKER_OK;
1643 }
1644 
1645 static int of_dpa_cmd_add_acl_ip(OfDpaFlowKey *key, OfDpaFlowKey *mask,
1646                                  RockerTlv **flow_tlvs)
1647 {
1648     key->width = FLOW_KEY_WIDTH(ip.tos);
1649 
1650     key->ip.proto = 0;
1651     key->ip.tos = 0;
1652     mask->ip.proto = 0;
1653     mask->ip.tos = 0;
1654 
1655     if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO]) {
1656         key->ip.proto =
1657             rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO]);
1658     }
1659     if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO_MASK]) {
1660         mask->ip.proto =
1661             rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO_MASK]);
1662     }
1663     if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP]) {
1664         key->ip.tos =
1665             rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP]);
1666     }
1667     if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP_MASK]) {
1668         mask->ip.tos =
1669             rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP_MASK]);
1670     }
1671     if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN]) {
1672         key->ip.tos |=
1673             rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN]) << 6;
1674     }
1675     if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN_MASK]) {
1676         mask->ip.tos |=
1677             rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN_MASK]) << 6;
1678     }
1679 
1680     return ROCKER_OK;
1681 }
1682 
/* Install/modify a flow in the ACL policy table.
 *
 * Requires IN_PPORT and ETHERTYPE TLVs; VLAN and tunnel keying are
 * mutually exclusive.  The entry is classified by ethertype (0 = any,
 * 0x0800 = IPv4, 0x86dd = IPv6, other = non-IP) crossed with
 * VLAN-vs-tenant keying; only the VLAN modes are currently supported.
 * IP ethertypes additionally parse the IP-layer qualifiers via
 * of_dpa_cmd_add_acl_ip().  Returns ROCKER_OK or -ROCKER_EINVAL.
 */
static int of_dpa_cmd_add_acl(OfDpaFlow *flow, RockerTlv **flow_tlvs)
{
    OfDpaFlowKey *key = &flow->key;
    OfDpaFlowKey *mask = &flow->mask;
    OfDpaFlowAction *action = &flow->action;
    enum {
        ACL_MODE_UNKNOWN,
        ACL_MODE_IPV4_VLAN,
        ACL_MODE_IPV6_VLAN,
        ACL_MODE_IPV4_TENANT,
        ACL_MODE_IPV6_TENANT,
        ACL_MODE_NON_IP_VLAN,
        ACL_MODE_NON_IP_TENANT,
        ACL_MODE_ANY_VLAN,
        ACL_MODE_ANY_TENANT,
    } mode = ACL_MODE_UNKNOWN;
    int err = ROCKER_OK;

    if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]) {
        return -ROCKER_EINVAL;
    }

    /* VLAN and tunnel keying are mutually exclusive */
    if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID] &&
        flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_ID]) {
        return -ROCKER_EINVAL;
    }

    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
    key->width = FLOW_KEY_WIDTH(eth.type);

    key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]);
    if (flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]) {
        mask->in_pport =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]);
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]) {
        memcpy(key->eth.src.a,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]),
               sizeof(key->eth.src.a));
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC_MASK]) {
        memcpy(mask->eth.src.a,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC_MASK]),
               sizeof(mask->eth.src.a));
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
        memcpy(key->eth.dst.a,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
               sizeof(key->eth.dst.a));
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]) {
        memcpy(mask->eth.dst.a,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]),
               sizeof(mask->eth.dst.a));
    }

    /* ethertype 0 means "match any"; non-zero gets an exact-match mask
     * (0xffff is byte-order invariant, so no htons() is needed here)
     */
    key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]);
    if (key->eth.type) {
        mask->eth.type = 0xffff;
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
        key->eth.vlan_id =
            rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]) {
        mask->eth.vlan_id =
            rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]);
    }

    /* classify by ethertype x (VLAN present?) */
    switch (ntohs(key->eth.type)) {
    case 0x0000:
        mode = (key->eth.vlan_id) ? ACL_MODE_ANY_VLAN : ACL_MODE_ANY_TENANT;
        break;
    case 0x0800:
        mode = (key->eth.vlan_id) ? ACL_MODE_IPV4_VLAN : ACL_MODE_IPV4_TENANT;
        break;
    case 0x86dd:
        mode = (key->eth.vlan_id) ? ACL_MODE_IPV6_VLAN : ACL_MODE_IPV6_TENANT;
        break;
    default:
        mode = (key->eth.vlan_id) ? ACL_MODE_NON_IP_VLAN :
                                    ACL_MODE_NON_IP_TENANT;
        break;
    }

    /* XXX only supporting VLAN modes for now */
    if (mode != ACL_MODE_IPV4_VLAN &&
        mode != ACL_MODE_IPV6_VLAN &&
        mode != ACL_MODE_NON_IP_VLAN &&
        mode != ACL_MODE_ANY_VLAN) {
        return -ROCKER_EINVAL;
    }

    /* IP ethertypes carry additional L3 qualifiers */
    switch (ntohs(key->eth.type)) {
    case 0x0800:
    case 0x86dd:
        err = of_dpa_cmd_add_acl_ip(key, mask, flow_tlvs);
        break;
    }

    if (err) {
        return err;
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
        action->write.group_id =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]) {
        action->apply.copy_to_cpu =
            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]);
    }

    return ROCKER_OK;
}
1806 
1807 static int of_dpa_cmd_flow_add_mod(OfDpa *of_dpa, OfDpaFlow *flow,
1808                                    RockerTlv **flow_tlvs)
1809 {
1810     enum rocker_of_dpa_table_id tbl;
1811     int err = ROCKER_OK;
1812 
1813     if (!flow_tlvs[ROCKER_TLV_OF_DPA_TABLE_ID] ||
1814         !flow_tlvs[ROCKER_TLV_OF_DPA_PRIORITY] ||
1815         !flow_tlvs[ROCKER_TLV_OF_DPA_HARDTIME]) {
1816         return -ROCKER_EINVAL;
1817     }
1818 
1819     tbl = rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_TABLE_ID]);
1820     flow->priority = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_PRIORITY]);
1821     flow->hardtime = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_HARDTIME]);
1822 
1823     if (flow_tlvs[ROCKER_TLV_OF_DPA_IDLETIME]) {
1824         if (tbl == ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT ||
1825             tbl == ROCKER_OF_DPA_TABLE_ID_VLAN ||
1826             tbl == ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC) {
1827             return -ROCKER_EINVAL;
1828         }
1829         flow->idletime =
1830             rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IDLETIME]);
1831     }
1832 
1833     switch (tbl) {
1834     case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
1835         err = of_dpa_cmd_add_ig_port(flow, flow_tlvs);
1836         break;
1837     case ROCKER_OF_DPA_TABLE_ID_VLAN:
1838         err = of_dpa_cmd_add_vlan(flow, flow_tlvs);
1839         break;
1840     case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
1841         err = of_dpa_cmd_add_term_mac(flow, flow_tlvs);
1842         break;
1843     case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
1844         err = of_dpa_cmd_add_bridging(flow, flow_tlvs);
1845         break;
1846     case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
1847         err = of_dpa_cmd_add_unicast_routing(flow, flow_tlvs);
1848         break;
1849     case ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING:
1850         err = of_dpa_cmd_add_multicast_routing(flow, flow_tlvs);
1851         break;
1852     case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
1853         err = of_dpa_cmd_add_acl(flow, flow_tlvs);
1854         break;
1855     }
1856 
1857     return err;
1858 }
1859 
1860 static int of_dpa_cmd_flow_add(OfDpa *of_dpa, uint64_t cookie,
1861                                RockerTlv **flow_tlvs)
1862 {
1863     OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie);
1864     int err = ROCKER_OK;
1865 
1866     if (flow) {
1867         return -ROCKER_EEXIST;
1868     }
1869 
1870     flow = of_dpa_flow_alloc(cookie);
1871     if (!flow) {
1872         return -ROCKER_ENOMEM;
1873     }
1874 
1875     err = of_dpa_cmd_flow_add_mod(of_dpa, flow, flow_tlvs);
1876     if (err) {
1877         g_free(flow);
1878         return err;
1879     }
1880 
1881     return of_dpa_flow_add(of_dpa, flow);
1882 }
1883 
1884 static int of_dpa_cmd_flow_mod(OfDpa *of_dpa, uint64_t cookie,
1885                                RockerTlv **flow_tlvs)
1886 {
1887     OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie);
1888 
1889     if (!flow) {
1890         return -ROCKER_ENOENT;
1891     }
1892 
1893     return of_dpa_cmd_flow_add_mod(of_dpa, flow, flow_tlvs);
1894 }
1895 
1896 static int of_dpa_cmd_flow_del(OfDpa *of_dpa, uint64_t cookie)
1897 {
1898     OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie);
1899 
1900     if (!flow) {
1901         return -ROCKER_ENOENT;
1902     }
1903 
1904     of_dpa_flow_del(of_dpa, flow);
1905 
1906     return ROCKER_OK;
1907 }
1908 
1909 static int of_dpa_cmd_flow_get_stats(OfDpa *of_dpa, uint64_t cookie,
1910                                      struct desc_info *info, char *buf)
1911 {
1912     OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie);
1913     size_t tlv_size;
1914     int64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) / 1000;
1915     int pos;
1916 
1917     if (!flow) {
1918         return -ROCKER_ENOENT;
1919     }
1920 
1921     tlv_size = rocker_tlv_total_size(sizeof(uint32_t)) +  /* duration */
1922                rocker_tlv_total_size(sizeof(uint64_t)) +  /* rx_pkts */
1923                rocker_tlv_total_size(sizeof(uint64_t));   /* tx_ptks */
1924 
1925     if (tlv_size > desc_buf_size(info)) {
1926         return -ROCKER_EMSGSIZE;
1927     }
1928 
1929     pos = 0;
1930     rocker_tlv_put_le32(buf, &pos, ROCKER_TLV_OF_DPA_FLOW_STAT_DURATION,
1931                         (int32_t)(now - flow->stats.install_time));
1932     rocker_tlv_put_le64(buf, &pos, ROCKER_TLV_OF_DPA_FLOW_STAT_RX_PKTS,
1933                         flow->stats.rx_pkts);
1934     rocker_tlv_put_le64(buf, &pos, ROCKER_TLV_OF_DPA_FLOW_STAT_TX_PKTS,
1935                         flow->stats.tx_pkts);
1936 
1937     return desc_set_buf(info, tlv_size);
1938 }
1939 
1940 static int of_dpa_flow_cmd(OfDpa *of_dpa, struct desc_info *info,
1941                            char *buf, uint16_t cmd,
1942                            RockerTlv **flow_tlvs)
1943 {
1944     uint64_t cookie;
1945 
1946     if (!flow_tlvs[ROCKER_TLV_OF_DPA_COOKIE]) {
1947         return -ROCKER_EINVAL;
1948     }
1949 
1950     cookie = rocker_tlv_get_le64(flow_tlvs[ROCKER_TLV_OF_DPA_COOKIE]);
1951 
1952     switch (cmd) {
1953     case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD:
1954         return of_dpa_cmd_flow_add(of_dpa, cookie, flow_tlvs);
1955     case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD:
1956         return of_dpa_cmd_flow_mod(of_dpa, cookie, flow_tlvs);
1957     case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL:
1958         return of_dpa_cmd_flow_del(of_dpa, cookie);
1959     case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS:
1960         return of_dpa_cmd_flow_get_stats(of_dpa, cookie, info, buf);
1961     }
1962 
1963     return -ROCKER_ENOTSUP;
1964 }
1965 
1966 static int of_dpa_cmd_add_l2_interface(OfDpaGroup *group,
1967                                        RockerTlv **group_tlvs)
1968 {
1969     if (!group_tlvs[ROCKER_TLV_OF_DPA_OUT_PPORT] ||
1970         !group_tlvs[ROCKER_TLV_OF_DPA_POP_VLAN]) {
1971         return -ROCKER_EINVAL;
1972     }
1973 
1974     group->l2_interface.out_pport =
1975         rocker_tlv_get_le32(group_tlvs[ROCKER_TLV_OF_DPA_OUT_PPORT]);
1976     group->l2_interface.pop_vlan =
1977         rocker_tlv_get_u8(group_tlvs[ROCKER_TLV_OF_DPA_POP_VLAN]);
1978 
1979     return ROCKER_OK;
1980 }
1981 
1982 static int of_dpa_cmd_add_l2_rewrite(OfDpa *of_dpa, OfDpaGroup *group,
1983                                      RockerTlv **group_tlvs)
1984 {
1985     OfDpaGroup *l2_interface_group;
1986 
1987     if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER]) {
1988         return -ROCKER_EINVAL;
1989     }
1990 
1991     group->l2_rewrite.group_id =
1992         rocker_tlv_get_le32(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER]);
1993 
1994     l2_interface_group = of_dpa_group_find(of_dpa, group->l2_rewrite.group_id);
1995     if (!l2_interface_group ||
1996         ROCKER_GROUP_TYPE_GET(l2_interface_group->id) !=
1997                               ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) {
1998         DPRINTF("l2 rewrite group needs a valid l2 interface group\n");
1999         return -ROCKER_EINVAL;
2000     }
2001 
2002     if (group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]) {
2003         memcpy(group->l2_rewrite.src_mac.a,
2004                rocker_tlv_data(group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]),
2005                sizeof(group->l2_rewrite.src_mac.a));
2006     }
2007 
2008     if (group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
2009         memcpy(group->l2_rewrite.dst_mac.a,
2010                rocker_tlv_data(group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
2011                sizeof(group->l2_rewrite.dst_mac.a));
2012     }
2013 
2014     if (group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
2015         group->l2_rewrite.vlan_id =
2016             rocker_tlv_get_u16(group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
2017         if (ROCKER_GROUP_VLAN_GET(l2_interface_group->id) !=
2018             (ntohs(group->l2_rewrite.vlan_id) & VLAN_VID_MASK)) {
2019             DPRINTF("Set VLAN ID must be same as L2 interface group\n");
2020             return -ROCKER_EINVAL;
2021         }
2022     }
2023 
2024     return ROCKER_OK;
2025 }
2026 
2027 static int of_dpa_cmd_add_l2_flood(OfDpa *of_dpa, OfDpaGroup *group,
2028                                    RockerTlv **group_tlvs)
2029 {
2030     OfDpaGroup *l2_group;
2031     RockerTlv **tlvs;
2032     int err;
2033     int i;
2034 
2035     if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_COUNT] ||
2036         !group_tlvs[ROCKER_TLV_OF_DPA_GROUP_IDS]) {
2037         return -ROCKER_EINVAL;
2038     }
2039 
2040     group->l2_flood.group_count =
2041         rocker_tlv_get_le16(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_COUNT]);
2042 
2043     tlvs = g_new0(RockerTlv *, group->l2_flood.group_count + 1);
2044     if (!tlvs) {
2045         return -ROCKER_ENOMEM;
2046     }
2047 
2048     g_free(group->l2_flood.group_ids);
2049     group->l2_flood.group_ids =
2050         g_new0(uint32_t, group->l2_flood.group_count);
2051     if (!group->l2_flood.group_ids) {
2052         err = -ROCKER_ENOMEM;
2053         goto err_out;
2054     }
2055 
2056     rocker_tlv_parse_nested(tlvs, group->l2_flood.group_count,
2057                             group_tlvs[ROCKER_TLV_OF_DPA_GROUP_IDS]);
2058 
2059     for (i = 0; i < group->l2_flood.group_count; i++) {
2060         group->l2_flood.group_ids[i] = rocker_tlv_get_le32(tlvs[i + 1]);
2061     }
2062 
2063     /* All of the L2 interface groups referenced by the L2 flood
2064      * must have same VLAN
2065      */
2066 
2067     for (i = 0; i < group->l2_flood.group_count; i++) {
2068         l2_group = of_dpa_group_find(of_dpa, group->l2_flood.group_ids[i]);
2069         if (!l2_group) {
2070             continue;
2071         }
2072         if ((ROCKER_GROUP_TYPE_GET(l2_group->id) ==
2073              ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) &&
2074             (ROCKER_GROUP_VLAN_GET(l2_group->id) !=
2075              ROCKER_GROUP_VLAN_GET(group->id))) {
2076             DPRINTF("l2 interface group 0x%08x VLAN doesn't match l2 "
2077                     "flood group 0x%08x\n",
2078                     group->l2_flood.group_ids[i], group->id);
2079             err = -ROCKER_EINVAL;
2080             goto err_out;
2081         }
2082     }
2083 
2084     g_free(tlvs);
2085     return ROCKER_OK;
2086 
2087 err_out:
2088     group->l2_flood.group_count = 0;
2089     g_free(group->l2_flood.group_ids);
2090     g_free(tlvs);
2091 
2092     return err;
2093 }
2094 
2095 static int of_dpa_cmd_add_l3_unicast(OfDpaGroup *group, RockerTlv **group_tlvs)
2096 {
2097     if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER]) {
2098         return -ROCKER_EINVAL;
2099     }
2100 
2101     group->l3_unicast.group_id =
2102         rocker_tlv_get_le32(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER]);
2103 
2104     if (group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]) {
2105         memcpy(group->l3_unicast.src_mac.a,
2106                rocker_tlv_data(group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]),
2107                sizeof(group->l3_unicast.src_mac.a));
2108     }
2109 
2110     if (group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
2111         memcpy(group->l3_unicast.dst_mac.a,
2112                rocker_tlv_data(group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
2113                sizeof(group->l3_unicast.dst_mac.a));
2114     }
2115 
2116     if (group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
2117         group->l3_unicast.vlan_id =
2118             rocker_tlv_get_u16(group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
2119     }
2120 
2121     if (group_tlvs[ROCKER_TLV_OF_DPA_TTL_CHECK]) {
2122         group->l3_unicast.ttl_check =
2123             rocker_tlv_get_u8(group_tlvs[ROCKER_TLV_OF_DPA_TTL_CHECK]);
2124     }
2125 
2126     return ROCKER_OK;
2127 }
2128 
2129 static int of_dpa_cmd_group_do(OfDpa *of_dpa, uint32_t group_id,
2130                                OfDpaGroup *group, RockerTlv **group_tlvs)
2131 {
2132     uint8_t type = ROCKER_GROUP_TYPE_GET(group_id);
2133 
2134     switch (type) {
2135     case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
2136         return of_dpa_cmd_add_l2_interface(group, group_tlvs);
2137     case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
2138         return of_dpa_cmd_add_l2_rewrite(of_dpa, group, group_tlvs);
2139     case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2140     /* Treat L2 multicast group same as a L2 flood group */
2141     case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2142         return of_dpa_cmd_add_l2_flood(of_dpa, group, group_tlvs);
2143     case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
2144         return of_dpa_cmd_add_l3_unicast(group, group_tlvs);
2145     }
2146 
2147     return -ROCKER_ENOTSUP;
2148 }
2149 
2150 static int of_dpa_cmd_group_add(OfDpa *of_dpa, uint32_t group_id,
2151                                 RockerTlv **group_tlvs)
2152 {
2153     OfDpaGroup *group = of_dpa_group_find(of_dpa, group_id);
2154     int err;
2155 
2156     if (group) {
2157         return -ROCKER_EEXIST;
2158     }
2159 
2160     group = of_dpa_group_alloc(group_id);
2161     if (!group) {
2162         return -ROCKER_ENOMEM;
2163     }
2164 
2165     err = of_dpa_cmd_group_do(of_dpa, group_id, group, group_tlvs);
2166     if (err) {
2167         goto err_cmd_add;
2168     }
2169 
2170     err = of_dpa_group_add(of_dpa, group);
2171     if (err) {
2172         goto err_cmd_add;
2173     }
2174 
2175     return ROCKER_OK;
2176 
2177 err_cmd_add:
2178     g_free(group);
2179     return err;
2180 }
2181 
2182 static int of_dpa_cmd_group_mod(OfDpa *of_dpa, uint32_t group_id,
2183                                 RockerTlv **group_tlvs)
2184 {
2185     OfDpaGroup *group = of_dpa_group_find(of_dpa, group_id);
2186 
2187     if (!group) {
2188         return -ROCKER_ENOENT;
2189     }
2190 
2191     return of_dpa_cmd_group_do(of_dpa, group_id, group, group_tlvs);
2192 }
2193 
2194 static int of_dpa_cmd_group_del(OfDpa *of_dpa, uint32_t group_id)
2195 {
2196     OfDpaGroup *group = of_dpa_group_find(of_dpa, group_id);
2197 
2198     if (!group) {
2199         return -ROCKER_ENOENT;
2200     }
2201 
2202     return of_dpa_group_del(of_dpa, group);
2203 }
2204 
/* Group statistics are not implemented; always reports -ROCKER_ENOTSUP. */
static int of_dpa_cmd_group_get_stats(OfDpa *of_dpa, uint32_t group_id,
                                      struct desc_info *info, char *buf)
{
    return -ROCKER_ENOTSUP;
}
2210 
2211 static int of_dpa_group_cmd(OfDpa *of_dpa, struct desc_info *info,
2212                             char *buf, uint16_t cmd, RockerTlv **group_tlvs)
2213 {
2214     uint32_t group_id;
2215 
2216     if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
2217         return -ROCKER_EINVAL;
2218     }
2219 
2220     group_id = rocker_tlv_get_le32(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
2221 
2222     switch (cmd) {
2223     case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD:
2224         return of_dpa_cmd_group_add(of_dpa, group_id, group_tlvs);
2225     case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD:
2226         return of_dpa_cmd_group_mod(of_dpa, group_id, group_tlvs);
2227     case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL:
2228         return of_dpa_cmd_group_del(of_dpa, group_id);
2229     case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS:
2230         return of_dpa_cmd_group_get_stats(of_dpa, group_id, info, buf);
2231     }
2232 
2233     return -ROCKER_ENOTSUP;
2234 }
2235 
2236 static int of_dpa_cmd(World *world, struct desc_info *info,
2237                       char *buf, uint16_t cmd, RockerTlv *cmd_info_tlv)
2238 {
2239     OfDpa *of_dpa = world_private(world);
2240     RockerTlv *tlvs[ROCKER_TLV_OF_DPA_MAX + 1];
2241 
2242     rocker_tlv_parse_nested(tlvs, ROCKER_TLV_OF_DPA_MAX, cmd_info_tlv);
2243 
2244     switch (cmd) {
2245     case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD:
2246     case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD:
2247     case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL:
2248     case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS:
2249         return of_dpa_flow_cmd(of_dpa, info, buf, cmd, tlvs);
2250     case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD:
2251     case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD:
2252     case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL:
2253     case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS:
2254         return of_dpa_group_cmd(of_dpa, info, buf, cmd, tlvs);
2255     }
2256 
2257     return -ROCKER_ENOTSUP;
2258 }
2259 
2260 static gboolean rocker_int64_equal(gconstpointer v1, gconstpointer v2)
2261 {
2262     return *((const uint64_t *)v1) == *((const uint64_t *)v2);
2263 }
2264 
2265 static guint rocker_int64_hash(gconstpointer v)
2266 {
2267     return (guint)*(const uint64_t *)v;
2268 }
2269 
2270 static int of_dpa_init(World *world)
2271 {
2272     OfDpa *of_dpa = world_private(world);
2273 
2274     of_dpa->world = world;
2275 
2276     of_dpa->flow_tbl = g_hash_table_new_full(rocker_int64_hash,
2277                                              rocker_int64_equal,
2278                                              NULL, g_free);
2279     if (!of_dpa->flow_tbl) {
2280         return -ENOMEM;
2281     }
2282 
2283     of_dpa->group_tbl = g_hash_table_new_full(g_int_hash, g_int_equal,
2284                                               NULL, g_free);
2285     if (!of_dpa->group_tbl) {
2286         goto err_group_tbl;
2287     }
2288 
2289     /* XXX hardcode some artificial table max values */
2290     of_dpa->flow_tbl_max_size = 100;
2291     of_dpa->group_tbl_max_size = 100;
2292 
2293     return 0;
2294 
2295 err_group_tbl:
2296     g_hash_table_destroy(of_dpa->flow_tbl);
2297     return -ENOMEM;
2298 }
2299 
/* World teardown: destroy both tables; the g_free destroy notifier set
 * at creation releases the stored flows and groups. */
static void of_dpa_uninit(World *world)
{
    OfDpa *of_dpa = world_private(world);

    g_hash_table_destroy(of_dpa->group_tbl);
    g_hash_table_destroy(of_dpa->flow_tbl);
}
2307 
/* Accumulator passed through g_hash_table_foreach() when building the
 * QMP flow list. */
struct of_dpa_flow_fill_context {
    RockerOfDpaFlowList *list;   /* result list, prepended to per flow */
    uint32_t tbl_id;             /* filter; (uint32_t)-1 matches all tables */
};
2312 
/* GHashTable foreach callback: convert one internal OfDpaFlow into a
 * QAPI RockerOfDpaFlow and prepend it to the list in user_data (a
 * struct of_dpa_flow_fill_context).
 *
 * Key/mask fields that compare against packet headers are stored in
 * network byte order (see the of_dpa_flow_key comment), hence the
 * ntohs() conversions below.  A key field is reported only when the
 * key or mask is non-zero; a mask is reported only when it is not the
 * trivial all-ones (exact match) mask.
 */
static void of_dpa_flow_fill(void *cookie, void *value, void *user_data)
{
    struct of_dpa_flow *flow = value;
    struct of_dpa_flow_key *key = &flow->key;
    struct of_dpa_flow_key *mask = &flow->mask;
    struct of_dpa_flow_fill_context *flow_context = user_data;
    RockerOfDpaFlowList *new;
    RockerOfDpaFlow *nflow;
    RockerOfDpaFlowKey *nkey;
    RockerOfDpaFlowMask *nmask;
    RockerOfDpaFlowAction *naction;

    /* tbl_id == (uint32_t)-1 acts as the "all tables" wildcard */
    if (flow_context->tbl_id != -1 &&
        flow_context->tbl_id != key->tbl_id) {
        return;
    }

    new = g_malloc0(sizeof(*new));
    nflow = new->value = g_malloc0(sizeof(*nflow));
    nkey = nflow->key = g_malloc0(sizeof(*nkey));
    nmask = nflow->mask = g_malloc0(sizeof(*nmask));
    naction = nflow->action = g_malloc0(sizeof(*naction));

    nflow->cookie = flow->cookie;
    nflow->hits = flow->stats.hits;
    nkey->priority = flow->priority;
    nkey->tbl_id = key->tbl_id;

    if (key->in_pport || mask->in_pport) {
        nkey->has_in_pport = true;
        nkey->in_pport = key->in_pport;
    }

    /* report the mask only when it isn't a full exact match */
    if (nkey->has_in_pport && mask->in_pport != 0xffffffff) {
        nmask->has_in_pport = true;
        nmask->in_pport = mask->in_pport;
    }

    if (key->eth.vlan_id || mask->eth.vlan_id) {
        nkey->has_vlan_id = true;
        nkey->vlan_id = ntohs(key->eth.vlan_id);
    }

    if (nkey->has_vlan_id && mask->eth.vlan_id != 0xffff) {
        nmask->has_vlan_id = true;
        nmask->vlan_id = ntohs(mask->eth.vlan_id);
    }

    if (key->tunnel_id || mask->tunnel_id) {
        nkey->has_tunnel_id = true;
        nkey->tunnel_id = key->tunnel_id;
    }

    if (nkey->has_tunnel_id && mask->tunnel_id != 0xffffffff) {
        nmask->has_tunnel_id = true;
        nmask->tunnel_id = mask->tunnel_id;
    }

    if (memcmp(key->eth.src.a, zero_mac.a, ETH_ALEN) ||
        memcmp(mask->eth.src.a, zero_mac.a, ETH_ALEN)) {
        nkey->has_eth_src = true;
        nkey->eth_src = qemu_mac_strdup_printf(key->eth.src.a);
    }

    if (nkey->has_eth_src && memcmp(mask->eth.src.a, ff_mac.a, ETH_ALEN)) {
        nmask->has_eth_src = true;
        nmask->eth_src = qemu_mac_strdup_printf(mask->eth.src.a);
    }

    if (memcmp(key->eth.dst.a, zero_mac.a, ETH_ALEN) ||
        memcmp(mask->eth.dst.a, zero_mac.a, ETH_ALEN)) {
        nkey->has_eth_dst = true;
        nkey->eth_dst = qemu_mac_strdup_printf(key->eth.dst.a);
    }

    if (nkey->has_eth_dst && memcmp(mask->eth.dst.a, ff_mac.a, ETH_ALEN)) {
        nmask->has_eth_dst = true;
        nmask->eth_dst = qemu_mac_strdup_printf(mask->eth.dst.a);
    }

    if (key->eth.type) {

        nkey->has_eth_type = true;
        nkey->eth_type = ntohs(key->eth.type);

        /* IPv4 (0x0800) and IPv6 (0x86dd) both expose proto/tos */
        switch (ntohs(key->eth.type)) {
        case 0x0800:
        case 0x86dd:
            if (key->ip.proto || mask->ip.proto) {
                nkey->has_ip_proto = true;
                nkey->ip_proto = key->ip.proto;
            }
            if (nkey->has_ip_proto && mask->ip.proto != 0xff) {
                nmask->has_ip_proto = true;
                nmask->ip_proto = mask->ip.proto;
            }
            if (key->ip.tos || mask->ip.tos) {
                nkey->has_ip_tos = true;
                nkey->ip_tos = key->ip.tos;
            }
            if (nkey->has_ip_tos && mask->ip.tos != 0xff) {
                nmask->has_ip_tos = true;
                nmask->ip_tos = mask->ip.tos;
            }
            break;
        }

        switch (ntohs(key->eth.type)) {
        case 0x0800:
            /* IPv4 destination reported as "addr/prefixlen" string;
             * inet_ntoa()'s static buffer is consumed immediately. */
            if (key->ipv4.addr.dst || mask->ipv4.addr.dst) {
                char *dst = inet_ntoa(*(struct in_addr *)&key->ipv4.addr.dst);
                int dst_len = of_dpa_mask2prefix(mask->ipv4.addr.dst);
                nkey->has_ip_dst = true;
                nkey->ip_dst = g_strdup_printf("%s/%d", dst, dst_len);
            }
            break;
        }
    }

    if (flow->action.goto_tbl) {
        naction->has_goto_tbl = true;
        naction->goto_tbl = flow->action.goto_tbl;
    }

    if (flow->action.write.group_id) {
        naction->has_group_id = true;
        naction->group_id = flow->action.write.group_id;
    }

    if (flow->action.apply.new_vlan_id) {
        naction->has_new_vlan_id = true;
        naction->new_vlan_id = flow->action.apply.new_vlan_id;
    }

    /* prepend to the result list */
    new->next = flow_context->list;
    flow_context->list = new;
}
2450 
2451 RockerOfDpaFlowList *qmp_query_rocker_of_dpa_flows(const char *name,
2452                                                    bool has_tbl_id,
2453                                                    uint32_t tbl_id,
2454                                                    Error **errp)
2455 {
2456     struct rocker *r;
2457     struct world *w;
2458     struct of_dpa *of_dpa;
2459     struct of_dpa_flow_fill_context fill_context = {
2460         .list = NULL,
2461         .tbl_id = tbl_id,
2462     };
2463 
2464     r = rocker_find(name);
2465     if (!r) {
2466         error_setg(errp, "rocker %s not found", name);
2467         return NULL;
2468     }
2469 
2470     w = rocker_get_world(r, ROCKER_WORLD_TYPE_OF_DPA);
2471     if (!w) {
2472         error_setg(errp, "rocker %s doesn't have OF-DPA world", name);
2473         return NULL;
2474     }
2475 
2476     of_dpa = world_private(w);
2477 
2478     g_hash_table_foreach(of_dpa->flow_tbl, of_dpa_flow_fill, &fill_context);
2479 
2480     return fill_context.list;
2481 }
2482 
/* Accumulator passed through g_hash_table_foreach() when building the
 * QMP group list. */
struct of_dpa_group_fill_context {
    RockerOfDpaGroupList *list;  /* result list, prepended to per group */
    uint8_t type;                /* filter; 9 matches all group types */
};
2487 
/* GHashTable foreach callback: convert one internal of_dpa_group into a
 * QAPI RockerOfDpaGroup and prepend it to the list in user_data (a
 * struct of_dpa_group_fill_context).
 *
 * The group id packs type, VLAN, port and index bit fields; the
 * ROCKER_GROUP_*_GET macros extract them for reporting.
 */
static void of_dpa_group_fill(void *key, void *value, void *user_data)
{
    struct of_dpa_group *group = value;
    struct of_dpa_group_fill_context *flow_context = user_data;
    RockerOfDpaGroupList *new;
    RockerOfDpaGroup *ngroup;
    struct uint32List *id;
    int i;

    /* type 9 acts as the "all group types" wildcard
     * (NOTE(review): magic number; presumably chosen because it is
     * outside the valid ROCKER_OF_DPA_GROUP_TYPE_* range — confirm) */
    if (flow_context->type != 9 &&
        flow_context->type != ROCKER_GROUP_TYPE_GET(group->id)) {
        return;
    }

    new = g_malloc0(sizeof(*new));
    ngroup = new->value = g_malloc0(sizeof(*ngroup));

    ngroup->id = group->id;

    ngroup->type = ROCKER_GROUP_TYPE_GET(group->id);

    switch (ngroup->type) {
    case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
        ngroup->has_vlan_id = true;
        ngroup->vlan_id = ROCKER_GROUP_VLAN_GET(group->id);
        ngroup->has_pport = true;
        ngroup->pport = ROCKER_GROUP_PORT_GET(group->id);
        ngroup->has_out_pport = true;
        ngroup->out_pport = group->l2_interface.out_pport;
        ngroup->has_pop_vlan = true;
        ngroup->pop_vlan = group->l2_interface.pop_vlan;
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
        ngroup->has_index = true;
        ngroup->index = ROCKER_GROUP_INDEX_LONG_GET(group->id);
        ngroup->has_group_id = true;
        ngroup->group_id = group->l2_rewrite.group_id;
        /* optional rewrites are reported only when set */
        if (group->l2_rewrite.vlan_id) {
            ngroup->has_set_vlan_id = true;
            ngroup->set_vlan_id = ntohs(group->l2_rewrite.vlan_id);
        }
        if (memcmp(group->l2_rewrite.src_mac.a, zero_mac.a, ETH_ALEN)) {
            ngroup->has_set_eth_src = true;
            ngroup->set_eth_src =
                qemu_mac_strdup_printf(group->l2_rewrite.src_mac.a);
        }
        if (memcmp(group->l2_rewrite.dst_mac.a, zero_mac.a, ETH_ALEN)) {
            ngroup->has_set_eth_dst = true;
            ngroup->set_eth_dst =
                qemu_mac_strdup_printf(group->l2_rewrite.dst_mac.a);
        }
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
    case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
        ngroup->has_vlan_id = true;
        ngroup->vlan_id = ROCKER_GROUP_VLAN_GET(group->id);
        ngroup->has_index = true;
        ngroup->index = ROCKER_GROUP_INDEX_GET(group->id);
        /* member group ids, prepended so they come out in reverse order */
        for (i = 0; i < group->l2_flood.group_count; i++) {
            ngroup->has_group_ids = true;
            id = g_malloc0(sizeof(*id));
            id->value = group->l2_flood.group_ids[i];
            id->next = ngroup->group_ids;
            ngroup->group_ids = id;
        }
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
        ngroup->has_index = true;
        ngroup->index = ROCKER_GROUP_INDEX_LONG_GET(group->id);
        ngroup->has_group_id = true;
        ngroup->group_id = group->l3_unicast.group_id;
        if (group->l3_unicast.vlan_id) {
            ngroup->has_set_vlan_id = true;
            ngroup->set_vlan_id = ntohs(group->l3_unicast.vlan_id);
        }
        if (memcmp(group->l3_unicast.src_mac.a, zero_mac.a, ETH_ALEN)) {
            ngroup->has_set_eth_src = true;
            ngroup->set_eth_src =
                qemu_mac_strdup_printf(group->l3_unicast.src_mac.a);
        }
        if (memcmp(group->l3_unicast.dst_mac.a, zero_mac.a, ETH_ALEN)) {
            ngroup->has_set_eth_dst = true;
            ngroup->set_eth_dst =
                qemu_mac_strdup_printf(group->l3_unicast.dst_mac.a);
        }
        if (group->l3_unicast.ttl_check) {
            ngroup->has_ttl_check = true;
            ngroup->ttl_check = group->l3_unicast.ttl_check;
        }
        break;
    }

    /* prepend to the result list */
    new->next = flow_context->list;
    flow_context->list = new;
}
2583 
2584 RockerOfDpaGroupList *qmp_query_rocker_of_dpa_groups(const char *name,
2585                                                      bool has_type,
2586                                                      uint8_t type,
2587                                                      Error **errp)
2588 {
2589     struct rocker *r;
2590     struct world *w;
2591     struct of_dpa *of_dpa;
2592     struct of_dpa_group_fill_context fill_context = {
2593         .list = NULL,
2594         .type = type,
2595     };
2596 
2597     r = rocker_find(name);
2598     if (!r) {
2599         error_setg(errp, "rocker %s not found", name);
2600         return NULL;
2601     }
2602 
2603     w = rocker_get_world(r, ROCKER_WORLD_TYPE_OF_DPA);
2604     if (!w) {
2605         error_setg(errp, "rocker %s doesn't have OF-DPA world", name);
2606         return NULL;
2607     }
2608 
2609     of_dpa = world_private(w);
2610 
2611     g_hash_table_foreach(of_dpa->group_tbl, of_dpa_group_fill, &fill_context);
2612 
2613     return fill_context.list;
2614 }
2615 
/* OF-DPA world callbacks registered with the generic world layer. */
static WorldOps of_dpa_ops = {
    .init = of_dpa_init,
    .uninit = of_dpa_uninit,
    .ig = of_dpa_ig,
    .cmd = of_dpa_cmd,
};
2622 
2623 World *of_dpa_world_alloc(Rocker *r)
2624 {
2625     return world_alloc(r, sizeof(OfDpa), ROCKER_WORLD_TYPE_OF_DPA, &of_dpa_ops);
2626 }
2627