xref: /openbmc/qemu/hw/net/rocker/rocker_of_dpa.c (revision f14eced5)
1 /*
2  * QEMU rocker switch emulation - OF-DPA flow processing support
3  *
4  * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14  * GNU General Public License for more details.
15  */
16 
17 #include "qemu/osdep.h"
18 #include "net/eth.h"
19 #include "qapi/error.h"
20 #include "qapi/qapi-commands-rocker.h"
21 #include "qemu/iov.h"
22 #include "qemu/timer.h"
23 
24 #include "rocker.h"
25 #include "rocker_hw.h"
26 #include "rocker_fp.h"
27 #include "rocker_tlv.h"
28 #include "rocker_world.h"
29 #include "rocker_desc.h"
30 #include "rocker_of_dpa.h"
31 
/* Convenience MAC constants: all-zeros (treated as "unset"/wildcard in
 * rewrite and dump code) and all-ones (exact-match mask / broadcast). */
static const MACAddr zero_mac = { .a = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } };
static const MACAddr ff_mac =   { .a = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } };
34 
/* Per-world private state for the OF-DPA world. */
typedef struct of_dpa {
    World *world;                       /* owning rocker world */
    GHashTable *flow_tbl;               /* uint64_t cookie -> OfDpaFlow */
    GHashTable *group_tbl;              /* uint32_t group id -> OfDpaGroup */
    unsigned int flow_tbl_max_size;     /* NOTE(review): not enforced in this
                                         * chunk — confirm where it's checked */
    unsigned int group_tbl_max_size;
} OfDpa;
42 
43 /* flow_key stolen mostly from OVS
44  *
45  * Note: fields that compare with network packet header fields
46  * are stored in network order (BE) to avoid per-packet field
47  * byte-swaps.
48  */
49 
/* Flow match key.  A flow stores a key plus a same-shaped mask, and
 * lookups compare the structs 'width' uint64_t words at a time (see
 * _of_dpa_flow_match), so layout/packing of this struct is load-bearing. */
typedef struct of_dpa_flow_key {
    uint32_t in_pport;               /* ingress port */
    uint32_t tunnel_id;              /* overlay tunnel id */
    uint32_t tbl_id;                 /* table id */
    struct {
        __be16 vlan_id;              /* 0 if no VLAN */
        MACAddr src;                 /* ethernet source address */
        MACAddr dst;                 /* ethernet destination address */
        __be16 type;                 /* ethernet frame type */
    } eth;
    struct {
        uint8_t proto;               /* IP protocol or ARP opcode */
        uint8_t tos;                 /* IP ToS */
        uint8_t ttl;                 /* IP TTL/hop limit */
        uint8_t frag;                /* one of FRAG_TYPE_* */
    } ip;
    union {
        struct {
            struct {
                __be32 src;          /* IP source address */
                __be32 dst;          /* IP destination address */
            } addr;
            union {
                struct {
                    __be16 src;      /* TCP/UDP/SCTP source port */
                    __be16 dst;      /* TCP/UDP/SCTP destination port */
                    __be16 flags;    /* TCP flags */
                } tp;
                struct {
                    MACAddr sha;     /* ARP source hardware address */
                    MACAddr tha;     /* ARP target hardware address */
                } arp;
            };
        } ipv4;
        struct {
            struct {
                Ipv6Addr src;       /* IPv6 source address */
                Ipv6Addr dst;       /* IPv6 destination address */
            } addr;
            __be32 label;            /* IPv6 flow label */
            struct {
                __be16 src;          /* TCP/UDP/SCTP source port */
                __be16 dst;          /* TCP/UDP/SCTP destination port */
                __be16 flags;        /* TCP flags */
            } tp;
            struct {
                Ipv6Addr target;    /* ND target address */
                MACAddr sll;         /* ND source link layer address */
                MACAddr tll;         /* ND target link layer address */
            } nd;
        } ipv6;
    };
    int width;                       /* how many uint64_t's in key? */
} OfDpaFlowKey;
104 
/* Width of key which includes field 'f' in u64s, rounded up.
 * Used to set OfDpaFlowKey.width so a lookup only compares the key
 * prefix that is meaningful for the table being searched. */
#define FLOW_KEY_WIDTH(f) \
    DIV_ROUND_UP(offsetof(OfDpaFlowKey, f) + sizeof_field(OfDpaFlowKey, f), \
    sizeof(uint64_t))
109 
/* Actions attached to a flow.  'write' entries are merged into the
 * packet's accumulated action set and executed at egress; 'apply'
 * entries take effect during table processing. */
typedef struct of_dpa_flow_action {
    uint32_t goto_tbl;
    struct {
        uint32_t group_id;           /* ROCKER_GROUP_NONE leaves prior value */
        uint32_t tun_log_lport;
        __be16 vlan_id;
    } write;
    struct {
        __be16 new_vlan_id;          /* vlan tag to push; 0 = none */
        uint32_t out_pport;
        uint8_t copy_to_cpu;         /* nonzero: also deliver copy to CPU */
        __be16 vlan_id;
    } apply;
} OfDpaFlowAction;
124 
/* One installed flow entry, keyed in flow_tbl by 'cookie'. */
typedef struct of_dpa_flow {
    uint32_t lpm;                    /* tie-breaker: larger preferred on match */
    uint32_t priority;               /* higher priority wins among matches */
    uint32_t hardtime;
    uint32_t idletime;
    uint64_t cookie;                 /* unique id; the hash-table key */
    OfDpaFlowKey key;
    OfDpaFlowKey mask;               /* same layout as key; masked compare */
    OfDpaFlowAction action;
    struct {
        uint64_t hits;
        int64_t install_time;        /* seconds, QEMU virtual clock */
        int64_t refresh_time;        /* seconds; last bridging-learn refresh */
        uint64_t rx_pkts;
        uint64_t tx_pkts;
    } stats;
} OfDpaFlow;
142 
/* Pointers into the parsed packet headers (set by of_dpa_flow_pkt_parse);
 * a NULL pointer means the corresponding header is absent. */
typedef struct of_dpa_flow_pkt_fields {
    uint32_t tunnel_id;
    struct eth_header *ethhdr;       /* set after a successful parse */
    __be16 *h_proto;                 /* effective ethertype (inner one if vlan) */
    struct vlan_header *vlanhdr;     /* NULL when untagged */
    struct ip_header *ipv4hdr;       /* NULL unless ETH_P_IP */
    struct ip6_header *ipv6hdr;      /* NULL unless ETH_P_IPV6 */
    Ipv6Addr *ipv6_src_addr;         /* NOTE(review): not set in this chunk —
                                      * presumably filled elsewhere; confirm */
    Ipv6Addr *ipv6_dst_addr;
} OfDpaFlowPktFields;
153 
/* Per-packet state threaded through the flow pipeline. */
typedef struct of_dpa_flow_context {
    uint32_t in_pport;                  /* ingress port of this packet */
    uint32_t tunnel_id;                 /* overlay tunnel id, 0 if none */
    struct iovec *iov;                  /* working scatter/gather view */
    int iovcnt;
    struct eth_header ethhdr_rewrite;   /* scratch copy for mac rewrite */
    struct vlan_header vlanhdr_rewrite; /* scratch copy for vlan rewrite */
    struct vlan_header vlanhdr;         /* scratch header for vlan insert */
    OfDpa *of_dpa;
    OfDpaFlowPktFields fields;          /* parsed header pointers */
    OfDpaFlowAction action_set;         /* accumulated actions for egress */
} OfDpaFlowContext;
166 
/* Lookup state: 'value' is the key built from the packet; 'best' tracks
 * the winning (highest priority/lpm) matching flow during the search. */
typedef struct of_dpa_flow_match {
    OfDpaFlowKey value;
    OfDpaFlow *best;
} OfDpaFlowMatch;
171 
/* Group-table entry.  Which union arm is live is implied by the group
 * type encoded in 'id' (decoded via ROCKER_GROUP_TYPE_GET). */
typedef struct of_dpa_group {
    uint32_t id;                     /* hash key; also encodes group type */
    union {
        struct {
            uint32_t out_pport;      /* egress port; 0 delivers locally */
            uint8_t pop_vlan;        /* strip vlan tag before egress */
        } l2_interface;
        struct {
            uint32_t group_id;       /* chained l2 interface group */
            MACAddr src_mac;         /* zero mac = leave unchanged */
            MACAddr dst_mac;
            __be16 vlan_id;
        } l2_rewrite;
        struct {
            uint16_t group_count;
            uint32_t *group_ids;     /* member groups to replicate to */
        } l2_flood;
        struct {
            uint32_t group_id;       /* chained l2 interface group */
            MACAddr src_mac;
            MACAddr dst_mac;
            __be16 vlan_id;
            uint8_t ttl_check;       /* XXX not yet honored (see l3 output) */
        } l3_unicast;
    };
} OfDpaGroup;
198 
199 static int of_dpa_mask2prefix(__be32 mask)
200 {
201     int i;
202     int count = 32;
203 
204     for (i = 0; i < 32; i++) {
205         if (!(ntohl(mask) & ((2 << i) - 1))) {
206             count--;
207         }
208     }
209 
210     return count;
211 }
212 
#if defined(DEBUG_ROCKER)
/* Debug-only: render a flow key (and optional mask) as one log line.
 * Each field prints only when set in key or mask; a "/mask" suffix is
 * appended when the mask is not exact-match.  buf[512] is assumed big
 * enough for the worst-case line.
 */
static void of_dpa_flow_key_dump(OfDpaFlowKey *key, OfDpaFlowKey *mask)
{
    char buf[512], *b = buf, *mac;

    b += sprintf(b, " tbl %2d", key->tbl_id);

    if (key->in_pport || (mask && mask->in_pport)) {
        b += sprintf(b, " in_pport %2d", key->in_pport);
        if (mask && mask->in_pport != 0xffffffff) {
            /* fixed: printed key->in_pport where the mask was meant */
            b += sprintf(b, "/0x%08x", mask->in_pport);
        }
    }

    if (key->tunnel_id || (mask && mask->tunnel_id)) {
        b += sprintf(b, " tun %8d", key->tunnel_id);
        if (mask && mask->tunnel_id != 0xffffffff) {
            /* fixed: printed key->tunnel_id where the mask was meant */
            b += sprintf(b, "/0x%08x", mask->tunnel_id);
        }
    }

    if (key->eth.vlan_id || (mask && mask->eth.vlan_id)) {
        b += sprintf(b, " vlan %4d", ntohs(key->eth.vlan_id));
        if (mask && mask->eth.vlan_id != 0xffff) {
            /* fixed: printed the key vlan where the mask was meant */
            b += sprintf(b, "/0x%04x", ntohs(mask->eth.vlan_id));
        }
    }

    if (memcmp(key->eth.src.a, zero_mac.a, ETH_ALEN) ||
        (mask && memcmp(mask->eth.src.a, zero_mac.a, ETH_ALEN))) {
        mac = qemu_mac_strdup_printf(key->eth.src.a);
        b += sprintf(b, " src %s", mac);
        g_free(mac);
        if (mask && memcmp(mask->eth.src.a, ff_mac.a, ETH_ALEN)) {
            mac = qemu_mac_strdup_printf(mask->eth.src.a);
            b += sprintf(b, "/%s", mac);
            g_free(mac);
        }
    }

    if (memcmp(key->eth.dst.a, zero_mac.a, ETH_ALEN) ||
        (mask && memcmp(mask->eth.dst.a, zero_mac.a, ETH_ALEN))) {
        mac = qemu_mac_strdup_printf(key->eth.dst.a);
        b += sprintf(b, " dst %s", mac);
        g_free(mac);
        if (mask && memcmp(mask->eth.dst.a, ff_mac.a, ETH_ALEN)) {
            mac = qemu_mac_strdup_printf(mask->eth.dst.a);
            b += sprintf(b, "/%s", mac);
            g_free(mac);
        }
    }

    if (key->eth.type || (mask && mask->eth.type)) {
        b += sprintf(b, " type 0x%04x", ntohs(key->eth.type));
        if (mask && mask->eth.type != 0xffff) {
            b += sprintf(b, "/0x%04x", ntohs(mask->eth.type));
        }
        switch (ntohs(key->eth.type)) {
        case 0x0800:        /* IPv4 */
        case 0x86dd:        /* IPv6 */
            if (key->ip.proto || (mask && mask->ip.proto)) {
                b += sprintf(b, " ip proto %2d", key->ip.proto);
                if (mask && mask->ip.proto != 0xff) {
                    b += sprintf(b, "/0x%02x", mask->ip.proto);
                }
            }
            if (key->ip.tos || (mask && mask->ip.tos)) {
                b += sprintf(b, " ip tos %2d", key->ip.tos);
                if (mask && mask->ip.tos != 0xff) {
                    b += sprintf(b, "/0x%02x", mask->ip.tos);
                }
            }
            break;
        }
        switch (ntohs(key->eth.type)) {
        case 0x0800:        /* IPv4: show destination as addr/prefix */
            if (key->ipv4.addr.dst || (mask && mask->ipv4.addr.dst)) {
                b += sprintf(b, " dst %s",
                    inet_ntoa(*(struct in_addr *)&key->ipv4.addr.dst));
                if (mask) {
                    b += sprintf(b, "/%d",
                                 of_dpa_mask2prefix(mask->ipv4.addr.dst));
                }
            }
            break;
        }
    }

    DPRINTF("%s\n", buf);
}
#else
#define of_dpa_flow_key_dump(k, m)
#endif
306 
307 static void _of_dpa_flow_match(void *key, void *value, void *user_data)
308 {
309     OfDpaFlow *flow = value;
310     OfDpaFlowMatch *match = user_data;
311     uint64_t *k = (uint64_t *)&flow->key;
312     uint64_t *m = (uint64_t *)&flow->mask;
313     uint64_t *v = (uint64_t *)&match->value;
314     int i;
315 
316     if (flow->key.tbl_id == match->value.tbl_id) {
317         of_dpa_flow_key_dump(&flow->key, &flow->mask);
318     }
319 
320     if (flow->key.width > match->value.width) {
321         return;
322     }
323 
324     for (i = 0; i < flow->key.width; i++, k++, m++, v++) {
325         if ((~*k & *m & *v) | (*k & *m & ~*v)) {
326             return;
327         }
328     }
329 
330     DPRINTF("match\n");
331 
332     if (!match->best ||
333         flow->priority > match->best->priority ||
334         flow->lpm > match->best->lpm) {
335         match->best = flow;
336     }
337 }
338 
339 static OfDpaFlow *of_dpa_flow_match(OfDpa *of_dpa, OfDpaFlowMatch *match)
340 {
341     DPRINTF("\nnew search\n");
342     of_dpa_flow_key_dump(&match->value, NULL);
343 
344     g_hash_table_foreach(of_dpa->flow_tbl, _of_dpa_flow_match, match);
345 
346     return match->best;
347 }
348 
349 static OfDpaFlow *of_dpa_flow_find(OfDpa *of_dpa, uint64_t cookie)
350 {
351     return g_hash_table_lookup(of_dpa->flow_tbl, &cookie);
352 }
353 
354 static int of_dpa_flow_add(OfDpa *of_dpa, OfDpaFlow *flow)
355 {
356     g_hash_table_insert(of_dpa->flow_tbl, &flow->cookie, flow);
357 
358     return ROCKER_OK;
359 }
360 
361 static void of_dpa_flow_del(OfDpa *of_dpa, OfDpaFlow *flow)
362 {
363     g_hash_table_remove(of_dpa->flow_tbl, &flow->cookie);
364 }
365 
366 static OfDpaFlow *of_dpa_flow_alloc(uint64_t cookie)
367 {
368     OfDpaFlow *flow;
369     int64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) / 1000;
370 
371     flow = g_new0(OfDpaFlow, 1);
372 
373     flow->cookie = cookie;
374     flow->mask.tbl_id = 0xffffffff;
375 
376     flow->stats.install_time = flow->stats.refresh_time = now;
377 
378     return flow;
379 }
380 
381 static void of_dpa_flow_pkt_hdr_reset(OfDpaFlowContext *fc)
382 {
383     OfDpaFlowPktFields *fields = &fc->fields;
384 
385     fc->iov[0].iov_base = fields->ethhdr;
386     fc->iov[0].iov_len = sizeof(struct eth_header);
387     fc->iov[1].iov_base = fields->vlanhdr;
388     fc->iov[1].iov_len = fields->vlanhdr ? sizeof(struct vlan_header) : 0;
389 }
390 
/* Parse the packet's L2 (and optionally vlan, L3) headers and build
 * the working iov view for this flow context.
 *
 * On success fc->fields points at the headers inside the caller's
 * first iovec, and fc->iov[] is the caller's iov with two vectors
 * prepended:
 *   iov[0] = eth header, iov[1] = vlan header (len 0 if untagged),
 *   iov[2] = remainder of the caller's first vector,
 *   iov[3..] = the caller's remaining vectors, unchanged.
 * This layout lets later stages insert/strip/rewrite headers just by
 * re-pointing vectors, without copying packet data.
 *
 * On header underrun the function logs (debug builds) and returns,
 * leaving fields/iov partially initialized.
 *
 * NOTE(review): assumes all parsed headers are contiguous within
 * iov[0], and that fc->iov has room for iovcnt + 2 entries — confirm
 * at the call site.
 */
static void of_dpa_flow_pkt_parse(OfDpaFlowContext *fc,
                                  const struct iovec *iov, int iovcnt)
{
    OfDpaFlowPktFields *fields = &fc->fields;
    size_t sofar = 0;    /* bytes of iov[0] claimed by parsed headers */
    int i;

    sofar += sizeof(struct eth_header);
    if (iov->iov_len < sofar) {
        DPRINTF("flow_pkt_parse underrun on eth_header\n");
        return;
    }

    fields->ethhdr = iov->iov_base;
    fields->h_proto = &fields->ethhdr->h_proto;

    /* 802.1Q tag present: the effective ethertype is the inner one */
    if (ntohs(*fields->h_proto) == ETH_P_VLAN) {
        sofar += sizeof(struct vlan_header);
        if (iov->iov_len < sofar) {
            DPRINTF("flow_pkt_parse underrun on vlan_header\n");
            return;
        }
        fields->vlanhdr = (struct vlan_header *)(fields->ethhdr + 1);
        fields->h_proto = &fields->vlanhdr->h_proto;
    }

    switch (ntohs(*fields->h_proto)) {
    case ETH_P_IP:
        sofar += sizeof(struct ip_header);
        if (iov->iov_len < sofar) {
            DPRINTF("flow_pkt_parse underrun on ip_header\n");
            return;
        }
        /* L3 header starts right after the effective ethertype field */
        fields->ipv4hdr = (struct ip_header *)(fields->h_proto + 1);
        break;
    case ETH_P_IPV6:
        sofar += sizeof(struct ip6_header);
        if (iov->iov_len < sofar) {
            DPRINTF("flow_pkt_parse underrun on ip6_header\n");
            return;
        }
        fields->ipv6hdr = (struct ip6_header *)(fields->h_proto + 1);
        break;
    }

    /* To facilitate (potential) VLAN tag insertion, Make a
     * copy of the iov and insert two new vectors at the
     * beginning for eth hdr and vlan hdr.  No data is copied,
     * just the vectors.
     */

    of_dpa_flow_pkt_hdr_reset(fc);

    fc->iov[2].iov_base = fields->h_proto + 1;
    fc->iov[2].iov_len = iov->iov_len - fc->iov[0].iov_len - fc->iov[1].iov_len;

    for (i = 1; i < iovcnt; i++) {
        fc->iov[i+2] = iov[i];
    }

    fc->iovcnt = iovcnt + 2;
}
453 
454 static void of_dpa_flow_pkt_insert_vlan(OfDpaFlowContext *fc, __be16 vlan_id)
455 {
456     OfDpaFlowPktFields *fields = &fc->fields;
457     uint16_t h_proto = fields->ethhdr->h_proto;
458 
459     if (fields->vlanhdr) {
460         DPRINTF("flow_pkt_insert_vlan packet already has vlan\n");
461         return;
462     }
463 
464     fields->ethhdr->h_proto = htons(ETH_P_VLAN);
465     fields->vlanhdr = &fc->vlanhdr;
466     fields->vlanhdr->h_tci = vlan_id;
467     fields->vlanhdr->h_proto = h_proto;
468     fields->h_proto = &fields->vlanhdr->h_proto;
469 
470     fc->iov[1].iov_base = fields->vlanhdr;
471     fc->iov[1].iov_len = sizeof(struct vlan_header);
472 }
473 
/* Drop the 802.1Q tag from the outgoing packet view without moving any
 * bytes: shrink iov[0] so it ends just before the eth header's
 * ethertype field, then point iov[1] at the inner ethertype (which,
 * when a vlan header exists, is what fields->h_proto addresses).  The
 * 4 tag bytes in between are simply skipped.  fields itself is left
 * untouched so a later hdr_reset can restore the tagged view. */
static void of_dpa_flow_pkt_strip_vlan(OfDpaFlowContext *fc)
{
    OfDpaFlowPktFields *fields = &fc->fields;

    if (!fields->vlanhdr) {
        return;
    }

    fc->iov[0].iov_len -= sizeof(fields->ethhdr->h_proto);
    fc->iov[1].iov_base = fields->h_proto;
    fc->iov[1].iov_len = sizeof(fields->ethhdr->h_proto);
}
486 
487 static void of_dpa_flow_pkt_hdr_rewrite(OfDpaFlowContext *fc,
488                                         uint8_t *src_mac, uint8_t *dst_mac,
489                                         __be16 vlan_id)
490 {
491     OfDpaFlowPktFields *fields = &fc->fields;
492 
493     if (src_mac || dst_mac) {
494         memcpy(&fc->ethhdr_rewrite, fields->ethhdr, sizeof(struct eth_header));
495         if (src_mac && memcmp(src_mac, zero_mac.a, ETH_ALEN)) {
496             memcpy(fc->ethhdr_rewrite.h_source, src_mac, ETH_ALEN);
497         }
498         if (dst_mac && memcmp(dst_mac, zero_mac.a, ETH_ALEN)) {
499             memcpy(fc->ethhdr_rewrite.h_dest, dst_mac, ETH_ALEN);
500         }
501         fc->iov[0].iov_base = &fc->ethhdr_rewrite;
502     }
503 
504     if (vlan_id && fields->vlanhdr) {
505         fc->vlanhdr_rewrite = fc->vlanhdr;
506         fc->vlanhdr_rewrite.h_tci = vlan_id;
507         fc->iov[1].iov_base = &fc->vlanhdr_rewrite;
508     }
509 }
510 
511 static void of_dpa_flow_ig_tbl(OfDpaFlowContext *fc, uint32_t tbl_id);
512 
513 static void of_dpa_ig_port_build_match(OfDpaFlowContext *fc,
514                                        OfDpaFlowMatch *match)
515 {
516     match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
517     match->value.in_pport = fc->in_pport;
518     match->value.width = FLOW_KEY_WIDTH(tbl_id);
519 }
520 
521 static void of_dpa_ig_port_miss(OfDpaFlowContext *fc)
522 {
523     uint32_t port;
524 
525     /* The default on miss is for packets from physical ports
526      * to go to the VLAN Flow Table. There is no default rule
527      * for packets from logical ports, which are dropped on miss.
528      */
529 
530     if (fp_port_from_pport(fc->in_pport, &port)) {
531         of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_VLAN);
532     }
533 }
534 
535 static void of_dpa_vlan_build_match(OfDpaFlowContext *fc,
536                                     OfDpaFlowMatch *match)
537 {
538     match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
539     match->value.in_pport = fc->in_pport;
540     if (fc->fields.vlanhdr) {
541         match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
542     }
543     match->value.width = FLOW_KEY_WIDTH(eth.vlan_id);
544 }
545 
546 static void of_dpa_vlan_insert(OfDpaFlowContext *fc,
547                                OfDpaFlow *flow)
548 {
549     if (flow->action.apply.new_vlan_id) {
550         of_dpa_flow_pkt_insert_vlan(fc, flow->action.apply.new_vlan_id);
551     }
552 }
553 
554 static void of_dpa_term_mac_build_match(OfDpaFlowContext *fc,
555                                         OfDpaFlowMatch *match)
556 {
557     match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
558     match->value.in_pport = fc->in_pport;
559     match->value.eth.type = *fc->fields.h_proto;
560     match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
561     memcpy(match->value.eth.dst.a, fc->fields.ethhdr->h_dest,
562            sizeof(match->value.eth.dst.a));
563     match->value.width = FLOW_KEY_WIDTH(eth.type);
564 }
565 
566 static void of_dpa_term_mac_miss(OfDpaFlowContext *fc)
567 {
568     of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_BRIDGING);
569 }
570 
571 static void of_dpa_apply_actions(OfDpaFlowContext *fc,
572                                  OfDpaFlow *flow)
573 {
574     fc->action_set.apply.copy_to_cpu = flow->action.apply.copy_to_cpu;
575     fc->action_set.apply.vlan_id = flow->key.eth.vlan_id;
576 }
577 
578 static void of_dpa_bridging_build_match(OfDpaFlowContext *fc,
579                                         OfDpaFlowMatch *match)
580 {
581     match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
582     if (fc->fields.vlanhdr) {
583         match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
584     } else if (fc->tunnel_id) {
585         match->value.tunnel_id = fc->tunnel_id;
586     }
587     memcpy(match->value.eth.dst.a, fc->fields.ethhdr->h_dest,
588            sizeof(match->value.eth.dst.a));
589     match->value.width = FLOW_KEY_WIDTH(eth.dst);
590 }
591 
/* Learn (or refresh) the packet's source mac/vlan in the bridge, and
 * notify the driver.  dst_flow is unused; the parameter exists so this
 * function fits the table-ops 'hit' callback signature.  Refresh
 * notifications for an already-learned address are rate-limited to
 * once per refresh_delay seconds. */
static void of_dpa_bridging_learn(OfDpaFlowContext *fc,
                                  OfDpaFlow *dst_flow)
{
    OfDpaFlowMatch match = { { 0, }, };
    OfDpaFlow *flow;
    uint8_t *addr;
    uint16_t vlan_id;
    int64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) / 1000;  /* seconds */
    int64_t refresh_delay = 1;   /* min seconds between driver refreshes */

    /* Do a lookup in bridge table by src_mac/vlan */

    addr = fc->fields.ethhdr->h_source;
    /* NOTE(review): assumes vlanhdr is non-NULL — confirm the VLAN
     * stage tags untagged packets before bridging runs. */
    vlan_id = fc->fields.vlanhdr->h_tci;

    match.value.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
    match.value.eth.vlan_id = vlan_id;
    /* key the lookup on eth.dst even though we search for the SOURCE
     * mac: learned entries store the station's mac in the dst field */
    memcpy(match.value.eth.dst.a, addr, sizeof(match.value.eth.dst.a));
    match.value.width = FLOW_KEY_WIDTH(eth.dst);

    flow = of_dpa_flow_match(fc->of_dpa, &match);
    if (flow) {
        if (!memcmp(flow->mask.eth.dst.a, ff_mac.a,
                    sizeof(flow->mask.eth.dst.a))) {
            /* src_mac/vlan already learned; if in_port and out_port
             * don't match, the end station has moved and the port
             * needs updating */
            /* XXX implement the in_port/out_port check */
            if (now - flow->stats.refresh_time < refresh_delay) {
                return;
            }
            flow->stats.refresh_time = now;
        }
    }

    /* Let driver know about mac/vlan.  This may be a new mac/vlan
     * or a refresh of existing mac/vlan that's been hit after the
     * refresh_delay.
     */

    rocker_event_mac_vlan_seen(world_rocker(fc->of_dpa->world),
                               fc->in_pport, addr, vlan_id);
}
635 
636 static void of_dpa_bridging_miss(OfDpaFlowContext *fc)
637 {
638     of_dpa_bridging_learn(fc, NULL);
639     of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_ACL_POLICY);
640 }
641 
642 static void of_dpa_bridging_action_write(OfDpaFlowContext *fc,
643                                          OfDpaFlow *flow)
644 {
645     if (flow->action.write.group_id != ROCKER_GROUP_NONE) {
646         fc->action_set.write.group_id = flow->action.write.group_id;
647     }
648     fc->action_set.write.tun_log_lport = flow->action.write.tun_log_lport;
649 }
650 
651 static void of_dpa_unicast_routing_build_match(OfDpaFlowContext *fc,
652                                                OfDpaFlowMatch *match)
653 {
654     match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
655     match->value.eth.type = *fc->fields.h_proto;
656     if (fc->fields.ipv4hdr) {
657         match->value.ipv4.addr.dst = fc->fields.ipv4hdr->ip_dst;
658     }
659     if (fc->fields.ipv6_dst_addr) {
660         memcpy(&match->value.ipv6.addr.dst, fc->fields.ipv6_dst_addr,
661                sizeof(match->value.ipv6.addr.dst));
662     }
663     match->value.width = FLOW_KEY_WIDTH(ipv6.addr.dst);
664 }
665 
666 static void of_dpa_unicast_routing_miss(OfDpaFlowContext *fc)
667 {
668     of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_ACL_POLICY);
669 }
670 
671 static void of_dpa_unicast_routing_action_write(OfDpaFlowContext *fc,
672                                                 OfDpaFlow *flow)
673 {
674     if (flow->action.write.group_id != ROCKER_GROUP_NONE) {
675         fc->action_set.write.group_id = flow->action.write.group_id;
676     }
677 }
678 
679 static void
680 of_dpa_multicast_routing_build_match(OfDpaFlowContext *fc,
681                                      OfDpaFlowMatch *match)
682 {
683     match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
684     match->value.eth.type = *fc->fields.h_proto;
685     match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
686     if (fc->fields.ipv4hdr) {
687         match->value.ipv4.addr.src = fc->fields.ipv4hdr->ip_src;
688         match->value.ipv4.addr.dst = fc->fields.ipv4hdr->ip_dst;
689     }
690     if (fc->fields.ipv6_src_addr) {
691         memcpy(&match->value.ipv6.addr.src, fc->fields.ipv6_src_addr,
692                sizeof(match->value.ipv6.addr.src));
693     }
694     if (fc->fields.ipv6_dst_addr) {
695         memcpy(&match->value.ipv6.addr.dst, fc->fields.ipv6_dst_addr,
696                sizeof(match->value.ipv6.addr.dst));
697     }
698     match->value.width = FLOW_KEY_WIDTH(ipv6.addr.dst);
699 }
700 
701 static void of_dpa_multicast_routing_miss(OfDpaFlowContext *fc)
702 {
703     of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_ACL_POLICY);
704 }
705 
706 static void
707 of_dpa_multicast_routing_action_write(OfDpaFlowContext *fc,
708                                       OfDpaFlow *flow)
709 {
710     if (flow->action.write.group_id != ROCKER_GROUP_NONE) {
711         fc->action_set.write.group_id = flow->action.write.group_id;
712     }
713     fc->action_set.write.vlan_id = flow->action.write.vlan_id;
714 }
715 
716 static void of_dpa_acl_build_match(OfDpaFlowContext *fc,
717                                    OfDpaFlowMatch *match)
718 {
719     match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
720     match->value.in_pport = fc->in_pport;
721     memcpy(match->value.eth.src.a, fc->fields.ethhdr->h_source,
722            sizeof(match->value.eth.src.a));
723     memcpy(match->value.eth.dst.a, fc->fields.ethhdr->h_dest,
724            sizeof(match->value.eth.dst.a));
725     match->value.eth.type = *fc->fields.h_proto;
726     match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
727     match->value.width = FLOW_KEY_WIDTH(eth.type);
728     if (fc->fields.ipv4hdr) {
729         match->value.ip.proto = fc->fields.ipv4hdr->ip_p;
730         match->value.ip.tos = fc->fields.ipv4hdr->ip_tos;
731         match->value.width = FLOW_KEY_WIDTH(ip.tos);
732     } else if (fc->fields.ipv6hdr) {
733         match->value.ip.proto =
734             fc->fields.ipv6hdr->ip6_ctlun.ip6_un1.ip6_un1_nxt;
735         match->value.ip.tos = 0; /* XXX what goes here? */
736         match->value.width = FLOW_KEY_WIDTH(ip.tos);
737     }
738 }
739 
740 static void of_dpa_eg(OfDpaFlowContext *fc);
741 static void of_dpa_acl_hit(OfDpaFlowContext *fc,
742                            OfDpaFlow *dst_flow)
743 {
744     of_dpa_eg(fc);
745 }
746 
747 static void of_dpa_acl_action_write(OfDpaFlowContext *fc,
748                                     OfDpaFlow *flow)
749 {
750     if (flow->action.write.group_id != ROCKER_GROUP_NONE) {
751         fc->action_set.write.group_id = flow->action.write.group_id;
752     }
753 }
754 
/* Terminal handler: intentionally consume the packet without
 * forwarding it anywhere (used as the hit_no_goto default). */
static void of_dpa_drop(OfDpaFlowContext *fc)
{
    /* drop packet */
}
759 
760 static OfDpaGroup *of_dpa_group_find(OfDpa *of_dpa,
761                                               uint32_t group_id)
762 {
763     return g_hash_table_lookup(of_dpa->group_tbl, &group_id);
764 }
765 
766 static int of_dpa_group_add(OfDpa *of_dpa, OfDpaGroup *group)
767 {
768     g_hash_table_insert(of_dpa->group_tbl, &group->id, group);
769 
770     return 0;
771 }
772 
#if 0
/* Modify an existing group in place.  Unfinished: currently only
 * verifies the group exists; kept disabled until implemented. */
static int of_dpa_group_mod(OfDpa *of_dpa, OfDpaGroup *group)
{
    OfDpaGroup *old_group = of_dpa_group_find(of_dpa, group->id);

    if (!old_group) {
        return -ENOENT;
    }

    /* XXX */

    return 0;
}
#endif
787 
788 static int of_dpa_group_del(OfDpa *of_dpa, OfDpaGroup *group)
789 {
790     g_hash_table_remove(of_dpa->group_tbl, &group->id);
791 
792     return 0;
793 }
794 
#if 0
/* Fetch statistics for a group.  Unfinished: only validates the id;
 * kept disabled until stats collection is implemented. */
static int of_dpa_group_get_stats(OfDpa *of_dpa, uint32_t id)
{
    OfDpaGroup *group = of_dpa_group_find(of_dpa, id);

    if (!group) {
        return -ENOENT;
    }

    /* XXX get/return stats */

    return 0;
}
#endif
809 
810 static OfDpaGroup *of_dpa_group_alloc(uint32_t id)
811 {
812     OfDpaGroup *group = g_new0(OfDpaGroup, 1);
813 
814     group->id = id;
815 
816     return group;
817 }
818 
819 static void of_dpa_output_l2_interface(OfDpaFlowContext *fc,
820                                        OfDpaGroup *group)
821 {
822     uint8_t copy_to_cpu = fc->action_set.apply.copy_to_cpu;
823 
824     if (group->l2_interface.pop_vlan) {
825         of_dpa_flow_pkt_strip_vlan(fc);
826     }
827 
828     /* Note: By default, and as per the OpenFlow 1.3.1
829      * specification, a packet cannot be forwarded back
830      * to the IN_PORT from which it came in. An action
831      * bucket that specifies the particular packet's
832      * egress port is not evaluated.
833      */
834 
835     if (group->l2_interface.out_pport == 0) {
836         rx_produce(fc->of_dpa->world, fc->in_pport, fc->iov, fc->iovcnt,
837                    copy_to_cpu);
838     } else if (group->l2_interface.out_pport != fc->in_pport) {
839         rocker_port_eg(world_rocker(fc->of_dpa->world),
840                        group->l2_interface.out_pport,
841                        fc->iov, fc->iovcnt);
842     }
843 }
844 
845 static void of_dpa_output_l2_rewrite(OfDpaFlowContext *fc,
846                                      OfDpaGroup *group)
847 {
848     OfDpaGroup *l2_group =
849         of_dpa_group_find(fc->of_dpa, group->l2_rewrite.group_id);
850 
851     if (!l2_group) {
852         return;
853     }
854 
855     of_dpa_flow_pkt_hdr_rewrite(fc, group->l2_rewrite.src_mac.a,
856                          group->l2_rewrite.dst_mac.a,
857                          group->l2_rewrite.vlan_id);
858     of_dpa_output_l2_interface(fc, l2_group);
859 }
860 
861 static void of_dpa_output_l2_flood(OfDpaFlowContext *fc,
862                                    OfDpaGroup *group)
863 {
864     OfDpaGroup *l2_group;
865     int i;
866 
867     for (i = 0; i < group->l2_flood.group_count; i++) {
868         of_dpa_flow_pkt_hdr_reset(fc);
869         l2_group = of_dpa_group_find(fc->of_dpa, group->l2_flood.group_ids[i]);
870         if (!l2_group) {
871             continue;
872         }
873         switch (ROCKER_GROUP_TYPE_GET(l2_group->id)) {
874         case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
875             of_dpa_output_l2_interface(fc, l2_group);
876             break;
877         case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
878             of_dpa_output_l2_rewrite(fc, l2_group);
879             break;
880         }
881     }
882 }
883 
884 static void of_dpa_output_l3_unicast(OfDpaFlowContext *fc, OfDpaGroup *group)
885 {
886     OfDpaGroup *l2_group =
887         of_dpa_group_find(fc->of_dpa, group->l3_unicast.group_id);
888 
889     if (!l2_group) {
890         return;
891     }
892 
893     of_dpa_flow_pkt_hdr_rewrite(fc, group->l3_unicast.src_mac.a,
894                                 group->l3_unicast.dst_mac.a,
895                                 group->l3_unicast.vlan_id);
896     /* XXX need ttl_check */
897     of_dpa_output_l2_interface(fc, l2_group);
898 }
899 
/* Egress: execute the packet's accumulated action set.  First deliver
 * a copy to the CPU if apply.copy_to_cpu is set (via the vlan's CPU
 * L2-interface group), then dispatch the write-action group, if any,
 * by its encoded group type. */
static void of_dpa_eg(OfDpaFlowContext *fc)
{
    OfDpaFlowAction *set = &fc->action_set;
    OfDpaGroup *group;
    uint32_t group_id;

    /* send a copy of pkt to CPU (controller)? */

    if (set->apply.copy_to_cpu) {
        group_id = ROCKER_GROUP_L2_INTERFACE(set->apply.vlan_id, 0);
        group = of_dpa_group_find(fc->of_dpa, group_id);
        if (group) {
            of_dpa_output_l2_interface(fc, group);
            /* undo any header edits made while emitting the CPU copy */
            of_dpa_flow_pkt_hdr_reset(fc);
        }
    }

    /* process group write actions */

    if (!set->write.group_id) {
        return;
    }

    group = of_dpa_group_find(fc->of_dpa, set->write.group_id);
    if (!group) {
        return;
    }

    switch (ROCKER_GROUP_TYPE_GET(group->id)) {
    case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
        of_dpa_output_l2_interface(fc, group);
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
        of_dpa_output_l2_rewrite(fc, group);
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
    case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
        of_dpa_output_l2_flood(fc, group);
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
        of_dpa_output_l3_unicast(fc, group);
        break;
    }
}
944 
/* Per-table callbacks driving the pipeline walk in of_dpa_flow_ig_tbl().
 * Any callback may be NULL, in which case that step is skipped.
 */
typedef struct of_dpa_flow_tbl_ops {
    void (*build_match)(OfDpaFlowContext *fc, OfDpaFlowMatch *match); /* fill match key from pkt */
    void (*hit)(OfDpaFlowContext *fc, OfDpaFlow *flow);          /* post-action hook on a match */
    void (*miss)(OfDpaFlowContext *fc);                          /* no matching flow entry */
    void (*hit_no_goto)(OfDpaFlowContext *fc);                   /* matched, but no goto table set */
    void (*action_apply)(OfDpaFlowContext *fc, OfDpaFlow *flow); /* flow's apply-actions */
    void (*action_write)(OfDpaFlowContext *fc, OfDpaFlow *flow); /* flow's write-actions */
} OfDpaFlowTblOps;
953 
/* Callback table for each OF-DPA flow table, indexed by table id.
 * Tables without a .hit_no_goto of of_dpa_drop (only ACL_POLICY here)
 * let a matching pkt with no goto table fall off the end of the
 * pipeline; ACL misses go straight to egress via of_dpa_eg().
 */
static OfDpaFlowTblOps of_dpa_tbl_ops[] = {
    [ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT] = {
        .build_match = of_dpa_ig_port_build_match,
        .miss = of_dpa_ig_port_miss,
        .hit_no_goto = of_dpa_drop,
    },
    [ROCKER_OF_DPA_TABLE_ID_VLAN] = {
        .build_match = of_dpa_vlan_build_match,
        .hit_no_goto = of_dpa_drop,
        .action_apply = of_dpa_vlan_insert,
    },
    [ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC] = {
        .build_match = of_dpa_term_mac_build_match,
        .miss = of_dpa_term_mac_miss,
        .hit_no_goto = of_dpa_drop,
        .action_apply = of_dpa_apply_actions,
    },
    [ROCKER_OF_DPA_TABLE_ID_BRIDGING] = {
        .build_match = of_dpa_bridging_build_match,
        .hit = of_dpa_bridging_learn,
        .miss = of_dpa_bridging_miss,
        .hit_no_goto = of_dpa_drop,
        .action_apply = of_dpa_apply_actions,
        .action_write = of_dpa_bridging_action_write,
    },
    [ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING] = {
        .build_match = of_dpa_unicast_routing_build_match,
        .miss = of_dpa_unicast_routing_miss,
        .hit_no_goto = of_dpa_drop,
        .action_write = of_dpa_unicast_routing_action_write,
    },
    [ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING] = {
        .build_match = of_dpa_multicast_routing_build_match,
        .miss = of_dpa_multicast_routing_miss,
        .hit_no_goto = of_dpa_drop,
        .action_write = of_dpa_multicast_routing_action_write,
    },
    [ROCKER_OF_DPA_TABLE_ID_ACL_POLICY] = {
        .build_match = of_dpa_acl_build_match,
        .hit = of_dpa_acl_hit,
        .miss = of_dpa_eg,
        .action_apply = of_dpa_apply_actions,
        .action_write = of_dpa_acl_action_write,
    },
};
999 
/* Run the pkt in fc through one flow table of the OF-DPA pipeline.
 *
 * Builds the table's match key from the pkt, looks up a flow entry,
 * and on a hit runs the table's apply/write/hit callbacks (in that
 * order) before recursing into the flow's goto table.  On a lookup
 * miss the table's miss callback decides the pkt's fate; a hit with
 * no goto table runs hit_no_goto.  With no applicable callback the
 * pkt is simply dropped (nothing further is done with it).
 *
 * NOTE(review): tbl_id indexes of_dpa_tbl_ops[] without a bounds
 * check; it appears callers only pass table ids validated at flow-add
 * time — confirm.
 */
static void of_dpa_flow_ig_tbl(OfDpaFlowContext *fc, uint32_t tbl_id)
{
    OfDpaFlowTblOps *ops = &of_dpa_tbl_ops[tbl_id];
    OfDpaFlowMatch match = { { 0, }, };
    OfDpaFlow *flow;

    /* A table with no match builder can't be processed. */
    if (ops->build_match) {
        ops->build_match(fc, &match);
    } else {
        return;
    }

    flow = of_dpa_flow_match(fc->of_dpa, &match);
    if (!flow) {
        if (ops->miss) {
            ops->miss(fc);
        }
        return;
    }

    flow->stats.hits++;

    if (ops->action_apply) {
        ops->action_apply(fc, flow);
    }

    if (ops->action_write) {
        ops->action_write(fc, flow);
    }

    if (ops->hit) {
        ops->hit(fc, flow);
    }

    if (flow->action.goto_tbl) {
        of_dpa_flow_ig_tbl(fc, flow->action.goto_tbl);
    } else if (ops->hit_no_goto) {
        ops->hit_no_goto(fc);
    }

    /* drop packet */
}
1042 
/* World ingress handler: entry point for a pkt arriving on pport.
 *
 * Allocates a working iovec array with two spare slots (presumably
 * headroom for headers inserted during processing, e.g. a VLAN tag —
 * confirm against of_dpa_flow_pkt_parse/insert), parses the pkt into
 * the flow context, and starts the pipeline walk at the ingress port
 * table.  Always reports the full input size as consumed.
 */
static ssize_t of_dpa_ig(World *world, uint32_t pport,
                         const struct iovec *iov, int iovcnt)
{
    g_autofree struct iovec *iov_copy = g_new(struct iovec, iovcnt + 2);
    OfDpaFlowContext fc = {
        .of_dpa = world_private(world),
        .in_pport = pport,
        .iov = iov_copy,
        .iovcnt = iovcnt + 2,
    };

    of_dpa_flow_pkt_parse(&fc, iov, iovcnt);
    of_dpa_flow_ig_tbl(&fc, ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT);

    return iov_size(iov, iovcnt);
}
1059 
1060 #define ROCKER_TUNNEL_LPORT 0x00010000
1061 
1062 static int of_dpa_cmd_add_ig_port(OfDpaFlow *flow, RockerTlv **flow_tlvs)
1063 {
1064     OfDpaFlowKey *key = &flow->key;
1065     OfDpaFlowKey *mask = &flow->mask;
1066     OfDpaFlowAction *action = &flow->action;
1067     bool overlay_tunnel;
1068 
1069     if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] ||
1070         !flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
1071         return -ROCKER_EINVAL;
1072     }
1073 
1074     key->tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
1075     key->width = FLOW_KEY_WIDTH(tbl_id);
1076 
1077     key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]);
1078     if (flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]) {
1079         mask->in_pport =
1080             rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]);
1081     }
1082 
1083     overlay_tunnel = !!(key->in_pport & ROCKER_TUNNEL_LPORT);
1084 
1085     action->goto_tbl =
1086         rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
1087 
1088     if (!overlay_tunnel && action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_VLAN) {
1089         return -ROCKER_EINVAL;
1090     }
1091 
1092     if (overlay_tunnel && action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_BRIDGING) {
1093         return -ROCKER_EINVAL;
1094     }
1095 
1096     return ROCKER_OK;
1097 }
1098 
1099 static int of_dpa_cmd_add_vlan(OfDpaFlow *flow, RockerTlv **flow_tlvs)
1100 {
1101     OfDpaFlowKey *key = &flow->key;
1102     OfDpaFlowKey *mask = &flow->mask;
1103     OfDpaFlowAction *action = &flow->action;
1104     uint32_t port;
1105     bool untagged;
1106 
1107     if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] ||
1108         !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
1109         DPRINTF("Must give in_pport and vlan_id to install VLAN tbl entry\n");
1110         return -ROCKER_EINVAL;
1111     }
1112 
1113     key->tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
1114     key->width = FLOW_KEY_WIDTH(eth.vlan_id);
1115 
1116     key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]);
1117     if (!fp_port_from_pport(key->in_pport, &port)) {
1118         DPRINTF("in_pport (%d) not a front-panel port\n", key->in_pport);
1119         return -ROCKER_EINVAL;
1120     }
1121     mask->in_pport = 0xffffffff;
1122 
1123     key->eth.vlan_id = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
1124 
1125     if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]) {
1126         mask->eth.vlan_id =
1127             rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]);
1128     }
1129 
1130     if (key->eth.vlan_id) {
1131         untagged = false; /* filtering */
1132     } else {
1133         untagged = true;
1134     }
1135 
1136     if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
1137         action->goto_tbl =
1138             rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
1139         if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC) {
1140             DPRINTF("Goto tbl (%d) must be TERM_MAC\n", action->goto_tbl);
1141             return -ROCKER_EINVAL;
1142         }
1143     }
1144 
1145     if (untagged) {
1146         if (!flow_tlvs[ROCKER_TLV_OF_DPA_NEW_VLAN_ID]) {
1147             DPRINTF("Must specify new vlan_id if untagged\n");
1148             return -ROCKER_EINVAL;
1149         }
1150         action->apply.new_vlan_id =
1151             rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_NEW_VLAN_ID]);
1152         if (1 > ntohs(action->apply.new_vlan_id) ||
1153             ntohs(action->apply.new_vlan_id) > 4095) {
1154             DPRINTF("New vlan_id (%d) must be between 1 and 4095\n",
1155                     ntohs(action->apply.new_vlan_id));
1156             return -ROCKER_EINVAL;
1157         }
1158     }
1159 
1160     return ROCKER_OK;
1161 }
1162 
/* Parse and validate a Termination MAC table entry add/mod command.
 *
 * Matches on ingress port (with mask), ethertype (IPv4 0x0800 or
 * IPv6 0x86dd only), dst MAC (with mask) and VLAN (with mask).  The
 * dst MAC must be unicast, or match one of the two allowed multicast
 * wildcard patterns (IPv4 01:00:5e::/25, IPv6 33:33::/16).  Unicast
 * entries may only chain to unicast routing, multicast entries only
 * to multicast routing.  Returns ROCKER_OK or -ROCKER_EINVAL.
 */
static int of_dpa_cmd_add_term_mac(OfDpaFlow *flow, RockerTlv **flow_tlvs)
{
    OfDpaFlowKey *key = &flow->key;
    OfDpaFlowKey *mask = &flow->mask;
    OfDpaFlowAction *action = &flow->action;
    const MACAddr ipv4_mcast = { .a = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 } };
    const MACAddr ipv4_mask =  { .a = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 } };
    const MACAddr ipv6_mcast = { .a = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 } };
    const MACAddr ipv6_mask =  { .a = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } };
    uint32_t port;
    bool unicast = false;
    bool multicast = false;

    /* All of these TLVs are mandatory for this table. */
    if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]) {
        return -ROCKER_EINVAL;
    }

    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
    key->width = FLOW_KEY_WIDTH(eth.type);

    /* The ingress port must be a front-panel port. */
    key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]);
    if (!fp_port_from_pport(key->in_pport, &port)) {
        return -ROCKER_EINVAL;
    }
    mask->in_pport =
        rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]);

    /* Only IPv4 (0x0800) and IPv6 (0x86dd) terminate here; the key is
     * kept in network byte order (see the of_dpa_flow_key comment).
     */
    key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]);
    if (key->eth.type != htons(0x0800) && key->eth.type != htons(0x86dd)) {
        return -ROCKER_EINVAL;
    }
    mask->eth.type = htons(0xffff);

    memcpy(key->eth.dst.a,
           rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
           sizeof(key->eth.dst.a));
    memcpy(mask->eth.dst.a,
           rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]),
           sizeof(mask->eth.dst.a));

    /* A clear group bit (bit 0 of the first octet) means unicast. */
    if ((key->eth.dst.a[0] & 0x01) == 0x00) {
        unicast = true;
    }

    /* only two wildcard rules are acceptable for IPv4 and IPv6 multicast */
    if (memcmp(key->eth.dst.a, ipv4_mcast.a, sizeof(key->eth.dst.a)) == 0 &&
        memcmp(mask->eth.dst.a, ipv4_mask.a, sizeof(mask->eth.dst.a)) == 0) {
        multicast = true;
    }
    if (memcmp(key->eth.dst.a, ipv6_mcast.a, sizeof(key->eth.dst.a)) == 0 &&
        memcmp(mask->eth.dst.a, ipv6_mask.a, sizeof(mask->eth.dst.a)) == 0) {
        multicast = true;
    }

    if (!unicast && !multicast) {
        return -ROCKER_EINVAL;
    }

    key->eth.vlan_id = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
    mask->eth.vlan_id =
        rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]);

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
        action->goto_tbl =
            rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);

        if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING &&
            action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING) {
            return -ROCKER_EINVAL;
        }

        /* cast type must agree with the routing table chained to */
        if (unicast &&
            action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING) {
            return -ROCKER_EINVAL;
        }

        if (multicast &&
            action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING) {
            return -ROCKER_EINVAL;
        }
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]) {
        action->apply.copy_to_cpu =
            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]);
    }

    return ROCKER_OK;
}
1258 
1259 static int of_dpa_cmd_add_bridging(OfDpaFlow *flow, RockerTlv **flow_tlvs)
1260 {
1261     OfDpaFlowKey *key = &flow->key;
1262     OfDpaFlowKey *mask = &flow->mask;
1263     OfDpaFlowAction *action = &flow->action;
1264     bool unicast = false;
1265     bool dst_mac = false;
1266     bool dst_mac_mask = false;
1267     enum {
1268         BRIDGING_MODE_UNKNOWN,
1269         BRIDGING_MODE_VLAN_UCAST,
1270         BRIDGING_MODE_VLAN_MCAST,
1271         BRIDGING_MODE_VLAN_DFLT,
1272         BRIDGING_MODE_TUNNEL_UCAST,
1273         BRIDGING_MODE_TUNNEL_MCAST,
1274         BRIDGING_MODE_TUNNEL_DFLT,
1275     } mode = BRIDGING_MODE_UNKNOWN;
1276 
1277     key->tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
1278 
1279     if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
1280         key->eth.vlan_id =
1281             rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
1282         mask->eth.vlan_id = 0xffff;
1283         key->width = FLOW_KEY_WIDTH(eth.vlan_id);
1284     }
1285 
1286     if (flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_ID]) {
1287         key->tunnel_id =
1288             rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_ID]);
1289         mask->tunnel_id = 0xffffffff;
1290         key->width = FLOW_KEY_WIDTH(tunnel_id);
1291     }
1292 
1293     /* can't do VLAN bridging and tunnel bridging at same time */
1294     if (key->eth.vlan_id && key->tunnel_id) {
1295         DPRINTF("can't do VLAN bridging and tunnel bridging at same time\n");
1296         return -ROCKER_EINVAL;
1297     }
1298 
1299     if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
1300         memcpy(key->eth.dst.a,
1301                rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
1302                sizeof(key->eth.dst.a));
1303         key->width = FLOW_KEY_WIDTH(eth.dst);
1304         dst_mac = true;
1305         unicast = (key->eth.dst.a[0] & 0x01) == 0x00;
1306     }
1307 
1308     if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]) {
1309         memcpy(mask->eth.dst.a,
1310                rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]),
1311                sizeof(mask->eth.dst.a));
1312         key->width = FLOW_KEY_WIDTH(eth.dst);
1313         dst_mac_mask = true;
1314     } else if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
1315         memcpy(mask->eth.dst.a, ff_mac.a, sizeof(mask->eth.dst.a));
1316     }
1317 
1318     if (key->eth.vlan_id) {
1319         if (dst_mac && !dst_mac_mask) {
1320             mode = unicast ? BRIDGING_MODE_VLAN_UCAST :
1321                              BRIDGING_MODE_VLAN_MCAST;
1322         } else if ((dst_mac && dst_mac_mask) || !dst_mac) {
1323             mode = BRIDGING_MODE_VLAN_DFLT;
1324         }
1325     } else if (key->tunnel_id) {
1326         if (dst_mac && !dst_mac_mask) {
1327             mode = unicast ? BRIDGING_MODE_TUNNEL_UCAST :
1328                              BRIDGING_MODE_TUNNEL_MCAST;
1329         } else if ((dst_mac && dst_mac_mask) || !dst_mac) {
1330             mode = BRIDGING_MODE_TUNNEL_DFLT;
1331         }
1332     }
1333 
1334     if (mode == BRIDGING_MODE_UNKNOWN) {
1335         DPRINTF("Unknown bridging mode\n");
1336         return -ROCKER_EINVAL;
1337     }
1338 
1339     if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
1340         action->goto_tbl =
1341             rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
1342         if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_ACL_POLICY) {
1343             DPRINTF("Briding goto tbl must be ACL policy\n");
1344             return -ROCKER_EINVAL;
1345         }
1346     }
1347 
1348     if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
1349         action->write.group_id =
1350             rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
1351         switch (mode) {
1352         case BRIDGING_MODE_VLAN_UCAST:
1353             if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
1354                 ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) {
1355                 DPRINTF("Bridging mode vlan ucast needs L2 "
1356                         "interface group (0x%08x)\n",
1357                         action->write.group_id);
1358                 return -ROCKER_EINVAL;
1359             }
1360             break;
1361         case BRIDGING_MODE_VLAN_MCAST:
1362             if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
1363                 ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST) {
1364                 DPRINTF("Bridging mode vlan mcast needs L2 "
1365                         "mcast group (0x%08x)\n",
1366                         action->write.group_id);
1367                 return -ROCKER_EINVAL;
1368             }
1369             break;
1370         case BRIDGING_MODE_VLAN_DFLT:
1371             if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
1372                 ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD) {
1373                 DPRINTF("Bridging mode vlan dflt needs L2 "
1374                         "flood group (0x%08x)\n",
1375                         action->write.group_id);
1376                 return -ROCKER_EINVAL;
1377             }
1378             break;
1379         case BRIDGING_MODE_TUNNEL_MCAST:
1380             if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
1381                 ROCKER_OF_DPA_GROUP_TYPE_L2_OVERLAY) {
1382                 DPRINTF("Bridging mode tunnel mcast needs L2 "
1383                         "overlay group (0x%08x)\n",
1384                         action->write.group_id);
1385                 return -ROCKER_EINVAL;
1386             }
1387             break;
1388         case BRIDGING_MODE_TUNNEL_DFLT:
1389             if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
1390                 ROCKER_OF_DPA_GROUP_TYPE_L2_OVERLAY) {
1391                 DPRINTF("Bridging mode tunnel dflt needs L2 "
1392                         "overlay group (0x%08x)\n",
1393                         action->write.group_id);
1394                 return -ROCKER_EINVAL;
1395             }
1396             break;
1397         default:
1398             return -ROCKER_EINVAL;
1399         }
1400     }
1401 
1402     if (flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_LPORT]) {
1403         action->write.tun_log_lport =
1404             rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_LPORT]);
1405         if (mode != BRIDGING_MODE_TUNNEL_UCAST) {
1406             DPRINTF("Have tunnel logical port but not "
1407                     "in bridging tunnel mode\n");
1408             return -ROCKER_EINVAL;
1409         }
1410     }
1411 
1412     if (flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]) {
1413         action->apply.copy_to_cpu =
1414             rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]);
1415     }
1416 
1417     return ROCKER_OK;
1418 }
1419 
1420 static int of_dpa_cmd_add_unicast_routing(OfDpaFlow *flow,
1421                                           RockerTlv **flow_tlvs)
1422 {
1423     OfDpaFlowKey *key = &flow->key;
1424     OfDpaFlowKey *mask = &flow->mask;
1425     OfDpaFlowAction *action = &flow->action;
1426     enum {
1427         UNICAST_ROUTING_MODE_UNKNOWN,
1428         UNICAST_ROUTING_MODE_IPV4,
1429         UNICAST_ROUTING_MODE_IPV6,
1430     } mode = UNICAST_ROUTING_MODE_UNKNOWN;
1431     uint8_t type;
1432 
1433     if (!flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]) {
1434         return -ROCKER_EINVAL;
1435     }
1436 
1437     key->tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
1438     key->width = FLOW_KEY_WIDTH(ipv6.addr.dst);
1439 
1440     key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]);
1441     switch (ntohs(key->eth.type)) {
1442     case 0x0800:
1443         mode = UNICAST_ROUTING_MODE_IPV4;
1444         break;
1445     case 0x86dd:
1446         mode = UNICAST_ROUTING_MODE_IPV6;
1447         break;
1448     default:
1449         return -ROCKER_EINVAL;
1450     }
1451     mask->eth.type = htons(0xffff);
1452 
1453     switch (mode) {
1454     case UNICAST_ROUTING_MODE_IPV4:
1455         if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]) {
1456             return -ROCKER_EINVAL;
1457         }
1458         key->ipv4.addr.dst =
1459             rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]);
1460         if (ipv4_addr_is_multicast(key->ipv4.addr.dst)) {
1461             return -ROCKER_EINVAL;
1462         }
1463         flow->lpm = of_dpa_mask2prefix(htonl(0xffffffff));
1464         if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP_MASK]) {
1465             mask->ipv4.addr.dst =
1466                 rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP_MASK]);
1467             flow->lpm = of_dpa_mask2prefix(mask->ipv4.addr.dst);
1468         }
1469         break;
1470     case UNICAST_ROUTING_MODE_IPV6:
1471         if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]) {
1472             return -ROCKER_EINVAL;
1473         }
1474         memcpy(&key->ipv6.addr.dst,
1475                rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]),
1476                sizeof(key->ipv6.addr.dst));
1477         if (ipv6_addr_is_multicast(&key->ipv6.addr.dst)) {
1478             return -ROCKER_EINVAL;
1479         }
1480         if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6_MASK]) {
1481             memcpy(&mask->ipv6.addr.dst,
1482                    rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6_MASK]),
1483                    sizeof(mask->ipv6.addr.dst));
1484         }
1485         break;
1486     default:
1487         return -ROCKER_EINVAL;
1488     }
1489 
1490     if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
1491         action->goto_tbl =
1492             rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
1493         if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_ACL_POLICY) {
1494             return -ROCKER_EINVAL;
1495         }
1496     }
1497 
1498     if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
1499         action->write.group_id =
1500             rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
1501         type = ROCKER_GROUP_TYPE_GET(action->write.group_id);
1502         if (type != ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE &&
1503             type != ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST &&
1504             type != ROCKER_OF_DPA_GROUP_TYPE_L3_ECMP) {
1505             return -ROCKER_EINVAL;
1506         }
1507     }
1508 
1509     return ROCKER_OK;
1510 }
1511 
1512 static int of_dpa_cmd_add_multicast_routing(OfDpaFlow *flow,
1513                                             RockerTlv **flow_tlvs)
1514 {
1515     OfDpaFlowKey *key = &flow->key;
1516     OfDpaFlowKey *mask = &flow->mask;
1517     OfDpaFlowAction *action = &flow->action;
1518     enum {
1519         MULTICAST_ROUTING_MODE_UNKNOWN,
1520         MULTICAST_ROUTING_MODE_IPV4,
1521         MULTICAST_ROUTING_MODE_IPV6,
1522     } mode = MULTICAST_ROUTING_MODE_UNKNOWN;
1523 
1524     if (!flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE] ||
1525         !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
1526         return -ROCKER_EINVAL;
1527     }
1528 
1529     key->tbl_id = ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
1530     key->width = FLOW_KEY_WIDTH(ipv6.addr.dst);
1531 
1532     key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]);
1533     switch (ntohs(key->eth.type)) {
1534     case 0x0800:
1535         mode = MULTICAST_ROUTING_MODE_IPV4;
1536         break;
1537     case 0x86dd:
1538         mode = MULTICAST_ROUTING_MODE_IPV6;
1539         break;
1540     default:
1541         return -ROCKER_EINVAL;
1542     }
1543 
1544     key->eth.vlan_id = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
1545 
1546     switch (mode) {
1547     case MULTICAST_ROUTING_MODE_IPV4:
1548 
1549         if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP]) {
1550             key->ipv4.addr.src =
1551                 rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP]);
1552         }
1553 
1554         if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP_MASK]) {
1555             mask->ipv4.addr.src =
1556                 rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP_MASK]);
1557         }
1558 
1559         if (!flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP]) {
1560             if (mask->ipv4.addr.src != 0) {
1561                 return -ROCKER_EINVAL;
1562             }
1563         }
1564 
1565         if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]) {
1566             return -ROCKER_EINVAL;
1567         }
1568 
1569         key->ipv4.addr.dst =
1570             rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]);
1571         if (!ipv4_addr_is_multicast(key->ipv4.addr.dst)) {
1572             return -ROCKER_EINVAL;
1573         }
1574 
1575         break;
1576 
1577     case MULTICAST_ROUTING_MODE_IPV6:
1578 
1579         if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6]) {
1580             memcpy(&key->ipv6.addr.src,
1581                    rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6]),
1582                    sizeof(key->ipv6.addr.src));
1583         }
1584 
1585         if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6_MASK]) {
1586             memcpy(&mask->ipv6.addr.src,
1587                    rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6_MASK]),
1588                    sizeof(mask->ipv6.addr.src));
1589         }
1590 
1591         if (!flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6]) {
1592             if (mask->ipv6.addr.src.addr32[0] != 0 &&
1593                 mask->ipv6.addr.src.addr32[1] != 0 &&
1594                 mask->ipv6.addr.src.addr32[2] != 0 &&
1595                 mask->ipv6.addr.src.addr32[3] != 0) {
1596                 return -ROCKER_EINVAL;
1597             }
1598         }
1599 
1600         if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]) {
1601             return -ROCKER_EINVAL;
1602         }
1603 
1604         memcpy(&key->ipv6.addr.dst,
1605                rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]),
1606                sizeof(key->ipv6.addr.dst));
1607         if (!ipv6_addr_is_multicast(&key->ipv6.addr.dst)) {
1608             return -ROCKER_EINVAL;
1609         }
1610 
1611         break;
1612 
1613     default:
1614         return -ROCKER_EINVAL;
1615     }
1616 
1617     if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
1618         action->goto_tbl =
1619             rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
1620         if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_ACL_POLICY) {
1621             return -ROCKER_EINVAL;
1622         }
1623     }
1624 
1625     if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
1626         action->write.group_id =
1627             rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
1628         if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
1629             ROCKER_OF_DPA_GROUP_TYPE_L3_MCAST) {
1630             return -ROCKER_EINVAL;
1631         }
1632         action->write.vlan_id = key->eth.vlan_id;
1633     }
1634 
1635     return ROCKER_OK;
1636 }
1637 
1638 static int of_dpa_cmd_add_acl_ip(OfDpaFlowKey *key, OfDpaFlowKey *mask,
1639                                  RockerTlv **flow_tlvs)
1640 {
1641     key->width = FLOW_KEY_WIDTH(ip.tos);
1642 
1643     key->ip.proto = 0;
1644     key->ip.tos = 0;
1645     mask->ip.proto = 0;
1646     mask->ip.tos = 0;
1647 
1648     if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO]) {
1649         key->ip.proto =
1650             rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO]);
1651     }
1652     if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO_MASK]) {
1653         mask->ip.proto =
1654             rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO_MASK]);
1655     }
1656     if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP]) {
1657         key->ip.tos =
1658             rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP]);
1659     }
1660     if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP_MASK]) {
1661         mask->ip.tos =
1662             rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP_MASK]);
1663     }
1664     if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN]) {
1665         key->ip.tos |=
1666             rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN]) << 6;
1667     }
1668     if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN_MASK]) {
1669         mask->ip.tos |=
1670             rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN_MASK]) << 6;
1671     }
1672 
1673     return ROCKER_OK;
1674 }
1675 
/* Parse and validate an ACL Policy table entry add/mod command.
 *
 * Requires in_pport and an ethertype; matches optionally on src/dst
 * MAC (with masks), VLAN (with mask) and, for IP ethertypes, the IP
 * proto/tos fields.  An ACL mode is derived from ethertype plus
 * VLAN-vs-tenant context; only the *_VLAN modes are accepted for
 * now.  Returns ROCKER_OK or -ROCKER_EINVAL.
 */
static int of_dpa_cmd_add_acl(OfDpaFlow *flow, RockerTlv **flow_tlvs)
{
    OfDpaFlowKey *key = &flow->key;
    OfDpaFlowKey *mask = &flow->mask;
    OfDpaFlowAction *action = &flow->action;
    enum {
        ACL_MODE_UNKNOWN,
        ACL_MODE_IPV4_VLAN,
        ACL_MODE_IPV6_VLAN,
        ACL_MODE_IPV4_TENANT,
        ACL_MODE_IPV6_TENANT,
        ACL_MODE_NON_IP_VLAN,
        ACL_MODE_NON_IP_TENANT,
        ACL_MODE_ANY_VLAN,
        ACL_MODE_ANY_TENANT,
    } mode = ACL_MODE_UNKNOWN;
    int err = ROCKER_OK;

    if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]) {
        return -ROCKER_EINVAL;
    }

    /* VLAN and tunnel context are mutually exclusive */
    if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID] &&
        flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_ID]) {
        return -ROCKER_EINVAL;
    }

    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
    key->width = FLOW_KEY_WIDTH(eth.type);

    key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]);
    if (flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]) {
        mask->in_pport =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]);
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]) {
        memcpy(key->eth.src.a,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]),
               sizeof(key->eth.src.a));
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC_MASK]) {
        memcpy(mask->eth.src.a,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC_MASK]),
               sizeof(mask->eth.src.a));
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
        memcpy(key->eth.dst.a,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
               sizeof(key->eth.dst.a));
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]) {
        memcpy(mask->eth.dst.a,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]),
               sizeof(mask->eth.dst.a));
    }

    /* ethertype 0 wildcards; non-zero matches exactly (0xffff is
     * all-ones so byte order is irrelevant here, unlike the htons()
     * used elsewhere)
     */
    key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]);
    if (key->eth.type) {
        mask->eth.type = 0xffff;
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
        key->eth.vlan_id =
            rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]) {
        mask->eth.vlan_id =
            rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]);
    }

    /* derive the ACL mode from ethertype and VLAN presence */
    switch (ntohs(key->eth.type)) {
    case 0x0000:
        mode = (key->eth.vlan_id) ? ACL_MODE_ANY_VLAN : ACL_MODE_ANY_TENANT;
        break;
    case 0x0800:
        mode = (key->eth.vlan_id) ? ACL_MODE_IPV4_VLAN : ACL_MODE_IPV4_TENANT;
        break;
    case 0x86dd:
        mode = (key->eth.vlan_id) ? ACL_MODE_IPV6_VLAN : ACL_MODE_IPV6_TENANT;
        break;
    default:
        mode = (key->eth.vlan_id) ? ACL_MODE_NON_IP_VLAN :
                                    ACL_MODE_NON_IP_TENANT;
        break;
    }

    /* XXX only supporting VLAN modes for now */
    if (mode != ACL_MODE_IPV4_VLAN &&
        mode != ACL_MODE_IPV6_VLAN &&
        mode != ACL_MODE_NON_IP_VLAN &&
        mode != ACL_MODE_ANY_VLAN) {
        return -ROCKER_EINVAL;
    }

    /* IP ethertypes also take the IP proto/tos match fields */
    switch (ntohs(key->eth.type)) {
    case 0x0800:
    case 0x86dd:
        err = of_dpa_cmd_add_acl_ip(key, mask, flow_tlvs);
        break;
    }

    if (err) {
        return err;
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
        action->write.group_id =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]) {
        action->apply.copy_to_cpu =
            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]);
    }

    return ROCKER_OK;
}
1799 
1800 static int of_dpa_cmd_flow_add_mod(OfDpa *of_dpa, OfDpaFlow *flow,
1801                                    RockerTlv **flow_tlvs)
1802 {
1803     enum rocker_of_dpa_table_id tbl;
1804     int err = ROCKER_OK;
1805 
1806     if (!flow_tlvs[ROCKER_TLV_OF_DPA_TABLE_ID] ||
1807         !flow_tlvs[ROCKER_TLV_OF_DPA_PRIORITY] ||
1808         !flow_tlvs[ROCKER_TLV_OF_DPA_HARDTIME]) {
1809         return -ROCKER_EINVAL;
1810     }
1811 
1812     tbl = rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_TABLE_ID]);
1813     flow->priority = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_PRIORITY]);
1814     flow->hardtime = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_HARDTIME]);
1815 
1816     if (flow_tlvs[ROCKER_TLV_OF_DPA_IDLETIME]) {
1817         if (tbl == ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT ||
1818             tbl == ROCKER_OF_DPA_TABLE_ID_VLAN ||
1819             tbl == ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC) {
1820             return -ROCKER_EINVAL;
1821         }
1822         flow->idletime =
1823             rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IDLETIME]);
1824     }
1825 
1826     switch (tbl) {
1827     case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
1828         err = of_dpa_cmd_add_ig_port(flow, flow_tlvs);
1829         break;
1830     case ROCKER_OF_DPA_TABLE_ID_VLAN:
1831         err = of_dpa_cmd_add_vlan(flow, flow_tlvs);
1832         break;
1833     case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
1834         err = of_dpa_cmd_add_term_mac(flow, flow_tlvs);
1835         break;
1836     case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
1837         err = of_dpa_cmd_add_bridging(flow, flow_tlvs);
1838         break;
1839     case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
1840         err = of_dpa_cmd_add_unicast_routing(flow, flow_tlvs);
1841         break;
1842     case ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING:
1843         err = of_dpa_cmd_add_multicast_routing(flow, flow_tlvs);
1844         break;
1845     case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
1846         err = of_dpa_cmd_add_acl(flow, flow_tlvs);
1847         break;
1848     }
1849 
1850     return err;
1851 }
1852 
/* Handle a FLOW_ADD command: the cookie must not already exist.
 * On parse failure the freshly allocated flow is released again.
 */
static int of_dpa_cmd_flow_add(OfDpa *of_dpa, uint64_t cookie,
                               RockerTlv **flow_tlvs)
{
    OfDpaFlow *flow;
    int err;

    if (of_dpa_flow_find(of_dpa, cookie)) {
        return -ROCKER_EEXIST;
    }

    flow = of_dpa_flow_alloc(cookie);

    err = of_dpa_cmd_flow_add_mod(of_dpa, flow, flow_tlvs);
    if (err) {
        g_free(flow);
        return err;
    }

    return of_dpa_flow_add(of_dpa, flow);
}
1873 
/* Handle a FLOW_MOD command: the cookie must reference an existing flow. */
static int of_dpa_cmd_flow_mod(OfDpa *of_dpa, uint64_t cookie,
                               RockerTlv **flow_tlvs)
{
    OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie);

    return flow ? of_dpa_cmd_flow_add_mod(of_dpa, flow, flow_tlvs)
                : -ROCKER_ENOENT;
}
1885 
/* Handle a FLOW_DEL command: remove the flow identified by @cookie. */
static int of_dpa_cmd_flow_del(OfDpa *of_dpa, uint64_t cookie)
{
    OfDpaFlow *victim = of_dpa_flow_find(of_dpa, cookie);

    if (victim) {
        of_dpa_flow_del(of_dpa, victim);
        return ROCKER_OK;
    }

    return -ROCKER_ENOENT;
}
1898 
/* Handle a FLOW_GET_STATS command.
 *
 * Serializes duration (seconds since install), rx and tx packet counters
 * for the flow into @buf as TLVs and commits the result to the
 * descriptor via desc_set_buf().
 */
static int of_dpa_cmd_flow_get_stats(OfDpa *of_dpa, uint64_t cookie,
                                     struct desc_info *info, char *buf)
{
    OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie);
    int64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) / 1000;
    size_t needed;
    int off = 0;

    if (!flow) {
        return -ROCKER_ENOENT;
    }

    /* duration (u32) + rx_pkts (u64) + tx_pkts (u64) */
    needed = rocker_tlv_total_size(sizeof(uint32_t)) +
             rocker_tlv_total_size(sizeof(uint64_t)) +
             rocker_tlv_total_size(sizeof(uint64_t));

    if (needed > desc_buf_size(info)) {
        return -ROCKER_EMSGSIZE;
    }

    rocker_tlv_put_le32(buf, &off, ROCKER_TLV_OF_DPA_FLOW_STAT_DURATION,
                        (int32_t)(now - flow->stats.install_time));
    rocker_tlv_put_le64(buf, &off, ROCKER_TLV_OF_DPA_FLOW_STAT_RX_PKTS,
                        flow->stats.rx_pkts);
    rocker_tlv_put_le64(buf, &off, ROCKER_TLV_OF_DPA_FLOW_STAT_TX_PKTS,
                        flow->stats.tx_pkts);

    return desc_set_buf(info, needed);
}
1929 
/* Dispatch one flow-table command.  Every flow command carries a
 * mandatory 64-bit cookie identifying the flow.
 */
static int of_dpa_flow_cmd(OfDpa *of_dpa, struct desc_info *info,
                           char *buf, uint16_t cmd,
                           RockerTlv **flow_tlvs)
{
    RockerTlv *cookie_tlv = flow_tlvs[ROCKER_TLV_OF_DPA_COOKIE];
    uint64_t cookie;

    if (!cookie_tlv) {
        return -ROCKER_EINVAL;
    }

    cookie = rocker_tlv_get_le64(cookie_tlv);

    switch (cmd) {
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD:
        return of_dpa_cmd_flow_add(of_dpa, cookie, flow_tlvs);
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD:
        return of_dpa_cmd_flow_mod(of_dpa, cookie, flow_tlvs);
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL:
        return of_dpa_cmd_flow_del(of_dpa, cookie);
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS:
        return of_dpa_cmd_flow_get_stats(of_dpa, cookie, info, buf);
    default:
        return -ROCKER_ENOTSUP;
    }
}
1955 
1956 static int of_dpa_cmd_add_l2_interface(OfDpaGroup *group,
1957                                        RockerTlv **group_tlvs)
1958 {
1959     if (!group_tlvs[ROCKER_TLV_OF_DPA_OUT_PPORT] ||
1960         !group_tlvs[ROCKER_TLV_OF_DPA_POP_VLAN]) {
1961         return -ROCKER_EINVAL;
1962     }
1963 
1964     group->l2_interface.out_pport =
1965         rocker_tlv_get_le32(group_tlvs[ROCKER_TLV_OF_DPA_OUT_PPORT]);
1966     group->l2_interface.pop_vlan =
1967         rocker_tlv_get_u8(group_tlvs[ROCKER_TLV_OF_DPA_POP_VLAN]);
1968 
1969     return ROCKER_OK;
1970 }
1971 
/* Parse an L2 rewrite group.
 *
 * An L2 rewrite group chains to an existing L2 interface group (via the
 * mandatory GROUP_ID_LOWER TLV) and may optionally rewrite source MAC,
 * destination MAC and/or VLAN id.  A rewritten VLAN id must match the
 * VLAN encoded in the referenced L2 interface group's id.
 */
static int of_dpa_cmd_add_l2_rewrite(OfDpa *of_dpa, OfDpaGroup *group,
                                     RockerTlv **group_tlvs)
{
    OfDpaGroup *l2_interface_group;

    /* The lower-level group reference is mandatory. */
    if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER]) {
        return -ROCKER_EINVAL;
    }

    group->l2_rewrite.group_id =
        rocker_tlv_get_le32(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER]);

    /* The referenced group must already exist and be an L2 interface. */
    l2_interface_group = of_dpa_group_find(of_dpa, group->l2_rewrite.group_id);
    if (!l2_interface_group ||
        ROCKER_GROUP_TYPE_GET(l2_interface_group->id) !=
                              ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) {
        DPRINTF("l2 rewrite group needs a valid l2 interface group\n");
        return -ROCKER_EINVAL;
    }

    if (group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]) {
        memcpy(group->l2_rewrite.src_mac.a,
               rocker_tlv_data(group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]),
               sizeof(group->l2_rewrite.src_mac.a));
    }

    if (group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
        memcpy(group->l2_rewrite.dst_mac.a,
               rocker_tlv_data(group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
               sizeof(group->l2_rewrite.dst_mac.a));
    }

    if (group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
        group->l2_rewrite.vlan_id =
            rocker_tlv_get_u16(group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
        /* vlan_id is stored in network byte order (see of_dpa_flow_key
         * comment at the top of this file); the group id encodes the
         * VLAN in host order, hence the ntohs() before comparing. */
        if (ROCKER_GROUP_VLAN_GET(l2_interface_group->id) !=
            (ntohs(group->l2_rewrite.vlan_id) & VLAN_VID_MASK)) {
            DPRINTF("Set VLAN ID must be same as L2 interface group\n");
            return -ROCKER_EINVAL;
        }
    }

    return ROCKER_OK;
}
2016 
/* Parse an L2 flood (or L2 multicast) group.
 *
 * The guest supplies a GROUP_COUNT and a nested GROUP_IDS TLV listing
 * the member L2 interface groups.  All referenced L2 interface groups
 * must carry the same VLAN as the flood group itself.
 *
 * On any failure the member list is reset (count = 0, ids = NULL) so
 * the group is left in a consistent, re-parseable state.
 */
static int of_dpa_cmd_add_l2_flood(OfDpa *of_dpa, OfDpaGroup *group,
                                   RockerTlv **group_tlvs)
{
    OfDpaGroup *l2_group;
    RockerTlv **tlvs;
    int err;
    int i;

    if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_COUNT] ||
        !group_tlvs[ROCKER_TLV_OF_DPA_GROUP_IDS]) {
        return -ROCKER_EINVAL;
    }

    group->l2_flood.group_count =
        rocker_tlv_get_le16(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_COUNT]);

    tlvs = g_new0(RockerTlv *, group->l2_flood.group_count + 1);

    g_free(group->l2_flood.group_ids);
    group->l2_flood.group_ids =
        g_new0(uint32_t, group->l2_flood.group_count);

    rocker_tlv_parse_nested(tlvs, group->l2_flood.group_count,
                            group_tlvs[ROCKER_TLV_OF_DPA_GROUP_IDS]);

    for (i = 0; i < group->l2_flood.group_count; i++) {
        /* The guest may nest fewer TLVs than GROUP_COUNT claims; tlvs[]
         * is zero-initialized, so a missing slot would otherwise be
         * dereferenced as NULL by rocker_tlv_get_le32(). */
        if (!tlvs[i + 1]) {
            err = -ROCKER_EINVAL;
            goto err_out;
        }
        group->l2_flood.group_ids[i] = rocker_tlv_get_le32(tlvs[i + 1]);
    }

    /* All of the L2 interface groups referenced by the L2 flood
     * must have same VLAN
     */

    for (i = 0; i < group->l2_flood.group_count; i++) {
        l2_group = of_dpa_group_find(of_dpa, group->l2_flood.group_ids[i]);
        if (!l2_group) {
            continue;
        }
        if ((ROCKER_GROUP_TYPE_GET(l2_group->id) ==
             ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) &&
            (ROCKER_GROUP_VLAN_GET(l2_group->id) !=
             ROCKER_GROUP_VLAN_GET(group->id))) {
            DPRINTF("l2 interface group 0x%08x VLAN doesn't match l2 "
                    "flood group 0x%08x\n",
                    group->l2_flood.group_ids[i], group->id);
            err = -ROCKER_EINVAL;
            goto err_out;
        }
    }

    g_free(tlvs);
    return ROCKER_OK;

err_out:
    group->l2_flood.group_count = 0;
    g_free(group->l2_flood.group_ids);
    /* Clear the pointer: leaving it dangling lets a later group mod/del
     * free it a second time. */
    group->l2_flood.group_ids = NULL;
    g_free(tlvs);

    return err;
}
2077 
/* Parse an L3 unicast group: a mandatory lower-level group reference
 * plus optional src/dst MAC rewrite, VLAN id and TTL-check flag.
 */
static int of_dpa_cmd_add_l3_unicast(OfDpaGroup *group, RockerTlv **group_tlvs)
{
    RockerTlv *tlv;

    tlv = group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER];
    if (!tlv) {
        return -ROCKER_EINVAL;
    }
    group->l3_unicast.group_id = rocker_tlv_get_le32(tlv);

    tlv = group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC];
    if (tlv) {
        memcpy(group->l3_unicast.src_mac.a, rocker_tlv_data(tlv),
               sizeof(group->l3_unicast.src_mac.a));
    }

    tlv = group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC];
    if (tlv) {
        memcpy(group->l3_unicast.dst_mac.a, rocker_tlv_data(tlv),
               sizeof(group->l3_unicast.dst_mac.a));
    }

    tlv = group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID];
    if (tlv) {
        group->l3_unicast.vlan_id = rocker_tlv_get_u16(tlv);
    }

    tlv = group_tlvs[ROCKER_TLV_OF_DPA_TTL_CHECK];
    if (tlv) {
        group->l3_unicast.ttl_check = rocker_tlv_get_u8(tlv);
    }

    return ROCKER_OK;
}
2111 
/* Dispatch group parsing by the type encoded in the group id. */
static int of_dpa_cmd_group_do(OfDpa *of_dpa, uint32_t group_id,
                               OfDpaGroup *group, RockerTlv **group_tlvs)
{
    switch (ROCKER_GROUP_TYPE_GET(group_id)) {
    case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
        return of_dpa_cmd_add_l2_interface(group, group_tlvs);
    case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
        return of_dpa_cmd_add_l2_rewrite(of_dpa, group, group_tlvs);
    case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
    /* Treat L2 multicast group same as a L2 flood group */
    case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
        return of_dpa_cmd_add_l2_flood(of_dpa, group, group_tlvs);
    case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
        return of_dpa_cmd_add_l3_unicast(group, group_tlvs);
    default:
        return -ROCKER_ENOTSUP;
    }
}
2132 
/* Handle a GROUP_ADD command: the group id must not already exist.
 * The allocation is released if parsing or table insertion fails.
 */
static int of_dpa_cmd_group_add(OfDpa *of_dpa, uint32_t group_id,
                                RockerTlv **group_tlvs)
{
    OfDpaGroup *group;
    int err;

    if (of_dpa_group_find(of_dpa, group_id)) {
        return -ROCKER_EEXIST;
    }

    group = of_dpa_group_alloc(group_id);

    err = of_dpa_cmd_group_do(of_dpa, group_id, group, group_tlvs);
    if (!err) {
        err = of_dpa_group_add(of_dpa, group);
        if (!err) {
            return ROCKER_OK;
        }
    }

    g_free(group);
    return err;
}
2161 
/* Handle a GROUP_MOD command: the group id must reference an existing
 * group, which is then re-parsed in place.
 */
static int of_dpa_cmd_group_mod(OfDpa *of_dpa, uint32_t group_id,
                                RockerTlv **group_tlvs)
{
    OfDpaGroup *group = of_dpa_group_find(of_dpa, group_id);

    return group ? of_dpa_cmd_group_do(of_dpa, group_id, group, group_tlvs)
                 : -ROCKER_ENOENT;
}
2173 
/* Handle a GROUP_DEL command: remove the group identified by @group_id. */
static int of_dpa_cmd_group_del(OfDpa *of_dpa, uint32_t group_id)
{
    OfDpaGroup *victim = of_dpa_group_find(of_dpa, group_id);

    return victim ? of_dpa_group_del(of_dpa, victim) : -ROCKER_ENOENT;
}
2184 
/* GROUP_GET_STATS is not implemented; always reports ENOTSUP. */
static int of_dpa_cmd_group_get_stats(OfDpa *of_dpa, uint32_t group_id,
                                      struct desc_info *info, char *buf)
{
    return -ROCKER_ENOTSUP;
}
2190 
/* Dispatch one group-table command.  Every group command carries a
 * mandatory 32-bit group id.
 */
static int of_dpa_group_cmd(OfDpa *of_dpa, struct desc_info *info,
                            char *buf, uint16_t cmd, RockerTlv **group_tlvs)
{
    RockerTlv *id_tlv = group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID];
    uint32_t group_id;

    if (!id_tlv) {
        return -ROCKER_EINVAL;
    }

    group_id = rocker_tlv_get_le32(id_tlv);

    switch (cmd) {
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD:
        return of_dpa_cmd_group_add(of_dpa, group_id, group_tlvs);
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD:
        return of_dpa_cmd_group_mod(of_dpa, group_id, group_tlvs);
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL:
        return of_dpa_cmd_group_del(of_dpa, group_id);
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS:
        return of_dpa_cmd_group_get_stats(of_dpa, group_id, info, buf);
    default:
        return -ROCKER_ENOTSUP;
    }
}
2215 
/* Top-level OF-DPA command entry point (WorldOps.cmd).
 *
 * Unpacks the nested command-info TLV and routes flow-table commands to
 * of_dpa_flow_cmd() and group-table commands to of_dpa_group_cmd().
 */
static int of_dpa_cmd(World *world, struct desc_info *info,
                      char *buf, uint16_t cmd, RockerTlv *cmd_info_tlv)
{
    OfDpa *od = world_private(world);
    RockerTlv *tlvs[ROCKER_TLV_OF_DPA_MAX + 1];

    rocker_tlv_parse_nested(tlvs, ROCKER_TLV_OF_DPA_MAX, cmd_info_tlv);

    switch (cmd) {
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS:
        return of_dpa_flow_cmd(od, info, buf, cmd, tlvs);
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS:
        return of_dpa_group_cmd(od, info, buf, cmd, tlvs);
    default:
        return -ROCKER_ENOTSUP;
    }
}
2239 
/* GHashTable equality callback for 64-bit flow cookies. */
static gboolean rocker_int64_equal(gconstpointer v1, gconstpointer v2)
{
    const uint64_t *a = v1;
    const uint64_t *b = v2;

    return *a == *b;
}
2244 
/* GHashTable hash callback for 64-bit flow cookies: truncate to guint. */
static guint rocker_int64_hash(gconstpointer v)
{
    const uint64_t *key = v;

    return (guint)*key;
}
2249 
/* WorldOps.init hook: set up the flow and group hash tables.
 *
 * Values are owned by the tables (g_free destructor); keys live inside
 * the values, so no key destructor is registered.
 * Returns 0 on success, -ENOMEM on failure.
 */
static int of_dpa_init(World *world)
{
    OfDpa *of_dpa = world_private(world);

    of_dpa->world = world;

    /* Keyed by 64-bit flow cookie. */
    of_dpa->flow_tbl = g_hash_table_new_full(rocker_int64_hash,
                                             rocker_int64_equal,
                                             NULL, g_free);
    /* NOTE(review): g_hash_table_new_full() aborts on OOM and never
     * returns NULL per GLib docs, so these checks look defensive only —
     * confirm before relying on the -ENOMEM paths. */
    if (!of_dpa->flow_tbl) {
        return -ENOMEM;
    }

    /* Keyed by 32-bit group id. */
    of_dpa->group_tbl = g_hash_table_new_full(g_int_hash, g_int_equal,
                                              NULL, g_free);
    if (!of_dpa->group_tbl) {
        goto err_group_tbl;
    }

    /* XXX hardcode some artificial table max values */
    of_dpa->flow_tbl_max_size = 100;
    of_dpa->group_tbl_max_size = 100;

    return 0;

err_group_tbl:
    g_hash_table_destroy(of_dpa->flow_tbl);
    return -ENOMEM;
}
2279 
2280 static void of_dpa_uninit(World *world)
2281 {
2282     OfDpa *of_dpa = world_private(world);
2283 
2284     g_hash_table_destroy(of_dpa->group_tbl);
2285     g_hash_table_destroy(of_dpa->flow_tbl);
2286 }
2287 
/* Context threaded through g_hash_table_foreach() while building the
 * QMP flow list; tbl_id == -1 (0xffffffff) means "all tables". */
struct of_dpa_flow_fill_context {
    RockerOfDpaFlowList *list;  /* result list; entries are prepended */
    uint32_t tbl_id;            /* table filter, or -1 for no filter */
};
2292 
/* g_hash_table_foreach() callback: translate one internal OfDpaFlow
 * into a QAPI RockerOfDpaFlow and prepend it to the result list.
 *
 * Optional key fields are reported only when the key or its mask is
 * non-zero, and a mask is reported only when it is not an exact match
 * (all-ones).  Fields stored in network byte order (vlan_id, eth.type;
 * see the of_dpa_flow_key comment at the top of this file) are
 * converted with ntohs() on the way out.
 */
static void of_dpa_flow_fill(void *cookie, void *value, void *user_data)
{
    struct of_dpa_flow *flow = value;
    struct of_dpa_flow_key *key = &flow->key;
    struct of_dpa_flow_key *mask = &flow->mask;
    struct of_dpa_flow_fill_context *flow_context = user_data;
    RockerOfDpaFlow *nflow;
    RockerOfDpaFlowKey *nkey;
    RockerOfDpaFlowMask *nmask;
    RockerOfDpaFlowAction *naction;

    /* -1 acts as a wildcard: report flows from every table. */
    if (flow_context->tbl_id != -1 &&
        flow_context->tbl_id != key->tbl_id) {
        return;
    }

    nflow = g_malloc0(sizeof(*nflow));
    nkey = nflow->key = g_malloc0(sizeof(*nkey));
    nmask = nflow->mask = g_malloc0(sizeof(*nmask));
    naction = nflow->action = g_malloc0(sizeof(*naction));

    nflow->cookie = flow->cookie;
    nflow->hits = flow->stats.hits;
    nkey->priority = flow->priority;
    nkey->tbl_id = key->tbl_id;

    if (key->in_pport || mask->in_pport) {
        nkey->has_in_pport = true;
        nkey->in_pport = key->in_pport;
    }

    /* Only emit the mask when it is not an exact match. */
    if (nkey->has_in_pport && mask->in_pport != 0xffffffff) {
        nmask->has_in_pport = true;
        nmask->in_pport = mask->in_pport;
    }

    if (key->eth.vlan_id || mask->eth.vlan_id) {
        nkey->has_vlan_id = true;
        nkey->vlan_id = ntohs(key->eth.vlan_id);
    }

    if (nkey->has_vlan_id && mask->eth.vlan_id != 0xffff) {
        nmask->has_vlan_id = true;
        nmask->vlan_id = ntohs(mask->eth.vlan_id);
    }

    if (key->tunnel_id || mask->tunnel_id) {
        nkey->has_tunnel_id = true;
        nkey->tunnel_id = key->tunnel_id;
    }

    if (nkey->has_tunnel_id && mask->tunnel_id != 0xffffffff) {
        nmask->has_tunnel_id = true;
        nmask->tunnel_id = mask->tunnel_id;
    }

    /* MAC addresses are rendered as strings when key or mask is set. */
    if (memcmp(key->eth.src.a, zero_mac.a, ETH_ALEN) ||
        memcmp(mask->eth.src.a, zero_mac.a, ETH_ALEN)) {
        nkey->eth_src = qemu_mac_strdup_printf(key->eth.src.a);
    }

    if (nkey->eth_src && memcmp(mask->eth.src.a, ff_mac.a, ETH_ALEN)) {
        nmask->eth_src = qemu_mac_strdup_printf(mask->eth.src.a);
    }

    if (memcmp(key->eth.dst.a, zero_mac.a, ETH_ALEN) ||
        memcmp(mask->eth.dst.a, zero_mac.a, ETH_ALEN)) {
        nkey->eth_dst = qemu_mac_strdup_printf(key->eth.dst.a);
    }

    if (nkey->eth_dst && memcmp(mask->eth.dst.a, ff_mac.a, ETH_ALEN)) {
        nmask->eth_dst = qemu_mac_strdup_printf(mask->eth.dst.a);
    }

    if (key->eth.type) {

        nkey->has_eth_type = true;
        nkey->eth_type = ntohs(key->eth.type);

        /* IP protocol and TOS only apply to IPv4/IPv6 ethertypes. */
        switch (ntohs(key->eth.type)) {
        case 0x0800:
        case 0x86dd:
            if (key->ip.proto || mask->ip.proto) {
                nkey->has_ip_proto = true;
                nkey->ip_proto = key->ip.proto;
            }
            if (nkey->has_ip_proto && mask->ip.proto != 0xff) {
                nmask->has_ip_proto = true;
                nmask->ip_proto = mask->ip.proto;
            }
            if (key->ip.tos || mask->ip.tos) {
                nkey->has_ip_tos = true;
                nkey->ip_tos = key->ip.tos;
            }
            if (nkey->has_ip_tos && mask->ip.tos != 0xff) {
                nmask->has_ip_tos = true;
                nmask->ip_tos = mask->ip.tos;
            }
            break;
        }

        switch (ntohs(key->eth.type)) {
        case 0x0800:
            /* IPv4 destination rendered in CIDR "addr/prefix" form. */
            if (key->ipv4.addr.dst || mask->ipv4.addr.dst) {
                char *dst = inet_ntoa(*(struct in_addr *)&key->ipv4.addr.dst);
                int dst_len = of_dpa_mask2prefix(mask->ipv4.addr.dst);
                nkey->ip_dst = g_strdup_printf("%s/%d", dst, dst_len);
            }
            break;
        }
    }

    if (flow->action.goto_tbl) {
        naction->has_goto_tbl = true;
        naction->goto_tbl = flow->action.goto_tbl;
    }

    if (flow->action.write.group_id) {
        naction->has_group_id = true;
        naction->group_id = flow->action.write.group_id;
    }

    if (flow->action.apply.new_vlan_id) {
        naction->has_new_vlan_id = true;
        naction->new_vlan_id = flow->action.apply.new_vlan_id;
    }

    QAPI_LIST_PREPEND(flow_context->list, nflow);
}
2422 
/* QMP handler for query-rocker-of-dpa-flows.
 *
 * @has_tbl_id / @tbl_id optionally restrict the listing to one flow
 * table; when omitted, all tables are reported.
 */
RockerOfDpaFlowList *qmp_query_rocker_of_dpa_flows(const char *name,
                                                   bool has_tbl_id,
                                                   uint32_t tbl_id,
                                                   Error **errp)
{
    struct rocker *r;
    struct world *w;
    struct of_dpa *of_dpa;
    struct of_dpa_flow_fill_context fill_context = {
        .list = NULL,
        /* of_dpa_flow_fill() treats -1 as "all tables"; honor the
         * optional-argument flag instead of consuming tbl_id's default
         * value when the caller omitted it. */
        .tbl_id = has_tbl_id ? tbl_id : (uint32_t)-1,
    };

    r = rocker_find(name);
    if (!r) {
        error_setg(errp, "rocker %s not found", name);
        return NULL;
    }

    w = rocker_get_world(r, ROCKER_WORLD_TYPE_OF_DPA);
    if (!w) {
        error_setg(errp, "rocker %s doesn't have OF-DPA world", name);
        return NULL;
    }

    of_dpa = world_private(w);

    g_hash_table_foreach(of_dpa->flow_tbl, of_dpa_flow_fill, &fill_context);

    return fill_context.list;
}
2454 
/* Context threaded through g_hash_table_foreach() while building the
 * QMP group list; type == 9 means "all group types" (the valid type
 * codes fit below it — see of_dpa_group_fill()). */
struct of_dpa_group_fill_context {
    RockerOfDpaGroupList *list;  /* result list; entries are prepended */
    uint8_t type;                /* group-type filter, or 9 for no filter */
};
2459 
/* g_hash_table_foreach() callback: translate one internal of_dpa_group
 * into a QAPI RockerOfDpaGroup and prepend it to the result list.
 *
 * Which optional fields are populated depends on the group type, since
 * the group id encodes different sub-fields (VLAN, pport, index) per
 * type.
 */
static void of_dpa_group_fill(void *key, void *value, void *user_data)
{
    struct of_dpa_group *group = value;
    struct of_dpa_group_fill_context *flow_context = user_data;
    RockerOfDpaGroup *ngroup;
    int i;

    /* 9 acts as a wildcard: report groups of every type. */
    if (flow_context->type != 9 &&
        flow_context->type != ROCKER_GROUP_TYPE_GET(group->id)) {
        return;
    }

    ngroup = g_malloc0(sizeof(*ngroup));

    ngroup->id = group->id;

    ngroup->type = ROCKER_GROUP_TYPE_GET(group->id);

    switch (ngroup->type) {
    case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
        ngroup->has_vlan_id = true;
        ngroup->vlan_id = ROCKER_GROUP_VLAN_GET(group->id);
        ngroup->has_pport = true;
        ngroup->pport = ROCKER_GROUP_PORT_GET(group->id);
        ngroup->has_out_pport = true;
        ngroup->out_pport = group->l2_interface.out_pport;
        ngroup->has_pop_vlan = true;
        ngroup->pop_vlan = group->l2_interface.pop_vlan;
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
        ngroup->has_index = true;
        ngroup->index = ROCKER_GROUP_INDEX_LONG_GET(group->id);
        ngroup->has_group_id = true;
        ngroup->group_id = group->l2_rewrite.group_id;
        /* vlan_id is stored in network byte order, hence ntohs(). */
        if (group->l2_rewrite.vlan_id) {
            ngroup->has_set_vlan_id = true;
            ngroup->set_vlan_id = ntohs(group->l2_rewrite.vlan_id);
        }
        /* Rewrite MACs reported only when configured (non-zero). */
        if (memcmp(group->l2_rewrite.src_mac.a, zero_mac.a, ETH_ALEN)) {
            ngroup->set_eth_src =
                qemu_mac_strdup_printf(group->l2_rewrite.src_mac.a);
        }
        if (memcmp(group->l2_rewrite.dst_mac.a, zero_mac.a, ETH_ALEN)) {
            ngroup->set_eth_dst =
                qemu_mac_strdup_printf(group->l2_rewrite.dst_mac.a);
        }
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
    case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
        ngroup->has_vlan_id = true;
        ngroup->vlan_id = ROCKER_GROUP_VLAN_GET(group->id);
        ngroup->has_index = true;
        ngroup->index = ROCKER_GROUP_INDEX_GET(group->id);
        /* Member group ids end up in reverse order (list prepend). */
        for (i = 0; i < group->l2_flood.group_count; i++) {
            ngroup->has_group_ids = true;
            QAPI_LIST_PREPEND(ngroup->group_ids, group->l2_flood.group_ids[i]);
        }
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
        ngroup->has_index = true;
        ngroup->index = ROCKER_GROUP_INDEX_LONG_GET(group->id);
        ngroup->has_group_id = true;
        ngroup->group_id = group->l3_unicast.group_id;
        if (group->l3_unicast.vlan_id) {
            ngroup->has_set_vlan_id = true;
            ngroup->set_vlan_id = ntohs(group->l3_unicast.vlan_id);
        }
        if (memcmp(group->l3_unicast.src_mac.a, zero_mac.a, ETH_ALEN)) {
            ngroup->set_eth_src =
                qemu_mac_strdup_printf(group->l3_unicast.src_mac.a);
        }
        if (memcmp(group->l3_unicast.dst_mac.a, zero_mac.a, ETH_ALEN)) {
            ngroup->set_eth_dst =
                qemu_mac_strdup_printf(group->l3_unicast.dst_mac.a);
        }
        if (group->l3_unicast.ttl_check) {
            ngroup->has_ttl_check = true;
            ngroup->ttl_check = group->l3_unicast.ttl_check;
        }
        break;
    }

    QAPI_LIST_PREPEND(flow_context->list, ngroup);
}
2544 
/* QMP handler for query-rocker-of-dpa-groups.
 *
 * @has_type / @type optionally restrict the listing to one group type;
 * when omitted, all group types are reported.
 */
RockerOfDpaGroupList *qmp_query_rocker_of_dpa_groups(const char *name,
                                                     bool has_type,
                                                     uint8_t type,
                                                     Error **errp)
{
    struct rocker *r;
    struct world *w;
    struct of_dpa *of_dpa;
    struct of_dpa_group_fill_context fill_context = {
        .list = NULL,
        /* of_dpa_group_fill() treats 9 as "all types"; honor the
         * optional-argument flag instead of consuming type's default
         * value when the caller omitted it. */
        .type = has_type ? type : 9,
    };

    r = rocker_find(name);
    if (!r) {
        error_setg(errp, "rocker %s not found", name);
        return NULL;
    }

    w = rocker_get_world(r, ROCKER_WORLD_TYPE_OF_DPA);
    if (!w) {
        error_setg(errp, "rocker %s doesn't have OF-DPA world", name);
        return NULL;
    }

    of_dpa = world_private(w);

    g_hash_table_foreach(of_dpa->group_tbl, of_dpa_group_fill, &fill_context);

    return fill_context.list;
}
2576 
/* Hook table registering the OF-DPA world with the rocker core. */
static WorldOps of_dpa_ops = {
    .name = "ofdpa",
    .init = of_dpa_init,
    .uninit = of_dpa_uninit,
    .ig = of_dpa_ig,    /* ingress handler, defined earlier in this file */
    .cmd = of_dpa_cmd,
};
2584 
/* Allocate the OF-DPA world for rocker @r; the OfDpa state is carved
 * out as the world's private data and initialized via of_dpa_ops.init.
 */
World *of_dpa_world_alloc(Rocker *r)
{
    return world_alloc(r, sizeof(OfDpa), ROCKER_WORLD_TYPE_OF_DPA, &of_dpa_ops);
}
2589