1 /*
2  * drivers/net/ethernet/rocker/rocker_ofdpa.c - Rocker switch OF-DPA-like
3  *					        implementation
4  * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
5  * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com>
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  */
12 
13 #include <linux/kernel.h>
14 #include <linux/types.h>
15 #include <linux/spinlock.h>
16 #include <linux/hashtable.h>
17 #include <linux/crc32.h>
18 #include <linux/netdevice.h>
19 #include <linux/inetdevice.h>
20 #include <linux/if_vlan.h>
21 #include <linux/if_bridge.h>
22 #include <net/neighbour.h>
23 #include <net/switchdev.h>
24 #include <net/ip_fib.h>
25 #include <net/arp.h>
26 
27 #include "rocker.h"
28 #include "rocker_tlv.h"
29 
/* Match key for a flow table entry.  'tbl_id' selects which member of
 * the union is meaningful; 'priority' orders matches within a table
 * (see OFDPA_PRIORITY_*).  The whole struct is crc32-hashed to index
 * the flow table, so entries must be zero-allocated to keep unused
 * union bytes deterministic.
 */
struct ofdpa_flow_tbl_key {
	u32 priority;
	enum rocker_of_dpa_table_id tbl_id;
	union {
		/* ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT */
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			enum rocker_of_dpa_table_id goto_tbl;
		} ig_port;
		/* ROCKER_OF_DPA_TABLE_ID_VLAN */
		struct {
			u32 in_pport;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool untagged;
			__be16 new_vlan_id;	/* pushed only when untagged */
		} vlan;
		/* ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC */
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			__be16 eth_type;
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool copy_to_cpu;
		} term_mac;
		/* ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING */
		struct {
			__be16 eth_type;
			__be32 dst4;
			__be32 dst4_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
		} ucast_routing;
		/* ROCKER_OF_DPA_TABLE_ID_BRIDGING */
		struct {
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			int has_eth_dst;	/* eth_dst[] is valid */
			int has_eth_dst_mask;	/* eth_dst_mask[] is valid */
			__be16 vlan_id;
			u32 tunnel_id;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
			bool copy_to_cpu;
		} bridge;
		/* ROCKER_OF_DPA_TABLE_ID_ACL_POLICY */
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			u8 eth_src[ETH_ALEN];
			u8 eth_src_mask[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 eth_type;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			u8 ip_proto;
			u8 ip_proto_mask;
			u8 ip_tos;
			u8 ip_tos_mask;
			u32 group_id;
		} acl;
	};
};
94 
/* A flow table entry, hashed into ofdpa->flow_tbl by key_crc32.
 * 'cookie' identifies the entry to the device (assigned on first add);
 * 'key_len' is the number of key bytes hashed/compared (0 = whole key).
 */
struct ofdpa_flow_tbl_entry {
	struct hlist_node entry;
	u32 cmd;	/* ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_{ADD,MOD,DEL} */
	u64 cookie;
	struct ofdpa_flow_tbl_key key;
	size_t key_len;
	u32 key_crc32; /* key */
	struct fib_info *fi;	/* ucast routing only; not part of key */
};
104 
/* A group table entry, hashed by group_id.  The union member in use is
 * determined by ROCKER_GROUP_TYPE_GET(group_id); group_ids[] is a
 * separately-allocated member list used by L2 flood/mcast groups.
 */
struct ofdpa_group_tbl_entry {
	struct hlist_node entry;
	u32 cmd;	/* ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_* */
	u32 group_id; /* key */
	u16 group_count;	/* number of entries in group_ids[] */
	u32 *group_ids;
	union {
		/* ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE */
		struct {
			u8 pop_vlan;
		} l2_interface;
		/* ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE */
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			u32 group_id;	/* lower-level group to chain to */
		} l2_rewrite;
		/* ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST */
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			bool ttl_check;
			u32 group_id;	/* lower-level group to chain to */
		} l3_unicast;
	};
};
130 
/* An FDB entry keyed on (port, MAC, VLAN), hashed by crc32 of the key.
 * 'touched' records last activity — presumably consulted by the
 * fdb_cleanup_timer for ageing; the handler is outside this chunk.
 */
struct ofdpa_fdb_tbl_entry {
	struct hlist_node entry;
	u32 key_crc32; /* key */
	bool learned;		/* learned by HW vs. statically added */
	unsigned long touched;
	struct ofdpa_fdb_tbl_key {
		struct ofdpa_port *ofdpa_port;
		u8 addr[ETH_ALEN];
		__be16 vlan_id;
	} key;
};
142 
/* Maps a netdev ifindex to the internal VLAN id assigned to its
 * untagged traffic; refcounted so multiple users share one mapping.
 */
struct ofdpa_internal_vlan_tbl_entry {
	struct hlist_node entry;
	int ifindex; /* key */
	u32 ref_count;
	__be16 vlan_id;
};
149 
/* A neighbour (nexthop) table entry, keyed by IPv4 address and
 * refcounted; 'index' is a driver-assigned id (see
 * ofdpa->neigh_tbl_next_index).
 */
struct ofdpa_neigh_tbl_entry {
	struct hlist_node entry;
	__be32 ip_addr; /* key */
	struct net_device *dev;
	u32 ref_count;
	u32 index;
	u8 eth_dst[ETH_ALEN];	/* resolved neighbour MAC */
	bool ttl_check;
};
159 
/* Control-traffic classes tracked per port; values index the ctrls[]
 * array in struct ofdpa_port.  The per-class handling lives outside
 * this chunk.
 */
enum {
	OFDPA_CTRL_LINK_LOCAL_MCAST,
	OFDPA_CTRL_LOCAL_ARP,
	OFDPA_CTRL_IPV4_MCAST,
	OFDPA_CTRL_IPV6_MCAST,
	OFDPA_CTRL_DFLT_BRIDGING,
	OFDPA_CTRL_DFLT_OVS,
	OFDPA_CTRL_MAX,		/* array size, not a valid class */
};
169 
/* Internal VLAN ids (0x0f00 + up to 255 values) are reserved for
 * untagged traffic; vid 0 maps to a port's internal VLAN id.
 */
#define OFDPA_INTERNAL_VLAN_ID_BASE	0x0f00
#define OFDPA_N_INTERNAL_VLANS		255
#define OFDPA_VLAN_BITMAP_LEN		BITS_TO_LONGS(VLAN_N_VID)
#define OFDPA_INTERNAL_VLAN_BITMAP_LEN	BITS_TO_LONGS(OFDPA_N_INTERNAL_VLANS)
#define OFDPA_UNTAGGED_VID 0
175 
/* Driver-private state for one rocker switch in OF-DPA mode: the
 * software shadows of the device's flow/group/FDB/neigh tables plus
 * the internal-VLAN allocator.
 */
struct ofdpa {
	struct rocker *rocker;
	DECLARE_HASHTABLE(flow_tbl, 16);
	spinlock_t flow_tbl_lock;		/* for flow tbl accesses */
	u64 flow_tbl_next_cookie;		/* next device cookie to assign */
	DECLARE_HASHTABLE(group_tbl, 16);
	spinlock_t group_tbl_lock;		/* for group tbl accesses */
	struct timer_list fdb_cleanup_timer;
	DECLARE_HASHTABLE(fdb_tbl, 16);
	spinlock_t fdb_tbl_lock;		/* for fdb tbl accesses */
	unsigned long internal_vlan_bitmap[OFDPA_INTERNAL_VLAN_BITMAP_LEN];
	DECLARE_HASHTABLE(internal_vlan_tbl, 8);
	spinlock_t internal_vlan_tbl_lock;	/* for vlan tbl accesses */
	DECLARE_HASHTABLE(neigh_tbl, 16);
	spinlock_t neigh_tbl_lock;		/* for neigh tbl accesses */
	u32 neigh_tbl_next_index;
	unsigned long ageing_time;
	bool fib_aborted;
};
195 
/* Per-port private state.  'internal_vlan_id' is the VLAN assigned to
 * this port's untagged traffic (see ofdpa_port_vid_to_vlan()).
 */
struct ofdpa_port {
	struct ofdpa *ofdpa;
	struct rocker_port *rocker_port;
	struct net_device *dev;
	u32 pport;
	struct net_device *bridge_dev;	/* master device, if enslaved */
	__be16 internal_vlan_id;
	int stp_state;
	u32 brport_flags;
	unsigned long ageing_time;
	bool ctrls[OFDPA_CTRL_MAX];	/* enabled OFDPA_CTRL_* classes */
	unsigned long vlan_bitmap[OFDPA_VLAN_BITMAP_LEN];
};
209 
/* Well-known MAC addresses and masks used when building flow matches */
static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; /* exact match */
static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; /* 802 link-local base */
static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 }; /* 01:80:c2:00:00:0X */
static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; /* multicast bit */
static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 }; /* 25-bit prefix */
static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }; /* 16-bit prefix */
219 
/* Rocker priority levels for flow table entries.  Higher
 * priority match takes precedence over lower priority match.
 * Equal values in different OF-DPA tables are unrelated; priorities
 * only order matches within one table.
 */

enum {
	OFDPA_PRIORITY_UNKNOWN = 0,
	OFDPA_PRIORITY_IG_PORT = 1,
	OFDPA_PRIORITY_VLAN = 1,
	OFDPA_PRIORITY_TERM_MAC_UCAST = 0,
	OFDPA_PRIORITY_TERM_MAC_MCAST = 1,
	OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
	OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
	OFDPA_PRIORITY_BRIDGING_VLAN = 3,
	OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
	OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
	OFDPA_PRIORITY_BRIDGING_TENANT = 3,
	OFDPA_PRIORITY_ACL_CTRL = 3,
	OFDPA_PRIORITY_ACL_NORMAL = 2,
	OFDPA_PRIORITY_ACL_DFLT = 1,
};
240 
241 static bool ofdpa_vlan_id_is_internal(__be16 vlan_id)
242 {
243 	u16 start = OFDPA_INTERNAL_VLAN_ID_BASE;
244 	u16 end = 0xffe;
245 	u16 _vlan_id = ntohs(vlan_id);
246 
247 	return (_vlan_id >= start && _vlan_id <= end);
248 }
249 
250 static __be16 ofdpa_port_vid_to_vlan(const struct ofdpa_port *ofdpa_port,
251 				     u16 vid, bool *pop_vlan)
252 {
253 	__be16 vlan_id;
254 
255 	if (pop_vlan)
256 		*pop_vlan = false;
257 	vlan_id = htons(vid);
258 	if (!vlan_id) {
259 		vlan_id = ofdpa_port->internal_vlan_id;
260 		if (pop_vlan)
261 			*pop_vlan = true;
262 	}
263 
264 	return vlan_id;
265 }
266 
267 static u16 ofdpa_port_vlan_to_vid(const struct ofdpa_port *ofdpa_port,
268 				  __be16 vlan_id)
269 {
270 	if (ofdpa_vlan_id_is_internal(vlan_id))
271 		return 0;
272 
273 	return ntohs(vlan_id);
274 }
275 
/* True if the port is enslaved to a master device whose rtnl_link_ops
 * kind matches 'kind' (e.g. "bridge", "openvswitch").  NOTE(review):
 * assumes bridge_dev->rtnl_link_ops is non-NULL whenever bridge_dev is
 * set — confirm against the enslavement paths.
 */
static bool ofdpa_port_is_slave(const struct ofdpa_port *ofdpa_port,
				const char *kind)
{
	return ofdpa_port->bridge_dev &&
		!strcmp(ofdpa_port->bridge_dev->rtnl_link_ops->kind, kind);
}
282 
/* True if the port's master is a Linux bridge device */
static bool ofdpa_port_is_bridged(const struct ofdpa_port *ofdpa_port)
{
	return ofdpa_port_is_slave(ofdpa_port, "bridge");
}
287 
/* True if the port's master is an Open vSwitch device */
static bool ofdpa_port_is_ovsed(const struct ofdpa_port *ofdpa_port)
{
	return ofdpa_port_is_slave(ofdpa_port, "openvswitch");
}
292 
/* Modifier flags threaded through the table-update helpers */
#define OFDPA_OP_FLAG_REMOVE		BIT(0)	/* delete instead of add */
#define OFDPA_OP_FLAG_NOWAIT		BIT(1)	/* don't wait for cmd completion */
#define OFDPA_OP_FLAG_LEARNED		BIT(2)
#define OFDPA_OP_FLAG_REFRESH		BIT(3)
297 
298 static bool ofdpa_flags_nowait(int flags)
299 {
300 	return flags & OFDPA_OP_FLAG_NOWAIT;
301 }
302 
303 /*************************************************************
304  * Flow, group, FDB, internal VLAN and neigh command prepares
305  *************************************************************/
306 
307 static int
308 ofdpa_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
309 			       const struct ofdpa_flow_tbl_entry *entry)
310 {
311 	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
312 			       entry->key.ig_port.in_pport))
313 		return -EMSGSIZE;
314 	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
315 			       entry->key.ig_port.in_pport_mask))
316 		return -EMSGSIZE;
317 	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
318 			       entry->key.ig_port.goto_tbl))
319 		return -EMSGSIZE;
320 
321 	return 0;
322 }
323 
324 static int
325 ofdpa_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
326 			    const struct ofdpa_flow_tbl_entry *entry)
327 {
328 	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
329 			       entry->key.vlan.in_pport))
330 		return -EMSGSIZE;
331 	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
332 				entry->key.vlan.vlan_id))
333 		return -EMSGSIZE;
334 	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
335 				entry->key.vlan.vlan_id_mask))
336 		return -EMSGSIZE;
337 	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
338 			       entry->key.vlan.goto_tbl))
339 		return -EMSGSIZE;
340 	if (entry->key.vlan.untagged &&
341 	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
342 				entry->key.vlan.new_vlan_id))
343 		return -EMSGSIZE;
344 
345 	return 0;
346 }
347 
348 static int
349 ofdpa_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
350 				const struct ofdpa_flow_tbl_entry *entry)
351 {
352 	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
353 			       entry->key.term_mac.in_pport))
354 		return -EMSGSIZE;
355 	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
356 			       entry->key.term_mac.in_pport_mask))
357 		return -EMSGSIZE;
358 	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
359 				entry->key.term_mac.eth_type))
360 		return -EMSGSIZE;
361 	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
362 			   ETH_ALEN, entry->key.term_mac.eth_dst))
363 		return -EMSGSIZE;
364 	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
365 			   ETH_ALEN, entry->key.term_mac.eth_dst_mask))
366 		return -EMSGSIZE;
367 	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
368 				entry->key.term_mac.vlan_id))
369 		return -EMSGSIZE;
370 	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
371 				entry->key.term_mac.vlan_id_mask))
372 		return -EMSGSIZE;
373 	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
374 			       entry->key.term_mac.goto_tbl))
375 		return -EMSGSIZE;
376 	if (entry->key.term_mac.copy_to_cpu &&
377 	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
378 			      entry->key.term_mac.copy_to_cpu))
379 		return -EMSGSIZE;
380 
381 	return 0;
382 }
383 
384 static int
385 ofdpa_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
386 				     const struct ofdpa_flow_tbl_entry *entry)
387 {
388 	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
389 				entry->key.ucast_routing.eth_type))
390 		return -EMSGSIZE;
391 	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
392 				entry->key.ucast_routing.dst4))
393 		return -EMSGSIZE;
394 	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
395 				entry->key.ucast_routing.dst4_mask))
396 		return -EMSGSIZE;
397 	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
398 			       entry->key.ucast_routing.goto_tbl))
399 		return -EMSGSIZE;
400 	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
401 			       entry->key.ucast_routing.group_id))
402 		return -EMSGSIZE;
403 
404 	return 0;
405 }
406 
/* Emit the bridging table TLVs.  Destination MAC/mask, VLAN and tunnel
 * id are optional matches emitted only when set; goto-table and group
 * id are always emitted.
 */
static int
ofdpa_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
			      const struct ofdpa_flow_tbl_entry *entry)
{
	if (entry->key.bridge.has_eth_dst &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.bridge.eth_dst))
		return -EMSGSIZE;
	if (entry->key.bridge.has_eth_dst_mask &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.bridge.eth_dst_mask))
		return -EMSGSIZE;
	if (entry->key.bridge.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.bridge.vlan_id))
		return -EMSGSIZE;
	if (entry->key.bridge.tunnel_id &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
			       entry->key.bridge.tunnel_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.bridge.goto_tbl))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.bridge.group_id))
		return -EMSGSIZE;
	if (entry->key.bridge.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.bridge.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}
440 
/* Emit the ACL policy table TLVs.  L2 matches are always emitted; the
 * IP protocol/DSCP/ECN matches only apply when the ethertype is IPv4
 * or IPv6.  The TOS byte is split: low 6 bits are DSCP, top 2 bits
 * (shifted down) are ECN.
 */
static int
ofdpa_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
			   const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.acl.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.acl.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->key.acl.eth_src))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_src_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.acl.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.acl.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.acl.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.acl.vlan_id_mask))
		return -EMSGSIZE;

	switch (ntohs(entry->key.acl.eth_type)) {
	case ETH_P_IP:
	case ETH_P_IPV6:
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
				      entry->key.acl.ip_proto))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
				      entry->key.acl.ip_proto_mask))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
				      entry->key.acl.ip_tos & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
				      entry->key.acl.ip_tos_mask & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
				      (entry->key.acl.ip_tos & 0xc0) >> 6))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_ECN_MASK,
				      (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
			return -EMSGSIZE;
		break;
	}

	/* ROCKER_GROUP_NONE means "no group action" — omit the TLV */
	if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.acl.group_id))
		return -EMSGSIZE;

	return 0;
}
507 
/* rocker_cmd_exec() callback: marshal a flow add/mod command into the
 * descriptor.  Emits the common header TLVs, then dispatches on
 * tbl_id for the per-table match/action TLVs.  NOTE(review): error
 * paths return with the CMD_INFO nest still open — presumably the
 * caller discards the whole descriptor on error; confirm in
 * rocker_cmd_exec().  The rocker_port argument is unused here but
 * required by the callback signature.
 */
static int ofdpa_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	const struct ofdpa_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
			       entry->key.tbl_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
			       entry->key.priority))
		return -EMSGSIZE;
	/* hard timeout 0: entry never expires on its own */
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
		return -EMSGSIZE;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
			       entry->cookie))
		return -EMSGSIZE;

	switch (entry->key.tbl_id) {
	case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
		err = ofdpa_cmd_flow_tbl_add_ig_port(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_VLAN:
		err = ofdpa_cmd_flow_tbl_add_vlan(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
		err = ofdpa_cmd_flow_tbl_add_term_mac(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
		err = ofdpa_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
		err = ofdpa_cmd_flow_tbl_add_bridge(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
		err = ofdpa_cmd_flow_tbl_add_acl(desc_info, entry);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	if (err)
		return err;

	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
564 
565 static int ofdpa_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
566 				  struct rocker_desc_info *desc_info,
567 				  void *priv)
568 {
569 	const struct ofdpa_flow_tbl_entry *entry = priv;
570 	struct rocker_tlv *cmd_info;
571 
572 	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
573 		return -EMSGSIZE;
574 	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
575 	if (!cmd_info)
576 		return -EMSGSIZE;
577 	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
578 			       entry->cookie))
579 		return -EMSGSIZE;
580 	rocker_tlv_nest_end(desc_info, cmd_info);
581 
582 	return 0;
583 }
584 
585 static int
586 ofdpa_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
587 				     struct ofdpa_group_tbl_entry *entry)
588 {
589 	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
590 			       ROCKER_GROUP_PORT_GET(entry->group_id)))
591 		return -EMSGSIZE;
592 	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
593 			      entry->l2_interface.pop_vlan))
594 		return -EMSGSIZE;
595 
596 	return 0;
597 }
598 
/* Emit L2 rewrite group TLVs: chain to a lower group and optionally
 * rewrite src/dst MAC and VLAN (each rewrite emitted only when set).
 */
static int
ofdpa_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
				   const struct ofdpa_group_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l2_rewrite.group_id))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l2_rewrite.eth_src))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l2_rewrite.eth_dst))
		return -EMSGSIZE;
	if (entry->l2_rewrite.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l2_rewrite.vlan_id))
		return -EMSGSIZE;

	return 0;
}
621 
622 static int
623 ofdpa_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
624 				  const struct ofdpa_group_tbl_entry *entry)
625 {
626 	int i;
627 	struct rocker_tlv *group_ids;
628 
629 	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
630 			       entry->group_count))
631 		return -EMSGSIZE;
632 
633 	group_ids = rocker_tlv_nest_start(desc_info,
634 					  ROCKER_TLV_OF_DPA_GROUP_IDS);
635 	if (!group_ids)
636 		return -EMSGSIZE;
637 
638 	for (i = 0; i < entry->group_count; i++)
639 		/* Note TLV array is 1-based */
640 		if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
641 			return -EMSGSIZE;
642 
643 	rocker_tlv_nest_end(desc_info, group_ids);
644 
645 	return 0;
646 }
647 
/* Emit L3 unicast group TLVs: optional src/dst MAC and VLAN rewrites,
 * the TTL-check flag, and the lower-level group to chain to.
 */
static int
ofdpa_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
				   const struct ofdpa_group_tbl_entry *entry)
{
	if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_src))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_dst))
		return -EMSGSIZE;
	if (entry->l3_unicast.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l3_unicast.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
			      entry->l3_unicast.ttl_check))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l3_unicast.group_id))
		return -EMSGSIZE;

	return 0;
}
673 
/* rocker_cmd_exec() callback: marshal a group add/mod command.  The
 * group type is encoded in group_id and selects the per-type TLV
 * emitter.  NOTE(review): like the flow-add path, error returns leave
 * the CMD_INFO nest open — presumably the caller discards the
 * descriptor on error; confirm in rocker_cmd_exec().
 */
static int ofdpa_cmd_group_tbl_add(const struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
				   void *priv)
{
	struct ofdpa_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;

	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->group_id))
		return -EMSGSIZE;

	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
	case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
		err = ofdpa_cmd_group_tbl_add_l2_interface(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
		err = ofdpa_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
		err = ofdpa_cmd_group_tbl_add_group_ids(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
		err = ofdpa_cmd_group_tbl_add_l3_unicast(desc_info, entry);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	if (err)
		return err;

	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
718 
719 static int ofdpa_cmd_group_tbl_del(const struct rocker_port *rocker_port,
720 				   struct rocker_desc_info *desc_info,
721 				   void *priv)
722 {
723 	const struct ofdpa_group_tbl_entry *entry = priv;
724 	struct rocker_tlv *cmd_info;
725 
726 	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
727 		return -EMSGSIZE;
728 	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
729 	if (!cmd_info)
730 		return -EMSGSIZE;
731 	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
732 			       entry->group_id))
733 		return -EMSGSIZE;
734 	rocker_tlv_nest_end(desc_info, cmd_info);
735 
736 	return 0;
737 }
738 
739 /***************************************************
740  * Flow, group, FDB, internal VLAN and neigh tables
741  ***************************************************/
742 
743 static struct ofdpa_flow_tbl_entry *
744 ofdpa_flow_tbl_find(const struct ofdpa *ofdpa,
745 		    const struct ofdpa_flow_tbl_entry *match)
746 {
747 	struct ofdpa_flow_tbl_entry *found;
748 	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
749 
750 	hash_for_each_possible(ofdpa->flow_tbl, found,
751 			       entry, match->key_crc32) {
752 		if (memcmp(&found->key, &match->key, key_len) == 0)
753 			return found;
754 	}
755 
756 	return NULL;
757 }
758 
/* Insert or update a flow table entry.  Takes ownership of 'match':
 * if an entry with the same key exists it is unhashed and freed, and
 * 'match' replaces it inheriting the old cookie (sent as FLOW_MOD);
 * otherwise 'match' is added with a fresh cookie (sent as FLOW_ADD).
 * The hardware command is issued after dropping the table lock.
 */
static int ofdpa_flow_tbl_add(struct ofdpa_port *ofdpa_port,
			      int flags, struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;

	/* only the first key_len bytes participate in the hash */
	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags);

	found = ofdpa_flow_tbl_find(ofdpa, match);

	if (found) {
		match->cookie = found->cookie;
		hash_del(&found->entry);
		kfree(found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
	} else {
		found = match;
		found->cookie = ofdpa->flow_tbl_next_cookie++;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
	}

	hash_add(ofdpa->flow_tbl, &found->entry, found->key_crc32);
	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);

	/* NOTE(review): 'found' is read by the cmd callback after the
	 * lock is dropped — presumably higher-level serialization keeps
	 * it alive; confirm against callers.
	 */
	return rocker_cmd_exec(ofdpa_port->rocker_port,
			       ofdpa_flags_nowait(flags),
			       ofdpa_cmd_flow_tbl_add,
			       found, NULL, NULL);
}
793 
/* Remove the flow table entry matching 'match'.  'match' is only a
 * lookup template and is always freed here.  If a real entry is found
 * it is unhashed under the lock, a FLOW_DEL command is sent to the
 * hardware, and the entry is freed.  Returns the command status, or 0
 * when nothing matched.
 */
static int ofdpa_flow_tbl_del(struct ofdpa_port *ofdpa_port,
			      int flags, struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;
	int err = 0;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags);

	found = ofdpa_flow_tbl_find(ofdpa, match);

	if (found) {
		hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
	}

	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);

	kfree(match);

	if (found) {
		err = rocker_cmd_exec(ofdpa_port->rocker_port,
				      ofdpa_flags_nowait(flags),
				      ofdpa_cmd_flow_tbl_del,
				      found, NULL, NULL);
		kfree(found);
	}

	return err;
}
828 
829 static int ofdpa_flow_tbl_do(struct ofdpa_port *ofdpa_port, int flags,
830 			     struct ofdpa_flow_tbl_entry *entry)
831 {
832 	if (flags & OFDPA_OP_FLAG_REMOVE)
833 		return ofdpa_flow_tbl_del(ofdpa_port, flags, entry);
834 	else
835 		return ofdpa_flow_tbl_add(ofdpa_port, flags, entry);
836 }
837 
838 static int ofdpa_flow_tbl_ig_port(struct ofdpa_port *ofdpa_port, int flags,
839 				  u32 in_pport, u32 in_pport_mask,
840 				  enum rocker_of_dpa_table_id goto_tbl)
841 {
842 	struct ofdpa_flow_tbl_entry *entry;
843 
844 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
845 	if (!entry)
846 		return -ENOMEM;
847 
848 	entry->key.priority = OFDPA_PRIORITY_IG_PORT;
849 	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
850 	entry->key.ig_port.in_pport = in_pport;
851 	entry->key.ig_port.in_pport_mask = in_pport_mask;
852 	entry->key.ig_port.goto_tbl = goto_tbl;
853 
854 	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
855 }
856 
857 static int ofdpa_flow_tbl_vlan(struct ofdpa_port *ofdpa_port,
858 			       int flags,
859 			       u32 in_pport, __be16 vlan_id,
860 			       __be16 vlan_id_mask,
861 			       enum rocker_of_dpa_table_id goto_tbl,
862 			       bool untagged, __be16 new_vlan_id)
863 {
864 	struct ofdpa_flow_tbl_entry *entry;
865 
866 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
867 	if (!entry)
868 		return -ENOMEM;
869 
870 	entry->key.priority = OFDPA_PRIORITY_VLAN;
871 	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
872 	entry->key.vlan.in_pport = in_pport;
873 	entry->key.vlan.vlan_id = vlan_id;
874 	entry->key.vlan.vlan_id_mask = vlan_id_mask;
875 	entry->key.vlan.goto_tbl = goto_tbl;
876 
877 	entry->key.vlan.untagged = untagged;
878 	entry->key.vlan.new_vlan_id = new_vlan_id;
879 
880 	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
881 }
882 
883 static int ofdpa_flow_tbl_term_mac(struct ofdpa_port *ofdpa_port,
884 				   u32 in_pport, u32 in_pport_mask,
885 				   __be16 eth_type, const u8 *eth_dst,
886 				   const u8 *eth_dst_mask, __be16 vlan_id,
887 				   __be16 vlan_id_mask, bool copy_to_cpu,
888 				   int flags)
889 {
890 	struct ofdpa_flow_tbl_entry *entry;
891 
892 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
893 	if (!entry)
894 		return -ENOMEM;
895 
896 	if (is_multicast_ether_addr(eth_dst)) {
897 		entry->key.priority = OFDPA_PRIORITY_TERM_MAC_MCAST;
898 		entry->key.term_mac.goto_tbl =
899 			 ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
900 	} else {
901 		entry->key.priority = OFDPA_PRIORITY_TERM_MAC_UCAST;
902 		entry->key.term_mac.goto_tbl =
903 			 ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
904 	}
905 
906 	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
907 	entry->key.term_mac.in_pport = in_pport;
908 	entry->key.term_mac.in_pport_mask = in_pport_mask;
909 	entry->key.term_mac.eth_type = eth_type;
910 	ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
911 	ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
912 	entry->key.term_mac.vlan_id = vlan_id;
913 	entry->key.term_mac.vlan_id_mask = vlan_id_mask;
914 	entry->key.term_mac.copy_to_cpu = copy_to_cpu;
915 
916 	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
917 }
918 
919 static int ofdpa_flow_tbl_bridge(struct ofdpa_port *ofdpa_port,
920 				 int flags, const u8 *eth_dst,
921 				 const u8 *eth_dst_mask,  __be16 vlan_id,
922 				 u32 tunnel_id,
923 				 enum rocker_of_dpa_table_id goto_tbl,
924 				 u32 group_id, bool copy_to_cpu)
925 {
926 	struct ofdpa_flow_tbl_entry *entry;
927 	u32 priority;
928 	bool vlan_bridging = !!vlan_id;
929 	bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
930 	bool wild = false;
931 
932 	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
933 	if (!entry)
934 		return -ENOMEM;
935 
936 	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
937 
938 	if (eth_dst) {
939 		entry->key.bridge.has_eth_dst = 1;
940 		ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
941 	}
942 	if (eth_dst_mask) {
943 		entry->key.bridge.has_eth_dst_mask = 1;
944 		ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
945 		if (!ether_addr_equal(eth_dst_mask, ff_mac))
946 			wild = true;
947 	}
948 
949 	priority = OFDPA_PRIORITY_UNKNOWN;
950 	if (vlan_bridging && dflt && wild)
951 		priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
952 	else if (vlan_bridging && dflt && !wild)
953 		priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
954 	else if (vlan_bridging && !dflt)
955 		priority = OFDPA_PRIORITY_BRIDGING_VLAN;
956 	else if (!vlan_bridging && dflt && wild)
957 		priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
958 	else if (!vlan_bridging && dflt && !wild)
959 		priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
960 	else if (!vlan_bridging && !dflt)
961 		priority = OFDPA_PRIORITY_BRIDGING_TENANT;
962 
963 	entry->key.priority = priority;
964 	entry->key.bridge.vlan_id = vlan_id;
965 	entry->key.bridge.tunnel_id = tunnel_id;
966 	entry->key.bridge.goto_tbl = goto_tbl;
967 	entry->key.bridge.group_id = group_id;
968 	entry->key.bridge.copy_to_cpu = copy_to_cpu;
969 
970 	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
971 }
972 
/* Install (or remove, per flags) a unicast IPv4 routing flow entry
 * matching @dst/@dst_mask; hits are sent to nexthop group @group_id and
 * then to @goto_tbl.  @fi is the kernel FIB info backing the route and
 * is recorded on the entry.  Returns 0 or negative errno.
 */
static int ofdpa_flow_tbl_ucast4_routing(struct ofdpa_port *ofdpa_port,
					 __be16 eth_type, __be32 dst,
					 __be32 dst_mask, u32 priority,
					 enum rocker_of_dpa_table_id goto_tbl,
					 u32 group_id, struct fib_info *fi,
					 int flags)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	entry->key.priority = priority;
	entry->key.ucast_routing.eth_type = eth_type;
	entry->key.ucast_routing.dst4 = dst;
	entry->key.ucast_routing.dst4_mask = dst_mask;
	entry->key.ucast_routing.goto_tbl = goto_tbl;
	entry->key.ucast_routing.group_id = group_id;
	/* key_len deliberately stops before group_id, excluding it from
	 * the lookup key — presumably so re-adding the same route with a
	 * new nexthop group updates the existing entry; verify against
	 * ofdpa_flow_tbl_do()/find().
	 */
	entry->key_len = offsetof(struct ofdpa_flow_tbl_key,
				  ucast_routing.group_id);
	entry->fi = fi;

	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
999 
/* Install (or remove, per flags) an ACL policy table flow entry.
 * NULL MAC/mask pointers leave the corresponding key fields zeroed.
 * The hit action forwards matching packets to @group_id.
 * Returns 0 or negative errno.
 */
static int ofdpa_flow_tbl_acl(struct ofdpa_port *ofdpa_port, int flags,
			      u32 in_pport, u32 in_pport_mask,
			      const u8 *eth_src, const u8 *eth_src_mask,
			      const u8 *eth_dst, const u8 *eth_dst_mask,
			      __be16 eth_type, __be16 vlan_id,
			      __be16 vlan_id_mask, u8 ip_proto,
			      u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
			      u32 group_id)
{
	u32 priority;
	struct ofdpa_flow_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	/* normal by default; mcast-masked matches drop to the default
	 * priority, link-local (control) dst MACs get ctrl priority
	 */
	priority = OFDPA_PRIORITY_ACL_NORMAL;
	if (eth_dst && eth_dst_mask) {
		if (ether_addr_equal(eth_dst_mask, mcast_mac))
			priority = OFDPA_PRIORITY_ACL_DFLT;
		else if (is_link_local_ether_addr(eth_dst))
			priority = OFDPA_PRIORITY_ACL_CTRL;
	}

	entry->key.priority = priority;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	entry->key.acl.in_pport = in_pport;
	entry->key.acl.in_pport_mask = in_pport_mask;

	if (eth_src)
		ether_addr_copy(entry->key.acl.eth_src, eth_src);
	if (eth_src_mask)
		ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
	if (eth_dst)
		ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
	if (eth_dst_mask)
		ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);

	entry->key.acl.eth_type = eth_type;
	entry->key.acl.vlan_id = vlan_id;
	entry->key.acl.vlan_id_mask = vlan_id_mask;
	entry->key.acl.ip_proto = ip_proto;
	entry->key.acl.ip_proto_mask = ip_proto_mask;
	entry->key.acl.ip_tos = ip_tos;
	entry->key.acl.ip_tos_mask = ip_tos_mask;
	entry->key.acl.group_id = group_id;

	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
1049 
1050 static struct ofdpa_group_tbl_entry *
1051 ofdpa_group_tbl_find(const struct ofdpa *ofdpa,
1052 		     const struct ofdpa_group_tbl_entry *match)
1053 {
1054 	struct ofdpa_group_tbl_entry *found;
1055 
1056 	hash_for_each_possible(ofdpa->group_tbl, found,
1057 			       entry, match->group_id) {
1058 		if (found->group_id == match->group_id)
1059 			return found;
1060 	}
1061 
1062 	return NULL;
1063 }
1064 
1065 static void ofdpa_group_tbl_entry_free(struct ofdpa_group_tbl_entry *entry)
1066 {
1067 	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
1068 	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
1069 	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
1070 		kfree(entry->group_ids);
1071 		break;
1072 	default:
1073 		break;
1074 	}
1075 	kfree(entry);
1076 }
1077 
/* Insert @match into the software group table, replacing any existing
 * entry with the same group_id, then push an ADD (or MOD when
 * replacing) command to the hardware.  Takes ownership of @match: it
 * is linked into the table under group_tbl_lock.
 */
static int ofdpa_group_tbl_add(struct ofdpa_port *ofdpa_port, int flags,
			       struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_group_tbl_entry *found;
	unsigned long lock_flags;

	spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags);

	found = ofdpa_group_tbl_find(ofdpa, match);

	if (found) {
		/* replace in place: free the stale entry and mark the
		 * hardware command as a modify of the existing group */
		hash_del(&found->entry);
		ofdpa_group_tbl_entry_free(found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
	} else {
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
	}

	hash_add(ofdpa->group_tbl, &found->entry, found->group_id);

	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);

	/* hardware command runs outside the lock; NOWAIT honored via flags */
	return rocker_cmd_exec(ofdpa_port->rocker_port,
			       ofdpa_flags_nowait(flags),
			       ofdpa_cmd_group_tbl_add,
			       found, NULL, NULL);
}
1108 
/* Remove the entry matching @match->group_id from the software group
 * table and, when it existed, issue a hardware DEL for it.  @match is
 * always consumed (freed) here; the unlinked table entry is freed
 * after the hardware command completes.
 */
static int ofdpa_group_tbl_del(struct ofdpa_port *ofdpa_port, int flags,
			       struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_group_tbl_entry *found;
	unsigned long lock_flags;
	int err = 0;

	spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags);

	found = ofdpa_group_tbl_find(ofdpa, match);

	if (found) {
		hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
	}

	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);

	/* match was only a lookup key; free it unconditionally */
	ofdpa_group_tbl_entry_free(match);

	if (found) {
		err = rocker_cmd_exec(ofdpa_port->rocker_port,
				      ofdpa_flags_nowait(flags),
				      ofdpa_cmd_group_tbl_del,
				      found, NULL, NULL);
		ofdpa_group_tbl_entry_free(found);
	}

	return err;
}
1140 
1141 static int ofdpa_group_tbl_do(struct ofdpa_port *ofdpa_port, int flags,
1142 			      struct ofdpa_group_tbl_entry *entry)
1143 {
1144 	if (flags & OFDPA_OP_FLAG_REMOVE)
1145 		return ofdpa_group_tbl_del(ofdpa_port, flags, entry);
1146 	else
1147 		return ofdpa_group_tbl_add(ofdpa_port, flags, entry);
1148 }
1149 
/* Install (or remove, per flags) the L2 interface group for
 * (@vlan_id, @out_pport).  @pop_vlan selects whether the VLAN tag is
 * stripped on egress.  Returns 0 or negative errno.
 */
static int ofdpa_group_l2_interface(struct ofdpa_port *ofdpa_port,
				    int flags, __be16 vlan_id,
				    u32 out_pport, int pop_vlan)
{
	struct ofdpa_group_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	/* L2 interface groups are keyed by (vlan, physical port) */
	entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
	entry->l2_interface.pop_vlan = pop_vlan;

	return ofdpa_group_tbl_do(ofdpa_port, flags, entry);
}
1165 
1166 static int ofdpa_group_l2_fan_out(struct ofdpa_port *ofdpa_port,
1167 				  int flags, u8 group_count,
1168 				  const u32 *group_ids, u32 group_id)
1169 {
1170 	struct ofdpa_group_tbl_entry *entry;
1171 
1172 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1173 	if (!entry)
1174 		return -ENOMEM;
1175 
1176 	entry->group_id = group_id;
1177 	entry->group_count = group_count;
1178 
1179 	entry->group_ids = kcalloc(group_count, sizeof(u32), GFP_KERNEL);
1180 	if (!entry->group_ids) {
1181 		kfree(entry);
1182 		return -ENOMEM;
1183 	}
1184 	memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
1185 
1186 	return ofdpa_group_tbl_do(ofdpa_port, flags, entry);
1187 }
1188 
/* Install (or remove, per flags) the L2 flood group for a VLAN; thin
 * wrapper around the generic fan-out group helper.  @vlan_id is
 * currently unused here — it is already encoded in @group_id by the
 * callers.
 */
static int ofdpa_group_l2_flood(struct ofdpa_port *ofdpa_port,
				int flags, __be16 vlan_id,
				u8 group_count,	const u32 *group_ids,
				u32 group_id)
{
	return ofdpa_group_l2_fan_out(ofdpa_port, flags,
				      group_count, group_ids,
				      group_id);
}
1198 
/* Install (or remove, per flags) L3 unicast (nexthop) group @index,
 * which rewrites src/dst MACs, applies @vlan_id and @ttl_check, and
 * chains to the L2 interface group for (@vlan_id, @pport).  NULL MAC
 * pointers leave the corresponding fields zeroed.
 */
static int ofdpa_group_l3_unicast(struct ofdpa_port *ofdpa_port, int flags,
				  u32 index, const u8 *src_mac, const u8 *dst_mac,
				  __be16 vlan_id, bool ttl_check, u32 pport)
{
	struct ofdpa_group_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
	if (src_mac)
		ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
	if (dst_mac)
		ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
	entry->l3_unicast.vlan_id = vlan_id;
	entry->l3_unicast.ttl_check = ttl_check;
	/* egress is via the L2 interface group for this vlan/port */
	entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);

	return ofdpa_group_tbl_do(ofdpa_port, flags, entry);
}
1220 
1221 static struct ofdpa_neigh_tbl_entry *
1222 ofdpa_neigh_tbl_find(const struct ofdpa *ofdpa, __be32 ip_addr)
1223 {
1224 	struct ofdpa_neigh_tbl_entry *found;
1225 
1226 	hash_for_each_possible(ofdpa->neigh_tbl, found,
1227 			       entry, be32_to_cpu(ip_addr))
1228 		if (found->ip_addr == ip_addr)
1229 			return found;
1230 
1231 	return NULL;
1232 }
1233 
/* Link a new neighbor entry into the table: assign the next free
 * index, take the caller's initial reference, and hash it by IPv4
 * address.  Callers hold ofdpa->neigh_tbl_lock.
 */
static void ofdpa_neigh_add(struct ofdpa *ofdpa,
			    struct ofdpa_neigh_tbl_entry *entry)
{
	entry->index = ofdpa->neigh_tbl_next_index++;
	entry->ref_count++;
	hash_add(ofdpa->neigh_tbl, &entry->entry,
		 be32_to_cpu(entry->ip_addr));
}
1242 
1243 static void ofdpa_neigh_del(struct ofdpa_neigh_tbl_entry *entry)
1244 {
1245 	if (--entry->ref_count == 0) {
1246 		hash_del(&entry->entry);
1247 		kfree(entry);
1248 	}
1249 }
1250 
1251 static void ofdpa_neigh_update(struct ofdpa_neigh_tbl_entry *entry,
1252 			       const u8 *eth_dst, bool ttl_check)
1253 {
1254 	if (eth_dst) {
1255 		ether_addr_copy(entry->eth_dst, eth_dst);
1256 		entry->ttl_check = ttl_check;
1257 	} else {
1258 		entry->ref_count++;
1259 	}
1260 }
1261 
/* Add, update or remove (per flags + table state) the hardware state
 * for an IPv4 neighbor: an L3 unicast nexthop group and, on add or
 * remove, a /32 unicast route pointing at that group.  @eth_dst is the
 * resolved MAC; it is used on the add/update paths and not read on the
 * remove path.
 */
static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
				 int flags, __be32 ip_addr, const u8 *eth_dst)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_neigh_tbl_entry *entry;
	struct ofdpa_neigh_tbl_entry *found;
	unsigned long lock_flags;
	__be16 eth_type = htons(ETH_P_IP);
	enum rocker_of_dpa_table_id goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	u32 priority = 0;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	int err = 0;

	/* scratch entry: inserted into the table when adding, otherwise a
	 * snapshot of 'found' so the hardware calls below can run outside
	 * the lock
	 */
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);

	found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);

	/* refine the requested op against what's already in the table */
	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = ofdpa_port->dev;
		ether_addr_copy(entry->eth_dst, eth_dst);
		entry->ttl_check = true;
		ofdpa_neigh_add(ofdpa, entry);
	} else if (removing) {
		memcpy(entry, found, sizeof(*entry));
		ofdpa_neigh_del(found);
	} else if (updating) {
		ofdpa_neigh_update(found, eth_dst, true);
		memcpy(entry, found, sizeof(*entry));
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);

	if (err)
		goto err_out;

	/* For each active neighbor, we have an L3 unicast group and
	 * a /32 route to the neighbor, which uses the L3 unicast
	 * group.  The L3 unicast group can also be referred to by
	 * other routes' nexthops.
	 */

	err = ofdpa_group_l3_unicast(ofdpa_port, flags,
				     entry->index,
				     ofdpa_port->dev->dev_addr,
				     entry->eth_dst,
				     ofdpa_port->internal_vlan_id,
				     entry->ttl_check,
				     ofdpa_port->pport);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) L3 unicast group index %d\n",
			   err, entry->index);
		goto err_out;
	}

	if (adding || removing) {
		group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
		err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port,
						    eth_type, ip_addr,
						    inet_make_mask(32),
						    priority, goto_tbl,
						    group_id, NULL, flags);

		if (err)
			netdev_err(ofdpa_port->dev, "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
				   err, &entry->ip_addr, group_id);
	}

err_out:
	/* on the add path the entry is owned by the neigh table now */
	if (!adding)
		kfree(entry);

	return err;
}
1350 
/* Resolve @ip_addr on this port: if the kernel neigh entry is already
 * in a VALID state, program the hardware neighbor immediately;
 * otherwise create the neigh entry (if needed) and kick off ARP
 * resolution.  Returns 0 or negative errno.
 */
static int ofdpa_port_ipv4_resolve(struct ofdpa_port *ofdpa_port,
				   __be32 ip_addr)
{
	struct net_device *dev = ofdpa_port->dev;
	struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
	int err = 0;

	if (!n) {
		n = neigh_create(&arp_tbl, &ip_addr, dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
	}

	/* If the neigh is already resolved, then go ahead and
	 * install the entry, otherwise start the ARP process to
	 * resolve the neigh.
	 */

	if (n->nud_state & NUD_VALID)
		err = ofdpa_port_ipv4_neigh(ofdpa_port, 0,
					    ip_addr, n->ha);
	else
		neigh_event_send(n, NULL);

	/* drop the lookup/create reference taken above */
	neigh_release(n);
	return err;
}
1378 
/* Get (and reference) the nexthop group index for @ip_addr, creating a
 * placeholder neighbor entry when none exists yet.  On return, *index
 * is the L3 unicast group index to use.  If the neighbor's MAC is not
 * yet known, ARP resolution is started asynchronously.
 */
static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
			      int flags, __be32 ip_addr, u32 *index)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_neigh_tbl_entry *entry;
	struct ofdpa_neigh_tbl_entry *found;
	unsigned long lock_flags;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	bool resolved = true;
	int err = 0;

	/* pre-allocate outside the spinlock; freed below if unused */
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);

	found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);

	/* refine the requested op against what's already in the table */
	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		/* new placeholder: MAC still zeroed, so not yet resolved */
		entry->ip_addr = ip_addr;
		entry->dev = ofdpa_port->dev;
		ofdpa_neigh_add(ofdpa, entry);
		*index = entry->index;
		resolved = false;
	} else if (removing) {
		*index = found->index;
		ofdpa_neigh_del(found);
	} else if (updating) {
		/* NULL MAC: just takes another reference on 'found' */
		ofdpa_neigh_update(found, NULL, false);
		resolved = !is_zero_ether_addr(found->eth_dst);
		*index = found->index;
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);

	if (!adding)
		kfree(entry);

	if (err)
		return err;

	/* Resolved means neigh ip_addr is resolved to neigh mac. */

	if (!resolved)
		err = ofdpa_port_ipv4_resolve(ofdpa_port, ip_addr);

	return err;
}
1436 
1437 static struct ofdpa_port *ofdpa_port_get(const struct ofdpa *ofdpa,
1438 					 int port_index)
1439 {
1440 	struct rocker_port *rocker_port;
1441 
1442 	rocker_port = ofdpa->rocker->ports[port_index];
1443 	return rocker_port ? rocker_port->wpriv : NULL;
1444 }
1445 
/* Rebuild (or remove, per flags) the L2 flood group for @vlan_id so
 * that it references the L2 interface group of every bridged port that
 * is a member of the VLAN.  No-op when no bridged member ports exist.
 */
static int ofdpa_port_vlan_flood_group(struct ofdpa_port *ofdpa_port,
				       int flags, __be16 vlan_id)
{
	struct ofdpa_port *p;
	const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	unsigned int port_count = ofdpa->rocker->port_count;
	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
	u32 *group_ids;
	u8 group_count = 0;
	int err = 0;
	int i;

	group_ids = kcalloc(port_count, sizeof(u32), GFP_KERNEL);
	if (!group_ids)
		return -ENOMEM;

	/* Adjust the flood group for this VLAN.  The flood group
	 * references an L2 interface group for each port in this
	 * VLAN.
	 */

	for (i = 0; i < port_count; i++) {
		p = ofdpa_port_get(ofdpa, i);
		if (!p)
			continue;
		if (!ofdpa_port_is_bridged(p))
			continue;
		if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
			group_ids[group_count++] =
				ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
		}
	}

	/* If there are no bridged ports in this VLAN, we're done */
	if (group_count == 0)
		goto no_ports_in_vlan;

	err = ofdpa_group_l2_flood(ofdpa_port, flags, vlan_id,
				   group_count, group_ids, group_id);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);

no_ports_in_vlan:
	/* group_ids was copied by the group helper; safe to free here */
	kfree(group_ids);
	return err;
}
1492 
/* Maintain the per-(port, VLAN) L2 interface groups: one for this
 * port (only while its STP state allows forwarding) and a shared one
 * for the CPU port, created when the first port joins the VLAN and
 * destroyed when the last one leaves.
 */
static int ofdpa_port_vlan_l2_groups(struct ofdpa_port *ofdpa_port, int flags,
				     __be16 vlan_id, bool pop_vlan)
{
	const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	unsigned int port_count = ofdpa->rocker->port_count;
	struct ofdpa_port *p;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	u32 out_pport;
	int ref = 0;
	int err;
	int i;

	/* An L2 interface group for this port in this VLAN, but
	 * only when port STP state is LEARNING|FORWARDING.
	 */

	if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
	    ofdpa_port->stp_state == BR_STATE_FORWARDING) {
		out_pport = ofdpa_port->pport;
		err = ofdpa_group_l2_interface(ofdpa_port, flags,
					       vlan_id, out_pport, pop_vlan);
		if (err) {
			netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
				   err, out_pport);
			return err;
		}
	}

	/* An L2 interface group for this VLAN to CPU port.
	 * Add when first port joins this VLAN and destroy when
	 * last port leaves this VLAN.
	 */

	for (i = 0; i < port_count; i++) {
		p = ofdpa_port_get(ofdpa, i);
		if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
			ref++;
	}

	/* proceed only when (adding && ref == 1): we are the first member
	 * (our own bit was set by the caller before this runs), or
	 * (!adding && ref == 0): the last member just left
	 */
	if ((!adding || ref != 1) && (adding || ref != 0))
		return 0;

	/* out_pport 0 addresses the CPU port */
	out_pport = 0;
	err = ofdpa_group_l2_interface(ofdpa_port, flags,
				       vlan_id, out_pport, pop_vlan);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for CPU port\n", err);
		return err;
	}

	return 0;
}
1545 
/* Table of control-traffic classes and how each is trapped/handled,
 * indexed by OFDPA_CTRL_*.  Exactly one of acl/bridge/term selects
 * which flow table ofdpa_port_ctrl_vlan() installs the entry into.
 */
static struct ofdpa_ctrl {
	const u8 *eth_dst;	/* dst MAC to match (NULL = no MAC match) */
	const u8 *eth_dst_mask;	/* mask applied to eth_dst */
	__be16 eth_type;	/* ethertype to match; 0 = any */
	bool acl;		/* install via the ACL policy table */
	bool bridge;		/* install via the bridging (flood) table */
	bool term;		/* install via the termination-MAC table */
	bool copy_to_cpu;	/* also deliver matching pkts to the CPU */
} ofdpa_ctrls[] = {
	[OFDPA_CTRL_LINK_LOCAL_MCAST] = {
		/* pass link local multicast pkts up to CPU for filtering */
		.eth_dst = ll_mac,
		.eth_dst_mask = ll_mask,
		.acl = true,
	},
	[OFDPA_CTRL_LOCAL_ARP] = {
		/* pass local ARP pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.eth_type = htons(ETH_P_ARP),
		.acl = true,
	},
	[OFDPA_CTRL_IPV4_MCAST] = {
		/* pass IPv4 mcast pkts up to CPU, RFC 1112 */
		.eth_dst = ipv4_mcast,
		.eth_dst_mask = ipv4_mask,
		.eth_type = htons(ETH_P_IP),
		.term  = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_IPV6_MCAST] = {
		/* pass IPv6 mcast pkts up to CPU, RFC 2464 */
		.eth_dst = ipv6_mcast,
		.eth_dst_mask = ipv6_mask,
		.eth_type = htons(ETH_P_IPV6),
		.term  = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_DFLT_BRIDGING] = {
		/* flood any pkts on vlan */
		.bridge = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_DFLT_OVS] = {
		/* pass all pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.acl = true,
	},
};
1596 
1597 static int ofdpa_port_ctrl_vlan_acl(struct ofdpa_port *ofdpa_port, int flags,
1598 				    const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
1599 {
1600 	u32 in_pport = ofdpa_port->pport;
1601 	u32 in_pport_mask = 0xffffffff;
1602 	u32 out_pport = 0;
1603 	const u8 *eth_src = NULL;
1604 	const u8 *eth_src_mask = NULL;
1605 	__be16 vlan_id_mask = htons(0xffff);
1606 	u8 ip_proto = 0;
1607 	u8 ip_proto_mask = 0;
1608 	u8 ip_tos = 0;
1609 	u8 ip_tos_mask = 0;
1610 	u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
1611 	int err;
1612 
1613 	err = ofdpa_flow_tbl_acl(ofdpa_port, flags,
1614 				 in_pport, in_pport_mask,
1615 				 eth_src, eth_src_mask,
1616 				 ctrl->eth_dst, ctrl->eth_dst_mask,
1617 				 ctrl->eth_type,
1618 				 vlan_id, vlan_id_mask,
1619 				 ip_proto, ip_proto_mask,
1620 				 ip_tos, ip_tos_mask,
1621 				 group_id);
1622 
1623 	if (err)
1624 		netdev_err(ofdpa_port->dev, "Error (%d) ctrl ACL\n", err);
1625 
1626 	return err;
1627 }
1628 
1629 static int ofdpa_port_ctrl_vlan_bridge(struct ofdpa_port *ofdpa_port,
1630 				       int flags, const struct ofdpa_ctrl *ctrl,
1631 				       __be16 vlan_id)
1632 {
1633 	enum rocker_of_dpa_table_id goto_tbl =
1634 			ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
1635 	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
1636 	u32 tunnel_id = 0;
1637 	int err;
1638 
1639 	if (!ofdpa_port_is_bridged(ofdpa_port))
1640 		return 0;
1641 
1642 	err = ofdpa_flow_tbl_bridge(ofdpa_port, flags,
1643 				    ctrl->eth_dst, ctrl->eth_dst_mask,
1644 				    vlan_id, tunnel_id,
1645 				    goto_tbl, group_id, ctrl->copy_to_cpu);
1646 
1647 	if (err)
1648 		netdev_err(ofdpa_port->dev, "Error (%d) ctrl FLOOD\n", err);
1649 
1650 	return err;
1651 }
1652 
1653 static int ofdpa_port_ctrl_vlan_term(struct ofdpa_port *ofdpa_port, int flags,
1654 				     const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
1655 {
1656 	u32 in_pport_mask = 0xffffffff;
1657 	__be16 vlan_id_mask = htons(0xffff);
1658 	int err;
1659 
1660 	if (ntohs(vlan_id) == 0)
1661 		vlan_id = ofdpa_port->internal_vlan_id;
1662 
1663 	err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport, in_pport_mask,
1664 				      ctrl->eth_type, ctrl->eth_dst,
1665 				      ctrl->eth_dst_mask, vlan_id,
1666 				      vlan_id_mask, ctrl->copy_to_cpu,
1667 				      flags);
1668 
1669 	if (err)
1670 		netdev_err(ofdpa_port->dev, "Error (%d) ctrl term\n", err);
1671 
1672 	return err;
1673 }
1674 
1675 static int ofdpa_port_ctrl_vlan(struct ofdpa_port *ofdpa_port, int flags,
1676 				const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
1677 {
1678 	if (ctrl->acl)
1679 		return ofdpa_port_ctrl_vlan_acl(ofdpa_port, flags,
1680 						ctrl, vlan_id);
1681 	if (ctrl->bridge)
1682 		return ofdpa_port_ctrl_vlan_bridge(ofdpa_port, flags,
1683 						   ctrl, vlan_id);
1684 
1685 	if (ctrl->term)
1686 		return ofdpa_port_ctrl_vlan_term(ofdpa_port, flags,
1687 						 ctrl, vlan_id);
1688 
1689 	return -EOPNOTSUPP;
1690 }
1691 
1692 static int ofdpa_port_ctrl_vlan_add(struct ofdpa_port *ofdpa_port, int flags,
1693 				    __be16 vlan_id)
1694 {
1695 	int err = 0;
1696 	int i;
1697 
1698 	for (i = 0; i < OFDPA_CTRL_MAX; i++) {
1699 		if (ofdpa_port->ctrls[i]) {
1700 			err = ofdpa_port_ctrl_vlan(ofdpa_port, flags,
1701 						   &ofdpa_ctrls[i], vlan_id);
1702 			if (err)
1703 				return err;
1704 		}
1705 	}
1706 
1707 	return err;
1708 }
1709 
1710 static int ofdpa_port_ctrl(struct ofdpa_port *ofdpa_port, int flags,
1711 			   const struct ofdpa_ctrl *ctrl)
1712 {
1713 	u16 vid;
1714 	int err = 0;
1715 
1716 	for (vid = 1; vid < VLAN_N_VID; vid++) {
1717 		if (!test_bit(vid, ofdpa_port->vlan_bitmap))
1718 			continue;
1719 		err = ofdpa_port_ctrl_vlan(ofdpa_port, flags,
1720 					   ctrl, htons(vid));
1721 		if (err)
1722 			break;
1723 	}
1724 
1725 	return err;
1726 }
1727 
/* Add or remove (per flags) this port's membership in VLAN @vid:
 * ctrl entries, L2 interface/flood groups, and the VLAN translation
 * entry.  vid 0 maps to the port's internal VLAN.
 */
static int ofdpa_port_vlan(struct ofdpa_port *ofdpa_port, int flags,
			   u16 vid)
{
	enum rocker_of_dpa_table_id goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	u32 in_pport = ofdpa_port->pport;
	__be16 vlan_id = htons(vid);
	__be16 vlan_id_mask = htons(0xffff);
	__be16 internal_vlan_id;
	bool untagged;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	int err;

	internal_vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, &untagged);

	if (adding &&
	    test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap))
		return 0; /* already added */
	else if (!adding &&
		 !test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap))
		return 0; /* already removed */

	/* toggle membership up front; reverted on the error paths below.
	 * The helpers called next rely on the bit reflecting the new state.
	 */
	change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);

	if (adding) {
		err = ofdpa_port_ctrl_vlan_add(ofdpa_port, flags,
					       internal_vlan_id);
		if (err) {
			netdev_err(ofdpa_port->dev, "Error (%d) port ctrl vlan add\n", err);
			goto err_vlan_add;
		}
	}

	err = ofdpa_port_vlan_l2_groups(ofdpa_port, flags,
					internal_vlan_id, untagged);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 groups\n", err);
		goto err_vlan_l2_groups;
	}

	err = ofdpa_port_vlan_flood_group(ofdpa_port, flags,
					  internal_vlan_id);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);
		goto err_flood_group;
	}

	err = ofdpa_flow_tbl_vlan(ofdpa_port, flags,
				  in_pport, vlan_id, vlan_id_mask,
				  goto_tbl, untagged, internal_vlan_id);
	if (err)
		/* NOTE(review): this error is logged but not returned, and
		 * the bitmap bit is not reverted — confirm this best-effort
		 * behavior is intentional
		 */
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN table\n", err);

	return 0;

err_vlan_add:
err_vlan_l2_groups:
err_flood_group:
	/* undo the membership change made above */
	change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);
	return err;
}
1789 
1790 static int ofdpa_port_ig_tbl(struct ofdpa_port *ofdpa_port, int flags)
1791 {
1792 	enum rocker_of_dpa_table_id goto_tbl;
1793 	u32 in_pport;
1794 	u32 in_pport_mask;
1795 	int err;
1796 
1797 	/* Normal Ethernet Frames.  Matches pkts from any local physical
1798 	 * ports.  Goto VLAN tbl.
1799 	 */
1800 
1801 	in_pport = 0;
1802 	in_pport_mask = 0xffff0000;
1803 	goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
1804 
1805 	err = ofdpa_flow_tbl_ig_port(ofdpa_port, flags,
1806 				     in_pport, in_pport_mask,
1807 				     goto_tbl);
1808 	if (err)
1809 		netdev_err(ofdpa_port->dev, "Error (%d) ingress port table entry\n", err);
1810 
1811 	return err;
1812 }
1813 
/* Deferred-work context used to report learned/removed FDB entries to
 * the bridge layer outside of atomic context.
 */
struct ofdpa_fdb_learn_work {
	struct work_struct work;
	struct ofdpa_port *ofdpa_port;
	int flags;		/* OFDPA_OP_FLAG_* snapshot */
	u8 addr[ETH_ALEN];	/* MAC address being reported */
	u16 vid;		/* VLAN id as seen by the bridge */
};
1821 
1822 static void ofdpa_port_fdb_learn_work(struct work_struct *work)
1823 {
1824 	const struct ofdpa_fdb_learn_work *lw =
1825 		container_of(work, struct ofdpa_fdb_learn_work, work);
1826 	bool removing = (lw->flags & OFDPA_OP_FLAG_REMOVE);
1827 	bool learned = (lw->flags & OFDPA_OP_FLAG_LEARNED);
1828 	struct switchdev_notifier_fdb_info info;
1829 
1830 	info.addr = lw->addr;
1831 	info.vid = lw->vid;
1832 
1833 	rtnl_lock();
1834 	if (learned && removing)
1835 		call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
1836 					 lw->ofdpa_port->dev, &info.info);
1837 	else if (learned && !removing)
1838 		call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
1839 					 lw->ofdpa_port->dev, &info.info);
1840 	rtnl_unlock();
1841 
1842 	kfree(work);
1843 }
1844 
/* Program (unless only refreshing) a bridging entry for @addr on
 * @vlan_id and, for bridged ports, schedule deferred work to notify
 * the bridge via switchdev.  Uses GFP_ATOMIC allocations, so it is
 * safe from atomic callers.
 */
static int ofdpa_port_fdb_learn(struct ofdpa_port *ofdpa_port,
				int flags, const u8 *addr, __be16 vlan_id)
{
	struct ofdpa_fdb_learn_work *lw;
	enum rocker_of_dpa_table_id goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 out_pport = ofdpa_port->pport;
	u32 tunnel_id = 0;
	u32 group_id = ROCKER_GROUP_NONE;
	bool copy_to_cpu = false;
	int err;

	/* bridged ports forward to their L2 interface group; otherwise
	 * the entry carries no egress group
	 */
	if (ofdpa_port_is_bridged(ofdpa_port))
		group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);

	/* REFRESH only updates ageing state; skip reprogramming the hw */
	if (!(flags & OFDPA_OP_FLAG_REFRESH)) {
		err = ofdpa_flow_tbl_bridge(ofdpa_port, flags, addr,
					    NULL, vlan_id, tunnel_id, goto_tbl,
					    group_id, copy_to_cpu);
		if (err)
			return err;
	}

	if (!ofdpa_port_is_bridged(ofdpa_port))
		return 0;

	lw = kzalloc(sizeof(*lw), GFP_ATOMIC);
	if (!lw)
		return -ENOMEM;

	INIT_WORK(&lw->work, ofdpa_port_fdb_learn_work);

	lw->ofdpa_port = ofdpa_port;
	lw->flags = flags;
	ether_addr_copy(lw->addr, addr);
	lw->vid = ofdpa_port_vlan_to_vid(ofdpa_port, vlan_id);

	/* lw is freed by the work handler */
	schedule_work(&lw->work);
	return 0;
}
1885 
1886 static struct ofdpa_fdb_tbl_entry *
1887 ofdpa_fdb_tbl_find(const struct ofdpa *ofdpa,
1888 		   const struct ofdpa_fdb_tbl_entry *match)
1889 {
1890 	struct ofdpa_fdb_tbl_entry *found;
1891 
1892 	hash_for_each_possible(ofdpa->fdb_tbl, found, entry, match->key_crc32)
1893 		if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
1894 			return found;
1895 
1896 	return NULL;
1897 }
1898 
/* Update the software FDB for (@addr, @vlan_id) and program the
 * hardware via ofdpa_port_fdb_learn().  @flags carries
 * OFDPA_OP_FLAG_REMOVE / OFDPA_OP_FLAG_LEARNED.
 */
static int ofdpa_port_fdb(struct ofdpa_port *ofdpa_port,
			  const unsigned char *addr,
			  __be16 vlan_id, int flags)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_fdb_tbl_entry *fdb;
	struct ofdpa_fdb_tbl_entry *found;
	bool removing = (flags & OFDPA_OP_FLAG_REMOVE);
	unsigned long lock_flags;

	fdb = kzalloc(sizeof(*fdb), GFP_KERNEL);
	if (!fdb)
		return -ENOMEM;

	fdb->learned = (flags & OFDPA_OP_FLAG_LEARNED);
	fdb->touched = jiffies;	/* ageing timestamp */
	fdb->key.ofdpa_port = ofdpa_port;
	ether_addr_copy(fdb->key.addr, addr);
	fdb->key.vlan_id = vlan_id;
	fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	found = ofdpa_fdb_tbl_find(ofdpa, fdb);

	if (found) {
		found->touched = jiffies;
		if (removing) {
			kfree(fdb);	/* lookup copy; not inserted */
			hash_del(&found->entry);
		}
	} else if (!removing) {
		/* new entry: fdb is now owned by the table */
		hash_add(ofdpa->fdb_tbl, &fdb->entry,
			 fdb->key_crc32);
	}

	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	/* Check if adding and already exists, or removing and can't find */
	if (!found != !removing) {
		kfree(fdb);	/* was not inserted in either of these cases */
		if (!found && removing)
			return 0;	/* nothing in hw to remove either */
		/* Refreshing existing to update aging timers */
		flags |= OFDPA_OP_FLAG_REFRESH;
	}

	return ofdpa_port_fdb_learn(ofdpa_port, flags, addr, vlan_id);
}
1948 
/* Flush all learned FDB entries for @ofdpa_port from software and
 * hardware.  Does nothing while the port's STP state is LEARNING or
 * FORWARDING — flushing only happens when the port leaves those
 * states.
 */
static int ofdpa_port_fdb_flush(struct ofdpa_port *ofdpa_port, int flags)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_fdb_tbl_entry *found;
	unsigned long lock_flags;
	struct hlist_node *tmp;
	int bkt;
	int err = 0;

	if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
	    ofdpa_port->stp_state == BR_STATE_FORWARDING)
		return 0;

	/* hw removals are fire-and-forget under the table lock */
	flags |= OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE;

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, found, entry) {
		if (found->key.ofdpa_port != ofdpa_port)
			continue;
		if (!found->learned)
			continue;
		err = ofdpa_port_fdb_learn(ofdpa_port, flags,
					   found->key.addr,
					   found->key.vlan_id);
		if (err)
			goto err_out;
		hash_del(&found->entry);
	}

err_out:
	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	return err;
}
1984 
/* Periodic timer callback that ages out learned FDB entries whose
 * 'touched' stamp is older than their port's ageing time, then re-arms
 * itself for the earliest upcoming expiry (or the table-wide default
 * ageing interval when nothing is pending sooner).
 */
static void ofdpa_fdb_cleanup(struct timer_list *t)
{
	struct ofdpa *ofdpa = from_timer(ofdpa, t, fdb_cleanup_timer);
	struct ofdpa_port *ofdpa_port;
	struct ofdpa_fdb_tbl_entry *entry;
	struct hlist_node *tmp;
	unsigned long next_timer = jiffies + ofdpa->ageing_time;
	unsigned long expires;
	unsigned long lock_flags;
	int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE |
		    OFDPA_OP_FLAG_LEARNED;
	int bkt;

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, entry, entry) {
		/* only learned entries age; static ones stay */
		if (!entry->learned)
			continue;
		ofdpa_port = entry->key.ofdpa_port;
		expires = entry->touched + ofdpa_port->ageing_time;
		if (time_before_eq(expires, jiffies)) {
			ofdpa_port_fdb_learn(ofdpa_port, flags,
					     entry->key.addr,
					     entry->key.vlan_id);
			hash_del(&entry->entry);
		} else if (time_before(expires, next_timer)) {
			/* track the soonest pending expiry for re-arming */
			next_timer = expires;
		}
	}

	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	mod_timer(&ofdpa->fdb_cleanup_timer, round_jiffies_up(next_timer));
}
2019 
2020 static int ofdpa_port_router_mac(struct ofdpa_port *ofdpa_port,
2021 				 int flags, __be16 vlan_id)
2022 {
2023 	u32 in_pport_mask = 0xffffffff;
2024 	__be16 eth_type;
2025 	const u8 *dst_mac_mask = ff_mac;
2026 	__be16 vlan_id_mask = htons(0xffff);
2027 	bool copy_to_cpu = false;
2028 	int err;
2029 
2030 	if (ntohs(vlan_id) == 0)
2031 		vlan_id = ofdpa_port->internal_vlan_id;
2032 
2033 	eth_type = htons(ETH_P_IP);
2034 	err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport,
2035 				      in_pport_mask, eth_type,
2036 				      ofdpa_port->dev->dev_addr,
2037 				      dst_mac_mask, vlan_id, vlan_id_mask,
2038 				      copy_to_cpu, flags);
2039 	if (err)
2040 		return err;
2041 
2042 	eth_type = htons(ETH_P_IPV6);
2043 	err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport,
2044 				      in_pport_mask, eth_type,
2045 				      ofdpa_port->dev->dev_addr,
2046 				      dst_mac_mask, vlan_id, vlan_id_mask,
2047 				      copy_to_cpu, flags);
2048 
2049 	return err;
2050 }
2051 
2052 static int ofdpa_port_fwding(struct ofdpa_port *ofdpa_port, int flags)
2053 {
2054 	bool pop_vlan;
2055 	u32 out_pport;
2056 	__be16 vlan_id;
2057 	u16 vid;
2058 	int err;
2059 
2060 	/* Port will be forwarding-enabled if its STP state is LEARNING
2061 	 * or FORWARDING.  Traffic from CPU can still egress, regardless of
2062 	 * port STP state.  Use L2 interface group on port VLANs as a way
2063 	 * to toggle port forwarding: if forwarding is disabled, L2
2064 	 * interface group will not exist.
2065 	 */
2066 
2067 	if (ofdpa_port->stp_state != BR_STATE_LEARNING &&
2068 	    ofdpa_port->stp_state != BR_STATE_FORWARDING)
2069 		flags |= OFDPA_OP_FLAG_REMOVE;
2070 
2071 	out_pport = ofdpa_port->pport;
2072 	for (vid = 1; vid < VLAN_N_VID; vid++) {
2073 		if (!test_bit(vid, ofdpa_port->vlan_bitmap))
2074 			continue;
2075 		vlan_id = htons(vid);
2076 		pop_vlan = ofdpa_vlan_id_is_internal(vlan_id);
2077 		err = ofdpa_group_l2_interface(ofdpa_port, flags,
2078 					       vlan_id, out_pport, pop_vlan);
2079 		if (err) {
2080 			netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
2081 				   err, out_pport);
2082 			return err;
2083 		}
2084 	}
2085 
2086 	return 0;
2087 }
2088 
/* Move the port to a new bridge STP state: compute the set of control
 * (ctrl) ACL entry classes the new state wants, apply the delta versus
 * the current set, flush learned FDB entries, and refresh forwarding.
 * On failure the previous software ctrl set and STP state are restored
 * (hardware entries already changed are not rolled back here).
 */
static int ofdpa_port_stp_update(struct ofdpa_port *ofdpa_port,
				 int flags, u8 state)
{
	bool want[OFDPA_CTRL_MAX] = { 0, };
	bool prev_ctrls[OFDPA_CTRL_MAX];
	u8 prev_state;
	int err;
	int i;

	/* snapshot current state for rollback on error */
	memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls));
	prev_state = ofdpa_port->stp_state;

	if (ofdpa_port->stp_state == state)
		return 0;

	ofdpa_port->stp_state = state;

	/* which ctrl classes does the new state need? */
	switch (state) {
	case BR_STATE_DISABLED:
		/* port is completely disabled */
		break;
	case BR_STATE_LISTENING:
	case BR_STATE_BLOCKING:
		want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true;
		break;
	case BR_STATE_LEARNING:
	case BR_STATE_FORWARDING:
		if (!ofdpa_port_is_ovsed(ofdpa_port))
			want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true;
		want[OFDPA_CTRL_IPV4_MCAST] = true;
		want[OFDPA_CTRL_IPV6_MCAST] = true;
		/* default traffic policy depends on the port's master:
		 * bridge, OVS, or standalone (local ARP trapping)
		 */
		if (ofdpa_port_is_bridged(ofdpa_port))
			want[OFDPA_CTRL_DFLT_BRIDGING] = true;
		else if (ofdpa_port_is_ovsed(ofdpa_port))
			want[OFDPA_CTRL_DFLT_OVS] = true;
		else
			want[OFDPA_CTRL_LOCAL_ARP] = true;
		break;
	}

	/* install/remove only the ctrl entries whose wanted state changed */
	for (i = 0; i < OFDPA_CTRL_MAX; i++) {
		if (want[i] != ofdpa_port->ctrls[i]) {
			int ctrl_flags = flags |
					 (want[i] ? 0 : OFDPA_OP_FLAG_REMOVE);
			err = ofdpa_port_ctrl(ofdpa_port, ctrl_flags,
					      &ofdpa_ctrls[i]);
			if (err)
				goto err_port_ctrl;
			ofdpa_port->ctrls[i] = want[i];
		}
	}

	err = ofdpa_port_fdb_flush(ofdpa_port, flags);
	if (err)
		goto err_fdb_flush;

	err = ofdpa_port_fwding(ofdpa_port, flags);
	if (err)
		goto err_port_fwding;

	return 0;

err_port_ctrl:
err_fdb_flush:
err_port_fwding:
	/* restore pre-transition software state */
	memcpy(ofdpa_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
	ofdpa_port->stp_state = prev_state;
	return err;
}
2158 
2159 static int ofdpa_port_fwd_enable(struct ofdpa_port *ofdpa_port, int flags)
2160 {
2161 	if (ofdpa_port_is_bridged(ofdpa_port))
2162 		/* bridge STP will enable port */
2163 		return 0;
2164 
2165 	/* port is not bridged, so simulate going to FORWARDING state */
2166 	return ofdpa_port_stp_update(ofdpa_port, flags,
2167 				     BR_STATE_FORWARDING);
2168 }
2169 
2170 static int ofdpa_port_fwd_disable(struct ofdpa_port *ofdpa_port, int flags)
2171 {
2172 	if (ofdpa_port_is_bridged(ofdpa_port))
2173 		/* bridge STP will disable port */
2174 		return 0;
2175 
2176 	/* port is not bridged, so simulate going to DISABLED state */
2177 	return ofdpa_port_stp_update(ofdpa_port, flags,
2178 				     BR_STATE_DISABLED);
2179 }
2180 
2181 static int ofdpa_port_vlan_add(struct ofdpa_port *ofdpa_port,
2182 			       u16 vid, u16 flags)
2183 {
2184 	int err;
2185 
2186 	/* XXX deal with flags for PVID and untagged */
2187 
2188 	err = ofdpa_port_vlan(ofdpa_port, 0, vid);
2189 	if (err)
2190 		return err;
2191 
2192 	err = ofdpa_port_router_mac(ofdpa_port, 0, htons(vid));
2193 	if (err)
2194 		ofdpa_port_vlan(ofdpa_port,
2195 				OFDPA_OP_FLAG_REMOVE, vid);
2196 
2197 	return err;
2198 }
2199 
2200 static int ofdpa_port_vlan_del(struct ofdpa_port *ofdpa_port,
2201 			       u16 vid, u16 flags)
2202 {
2203 	int err;
2204 
2205 	err = ofdpa_port_router_mac(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
2206 				    htons(vid));
2207 	if (err)
2208 		return err;
2209 
2210 	return ofdpa_port_vlan(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
2211 			       vid);
2212 }
2213 
2214 static struct ofdpa_internal_vlan_tbl_entry *
2215 ofdpa_internal_vlan_tbl_find(const struct ofdpa *ofdpa, int ifindex)
2216 {
2217 	struct ofdpa_internal_vlan_tbl_entry *found;
2218 
2219 	hash_for_each_possible(ofdpa->internal_vlan_tbl, found,
2220 			       entry, ifindex) {
2221 		if (found->ifindex == ifindex)
2222 			return found;
2223 	}
2224 
2225 	return NULL;
2226 }
2227 
/* Get (allocating on first use) the internal VLAN id associated with a
 * netdev ifindex, taking a reference on the table entry.  Returns 0 on
 * allocation failure.  Paired with ofdpa_port_internal_vlan_id_put().
 */
static __be16 ofdpa_port_internal_vlan_id_get(struct ofdpa_port *ofdpa_port,
					      int ifindex)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_internal_vlan_tbl_entry *entry;
	struct ofdpa_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	int i;

	/* allocate optimistically outside the lock; freed if an entry
	 * for this ifindex already exists
	 */
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return 0;

	entry->ifindex = ifindex;

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex);
	if (found) {
		kfree(entry);
		goto found;
	}

	found = entry;
	hash_add(ofdpa->internal_vlan_tbl, &found->entry, found->ifindex);

	/* claim the first free internal VLAN id */
	for (i = 0; i < OFDPA_N_INTERNAL_VLANS; i++) {
		if (test_and_set_bit(i, ofdpa->internal_vlan_bitmap))
			continue;
		found->vlan_id = htons(OFDPA_INTERNAL_VLAN_ID_BASE + i);
		goto found;
	}

	netdev_err(ofdpa_port->dev, "Out of internal VLAN IDs\n");
	/* NOTE(review): on exhaustion the entry stays in the table with
	 * vlan_id 0 (kzalloc) and still gets a ref below — presumably
	 * callers treat VLAN 0 as "no VLAN"; confirm
	 */

found:
	found->ref_count++;
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	return found->vlan_id;
}
2269 
/* Program an IPv4 unicast-routing flow for dst/dst_len.  If the route
 * has a gateway and its nexthop device is this port, the flow points
 * at an L3 unicast group for the resolved nexthop; otherwise packets
 * are sent to the CPU via the internal-VLAN L2 interface group.
 */
static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port,  __be32 dst,
			       int dst_len, struct fib_info *fi, u32 tb_id,
			       int flags)
{
	const struct fib_nh *nh;
	__be16 eth_type = htons(ETH_P_IP);
	__be32 dst_mask = inet_make_mask(dst_len);
	__be16 internal_vlan_id = ofdpa_port->internal_vlan_id;
	u32 priority = fi->fib_priority;
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	bool nh_on_port;
	bool has_gw;
	u32 index;
	int err;

	/* XXX support ECMP */

	/* only the first nexthop is considered (no ECMP yet) */
	nh = fi->fib_nh;
	nh_on_port = (fi->fib_dev == ofdpa_port->dev);
	has_gw = !!nh->nh_gw;

	if (has_gw && nh_on_port) {
		/* resolve/install the nexthop neighbour entry and get
		 * its L3 unicast group index
		 */
		err = ofdpa_port_ipv4_nh(ofdpa_port, flags,
					 nh->nh_gw, &index);
		if (err)
			return err;

		group_id = ROCKER_GROUP_L3_UNICAST(index);
	} else {
		/* Send to CPU for processing */
		group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
	}

	err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, eth_type, dst,
					    dst_mask, priority, goto_tbl,
					    group_id, fi, flags);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) IPv4 route %pI4\n",
			   err, &dst);

	return err;
}
2314 
/* Drop a reference on the internal VLAN entry for ifindex; when the
 * last reference goes, release the VLAN id back to the bitmap and free
 * the entry.  Counterpart of ofdpa_port_internal_vlan_id_get().
 */
static void
ofdpa_port_internal_vlan_id_put(const struct ofdpa_port *ofdpa_port,
				int ifindex)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	unsigned long bit;

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex);
	if (!found) {
		/* unbalanced put — log and bail */
		netdev_err(ofdpa_port->dev,
			   "ifindex (%d) not found in internal VLAN tbl\n",
			   ifindex);
		goto not_found;
	}

	if (--found->ref_count <= 0) {
		/* last user: recycle the VLAN id and free the entry */
		bit = ntohs(found->vlan_id) - OFDPA_INTERNAL_VLAN_ID_BASE;
		clear_bit(bit, ofdpa->internal_vlan_bitmap);
		hash_del(&found->entry);
		kfree(found);
	}

not_found:
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags);
}
2344 
2345 /**********************************
2346  * Rocker world ops implementation
2347  **********************************/
2348 
2349 static int ofdpa_init(struct rocker *rocker)
2350 {
2351 	struct ofdpa *ofdpa = rocker->wpriv;
2352 
2353 	ofdpa->rocker = rocker;
2354 
2355 	hash_init(ofdpa->flow_tbl);
2356 	spin_lock_init(&ofdpa->flow_tbl_lock);
2357 
2358 	hash_init(ofdpa->group_tbl);
2359 	spin_lock_init(&ofdpa->group_tbl_lock);
2360 
2361 	hash_init(ofdpa->fdb_tbl);
2362 	spin_lock_init(&ofdpa->fdb_tbl_lock);
2363 
2364 	hash_init(ofdpa->internal_vlan_tbl);
2365 	spin_lock_init(&ofdpa->internal_vlan_tbl_lock);
2366 
2367 	hash_init(ofdpa->neigh_tbl);
2368 	spin_lock_init(&ofdpa->neigh_tbl_lock);
2369 
2370 	timer_setup(&ofdpa->fdb_cleanup_timer, ofdpa_fdb_cleanup, 0);
2371 	mod_timer(&ofdpa->fdb_cleanup_timer, jiffies);
2372 
2373 	ofdpa->ageing_time = BR_DEFAULT_AGEING_TIME;
2374 
2375 	return 0;
2376 }
2377 
/* World teardown: stop the ageing timer, drain pending work, and empty
 * every ofdpa hash table under its lock.
 * NOTE(review): entries are only unlinked (hash_del), not kfree()d
 * here — confirm they are freed elsewhere or intentionally leaked at
 * device teardown.
 */
static void ofdpa_fini(struct rocker *rocker)
{
	struct ofdpa *ofdpa = rocker->wpriv;

	unsigned long flags;
	struct ofdpa_flow_tbl_entry *flow_entry;
	struct ofdpa_group_tbl_entry *group_entry;
	struct ofdpa_fdb_tbl_entry *fdb_entry;
	struct ofdpa_internal_vlan_tbl_entry *internal_vlan_entry;
	struct ofdpa_neigh_tbl_entry *neigh_entry;
	struct hlist_node *tmp;
	int bkt;

	/* no new timer callbacks after this, and no queued work left */
	del_timer_sync(&ofdpa->fdb_cleanup_timer);
	flush_workqueue(rocker->rocker_owq);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
	hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry)
		hash_del(&flow_entry->entry);
	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->group_tbl_lock, flags);
	hash_for_each_safe(ofdpa->group_tbl, bkt, tmp, group_entry, entry)
		hash_del(&group_entry->entry);
	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, flags);
	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, fdb_entry, entry)
		hash_del(&fdb_entry->entry);
	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, flags);
	hash_for_each_safe(ofdpa->internal_vlan_tbl, bkt,
			   tmp, internal_vlan_entry, entry)
		hash_del(&internal_vlan_entry->entry);
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, flags);
	hash_for_each_safe(ofdpa->neigh_tbl, bkt, tmp, neigh_entry, entry)
		hash_del(&neigh_entry->entry);
	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, flags);
}
2420 
2421 static int ofdpa_port_pre_init(struct rocker_port *rocker_port)
2422 {
2423 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2424 
2425 	ofdpa_port->ofdpa = rocker_port->rocker->wpriv;
2426 	ofdpa_port->rocker_port = rocker_port;
2427 	ofdpa_port->dev = rocker_port->dev;
2428 	ofdpa_port->pport = rocker_port->pport;
2429 	ofdpa_port->brport_flags = BR_LEARNING;
2430 	ofdpa_port->ageing_time = BR_DEFAULT_AGEING_TIME;
2431 	return 0;
2432 }
2433 
2434 static int ofdpa_port_init(struct rocker_port *rocker_port)
2435 {
2436 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2437 	int err;
2438 
2439 	rocker_port_set_learning(rocker_port,
2440 				 !!(ofdpa_port->brport_flags & BR_LEARNING));
2441 
2442 	err = ofdpa_port_ig_tbl(ofdpa_port, 0);
2443 	if (err) {
2444 		netdev_err(ofdpa_port->dev, "install ig port table failed\n");
2445 		return err;
2446 	}
2447 
2448 	ofdpa_port->internal_vlan_id =
2449 		ofdpa_port_internal_vlan_id_get(ofdpa_port,
2450 						ofdpa_port->dev->ifindex);
2451 
2452 	err = ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
2453 	if (err) {
2454 		netdev_err(ofdpa_port->dev, "install untagged VLAN failed\n");
2455 		goto err_untagged_vlan;
2456 	}
2457 	return 0;
2458 
2459 err_untagged_vlan:
2460 	ofdpa_port_ig_tbl(ofdpa_port, OFDPA_OP_FLAG_REMOVE);
2461 	return err;
2462 }
2463 
2464 static void ofdpa_port_fini(struct rocker_port *rocker_port)
2465 {
2466 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2467 
2468 	ofdpa_port_ig_tbl(ofdpa_port, OFDPA_OP_FLAG_REMOVE);
2469 }
2470 
2471 static int ofdpa_port_open(struct rocker_port *rocker_port)
2472 {
2473 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2474 
2475 	return ofdpa_port_fwd_enable(ofdpa_port, 0);
2476 }
2477 
2478 static void ofdpa_port_stop(struct rocker_port *rocker_port)
2479 {
2480 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2481 
2482 	ofdpa_port_fwd_disable(ofdpa_port, OFDPA_OP_FLAG_NOWAIT);
2483 }
2484 
2485 static int ofdpa_port_attr_stp_state_set(struct rocker_port *rocker_port,
2486 					 u8 state)
2487 {
2488 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2489 
2490 	return ofdpa_port_stp_update(ofdpa_port, 0, state);
2491 }
2492 
/* switchdev attr hook: set bridge port flags.  Only a BR_LEARNING
 * change reaches hardware, and only in the commit phase; in the
 * prepare phase the flags are restored so the transaction can abort
 * without side effects.
 */
static int ofdpa_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
					    unsigned long brport_flags,
					    struct switchdev_trans *trans)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	unsigned long orig_flags;
	int err = 0;

	orig_flags = ofdpa_port->brport_flags;
	ofdpa_port->brport_flags = brport_flags;
	/* push learning to hardware only if it flipped and we are in
	 * the commit (non-prepare) phase
	 */
	if ((orig_flags ^ ofdpa_port->brport_flags) & BR_LEARNING &&
	    !switchdev_trans_ph_prepare(trans))
		err = rocker_port_set_learning(ofdpa_port->rocker_port,
					       !!(ofdpa_port->brport_flags & BR_LEARNING));

	/* prepare phase must leave software state untouched */
	if (switchdev_trans_ph_prepare(trans))
		ofdpa_port->brport_flags = orig_flags;

	return err;
}
2513 
2514 static int
2515 ofdpa_port_attr_bridge_flags_get(const struct rocker_port *rocker_port,
2516 				 unsigned long *p_brport_flags)
2517 {
2518 	const struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2519 
2520 	*p_brport_flags = ofdpa_port->brport_flags;
2521 	return 0;
2522 }
2523 
2524 static int
2525 ofdpa_port_attr_bridge_flags_support_get(const struct rocker_port *
2526 					 rocker_port,
2527 					 unsigned long *
2528 					 p_brport_flags_support)
2529 {
2530 	*p_brport_flags_support = BR_LEARNING;
2531 	return 0;
2532 }
2533 
/* switchdev attr hook: set the port's FDB ageing time (clock_t from
 * the bridge, converted to jiffies).  The global ageing time tracks
 * the minimum across ports (it only ever shrinks here); the cleanup
 * timer is kicked so the new interval takes effect immediately.
 */
static int
ofdpa_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
				       u32 ageing_time,
				       struct switchdev_trans *trans)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;

	/* nothing to do in the prepare phase */
	if (!switchdev_trans_ph_prepare(trans)) {
		ofdpa_port->ageing_time = clock_t_to_jiffies(ageing_time);
		if (ofdpa_port->ageing_time < ofdpa->ageing_time)
			ofdpa->ageing_time = ofdpa_port->ageing_time;
		mod_timer(&ofdpa_port->ofdpa->fdb_cleanup_timer, jiffies);
	}

	return 0;
}
2551 
2552 static int ofdpa_port_obj_vlan_add(struct rocker_port *rocker_port,
2553 				   const struct switchdev_obj_port_vlan *vlan)
2554 {
2555 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2556 	u16 vid;
2557 	int err;
2558 
2559 	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
2560 		err = ofdpa_port_vlan_add(ofdpa_port, vid, vlan->flags);
2561 		if (err)
2562 			return err;
2563 	}
2564 
2565 	return 0;
2566 }
2567 
2568 static int ofdpa_port_obj_vlan_del(struct rocker_port *rocker_port,
2569 				   const struct switchdev_obj_port_vlan *vlan)
2570 {
2571 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2572 	u16 vid;
2573 	int err;
2574 
2575 	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
2576 		err = ofdpa_port_vlan_del(ofdpa_port, vid, vlan->flags);
2577 		if (err)
2578 			return err;
2579 	}
2580 
2581 	return 0;
2582 }
2583 
2584 static int ofdpa_port_obj_fdb_add(struct rocker_port *rocker_port,
2585 				  u16 vid, const unsigned char *addr)
2586 {
2587 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2588 	__be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, NULL);
2589 
2590 	if (!ofdpa_port_is_bridged(ofdpa_port))
2591 		return -EINVAL;
2592 
2593 	return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, 0);
2594 }
2595 
2596 static int ofdpa_port_obj_fdb_del(struct rocker_port *rocker_port,
2597 				  u16 vid, const unsigned char *addr)
2598 {
2599 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2600 	__be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, NULL);
2601 	int flags = OFDPA_OP_FLAG_REMOVE;
2602 
2603 	if (!ofdpa_port_is_bridged(ofdpa_port))
2604 		return -EINVAL;
2605 
2606 	return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, flags);
2607 }
2608 
/* Enslave the port to a bridge: switch the port's internal VLAN id
 * from its own ifindex-keyed id to the bridge's, re-installing the
 * untagged VLAN across the change.
 */
static int ofdpa_port_bridge_join(struct ofdpa_port *ofdpa_port,
				  struct net_device *bridge)
{
	int err;

	/* Port is joining bridge, so the internal VLAN for the
	 * port is going to change to the bridge internal VLAN.
	 * Let's remove untagged VLAN (vid=0) from port and
	 * re-add once internal VLAN has changed.
	 */

	err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	/* release the standalone internal VLAN, take the bridge's */
	ofdpa_port_internal_vlan_id_put(ofdpa_port,
					ofdpa_port->dev->ifindex);
	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port, bridge->ifindex);

	ofdpa_port->bridge_dev = bridge;

	return ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
}
2633 
/* Release the port from its bridge: switch back from the bridge's
 * internal VLAN id to the port's own, re-install the untagged VLAN,
 * and re-enable standalone forwarding if the netdev is up.
 */
static int ofdpa_port_bridge_leave(struct ofdpa_port *ofdpa_port)
{
	int err;

	err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	/* drop the bridge's internal VLAN, reclaim the port's own */
	ofdpa_port_internal_vlan_id_put(ofdpa_port,
					ofdpa_port->bridge_dev->ifindex);
	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port,
						ofdpa_port->dev->ifindex);

	ofdpa_port->bridge_dev = NULL;

	err = ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	/* no bridge STP anymore: drive forwarding ourselves if up */
	if (ofdpa_port->dev->flags & IFF_UP)
		err = ofdpa_port_fwd_enable(ofdpa_port, 0);

	return err;
}
2659 
2660 static int ofdpa_port_ovs_changed(struct ofdpa_port *ofdpa_port,
2661 				  struct net_device *master)
2662 {
2663 	int err;
2664 
2665 	ofdpa_port->bridge_dev = master;
2666 
2667 	err = ofdpa_port_fwd_disable(ofdpa_port, 0);
2668 	if (err)
2669 		return err;
2670 	err = ofdpa_port_fwd_enable(ofdpa_port, 0);
2671 
2672 	return err;
2673 }
2674 
2675 static int ofdpa_port_master_linked(struct rocker_port *rocker_port,
2676 				    struct net_device *master)
2677 {
2678 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2679 	int err = 0;
2680 
2681 	if (netif_is_bridge_master(master))
2682 		err = ofdpa_port_bridge_join(ofdpa_port, master);
2683 	else if (netif_is_ovs_master(master))
2684 		err = ofdpa_port_ovs_changed(ofdpa_port, master);
2685 	return err;
2686 }
2687 
2688 static int ofdpa_port_master_unlinked(struct rocker_port *rocker_port,
2689 				      struct net_device *master)
2690 {
2691 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2692 	int err = 0;
2693 
2694 	if (ofdpa_port_is_bridged(ofdpa_port))
2695 		err = ofdpa_port_bridge_leave(ofdpa_port);
2696 	else if (ofdpa_port_is_ovsed(ofdpa_port))
2697 		err = ofdpa_port_ovs_changed(ofdpa_port, NULL);
2698 	return err;
2699 }
2700 
2701 static int ofdpa_port_neigh_update(struct rocker_port *rocker_port,
2702 				   struct neighbour *n)
2703 {
2704 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2705 	int flags = (n->nud_state & NUD_VALID ? 0 : OFDPA_OP_FLAG_REMOVE) |
2706 						    OFDPA_OP_FLAG_NOWAIT;
2707 	__be32 ip_addr = *(__be32 *) n->primary_key;
2708 
2709 	return ofdpa_port_ipv4_neigh(ofdpa_port, flags, ip_addr, n->ha);
2710 }
2711 
2712 static int ofdpa_port_neigh_destroy(struct rocker_port *rocker_port,
2713 				    struct neighbour *n)
2714 {
2715 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2716 	int flags = OFDPA_OP_FLAG_REMOVE | OFDPA_OP_FLAG_NOWAIT;
2717 	__be32 ip_addr = *(__be32 *) n->primary_key;
2718 
2719 	return ofdpa_port_ipv4_neigh(ofdpa_port, flags, ip_addr, n->ha);
2720 }
2721 
2722 static int ofdpa_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
2723 				       const unsigned char *addr,
2724 				       __be16 vlan_id)
2725 {
2726 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2727 	int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_LEARNED;
2728 
2729 	if (ofdpa_port->stp_state != BR_STATE_LEARNING &&
2730 	    ofdpa_port->stp_state != BR_STATE_FORWARDING)
2731 		return 0;
2732 
2733 	return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, flags);
2734 }
2735 
2736 static struct ofdpa_port *ofdpa_port_dev_lower_find(struct net_device *dev,
2737 						    struct rocker *rocker)
2738 {
2739 	struct rocker_port *rocker_port;
2740 
2741 	rocker_port = rocker_port_dev_lower_find(dev, rocker);
2742 	return rocker_port ? rocker_port->wpriv : NULL;
2743 }
2744 
2745 static int ofdpa_fib4_add(struct rocker *rocker,
2746 			  const struct fib_entry_notifier_info *fen_info)
2747 {
2748 	struct ofdpa *ofdpa = rocker->wpriv;
2749 	struct ofdpa_port *ofdpa_port;
2750 	int err;
2751 
2752 	if (ofdpa->fib_aborted)
2753 		return 0;
2754 	ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker);
2755 	if (!ofdpa_port)
2756 		return 0;
2757 	err = ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst),
2758 				  fen_info->dst_len, fen_info->fi,
2759 				  fen_info->tb_id, 0);
2760 	if (err)
2761 		return err;
2762 	fen_info->fi->fib_nh->nh_flags |= RTNH_F_OFFLOAD;
2763 	return 0;
2764 }
2765 
2766 static int ofdpa_fib4_del(struct rocker *rocker,
2767 			  const struct fib_entry_notifier_info *fen_info)
2768 {
2769 	struct ofdpa *ofdpa = rocker->wpriv;
2770 	struct ofdpa_port *ofdpa_port;
2771 
2772 	if (ofdpa->fib_aborted)
2773 		return 0;
2774 	ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker);
2775 	if (!ofdpa_port)
2776 		return 0;
2777 	fen_info->fi->fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
2778 	return ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst),
2779 				   fen_info->dst_len, fen_info->fi,
2780 				   fen_info->tb_id, OFDPA_OP_FLAG_REMOVE);
2781 }
2782 
/* FIB notifier abort: hardware can no longer track the kernel FIB.
 * Remove every offloaded unicast-routing flow, clear the nexthop
 * offload marks, and latch fib_aborted so later add/del events are
 * ignored (routing falls back to the kernel slow path).
 */
static void ofdpa_fib4_abort(struct rocker *rocker)
{
	struct ofdpa *ofdpa = rocker->wpriv;
	struct ofdpa_port *ofdpa_port;
	struct ofdpa_flow_tbl_entry *flow_entry;
	struct hlist_node *tmp;
	unsigned long flags;
	int bkt;

	/* only abort once */
	if (ofdpa->fib_aborted)
		return;

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
	hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry) {
		/* only unicast-routing flows carry FIB state */
		if (flow_entry->key.tbl_id !=
		    ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING)
			continue;
		ofdpa_port = ofdpa_port_dev_lower_find(flow_entry->fi->fib_dev,
						       rocker);
		if (!ofdpa_port)
			continue;
		flow_entry->fi->fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
		ofdpa_flow_tbl_del(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
				   flow_entry);
	}
	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags);
	ofdpa->fib_aborted = true;
}
2811 
/* OF-DPA "world" ops vector registered with the rocker core; maps the
 * generic rocker/switchdev callbacks onto the implementations above.
 */
struct rocker_world_ops rocker_ofdpa_ops = {
	.kind = "ofdpa",
	.priv_size = sizeof(struct ofdpa),
	.port_priv_size = sizeof(struct ofdpa_port),
	.mode = ROCKER_PORT_MODE_OF_DPA,
	.init = ofdpa_init,
	.fini = ofdpa_fini,
	.port_pre_init = ofdpa_port_pre_init,
	.port_init = ofdpa_port_init,
	.port_fini = ofdpa_port_fini,
	.port_open = ofdpa_port_open,
	.port_stop = ofdpa_port_stop,
	.port_attr_stp_state_set = ofdpa_port_attr_stp_state_set,
	.port_attr_bridge_flags_set = ofdpa_port_attr_bridge_flags_set,
	.port_attr_bridge_flags_get = ofdpa_port_attr_bridge_flags_get,
	.port_attr_bridge_flags_support_get = ofdpa_port_attr_bridge_flags_support_get,
	.port_attr_bridge_ageing_time_set = ofdpa_port_attr_bridge_ageing_time_set,
	.port_obj_vlan_add = ofdpa_port_obj_vlan_add,
	.port_obj_vlan_del = ofdpa_port_obj_vlan_del,
	.port_obj_fdb_add = ofdpa_port_obj_fdb_add,
	.port_obj_fdb_del = ofdpa_port_obj_fdb_del,
	.port_master_linked = ofdpa_port_master_linked,
	.port_master_unlinked = ofdpa_port_master_unlinked,
	.port_neigh_update = ofdpa_port_neigh_update,
	.port_neigh_destroy = ofdpa_port_neigh_destroy,
	.port_ev_mac_vlan_seen = ofdpa_port_ev_mac_vlan_seen,
	.fib4_add = ofdpa_fib4_add,
	.fib4_del = ofdpa_fib4_del,
	.fib4_abort = ofdpa_fib4_abort,
};
2842