1 /*
2  * drivers/net/ethernet/rocker/rocker_ofdpa.c - Rocker switch OF-DPA-like
3  *					        implementation
4  * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
5  * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com>
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  */
12 
13 #include <linux/kernel.h>
14 #include <linux/types.h>
15 #include <linux/spinlock.h>
16 #include <linux/hashtable.h>
17 #include <linux/crc32.h>
18 #include <linux/netdevice.h>
19 #include <linux/inetdevice.h>
20 #include <linux/if_vlan.h>
21 #include <linux/if_bridge.h>
22 #include <net/neighbour.h>
23 #include <net/switchdev.h>
24 #include <net/ip_fib.h>
25 #include <net/arp.h>
26 
27 #include "rocker.h"
28 #include "rocker_tlv.h"
29 
/* Key identifying a flow table entry.  tbl_id selects which union member
 * is meaningful; priority orders competing matches.  The key is hashed
 * with crc32 over key_len bytes (see ofdpa_flow_tbl_add), so unused
 * union bytes must remain zeroed for lookups to match.
 */
struct ofdpa_flow_tbl_key {
	u32 priority;
	enum rocker_of_dpa_table_id tbl_id;
	union {
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			enum rocker_of_dpa_table_id goto_tbl;
		} ig_port;	/* ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT */
		struct {
			u32 in_pport;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool untagged;		/* rewrite to new_vlan_id */
			__be16 new_vlan_id;
		} vlan;		/* ROCKER_OF_DPA_TABLE_ID_VLAN */
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			__be16 eth_type;
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool copy_to_cpu;
		} term_mac;	/* ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC */
		struct {
			__be16 eth_type;
			__be32 dst4;		/* IPv4 destination */
			__be32 dst4_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
		} ucast_routing; /* ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING */
		struct {
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			int has_eth_dst;	/* eth_dst field is valid */
			int has_eth_dst_mask;	/* eth_dst_mask field is valid */
			__be16 vlan_id;
			u32 tunnel_id;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
			bool copy_to_cpu;
		} bridge;	/* ROCKER_OF_DPA_TABLE_ID_BRIDGING */
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			u8 eth_src[ETH_ALEN];
			u8 eth_src_mask[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 eth_type;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			u8 ip_proto;
			u8 ip_proto_mask;
			u8 ip_tos;	/* DSCP in low 6 bits, ECN in top 2 */
			u8 ip_tos_mask;
			u32 group_id;
		} acl;		/* ROCKER_OF_DPA_TABLE_ID_ACL_POLICY */
	};
};
94 
/* Software shadow of one hardware flow entry, hashed on key_crc32 and
 * resolved by memcmp of the key.
 */
struct ofdpa_flow_tbl_entry {
	struct hlist_node entry;
	u32 cmd;	/* ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_{ADD,MOD,DEL} */
	u64 cookie;	/* device-side identity; preserved across mods */
	struct ofdpa_flow_tbl_key key;
	size_t key_len;	/* bytes of key to hash/compare; 0 means whole key */
	u32 key_crc32; /* key */
};
103 
/* Software shadow of one hardware group table entry.  The group type
 * encoded in group_id (ROCKER_GROUP_TYPE_GET) selects the union member;
 * flood/mcast groups use the group_ids array instead.
 */
struct ofdpa_group_tbl_entry {
	struct hlist_node entry;
	u32 cmd;	/* ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_{ADD,MOD,DEL} */
	u32 group_id; /* key */
	u16 group_count;	/* number of nested IDs in group_ids */
	u32 *group_ids;		/* nested group IDs (L2 flood/mcast only) */
	union {
		struct {
			u8 pop_vlan;	/* strip VLAN tag on egress */
		} l2_interface;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			u32 group_id;	/* lower-level group to chain to */
		} l2_rewrite;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			bool ttl_check;	/* decrement/check TTL in hardware */
			u32 group_id;	/* lower-level group to chain to */
		} l3_unicast;
	};
};
129 
/* One FDB (forwarding database) entry, keyed on (port, MAC, VLAN). */
struct ofdpa_fdb_tbl_entry {
	struct hlist_node entry;
	u32 key_crc32; /* key */
	bool learned;		/* learned from traffic vs. statically added */
	unsigned long touched;	/* last-activity timestamp; presumably
				 * consumed by fdb_cleanup_timer for ageing -
				 * confirm against the cleanup path
				 */
	struct ofdpa_fdb_tbl_key {
		struct ofdpa_port *ofdpa_port;
		u8 addr[ETH_ALEN];
		__be16 vlan_id;
	} key;
};
141 
/* Maps a netdev ifindex to a driver-internal VLAN ID, refcounted so
 * several users of the same ifindex share one internal VLAN.
 */
struct ofdpa_internal_vlan_tbl_entry {
	struct hlist_node entry;
	int ifindex; /* key */
	u32 ref_count;
	__be16 vlan_id;		/* allocated from the internal VLAN range */
};
148 
/* Cached IPv4 neighbour, keyed by IP address and refcounted. */
struct ofdpa_neigh_tbl_entry {
	struct hlist_node entry;
	__be32 ip_addr; /* key */
	struct net_device *dev;
	u32 ref_count;
	u32 index;		/* from ofdpa->neigh_tbl_next_index */
	u8 eth_dst[ETH_ALEN];	/* resolved neighbour MAC */
	bool ttl_check;
};
158 
/* Indexes into ofdpa_port->ctrls[] for the built-in control-traffic
 * ACL entries a port may have installed.
 */
enum {
	OFDPA_CTRL_LINK_LOCAL_MCAST,
	OFDPA_CTRL_LOCAL_ARP,
	OFDPA_CTRL_IPV4_MCAST,
	OFDPA_CTRL_IPV6_MCAST,
	OFDPA_CTRL_DFLT_BRIDGING,
	OFDPA_CTRL_DFLT_OVS,
	OFDPA_CTRL_MAX,
};
168 
169 #define OFDPA_INTERNAL_VLAN_ID_BASE	0x0f00
170 #define OFDPA_N_INTERNAL_VLANS		255
171 #define OFDPA_VLAN_BITMAP_LEN		BITS_TO_LONGS(VLAN_N_VID)
172 #define OFDPA_INTERNAL_VLAN_BITMAP_LEN	BITS_TO_LONGS(OFDPA_N_INTERNAL_VLANS)
173 #define OFDPA_UNTAGGED_VID 0
174 
/* Per-switch OF-DPA state: the software shadows of the flow, group,
 * FDB, internal-VLAN and neighbour tables, each with its own spinlock
 * so they can be manipulated independently (including from atomic
 * context, see the irqsave usage at the call sites).
 */
struct ofdpa {
	struct rocker *rocker;
	DECLARE_HASHTABLE(flow_tbl, 16);
	spinlock_t flow_tbl_lock;		/* for flow tbl accesses */
	u64 flow_tbl_next_cookie;		/* monotonically allocated */
	DECLARE_HASHTABLE(group_tbl, 16);
	spinlock_t group_tbl_lock;		/* for group tbl accesses */
	struct timer_list fdb_cleanup_timer;
	DECLARE_HASHTABLE(fdb_tbl, 16);
	spinlock_t fdb_tbl_lock;		/* for fdb tbl accesses */
	unsigned long internal_vlan_bitmap[OFDPA_INTERNAL_VLAN_BITMAP_LEN];
	DECLARE_HASHTABLE(internal_vlan_tbl, 8);
	spinlock_t internal_vlan_tbl_lock;	/* for vlan tbl accesses */
	DECLARE_HASHTABLE(neigh_tbl, 16);
	spinlock_t neigh_tbl_lock;		/* for neigh tbl accesses */
	u32 neigh_tbl_next_index;
	unsigned long ageing_time;		/* FDB ageing, in jiffies */
};
193 
/* Per-port OF-DPA state, layered over the generic rocker_port. */
struct ofdpa_port {
	struct ofdpa *ofdpa;
	struct rocker_port *rocker_port;
	struct net_device *dev;
	u32 pport;			/* physical port number on the switch */
	struct net_device *bridge_dev;	/* upper device when enslaved, or NULL */
	__be16 internal_vlan_id;	/* VLAN used for untagged traffic */
	int stp_state;
	u32 brport_flags;
	unsigned long ageing_time;
	bool ctrls[OFDPA_CTRL_MAX];	/* which ctrl ACLs are installed */
	unsigned long vlan_bitmap[OFDPA_VLAN_BITMAP_LEN];
};
207 
/* Well-known MAC addresses and masks used when building ctrl ACL and
 * bridging flow entries.  ll_mac/ll_mask cover the 802.1D link-local
 * multicast block 01:80:c2:00:00:0x; ipv4_mcast/ipv6_mcast are the
 * standard IPv4 (01:00:5e) and IPv6 (33:33) multicast MAC prefixes.
 */
static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
217 
218 /* Rocker priority levels for flow table entries.  Higher
219  * priority match takes precedence over lower priority match.
220  */
221 
enum {
	/* Values repeat across tables on purpose: priorities only compete
	 * between entries of the same flow table.
	 */
	OFDPA_PRIORITY_UNKNOWN = 0,
	OFDPA_PRIORITY_IG_PORT = 1,
	OFDPA_PRIORITY_VLAN = 1,
	OFDPA_PRIORITY_TERM_MAC_UCAST = 0,
	OFDPA_PRIORITY_TERM_MAC_MCAST = 1,
	OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
	OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
	OFDPA_PRIORITY_BRIDGING_VLAN = 3,
	OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
	OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
	OFDPA_PRIORITY_BRIDGING_TENANT = 3,
	OFDPA_PRIORITY_ACL_CTRL = 3,
	OFDPA_PRIORITY_ACL_NORMAL = 2,
	OFDPA_PRIORITY_ACL_DFLT = 1,
};
238 
239 static bool ofdpa_vlan_id_is_internal(__be16 vlan_id)
240 {
241 	u16 start = OFDPA_INTERNAL_VLAN_ID_BASE;
242 	u16 end = 0xffe;
243 	u16 _vlan_id = ntohs(vlan_id);
244 
245 	return (_vlan_id >= start && _vlan_id <= end);
246 }
247 
248 static __be16 ofdpa_port_vid_to_vlan(const struct ofdpa_port *ofdpa_port,
249 				     u16 vid, bool *pop_vlan)
250 {
251 	__be16 vlan_id;
252 
253 	if (pop_vlan)
254 		*pop_vlan = false;
255 	vlan_id = htons(vid);
256 	if (!vlan_id) {
257 		vlan_id = ofdpa_port->internal_vlan_id;
258 		if (pop_vlan)
259 			*pop_vlan = true;
260 	}
261 
262 	return vlan_id;
263 }
264 
265 static u16 ofdpa_port_vlan_to_vid(const struct ofdpa_port *ofdpa_port,
266 				  __be16 vlan_id)
267 {
268 	if (ofdpa_vlan_id_is_internal(vlan_id))
269 		return 0;
270 
271 	return ntohs(vlan_id);
272 }
273 
/* True when the port is enslaved to an upper device whose rtnl_link_ops
 * kind string equals @kind (e.g. "bridge" or "openvswitch").
 * NOTE(review): assumes bridge_dev->rtnl_link_ops is non-NULL whenever
 * bridge_dev is set - confirm this holds for every upper device type
 * that can appear here.
 */
static bool ofdpa_port_is_slave(const struct ofdpa_port *ofdpa_port,
				const char *kind)
{
	return ofdpa_port->bridge_dev &&
		!strcmp(ofdpa_port->bridge_dev->rtnl_link_ops->kind, kind);
}
280 
/* True when the port is enslaved to a Linux bridge. */
static bool ofdpa_port_is_bridged(const struct ofdpa_port *ofdpa_port)
{
	return ofdpa_port_is_slave(ofdpa_port, "bridge");
}
285 
/* True when the port is enslaved to an Open vSwitch datapath. */
static bool ofdpa_port_is_ovsed(const struct ofdpa_port *ofdpa_port)
{
	return ofdpa_port_is_slave(ofdpa_port, "openvswitch");
}
290 
291 #define OFDPA_OP_FLAG_REMOVE		BIT(0)
292 #define OFDPA_OP_FLAG_NOWAIT		BIT(1)
293 #define OFDPA_OP_FLAG_LEARNED		BIT(2)
294 #define OFDPA_OP_FLAG_REFRESH		BIT(3)
295 
296 static bool ofdpa_flags_nowait(int flags)
297 {
298 	return flags & OFDPA_OP_FLAG_NOWAIT;
299 }
300 
/* Allocate @size bytes of zeroed memory, honouring the switchdev
 * prepare/commit transaction model.  A switchdev_trans_item header is
 * prepended internally; the caller receives a pointer to the payload.
 * Returns NULL on allocation failure (or if nothing is queued to
 * dequeue during the commit phase).  GFP_ATOMIC is used when the
 * caller passed OFDPA_OP_FLAG_NOWAIT.
 */
static void *__ofdpa_mem_alloc(struct switchdev_trans *trans, int flags,
			       size_t size)
{
	struct switchdev_trans_item *elem = NULL;
	gfp_t gfp_flags = (flags & OFDPA_OP_FLAG_NOWAIT) ?
			  GFP_ATOMIC : GFP_KERNEL;

	/* If in transaction prepare phase, allocate the memory
	 * and enqueue it on a transaction.  If in transaction
	 * commit phase, dequeue the memory from the transaction
	 * rather than re-allocating the memory.  The idea is the
	 * driver code paths for prepare and commit are identical
	 * so the memory allocated in the prepare phase is the
	 * memory used in the commit phase.
	 */

	if (!trans) {
		/* No transaction: plain allocation. */
		elem = kzalloc(size + sizeof(*elem), gfp_flags);
	} else if (switchdev_trans_ph_prepare(trans)) {
		elem = kzalloc(size + sizeof(*elem), gfp_flags);
		if (!elem)
			return NULL;
		switchdev_trans_item_enqueue(trans, elem, kfree, elem);
	} else {
		elem = switchdev_trans_item_dequeue(trans);
	}

	/* elem + 1 skips the trans item header to reach the payload. */
	return elem ? elem + 1 : NULL;
}
330 
/* Transaction-aware kzalloc(); see __ofdpa_mem_alloc(). */
static void *ofdpa_kzalloc(struct switchdev_trans *trans, int flags,
			   size_t size)
{
	return __ofdpa_mem_alloc(trans, flags, size);
}
336 
337 static void *ofdpa_kcalloc(struct switchdev_trans *trans, int flags,
338 			   size_t n, size_t size)
339 {
340 	return __ofdpa_mem_alloc(trans, flags, n * size);
341 }
342 
/* Transaction-aware free for memory obtained from __ofdpa_mem_alloc().
 * @mem is the payload pointer; the real allocation starts one
 * switchdev_trans_item header earlier.
 */
static void ofdpa_kfree(struct switchdev_trans *trans, const void *mem)
{
	struct switchdev_trans_item *elem;

	/* Frees are ignored if in transaction prepare phase.  The
	 * memory remains on the per-port list until freed in the
	 * commit phase.
	 */

	if (switchdev_trans_ph_prepare(trans))
		return;

	/* Step back over the trans item header prepended at allocation. */
	elem = (struct switchdev_trans_item *) mem - 1;
	kfree(elem);
}
358 
359 /*************************************************************
360  * Flow, group, FDB, internal VLAN and neigh command prepares
361  *************************************************************/
362 
363 static int
364 ofdpa_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
365 			       const struct ofdpa_flow_tbl_entry *entry)
366 {
367 	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
368 			       entry->key.ig_port.in_pport))
369 		return -EMSGSIZE;
370 	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
371 			       entry->key.ig_port.in_pport_mask))
372 		return -EMSGSIZE;
373 	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
374 			       entry->key.ig_port.goto_tbl))
375 		return -EMSGSIZE;
376 
377 	return 0;
378 }
379 
380 static int
381 ofdpa_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
382 			    const struct ofdpa_flow_tbl_entry *entry)
383 {
384 	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
385 			       entry->key.vlan.in_pport))
386 		return -EMSGSIZE;
387 	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
388 				entry->key.vlan.vlan_id))
389 		return -EMSGSIZE;
390 	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
391 				entry->key.vlan.vlan_id_mask))
392 		return -EMSGSIZE;
393 	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
394 			       entry->key.vlan.goto_tbl))
395 		return -EMSGSIZE;
396 	if (entry->key.vlan.untagged &&
397 	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
398 				entry->key.vlan.new_vlan_id))
399 		return -EMSGSIZE;
400 
401 	return 0;
402 }
403 
/* Append the termination-MAC table match/action TLVs for @entry.
 * COPY_CPU_ACTION is only emitted when set.  Returns -EMSGSIZE when
 * the descriptor runs out of room.
 */
static int
ofdpa_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
				const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.term_mac.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.term_mac.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.term_mac.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.term_mac.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.term_mac.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.term_mac.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.term_mac.vlan_id_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.term_mac.goto_tbl))
		return -EMSGSIZE;
	if (entry->key.term_mac.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.term_mac.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}
439 
440 static int
441 ofdpa_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
442 				     const struct ofdpa_flow_tbl_entry *entry)
443 {
444 	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
445 				entry->key.ucast_routing.eth_type))
446 		return -EMSGSIZE;
447 	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
448 				entry->key.ucast_routing.dst4))
449 		return -EMSGSIZE;
450 	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
451 				entry->key.ucast_routing.dst4_mask))
452 		return -EMSGSIZE;
453 	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
454 			       entry->key.ucast_routing.goto_tbl))
455 		return -EMSGSIZE;
456 	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
457 			       entry->key.ucast_routing.group_id))
458 		return -EMSGSIZE;
459 
460 	return 0;
461 }
462 
/* Append the bridging table match/action TLVs for @entry.  Optional
 * fields (dst MAC, mask, VLAN, tunnel, copy-to-cpu) are emitted only
 * when present/non-zero.  Returns -EMSGSIZE when the descriptor runs
 * out of room.
 */
static int
ofdpa_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
			      const struct ofdpa_flow_tbl_entry *entry)
{
	if (entry->key.bridge.has_eth_dst &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.bridge.eth_dst))
		return -EMSGSIZE;
	if (entry->key.bridge.has_eth_dst_mask &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.bridge.eth_dst_mask))
		return -EMSGSIZE;
	if (entry->key.bridge.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.bridge.vlan_id))
		return -EMSGSIZE;
	if (entry->key.bridge.tunnel_id &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
			       entry->key.bridge.tunnel_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.bridge.goto_tbl))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.bridge.group_id))
		return -EMSGSIZE;
	if (entry->key.bridge.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.bridge.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}
496 
/* Append the ACL policy table match/action TLVs for @entry.  IP-header
 * matches (proto, DSCP, ECN) are only emitted for IPv4/IPv6 ethertypes.
 * Returns -EMSGSIZE when the descriptor runs out of room.
 */
static int
ofdpa_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
			   const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.acl.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.acl.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->key.acl.eth_src))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_src_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.acl.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.acl.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.acl.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.acl.vlan_id_mask))
		return -EMSGSIZE;

	switch (ntohs(entry->key.acl.eth_type)) {
	case ETH_P_IP:
	case ETH_P_IPV6:
		/* ip_tos carries DSCP in bits 0-5 and ECN in bits 6-7;
		 * split it into the two separate TLVs the device expects.
		 */
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
				      entry->key.acl.ip_proto))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
				      entry->key.acl.ip_proto_mask))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
				      entry->key.acl.ip_tos & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
				      entry->key.acl.ip_tos_mask & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
				      (entry->key.acl.ip_tos & 0xc0) >> 6))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_ECN_MASK,
				      (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
			return -EMSGSIZE;
		break;
	}

	if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.acl.group_id))
		return -EMSGSIZE;

	return 0;
}
563 
/* rocker_cmd_exec() prepare callback: build a flow add/mod command
 * descriptor from the ofdpa_flow_tbl_entry passed in @priv.  Common
 * fields (table id, priority, hardtime, cookie) are emitted first,
 * then the per-table TLVs.  Returns -EMSGSIZE on descriptor overflow
 * or -ENOTSUPP for an unknown table id.
 * NOTE(review): error paths return with the cmd_info nest left open -
 * presumably the whole descriptor is abandoned on error; confirm
 * against rocker_cmd_exec()'s error handling.
 */
static int ofdpa_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	const struct ofdpa_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
			       entry->key.tbl_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
			       entry->key.priority))
		return -EMSGSIZE;
	/* hardtime 0: the entry does not expire on its own */
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
		return -EMSGSIZE;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
			       entry->cookie))
		return -EMSGSIZE;

	switch (entry->key.tbl_id) {
	case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
		err = ofdpa_cmd_flow_tbl_add_ig_port(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_VLAN:
		err = ofdpa_cmd_flow_tbl_add_vlan(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
		err = ofdpa_cmd_flow_tbl_add_term_mac(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
		err = ofdpa_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
		err = ofdpa_cmd_flow_tbl_add_bridge(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
		err = ofdpa_cmd_flow_tbl_add_acl(desc_info, entry);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	if (err)
		return err;

	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
620 
621 static int ofdpa_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
622 				  struct rocker_desc_info *desc_info,
623 				  void *priv)
624 {
625 	const struct ofdpa_flow_tbl_entry *entry = priv;
626 	struct rocker_tlv *cmd_info;
627 
628 	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
629 		return -EMSGSIZE;
630 	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
631 	if (!cmd_info)
632 		return -EMSGSIZE;
633 	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
634 			       entry->cookie))
635 		return -EMSGSIZE;
636 	rocker_tlv_nest_end(desc_info, cmd_info);
637 
638 	return 0;
639 }
640 
641 static int
642 ofdpa_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
643 				     struct ofdpa_group_tbl_entry *entry)
644 {
645 	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
646 			       ROCKER_GROUP_PORT_GET(entry->group_id)))
647 		return -EMSGSIZE;
648 	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
649 			      entry->l2_interface.pop_vlan))
650 		return -EMSGSIZE;
651 
652 	return 0;
653 }
654 
/* Append the L2 rewrite group TLVs: the lower-level group to chain to,
 * plus optional src/dst MAC and VLAN rewrites (emitted only when set).
 */
static int
ofdpa_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
				   const struct ofdpa_group_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l2_rewrite.group_id))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l2_rewrite.eth_src))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l2_rewrite.eth_dst))
		return -EMSGSIZE;
	if (entry->l2_rewrite.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l2_rewrite.vlan_id))
		return -EMSGSIZE;

	return 0;
}
677 
678 static int
679 ofdpa_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
680 				  const struct ofdpa_group_tbl_entry *entry)
681 {
682 	int i;
683 	struct rocker_tlv *group_ids;
684 
685 	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
686 			       entry->group_count))
687 		return -EMSGSIZE;
688 
689 	group_ids = rocker_tlv_nest_start(desc_info,
690 					  ROCKER_TLV_OF_DPA_GROUP_IDS);
691 	if (!group_ids)
692 		return -EMSGSIZE;
693 
694 	for (i = 0; i < entry->group_count; i++)
695 		/* Note TLV array is 1-based */
696 		if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
697 			return -EMSGSIZE;
698 
699 	rocker_tlv_nest_end(desc_info, group_ids);
700 
701 	return 0;
702 }
703 
/* Append the L3 unicast group TLVs: optional src/dst MAC and VLAN
 * rewrites, mandatory TTL-check flag and the lower-level group id.
 */
static int
ofdpa_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
				   const struct ofdpa_group_tbl_entry *entry)
{
	if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_src))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_dst))
		return -EMSGSIZE;
	if (entry->l3_unicast.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l3_unicast.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
			      entry->l3_unicast.ttl_check))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l3_unicast.group_id))
		return -EMSGSIZE;

	return 0;
}
729 
/* rocker_cmd_exec() prepare callback: build a group add/mod command
 * descriptor from the ofdpa_group_tbl_entry in @priv, dispatching on
 * the group type encoded in group_id.  Returns -EMSGSIZE on descriptor
 * overflow or -ENOTSUPP for an unknown group type.
 * NOTE(review): like the flow variant, error paths leave the cmd_info
 * nest open - presumably the descriptor is abandoned on error.
 */
static int ofdpa_cmd_group_tbl_add(const struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
				   void *priv)
{
	struct ofdpa_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;

	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->group_id))
		return -EMSGSIZE;

	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
	case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
		err = ofdpa_cmd_group_tbl_add_l2_interface(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
		err = ofdpa_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
		err = ofdpa_cmd_group_tbl_add_group_ids(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
		err = ofdpa_cmd_group_tbl_add_l3_unicast(desc_info, entry);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	if (err)
		return err;

	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
774 
775 static int ofdpa_cmd_group_tbl_del(const struct rocker_port *rocker_port,
776 				   struct rocker_desc_info *desc_info,
777 				   void *priv)
778 {
779 	const struct ofdpa_group_tbl_entry *entry = priv;
780 	struct rocker_tlv *cmd_info;
781 
782 	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
783 		return -EMSGSIZE;
784 	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
785 	if (!cmd_info)
786 		return -EMSGSIZE;
787 	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
788 			       entry->group_id))
789 		return -EMSGSIZE;
790 	rocker_tlv_nest_end(desc_info, cmd_info);
791 
792 	return 0;
793 }
794 
795 /***************************************************
796  * Flow, group, FDB, internal VLAN and neigh tables
797  ***************************************************/
798 
/* Look up a flow entry matching @match's key.  Walks the hash bucket
 * selected by match->key_crc32 and confirms with memcmp over key_len
 * bytes (whole key when key_len is 0).  Caller must hold flow_tbl_lock.
 */
static struct ofdpa_flow_tbl_entry *
ofdpa_flow_tbl_find(const struct ofdpa *ofdpa,
		    const struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);

	hash_for_each_possible(ofdpa->flow_tbl, found,
			       entry, match->key_crc32) {
		if (memcmp(&found->key, &match->key, key_len) == 0)
			return found;
	}

	return NULL;
}
814 
/* Add or modify a flow entry.  Takes ownership of @match: if an entry
 * with the same key already exists, the old entry is replaced by @match
 * (reusing its cookie, sent as a MOD); otherwise @match becomes a new
 * entry with a fresh cookie (sent as an ADD).  In the transaction
 * prepare phase the hash table is left untouched and no command is
 * issued.
 */
static int ofdpa_flow_tbl_add(struct ofdpa_port *ofdpa_port,
			      struct switchdev_trans *trans, int flags,
			      struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags);

	found = ofdpa_flow_tbl_find(ofdpa, match);

	if (found) {
		/* Same key already installed: keep its cookie, free the
		 * old entry and send a MOD for the new one.
		 */
		match->cookie = found->cookie;
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		ofdpa_kfree(trans, found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
	} else {
		found = match;
		found->cookie = ofdpa->flow_tbl_next_cookie++;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
	}

	if (!switchdev_trans_ph_prepare(trans))
		hash_add(ofdpa->flow_tbl, &found->entry, found->key_crc32);

	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);

	if (!switchdev_trans_ph_prepare(trans))
		return rocker_cmd_exec(ofdpa_port->rocker_port,
				       ofdpa_flags_nowait(flags),
				       ofdpa_cmd_flow_tbl_add,
				       found, NULL, NULL);
	return 0;
}
855 
/* Delete the flow entry matching @match's key, if installed.  Takes
 * ownership of @match (always freed).  When a matching entry is found
 * it is unhashed, a DEL command is issued for it, and it is freed.
 * Missing entries are not an error (returns 0).
 */
static int ofdpa_flow_tbl_del(struct ofdpa_port *ofdpa_port,
			      struct switchdev_trans *trans, int flags,
			      struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;
	int err = 0;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags);

	found = ofdpa_flow_tbl_find(ofdpa, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
	}

	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);

	/* @match was only needed for the lookup */
	ofdpa_kfree(trans, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			err = rocker_cmd_exec(ofdpa_port->rocker_port,
					      ofdpa_flags_nowait(flags),
					      ofdpa_cmd_flow_tbl_del,
					      found, NULL, NULL);
		ofdpa_kfree(trans, found);
	}

	return err;
}
893 
894 static int ofdpa_flow_tbl_do(struct ofdpa_port *ofdpa_port,
895 			     struct switchdev_trans *trans, int flags,
896 			     struct ofdpa_flow_tbl_entry *entry)
897 {
898 	if (flags & OFDPA_OP_FLAG_REMOVE)
899 		return ofdpa_flow_tbl_del(ofdpa_port, trans, flags, entry);
900 	else
901 		return ofdpa_flow_tbl_add(ofdpa_port, trans, flags, entry);
902 }
903 
904 static int ofdpa_flow_tbl_ig_port(struct ofdpa_port *ofdpa_port,
905 				  struct switchdev_trans *trans, int flags,
906 				  u32 in_pport, u32 in_pport_mask,
907 				  enum rocker_of_dpa_table_id goto_tbl)
908 {
909 	struct ofdpa_flow_tbl_entry *entry;
910 
911 	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
912 	if (!entry)
913 		return -ENOMEM;
914 
915 	entry->key.priority = OFDPA_PRIORITY_IG_PORT;
916 	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
917 	entry->key.ig_port.in_pport = in_pport;
918 	entry->key.ig_port.in_pport_mask = in_pport_mask;
919 	entry->key.ig_port.goto_tbl = goto_tbl;
920 
921 	return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
922 }
923 
924 static int ofdpa_flow_tbl_vlan(struct ofdpa_port *ofdpa_port,
925 			       struct switchdev_trans *trans, int flags,
926 			       u32 in_pport, __be16 vlan_id,
927 			       __be16 vlan_id_mask,
928 			       enum rocker_of_dpa_table_id goto_tbl,
929 			       bool untagged, __be16 new_vlan_id)
930 {
931 	struct ofdpa_flow_tbl_entry *entry;
932 
933 	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
934 	if (!entry)
935 		return -ENOMEM;
936 
937 	entry->key.priority = OFDPA_PRIORITY_VLAN;
938 	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
939 	entry->key.vlan.in_pport = in_pport;
940 	entry->key.vlan.vlan_id = vlan_id;
941 	entry->key.vlan.vlan_id_mask = vlan_id_mask;
942 	entry->key.vlan.goto_tbl = goto_tbl;
943 
944 	entry->key.vlan.untagged = untagged;
945 	entry->key.vlan.new_vlan_id = new_vlan_id;
946 
947 	return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
948 }
949 
/* Install (or remove, per @flags) a termination-MAC table entry.
 * Multicast destinations go to the multicast routing table at mcast
 * priority; unicast destinations go to the unicast routing table.
 * Note @flags is the last parameter here, unlike sibling helpers.
 */
static int ofdpa_flow_tbl_term_mac(struct ofdpa_port *ofdpa_port,
				   struct switchdev_trans *trans,
				   u32 in_pport, u32 in_pport_mask,
				   __be16 eth_type, const u8 *eth_dst,
				   const u8 *eth_dst_mask, __be16 vlan_id,
				   __be16 vlan_id_mask, bool copy_to_cpu,
				   int flags)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	if (is_multicast_ether_addr(eth_dst)) {
		entry->key.priority = OFDPA_PRIORITY_TERM_MAC_MCAST;
		entry->key.term_mac.goto_tbl =
			 ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
	} else {
		entry->key.priority = OFDPA_PRIORITY_TERM_MAC_UCAST;
		entry->key.term_mac.goto_tbl =
			 ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	}

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	entry->key.term_mac.in_pport = in_pport;
	entry->key.term_mac.in_pport_mask = in_pport_mask;
	entry->key.term_mac.eth_type = eth_type;
	ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
	ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
	entry->key.term_mac.vlan_id = vlan_id;
	entry->key.term_mac.vlan_id_mask = vlan_id_mask;
	entry->key.term_mac.copy_to_cpu = copy_to_cpu;

	return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
}
986 
987 static int ofdpa_flow_tbl_bridge(struct ofdpa_port *ofdpa_port,
988 				 struct switchdev_trans *trans, int flags,
989 				 const u8 *eth_dst, const u8 *eth_dst_mask,
990 				 __be16 vlan_id, u32 tunnel_id,
991 				 enum rocker_of_dpa_table_id goto_tbl,
992 				 u32 group_id, bool copy_to_cpu)
993 {
994 	struct ofdpa_flow_tbl_entry *entry;
995 	u32 priority;
996 	bool vlan_bridging = !!vlan_id;
997 	bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
998 	bool wild = false;
999 
1000 	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
1001 	if (!entry)
1002 		return -ENOMEM;
1003 
1004 	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
1005 
1006 	if (eth_dst) {
1007 		entry->key.bridge.has_eth_dst = 1;
1008 		ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
1009 	}
1010 	if (eth_dst_mask) {
1011 		entry->key.bridge.has_eth_dst_mask = 1;
1012 		ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
1013 		if (!ether_addr_equal(eth_dst_mask, ff_mac))
1014 			wild = true;
1015 	}
1016 
1017 	priority = OFDPA_PRIORITY_UNKNOWN;
1018 	if (vlan_bridging && dflt && wild)
1019 		priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
1020 	else if (vlan_bridging && dflt && !wild)
1021 		priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
1022 	else if (vlan_bridging && !dflt)
1023 		priority = OFDPA_PRIORITY_BRIDGING_VLAN;
1024 	else if (!vlan_bridging && dflt && wild)
1025 		priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
1026 	else if (!vlan_bridging && dflt && !wild)
1027 		priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
1028 	else if (!vlan_bridging && !dflt)
1029 		priority = OFDPA_PRIORITY_BRIDGING_TENANT;
1030 
1031 	entry->key.priority = priority;
1032 	entry->key.bridge.vlan_id = vlan_id;
1033 	entry->key.bridge.tunnel_id = tunnel_id;
1034 	entry->key.bridge.goto_tbl = goto_tbl;
1035 	entry->key.bridge.group_id = group_id;
1036 	entry->key.bridge.copy_to_cpu = copy_to_cpu;
1037 
1038 	return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
1039 }
1040 
/* Install (or remove, per flags) a unicast IPv4 routing entry matching
 * (eth_type, dst under dst_mask), forwarding via the given L3 group and
 * then on to goto_tbl.
 */
static int ofdpa_flow_tbl_ucast4_routing(struct ofdpa_port *ofdpa_port,
					 struct switchdev_trans *trans,
					 __be16 eth_type, __be32 dst,
					 __be32 dst_mask, u32 priority,
					 enum rocker_of_dpa_table_id goto_tbl,
					 u32 group_id, int flags)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	entry->key.priority = priority;
	entry->key.ucast_routing.eth_type = eth_type;
	entry->key.ucast_routing.dst4 = dst;
	entry->key.ucast_routing.dst4_mask = dst_mask;
	entry->key.ucast_routing.goto_tbl = goto_tbl;
	entry->key.ucast_routing.group_id = group_id;
	/* key_len stops right before group_id, so the action fields are
	 * excluded from key comparison — presumably so a route whose
	 * nexthop group changes still matches its existing table entry
	 * (key_len consumer is outside this view; confirm there).
	 */
	entry->key_len = offsetof(struct ofdpa_flow_tbl_key,
				  ucast_routing.group_id);

	return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
}
1066 
1067 static int ofdpa_flow_tbl_acl(struct ofdpa_port *ofdpa_port,
1068 			      struct switchdev_trans *trans, int flags,
1069 			      u32 in_pport, u32 in_pport_mask,
1070 			      const u8 *eth_src, const u8 *eth_src_mask,
1071 			      const u8 *eth_dst, const u8 *eth_dst_mask,
1072 			      __be16 eth_type, __be16 vlan_id,
1073 			      __be16 vlan_id_mask, u8 ip_proto,
1074 			      u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
1075 			      u32 group_id)
1076 {
1077 	u32 priority;
1078 	struct ofdpa_flow_tbl_entry *entry;
1079 
1080 	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
1081 	if (!entry)
1082 		return -ENOMEM;
1083 
1084 	priority = OFDPA_PRIORITY_ACL_NORMAL;
1085 	if (eth_dst && eth_dst_mask) {
1086 		if (ether_addr_equal(eth_dst_mask, mcast_mac))
1087 			priority = OFDPA_PRIORITY_ACL_DFLT;
1088 		else if (is_link_local_ether_addr(eth_dst))
1089 			priority = OFDPA_PRIORITY_ACL_CTRL;
1090 	}
1091 
1092 	entry->key.priority = priority;
1093 	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
1094 	entry->key.acl.in_pport = in_pport;
1095 	entry->key.acl.in_pport_mask = in_pport_mask;
1096 
1097 	if (eth_src)
1098 		ether_addr_copy(entry->key.acl.eth_src, eth_src);
1099 	if (eth_src_mask)
1100 		ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
1101 	if (eth_dst)
1102 		ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
1103 	if (eth_dst_mask)
1104 		ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);
1105 
1106 	entry->key.acl.eth_type = eth_type;
1107 	entry->key.acl.vlan_id = vlan_id;
1108 	entry->key.acl.vlan_id_mask = vlan_id_mask;
1109 	entry->key.acl.ip_proto = ip_proto;
1110 	entry->key.acl.ip_proto_mask = ip_proto_mask;
1111 	entry->key.acl.ip_tos = ip_tos;
1112 	entry->key.acl.ip_tos_mask = ip_tos_mask;
1113 	entry->key.acl.group_id = group_id;
1114 
1115 	return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
1116 }
1117 
1118 static struct ofdpa_group_tbl_entry *
1119 ofdpa_group_tbl_find(const struct ofdpa *ofdpa,
1120 		     const struct ofdpa_group_tbl_entry *match)
1121 {
1122 	struct ofdpa_group_tbl_entry *found;
1123 
1124 	hash_for_each_possible(ofdpa->group_tbl, found,
1125 			       entry, match->group_id) {
1126 		if (found->group_id == match->group_id)
1127 			return found;
1128 	}
1129 
1130 	return NULL;
1131 }
1132 
1133 static void ofdpa_group_tbl_entry_free(struct switchdev_trans *trans,
1134 				       struct ofdpa_group_tbl_entry *entry)
1135 {
1136 	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
1137 	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
1138 	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
1139 		ofdpa_kfree(trans, entry->group_ids);
1140 		break;
1141 	default:
1142 		break;
1143 	}
1144 	ofdpa_kfree(trans, entry);
1145 }
1146 
/* Add or replace a group table entry and push it to the device.
 *
 * Takes ownership of @match.  If an entry with the same group_id
 * already exists it is freed and @match takes its place, and the
 * command issued becomes a MOD instead of an ADD.  During the
 * switchdev prepare phase the hash table is left untouched and no
 * device command is sent.
 */
static int ofdpa_group_tbl_add(struct ofdpa_port *ofdpa_port,
			       struct switchdev_trans *trans, int flags,
			       struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_group_tbl_entry *found;
	unsigned long lock_flags;

	spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags);

	found = ofdpa_group_tbl_find(ofdpa, match);

	if (found) {
		/* replace: drop the old entry, install @match as a MOD */
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		ofdpa_group_tbl_entry_free(trans, found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
	} else {
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
	}

	if (!switchdev_trans_ph_prepare(trans))
		hash_add(ofdpa->group_tbl, &found->entry, found->group_id);

	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);

	if (!switchdev_trans_ph_prepare(trans))
		return rocker_cmd_exec(ofdpa_port->rocker_port,
				       ofdpa_flags_nowait(flags),
				       ofdpa_cmd_group_tbl_add,
				       found, NULL, NULL);
	return 0;
}
1182 
/* Remove a group table entry and delete it on the device.
 *
 * Always consumes @match (it is only used as a lookup key).  If a
 * matching entry exists it is unhashed (outside the prepare phase),
 * a DEL command is sent to the device, and the stored entry is
 * freed.  Returns 0 when no matching entry was found.
 */
static int ofdpa_group_tbl_del(struct ofdpa_port *ofdpa_port,
			       struct switchdev_trans *trans, int flags,
			       struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_group_tbl_entry *found;
	unsigned long lock_flags;
	int err = 0;

	spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags);

	found = ofdpa_group_tbl_find(ofdpa, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
	}

	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);

	/* the key copy is no longer needed either way */
	ofdpa_group_tbl_entry_free(trans, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			err = rocker_cmd_exec(ofdpa_port->rocker_port,
					      ofdpa_flags_nowait(flags),
					      ofdpa_cmd_group_tbl_del,
					      found, NULL, NULL);
		ofdpa_group_tbl_entry_free(trans, found);
	}

	return err;
}
1217 
1218 static int ofdpa_group_tbl_do(struct ofdpa_port *ofdpa_port,
1219 			      struct switchdev_trans *trans, int flags,
1220 			      struct ofdpa_group_tbl_entry *entry)
1221 {
1222 	if (flags & OFDPA_OP_FLAG_REMOVE)
1223 		return ofdpa_group_tbl_del(ofdpa_port, trans, flags, entry);
1224 	else
1225 		return ofdpa_group_tbl_add(ofdpa_port, trans, flags, entry);
1226 }
1227 
1228 static int ofdpa_group_l2_interface(struct ofdpa_port *ofdpa_port,
1229 				    struct switchdev_trans *trans, int flags,
1230 				    __be16 vlan_id, u32 out_pport,
1231 				    int pop_vlan)
1232 {
1233 	struct ofdpa_group_tbl_entry *entry;
1234 
1235 	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
1236 	if (!entry)
1237 		return -ENOMEM;
1238 
1239 	entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
1240 	entry->l2_interface.pop_vlan = pop_vlan;
1241 
1242 	return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry);
1243 }
1244 
1245 static int ofdpa_group_l2_fan_out(struct ofdpa_port *ofdpa_port,
1246 				  struct switchdev_trans *trans,
1247 				  int flags, u8 group_count,
1248 				  const u32 *group_ids, u32 group_id)
1249 {
1250 	struct ofdpa_group_tbl_entry *entry;
1251 
1252 	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
1253 	if (!entry)
1254 		return -ENOMEM;
1255 
1256 	entry->group_id = group_id;
1257 	entry->group_count = group_count;
1258 
1259 	entry->group_ids = ofdpa_kcalloc(trans, flags,
1260 					 group_count, sizeof(u32));
1261 	if (!entry->group_ids) {
1262 		ofdpa_kfree(trans, entry);
1263 		return -ENOMEM;
1264 	}
1265 	memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
1266 
1267 	return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry);
1268 }
1269 
1270 static int ofdpa_group_l2_flood(struct ofdpa_port *ofdpa_port,
1271 				struct switchdev_trans *trans, int flags,
1272 				__be16 vlan_id, u8 group_count,
1273 				const u32 *group_ids, u32 group_id)
1274 {
1275 	return ofdpa_group_l2_fan_out(ofdpa_port, trans, flags,
1276 				      group_count, group_ids,
1277 				      group_id);
1278 }
1279 
1280 static int ofdpa_group_l3_unicast(struct ofdpa_port *ofdpa_port,
1281 				  struct switchdev_trans *trans, int flags,
1282 				  u32 index, const u8 *src_mac, const u8 *dst_mac,
1283 				  __be16 vlan_id, bool ttl_check, u32 pport)
1284 {
1285 	struct ofdpa_group_tbl_entry *entry;
1286 
1287 	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
1288 	if (!entry)
1289 		return -ENOMEM;
1290 
1291 	entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
1292 	if (src_mac)
1293 		ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
1294 	if (dst_mac)
1295 		ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
1296 	entry->l3_unicast.vlan_id = vlan_id;
1297 	entry->l3_unicast.ttl_check = ttl_check;
1298 	entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
1299 
1300 	return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry);
1301 }
1302 
1303 static struct ofdpa_neigh_tbl_entry *
1304 ofdpa_neigh_tbl_find(const struct ofdpa *ofdpa, __be32 ip_addr)
1305 {
1306 	struct ofdpa_neigh_tbl_entry *found;
1307 
1308 	hash_for_each_possible(ofdpa->neigh_tbl, found,
1309 			       entry, be32_to_cpu(ip_addr))
1310 		if (found->ip_addr == ip_addr)
1311 			return found;
1312 
1313 	return NULL;
1314 }
1315 
1316 static void ofdpa_neigh_add(struct ofdpa *ofdpa,
1317 			    struct switchdev_trans *trans,
1318 			    struct ofdpa_neigh_tbl_entry *entry)
1319 {
1320 	if (!switchdev_trans_ph_commit(trans))
1321 		entry->index = ofdpa->neigh_tbl_next_index++;
1322 	if (switchdev_trans_ph_prepare(trans))
1323 		return;
1324 	entry->ref_count++;
1325 	hash_add(ofdpa->neigh_tbl, &entry->entry,
1326 		 be32_to_cpu(entry->ip_addr));
1327 }
1328 
/* Drop a reference on @entry; unhash and free it on the last put.
 * No-op during the switchdev prepare phase.  Caller holds
 * neigh_tbl_lock.
 */
static void ofdpa_neigh_del(struct switchdev_trans *trans,
			    struct ofdpa_neigh_tbl_entry *entry)
{
	if (switchdev_trans_ph_prepare(trans))
		return;
	if (--entry->ref_count == 0) {
		hash_del(&entry->entry);
		ofdpa_kfree(trans, entry);
	}
}
1339 
/* Update an existing neigh entry in place.  With @eth_dst given, the
 * MAC and ttl_check are rewritten (no refcount change); with NULL,
 * this is a pure refcount get, skipped during the prepare phase.
 * Caller holds neigh_tbl_lock.
 */
static void ofdpa_neigh_update(struct ofdpa_neigh_tbl_entry *entry,
			       struct switchdev_trans *trans,
			       const u8 *eth_dst, bool ttl_check)
{
	if (eth_dst) {
		ether_addr_copy(entry->eth_dst, eth_dst);
		entry->ttl_check = ttl_check;
	} else if (!switchdev_trans_ph_prepare(trans)) {
		entry->ref_count++;
	}
}
1351 
/* Add, update, or remove the hardware state for an IPv4 neighbour:
 * an L3 unicast group carrying the neighbour's MAC, plus (on add or
 * remove) a /32 unicast route pointing at that group.
 *
 * The scratch @entry is always allocated so the table state can be
 * snapshotted under the spinlock and the slow device commands issued
 * after it is dropped.  On add, @entry itself is inserted into the
 * table and must not be freed here; otherwise it is released at
 * err_out.
 */
static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
				 struct switchdev_trans *trans,
				 int flags, __be32 ip_addr, const u8 *eth_dst)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_neigh_tbl_entry *entry;
	struct ofdpa_neigh_tbl_entry *found;
	unsigned long lock_flags;
	__be16 eth_type = htons(ETH_P_IP);
	enum rocker_of_dpa_table_id goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	u32 priority = 0;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	int err = 0;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);

	found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);

	/* classify the request against current table state */
	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = ofdpa_port->dev;
		ether_addr_copy(entry->eth_dst, eth_dst);
		entry->ttl_check = true;
		ofdpa_neigh_add(ofdpa, trans, entry);
	} else if (removing) {
		/* snapshot before the ref drop possibly frees @found */
		memcpy(entry, found, sizeof(*entry));
		ofdpa_neigh_del(trans, found);
	} else if (updating) {
		ofdpa_neigh_update(found, trans, eth_dst, true);
		memcpy(entry, found, sizeof(*entry));
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);

	if (err)
		goto err_out;

	/* For each active neighbor, we have an L3 unicast group and
	 * a /32 route to the neighbor, which uses the L3 unicast
	 * group.  The L3 unicast group can also be referred to by
	 * other routes' nexthops.
	 */

	err = ofdpa_group_l3_unicast(ofdpa_port, trans, flags,
				     entry->index,
				     ofdpa_port->dev->dev_addr,
				     entry->eth_dst,
				     ofdpa_port->internal_vlan_id,
				     entry->ttl_check,
				     ofdpa_port->pport);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) L3 unicast group index %d\n",
			   err, entry->index);
		goto err_out;
	}

	if (adding || removing) {
		group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
		err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, trans,
						    eth_type, ip_addr,
						    inet_make_mask(32),
						    priority, goto_tbl,
						    group_id, flags);

		if (err)
			netdev_err(ofdpa_port->dev, "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
				   err, &entry->ip_addr, group_id);
	}

err_out:
	/* when adding, @entry now lives in the neigh table */
	if (!adding)
		ofdpa_kfree(trans, entry);

	return err;
}
1441 
/* Resolve @ip_addr on this port: if the kernel neighbour entry is
 * already valid, program the hardware with its MAC now; otherwise
 * trigger ARP and rely on a later neighbour event to install it.
 */
static int ofdpa_port_ipv4_resolve(struct ofdpa_port *ofdpa_port,
				   struct switchdev_trans *trans,
				   __be32 ip_addr)
{
	struct net_device *dev = ofdpa_port->dev;
	struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
	int err = 0;

	if (!n) {
		n = neigh_create(&arp_tbl, &ip_addr, dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
	}

	/* If the neigh is already resolved, then go ahead and
	 * install the entry, otherwise start the ARP process to
	 * resolve the neigh.
	 */

	if (n->nud_state & NUD_VALID)
		err = ofdpa_port_ipv4_neigh(ofdpa_port, trans, 0,
					    ip_addr, n->ha);
	else
		neigh_event_send(n, NULL);

	neigh_release(n);
	return err;
}
1470 
/* Take (or release, per flags) a nexthop reference on the neigh entry
 * for @ip_addr, returning the entry's L3 group index via @index.  If
 * the neighbour is not yet resolved to a MAC, kick off resolution.
 */
static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
			      struct switchdev_trans *trans, int flags,
			      __be32 ip_addr, u32 *index)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_neigh_tbl_entry *entry;
	struct ofdpa_neigh_tbl_entry *found;
	unsigned long lock_flags;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	bool resolved = true;
	int err = 0;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);

	found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);
	if (found)
		*index = found->index;

	/* classify the request against current table state */
	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		/* placeholder entry with no MAC yet; resolution follows */
		entry->ip_addr = ip_addr;
		entry->dev = ofdpa_port->dev;
		ofdpa_neigh_add(ofdpa, trans, entry);
		*index = entry->index;
		resolved = false;
	} else if (removing) {
		ofdpa_neigh_del(trans, found);
	} else if (updating) {
		/* NULL eth_dst: pure refcount get */
		ofdpa_neigh_update(found, trans, NULL, false);
		resolved = !is_zero_ether_addr(found->eth_dst);
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);

	/* when adding, @entry now lives in the neigh table */
	if (!adding)
		ofdpa_kfree(trans, entry);

	if (err)
		return err;

	/* Resolved means neigh ip_addr is resolved to neigh mac. */

	if (!resolved)
		err = ofdpa_port_ipv4_resolve(ofdpa_port, trans, ip_addr);

	return err;
}
1529 
1530 static struct ofdpa_port *ofdpa_port_get(const struct ofdpa *ofdpa,
1531 					 int port_index)
1532 {
1533 	struct rocker_port *rocker_port;
1534 
1535 	rocker_port = ofdpa->rocker->ports[port_index];
1536 	return rocker_port ? rocker_port->wpriv : NULL;
1537 }
1538 
/* Rebuild the L2 flood group for @vlan_id from the set of bridged
 * ports currently in that VLAN.  If no bridged port is in the VLAN,
 * nothing is programmed.
 */
static int ofdpa_port_vlan_flood_group(struct ofdpa_port *ofdpa_port,
				       struct switchdev_trans *trans,
				       int flags, __be16 vlan_id)
{
	struct ofdpa_port *p;
	const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	unsigned int port_count = ofdpa->rocker->port_count;
	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
	u32 *group_ids;
	u8 group_count = 0;
	int err = 0;
	int i;

	group_ids = ofdpa_kcalloc(trans, flags, port_count, sizeof(u32));
	if (!group_ids)
		return -ENOMEM;

	/* Adjust the flood group for this VLAN.  The flood group
	 * references an L2 interface group for each port in this
	 * VLAN.
	 */

	for (i = 0; i < port_count; i++) {
		p = ofdpa_port_get(ofdpa, i);
		if (!p)
			continue;
		if (!ofdpa_port_is_bridged(p))
			continue;
		if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
			group_ids[group_count++] =
				ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
		}
	}

	/* If there are no bridged ports in this VLAN, we're done */
	if (group_count == 0)
		goto no_ports_in_vlan;

	err = ofdpa_group_l2_flood(ofdpa_port, trans, flags, vlan_id,
				   group_count, group_ids, group_id);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);

no_ports_in_vlan:
	/* the member list was copied by the flood group; free our copy */
	ofdpa_kfree(trans, group_ids);
	return err;
}
1586 
1587 static int ofdpa_port_vlan_l2_groups(struct ofdpa_port *ofdpa_port,
1588 				     struct switchdev_trans *trans, int flags,
1589 				     __be16 vlan_id, bool pop_vlan)
1590 {
1591 	const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
1592 	unsigned int port_count = ofdpa->rocker->port_count;
1593 	struct ofdpa_port *p;
1594 	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
1595 	u32 out_pport;
1596 	int ref = 0;
1597 	int err;
1598 	int i;
1599 
1600 	/* An L2 interface group for this port in this VLAN, but
1601 	 * only when port STP state is LEARNING|FORWARDING.
1602 	 */
1603 
1604 	if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
1605 	    ofdpa_port->stp_state == BR_STATE_FORWARDING) {
1606 		out_pport = ofdpa_port->pport;
1607 		err = ofdpa_group_l2_interface(ofdpa_port, trans, flags,
1608 					       vlan_id, out_pport, pop_vlan);
1609 		if (err) {
1610 			netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
1611 				   err, out_pport);
1612 			return err;
1613 		}
1614 	}
1615 
1616 	/* An L2 interface group for this VLAN to CPU port.
1617 	 * Add when first port joins this VLAN and destroy when
1618 	 * last port leaves this VLAN.
1619 	 */
1620 
1621 	for (i = 0; i < port_count; i++) {
1622 		p = ofdpa_port_get(ofdpa, i);
1623 		if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
1624 			ref++;
1625 	}
1626 
1627 	if ((!adding || ref != 1) && (adding || ref != 0))
1628 		return 0;
1629 
1630 	out_pport = 0;
1631 	err = ofdpa_group_l2_interface(ofdpa_port, trans, flags,
1632 				       vlan_id, out_pport, pop_vlan);
1633 	if (err) {
1634 		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for CPU port\n", err);
1635 		return err;
1636 	}
1637 
1638 	return 0;
1639 }
1640 
/* Table of control-plane traffic classes and how each is trapped to
 * the CPU: via an ACL entry (acl), a bridging flood entry (bridge),
 * or a termination-MAC entry (term) — one mechanism per class.
 */
static struct ofdpa_ctrl {
	const u8 *eth_dst;		/* dst MAC to match, NULL for none */
	const u8 *eth_dst_mask;		/* mask applied to eth_dst */
	__be16 eth_type;		/* ethertype to match, 0 for any */
	bool acl;			/* trap via ACL policy table */
	bool bridge;			/* trap via bridging (flood) table */
	bool term;			/* trap via termination-MAC table */
	bool copy_to_cpu;		/* also deliver a copy to the CPU */
} ofdpa_ctrls[] = {
	[OFDPA_CTRL_LINK_LOCAL_MCAST] = {
		/* pass link local multicast pkts up to CPU for filtering */
		.eth_dst = ll_mac,
		.eth_dst_mask = ll_mask,
		.acl = true,
	},
	[OFDPA_CTRL_LOCAL_ARP] = {
		/* pass local ARP pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.eth_type = htons(ETH_P_ARP),
		.acl = true,
	},
	[OFDPA_CTRL_IPV4_MCAST] = {
		/* pass IPv4 mcast pkts up to CPU, RFC 1112 */
		.eth_dst = ipv4_mcast,
		.eth_dst_mask = ipv4_mask,
		.eth_type = htons(ETH_P_IP),
		.term  = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_IPV6_MCAST] = {
		/* pass IPv6 mcast pkts up to CPU, RFC 2464 */
		.eth_dst = ipv6_mcast,
		.eth_dst_mask = ipv6_mask,
		.eth_type = htons(ETH_P_IPV6),
		.term  = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_DFLT_BRIDGING] = {
		/* flood any pkts on vlan */
		.bridge = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_DFLT_OVS] = {
		/* pass all pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.acl = true,
	},
};
1691 
1692 static int ofdpa_port_ctrl_vlan_acl(struct ofdpa_port *ofdpa_port,
1693 				    struct switchdev_trans *trans, int flags,
1694 				    const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
1695 {
1696 	u32 in_pport = ofdpa_port->pport;
1697 	u32 in_pport_mask = 0xffffffff;
1698 	u32 out_pport = 0;
1699 	const u8 *eth_src = NULL;
1700 	const u8 *eth_src_mask = NULL;
1701 	__be16 vlan_id_mask = htons(0xffff);
1702 	u8 ip_proto = 0;
1703 	u8 ip_proto_mask = 0;
1704 	u8 ip_tos = 0;
1705 	u8 ip_tos_mask = 0;
1706 	u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
1707 	int err;
1708 
1709 	err = ofdpa_flow_tbl_acl(ofdpa_port, trans, flags,
1710 				 in_pport, in_pport_mask,
1711 				 eth_src, eth_src_mask,
1712 				 ctrl->eth_dst, ctrl->eth_dst_mask,
1713 				 ctrl->eth_type,
1714 				 vlan_id, vlan_id_mask,
1715 				 ip_proto, ip_proto_mask,
1716 				 ip_tos, ip_tos_mask,
1717 				 group_id);
1718 
1719 	if (err)
1720 		netdev_err(ofdpa_port->dev, "Error (%d) ctrl ACL\n", err);
1721 
1722 	return err;
1723 }
1724 
1725 static int ofdpa_port_ctrl_vlan_bridge(struct ofdpa_port *ofdpa_port,
1726 				       struct switchdev_trans *trans,
1727 				       int flags,
1728 				       const struct ofdpa_ctrl *ctrl,
1729 				       __be16 vlan_id)
1730 {
1731 	enum rocker_of_dpa_table_id goto_tbl =
1732 			ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
1733 	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
1734 	u32 tunnel_id = 0;
1735 	int err;
1736 
1737 	if (!ofdpa_port_is_bridged(ofdpa_port))
1738 		return 0;
1739 
1740 	err = ofdpa_flow_tbl_bridge(ofdpa_port, trans, flags,
1741 				    ctrl->eth_dst, ctrl->eth_dst_mask,
1742 				    vlan_id, tunnel_id,
1743 				    goto_tbl, group_id, ctrl->copy_to_cpu);
1744 
1745 	if (err)
1746 		netdev_err(ofdpa_port->dev, "Error (%d) ctrl FLOOD\n", err);
1747 
1748 	return err;
1749 }
1750 
1751 static int ofdpa_port_ctrl_vlan_term(struct ofdpa_port *ofdpa_port,
1752 				     struct switchdev_trans *trans, int flags,
1753 				     const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
1754 {
1755 	u32 in_pport_mask = 0xffffffff;
1756 	__be16 vlan_id_mask = htons(0xffff);
1757 	int err;
1758 
1759 	if (ntohs(vlan_id) == 0)
1760 		vlan_id = ofdpa_port->internal_vlan_id;
1761 
1762 	err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans,
1763 				      ofdpa_port->pport, in_pport_mask,
1764 				      ctrl->eth_type, ctrl->eth_dst,
1765 				      ctrl->eth_dst_mask, vlan_id,
1766 				      vlan_id_mask, ctrl->copy_to_cpu,
1767 				      flags);
1768 
1769 	if (err)
1770 		netdev_err(ofdpa_port->dev, "Error (%d) ctrl term\n", err);
1771 
1772 	return err;
1773 }
1774 
/* Dispatch a ctrl class to the table that traps it; the ofdpa_ctrls
 * table sets exactly one of acl/bridge/term per class, so at most
 * one branch fires.
 */
static int ofdpa_port_ctrl_vlan(struct ofdpa_port *ofdpa_port,
				struct switchdev_trans *trans, int flags,
				const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
{
	if (ctrl->acl)
		return ofdpa_port_ctrl_vlan_acl(ofdpa_port, trans, flags,
						ctrl, vlan_id);
	if (ctrl->bridge)
		return ofdpa_port_ctrl_vlan_bridge(ofdpa_port, trans, flags,
						   ctrl, vlan_id);

	if (ctrl->term)
		return ofdpa_port_ctrl_vlan_term(ofdpa_port, trans, flags,
						 ctrl, vlan_id);

	return -EOPNOTSUPP;
}
1792 
1793 static int ofdpa_port_ctrl_vlan_add(struct ofdpa_port *ofdpa_port,
1794 				    struct switchdev_trans *trans, int flags,
1795 				    __be16 vlan_id)
1796 {
1797 	int err = 0;
1798 	int i;
1799 
1800 	for (i = 0; i < OFDPA_CTRL_MAX; i++) {
1801 		if (ofdpa_port->ctrls[i]) {
1802 			err = ofdpa_port_ctrl_vlan(ofdpa_port, trans, flags,
1803 						   &ofdpa_ctrls[i], vlan_id);
1804 			if (err)
1805 				return err;
1806 		}
1807 	}
1808 
1809 	return err;
1810 }
1811 
/* Apply one ctrl class across every VLAN (1..VLAN_N_VID-1) the port
 * is currently a member of; stop at the first failure.
 */
static int ofdpa_port_ctrl(struct ofdpa_port *ofdpa_port,
			   struct switchdev_trans *trans, int flags,
			   const struct ofdpa_ctrl *ctrl)
{
	u16 vid;
	int err = 0;

	for (vid = 1; vid < VLAN_N_VID; vid++) {
		if (!test_bit(vid, ofdpa_port->vlan_bitmap))
			continue;
		err = ofdpa_port_ctrl_vlan(ofdpa_port, trans, flags,
					   ctrl, htons(vid));
		if (err)
			break;
	}

	return err;
}
1830 
/* Add or remove (per flags) port membership in VLAN @vid: ctrl traps,
 * L2 interface groups, the VLAN flood group, and the VLAN table entry
 * mapping the wire VLAN to the internal VLAN.
 */
static int ofdpa_port_vlan(struct ofdpa_port *ofdpa_port,
			   struct switchdev_trans *trans, int flags, u16 vid)
{
	enum rocker_of_dpa_table_id goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	u32 in_pport = ofdpa_port->pport;
	__be16 vlan_id = htons(vid);
	__be16 vlan_id_mask = htons(0xffff);
	__be16 internal_vlan_id;
	bool untagged;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	int err;

	internal_vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, &untagged);

	if (adding &&
	    test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap))
		return 0; /* already added */
	else if (!adding &&
		 !test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap))
		return 0; /* already removed */

	/* toggle membership up front; the group helpers below read the
	 * bitmap to compute per-VLAN membership counts
	 */
	change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);

	if (adding) {
		err = ofdpa_port_ctrl_vlan_add(ofdpa_port, trans, flags,
					       internal_vlan_id);
		if (err) {
			netdev_err(ofdpa_port->dev, "Error (%d) port ctrl vlan add\n", err);
			goto err_out;
		}
	}

	err = ofdpa_port_vlan_l2_groups(ofdpa_port, trans, flags,
					internal_vlan_id, untagged);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 groups\n", err);
		goto err_out;
	}

	err = ofdpa_port_vlan_flood_group(ofdpa_port, trans, flags,
					  internal_vlan_id);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);
		goto err_out;
	}

	err = ofdpa_flow_tbl_vlan(ofdpa_port, trans, flags,
				  in_pport, vlan_id, vlan_id_mask,
				  goto_tbl, untagged, internal_vlan_id);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN table\n", err);

err_out:
	/* the prepare phase must leave no side effects: revert the
	 * bitmap toggle (even on success) so the commit phase sees the
	 * original state and re-applies the change
	 */
	if (switchdev_trans_ph_prepare(trans))
		change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);

	return err;
}
1890 
1891 static int ofdpa_port_ig_tbl(struct ofdpa_port *ofdpa_port,
1892 			     struct switchdev_trans *trans, int flags)
1893 {
1894 	enum rocker_of_dpa_table_id goto_tbl;
1895 	u32 in_pport;
1896 	u32 in_pport_mask;
1897 	int err;
1898 
1899 	/* Normal Ethernet Frames.  Matches pkts from any local physical
1900 	 * ports.  Goto VLAN tbl.
1901 	 */
1902 
1903 	in_pport = 0;
1904 	in_pport_mask = 0xffff0000;
1905 	goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
1906 
1907 	err = ofdpa_flow_tbl_ig_port(ofdpa_port, trans, flags,
1908 				     in_pport, in_pport_mask,
1909 				     goto_tbl);
1910 	if (err)
1911 		netdev_err(ofdpa_port->dev, "Error (%d) ingress port table entry\n", err);
1912 
1913 	return err;
1914 }
1915 
/* Deferred FDB learn event, executed from a workqueue so the
 * switchdev notifier chain can be called under rtnl.
 */
struct ofdpa_fdb_learn_work {
	struct work_struct work;
	struct ofdpa_port *ofdpa_port;	/* port the address was seen on */
	struct switchdev_trans *trans;	/* transaction owning this memory */
	int flags;			/* OFDPA_OP_FLAG_* of the event */
	u8 addr[ETH_ALEN];		/* learned/aged MAC address */
	u16 vid;			/* VLAN id reported to the bridge */
};
1924 
1925 static void ofdpa_port_fdb_learn_work(struct work_struct *work)
1926 {
1927 	const struct ofdpa_fdb_learn_work *lw =
1928 		container_of(work, struct ofdpa_fdb_learn_work, work);
1929 	bool removing = (lw->flags & OFDPA_OP_FLAG_REMOVE);
1930 	bool learned = (lw->flags & OFDPA_OP_FLAG_LEARNED);
1931 	struct switchdev_notifier_fdb_info info;
1932 
1933 	info.addr = lw->addr;
1934 	info.vid = lw->vid;
1935 
1936 	rtnl_lock();
1937 	if (learned && removing)
1938 		call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
1939 					 lw->ofdpa_port->dev, &info.info);
1940 	else if (learned && !removing)
1941 		call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
1942 					 lw->ofdpa_port->dev, &info.info);
1943 	rtnl_unlock();
1944 
1945 	ofdpa_kfree(lw->trans, work);
1946 }
1947 
/* Program a bridging entry for a learned MAC and, when learning sync
 * is enabled on a bridged port, schedule deferred notification of the
 * bridge.  A REFRESH only re-notifies; the hardware entry is left
 * as-is.
 */
static int ofdpa_port_fdb_learn(struct ofdpa_port *ofdpa_port,
				struct switchdev_trans *trans, int flags,
				const u8 *addr, __be16 vlan_id)
{
	struct ofdpa_fdb_learn_work *lw;
	enum rocker_of_dpa_table_id goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 out_pport = ofdpa_port->pport;
	u32 tunnel_id = 0;
	u32 group_id = ROCKER_GROUP_NONE;
	bool syncing = !!(ofdpa_port->brport_flags & BR_LEARNING_SYNC);
	bool copy_to_cpu = false;
	int err;

	/* only forward to a real egress group when bridged */
	if (ofdpa_port_is_bridged(ofdpa_port))
		group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);

	if (!(flags & OFDPA_OP_FLAG_REFRESH)) {
		err = ofdpa_flow_tbl_bridge(ofdpa_port, trans, flags, addr,
					    NULL, vlan_id, tunnel_id, goto_tbl,
					    group_id, copy_to_cpu);
		if (err)
			return err;
	}

	if (!syncing)
		return 0;

	if (!ofdpa_port_is_bridged(ofdpa_port))
		return 0;

	lw = ofdpa_kzalloc(trans, flags, sizeof(*lw));
	if (!lw)
		return -ENOMEM;

	INIT_WORK(&lw->work, ofdpa_port_fdb_learn_work);

	lw->ofdpa_port = ofdpa_port;
	lw->trans = trans;
	lw->flags = flags;
	ether_addr_copy(lw->addr, addr);
	lw->vid = ofdpa_port_vlan_to_vid(ofdpa_port, vlan_id);

	/* prepare phase must have no side effects: discard instead of
	 * scheduling the notification
	 */
	if (switchdev_trans_ph_prepare(trans))
		ofdpa_kfree(trans, lw);
	else
		schedule_work(&lw->work);

	return 0;
}
1998 
1999 static struct ofdpa_fdb_tbl_entry *
2000 ofdpa_fdb_tbl_find(const struct ofdpa *ofdpa,
2001 		   const struct ofdpa_fdb_tbl_entry *match)
2002 {
2003 	struct ofdpa_fdb_tbl_entry *found;
2004 
2005 	hash_for_each_possible(ofdpa->fdb_tbl, found, entry, match->key_crc32)
2006 		if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
2007 			return found;
2008 
2009 	return NULL;
2010 }
2011 
/* Add, remove or refresh the software FDB entry for @addr/@vlan_id on
 * @ofdpa_port, then push the change to hardware via
 * ofdpa_port_fdb_learn().
 *
 * Returns 0 on success or a negative errno.
 */
static int ofdpa_port_fdb(struct ofdpa_port *ofdpa_port,
			  struct switchdev_trans *trans,
			  const unsigned char *addr,
			  __be16 vlan_id, int flags)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_fdb_tbl_entry *fdb;
	struct ofdpa_fdb_tbl_entry *found;
	bool removing = (flags & OFDPA_OP_FLAG_REMOVE);
	unsigned long lock_flags;

	/* Allocate a candidate entry up front; also serves as the
	 * lookup key below.
	 */
	fdb = ofdpa_kzalloc(trans, flags, sizeof(*fdb));
	if (!fdb)
		return -ENOMEM;

	fdb->learned = (flags & OFDPA_OP_FLAG_LEARNED);
	fdb->touched = jiffies;
	fdb->key.ofdpa_port = ofdpa_port;
	ether_addr_copy(fdb->key.addr, addr);
	fdb->key.vlan_id = vlan_id;
	/* CRC over the whole key; kzalloc zeroed it, so any struct
	 * padding is deterministic.
	 */
	fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	found = ofdpa_fdb_tbl_find(ofdpa, fdb);

	if (found) {
		found->touched = jiffies;
		if (removing) {
			ofdpa_kfree(trans, fdb);
			/* Only mutate the table in the commit phase */
			if (!switchdev_trans_ph_prepare(trans))
				hash_del(&found->entry);
		}
	} else if (!removing) {
		if (!switchdev_trans_ph_prepare(trans))
			hash_add(ofdpa->fdb_tbl, &fdb->entry,
				 fdb->key_crc32);
	}

	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	/* Check if adding and already exists, or removing and can't find */
	if (!found != !removing) {
		/* add+exists: candidate was never inserted, free it;
		 * remove+missing: nothing to do in hardware either.
		 */
		ofdpa_kfree(trans, fdb);
		if (!found && removing)
			return 0;
		/* Refreshing existing to update aging timers */
		flags |= OFDPA_OP_FLAG_REFRESH;
	}

	return ofdpa_port_fdb_learn(ofdpa_port, trans, flags, addr, vlan_id);
}
2064 
/* Remove all learned FDB entries belonging to @ofdpa_port from the
 * software table and from hardware.  A no-op while the port is in
 * LEARNING or FORWARDING state.
 *
 * Returns 0 on success or the first error from ofdpa_port_fdb_learn().
 */
static int ofdpa_port_fdb_flush(struct ofdpa_port *ofdpa_port,
				struct switchdev_trans *trans, int flags)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_fdb_tbl_entry *found;
	unsigned long lock_flags;
	struct hlist_node *tmp;
	int bkt;
	int err = 0;

	if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
	    ofdpa_port->stp_state == BR_STATE_FORWARDING)
		return 0;

	/* NOWAIT: ofdpa_port_fdb_learn() is invoked below while
	 * fdb_tbl_lock is held with interrupts disabled.
	 */
	flags |= OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE;

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, found, entry) {
		if (found->key.ofdpa_port != ofdpa_port)
			continue;
		/* Static (non-learned) entries are left in place */
		if (!found->learned)
			continue;
		err = ofdpa_port_fdb_learn(ofdpa_port, trans, flags,
					   found->key.addr,
					   found->key.vlan_id);
		if (err)
			goto err_out;
		/* Only unlink in the commit phase */
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
	}

err_out:
	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	return err;
}
2102 
/* FDB aging timer callback (atomic context).  Expires learned entries
 * whose last-touched time plus their port's ageing_time has passed,
 * removing them from software and hardware, then re-arms the timer for
 * the earliest remaining expiry.
 */
static void ofdpa_fdb_cleanup(unsigned long data)
{
	struct ofdpa *ofdpa = (struct ofdpa *)data;
	struct ofdpa_port *ofdpa_port;
	struct ofdpa_fdb_tbl_entry *entry;
	struct hlist_node *tmp;
	/* Default: fire again one full ageing interval from now */
	unsigned long next_timer = jiffies + ofdpa->ageing_time;
	unsigned long expires;
	unsigned long lock_flags;
	/* NOWAIT: timer context, and fdb_tbl_lock is held below */
	int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE |
		    OFDPA_OP_FLAG_LEARNED;
	int bkt;

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, entry, entry) {
		/* Static (non-learned) entries never age out */
		if (!entry->learned)
			continue;
		ofdpa_port = entry->key.ofdpa_port;
		expires = entry->touched + ofdpa_port->ageing_time;
		if (time_before_eq(expires, jiffies)) {
			ofdpa_port_fdb_learn(ofdpa_port, NULL,
					     flags, entry->key.addr,
					     entry->key.vlan_id);
			hash_del(&entry->entry);
		} else if (time_before(expires, next_timer)) {
			/* Track the soonest pending expiry */
			next_timer = expires;
		}
	}

	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	mod_timer(&ofdpa->fdb_cleanup_timer, round_jiffies_up(next_timer));
}
2137 
2138 static int ofdpa_port_router_mac(struct ofdpa_port *ofdpa_port,
2139 				 struct switchdev_trans *trans, int flags,
2140 				 __be16 vlan_id)
2141 {
2142 	u32 in_pport_mask = 0xffffffff;
2143 	__be16 eth_type;
2144 	const u8 *dst_mac_mask = ff_mac;
2145 	__be16 vlan_id_mask = htons(0xffff);
2146 	bool copy_to_cpu = false;
2147 	int err;
2148 
2149 	if (ntohs(vlan_id) == 0)
2150 		vlan_id = ofdpa_port->internal_vlan_id;
2151 
2152 	eth_type = htons(ETH_P_IP);
2153 	err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans,
2154 				      ofdpa_port->pport, in_pport_mask,
2155 				      eth_type, ofdpa_port->dev->dev_addr,
2156 				      dst_mac_mask, vlan_id, vlan_id_mask,
2157 				      copy_to_cpu, flags);
2158 	if (err)
2159 		return err;
2160 
2161 	eth_type = htons(ETH_P_IPV6);
2162 	err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans,
2163 				      ofdpa_port->pport, in_pport_mask,
2164 				      eth_type, ofdpa_port->dev->dev_addr,
2165 				      dst_mac_mask, vlan_id, vlan_id_mask,
2166 				      copy_to_cpu, flags);
2167 
2168 	return err;
2169 }
2170 
2171 static int ofdpa_port_fwding(struct ofdpa_port *ofdpa_port,
2172 			     struct switchdev_trans *trans, int flags)
2173 {
2174 	bool pop_vlan;
2175 	u32 out_pport;
2176 	__be16 vlan_id;
2177 	u16 vid;
2178 	int err;
2179 
2180 	/* Port will be forwarding-enabled if its STP state is LEARNING
2181 	 * or FORWARDING.  Traffic from CPU can still egress, regardless of
2182 	 * port STP state.  Use L2 interface group on port VLANs as a way
2183 	 * to toggle port forwarding: if forwarding is disabled, L2
2184 	 * interface group will not exist.
2185 	 */
2186 
2187 	if (ofdpa_port->stp_state != BR_STATE_LEARNING &&
2188 	    ofdpa_port->stp_state != BR_STATE_FORWARDING)
2189 		flags |= OFDPA_OP_FLAG_REMOVE;
2190 
2191 	out_pport = ofdpa_port->pport;
2192 	for (vid = 1; vid < VLAN_N_VID; vid++) {
2193 		if (!test_bit(vid, ofdpa_port->vlan_bitmap))
2194 			continue;
2195 		vlan_id = htons(vid);
2196 		pop_vlan = ofdpa_vlan_id_is_internal(vlan_id);
2197 		err = ofdpa_group_l2_interface(ofdpa_port, trans, flags,
2198 					       vlan_id, out_pport, pop_vlan);
2199 		if (err) {
2200 			netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
2201 				   err, out_pport);
2202 			return err;
2203 		}
2204 	}
2205 
2206 	return 0;
2207 }
2208 
/* Transition @ofdpa_port to STP @state: reconcile the control-traffic
 * (ctrl) entries against what the new state requires, flush learned
 * FDB entries, and re-program per-VLAN forwarding.
 *
 * In the transaction prepare phase, the port state mutated here
 * (ctrls[] and stp_state) is snapshotted first and restored at the
 * end, so prepare leaves the port untouched.
 *
 * Returns 0 on success or a negative errno.
 */
static int ofdpa_port_stp_update(struct ofdpa_port *ofdpa_port,
				 struct switchdev_trans *trans,
				 int flags, u8 state)
{
	bool want[OFDPA_CTRL_MAX] = { 0, };
	bool prev_ctrls[OFDPA_CTRL_MAX];
	u8 uninitialized_var(prev_state);	/* only read in prepare phase */
	int err;
	int i;

	/* Snapshot state for the prepare-phase rollback below */
	if (switchdev_trans_ph_prepare(trans)) {
		memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls));
		prev_state = ofdpa_port->stp_state;
	}

	if (ofdpa_port->stp_state == state)
		return 0;

	ofdpa_port->stp_state = state;

	/* Build the set of ctrl entries wanted in the new state */
	switch (state) {
	case BR_STATE_DISABLED:
		/* port is completely disabled */
		break;
	case BR_STATE_LISTENING:
	case BR_STATE_BLOCKING:
		want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true;
		break;
	case BR_STATE_LEARNING:
	case BR_STATE_FORWARDING:
		if (!ofdpa_port_is_ovsed(ofdpa_port))
			want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true;
		want[OFDPA_CTRL_IPV4_MCAST] = true;
		want[OFDPA_CTRL_IPV6_MCAST] = true;
		if (ofdpa_port_is_bridged(ofdpa_port))
			want[OFDPA_CTRL_DFLT_BRIDGING] = true;
		else if (ofdpa_port_is_ovsed(ofdpa_port))
			want[OFDPA_CTRL_DFLT_OVS] = true;
		else
			want[OFDPA_CTRL_LOCAL_ARP] = true;
		break;
	}

	/* Install/remove only the ctrl entries that changed */
	for (i = 0; i < OFDPA_CTRL_MAX; i++) {
		if (want[i] != ofdpa_port->ctrls[i]) {
			int ctrl_flags = flags |
					 (want[i] ? 0 : OFDPA_OP_FLAG_REMOVE);
			err = ofdpa_port_ctrl(ofdpa_port, trans, ctrl_flags,
					      &ofdpa_ctrls[i]);
			if (err)
				goto err_out;
			ofdpa_port->ctrls[i] = want[i];
		}
	}

	err = ofdpa_port_fdb_flush(ofdpa_port, trans, flags);
	if (err)
		goto err_out;

	err = ofdpa_port_fwding(ofdpa_port, trans, flags);

err_out:
	/* Prepare phase must be side-effect free: roll back port state */
	if (switchdev_trans_ph_prepare(trans)) {
		memcpy(ofdpa_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
		ofdpa_port->stp_state = prev_state;
	}

	return err;
}
2278 
2279 static int ofdpa_port_fwd_enable(struct ofdpa_port *ofdpa_port, int flags)
2280 {
2281 	if (ofdpa_port_is_bridged(ofdpa_port))
2282 		/* bridge STP will enable port */
2283 		return 0;
2284 
2285 	/* port is not bridged, so simulate going to FORWARDING state */
2286 	return ofdpa_port_stp_update(ofdpa_port, NULL, flags,
2287 				     BR_STATE_FORWARDING);
2288 }
2289 
2290 static int ofdpa_port_fwd_disable(struct ofdpa_port *ofdpa_port, int flags)
2291 {
2292 	if (ofdpa_port_is_bridged(ofdpa_port))
2293 		/* bridge STP will disable port */
2294 		return 0;
2295 
2296 	/* port is not bridged, so simulate going to DISABLED state */
2297 	return ofdpa_port_stp_update(ofdpa_port, NULL, flags,
2298 				     BR_STATE_DISABLED);
2299 }
2300 
2301 static int ofdpa_port_vlan_add(struct ofdpa_port *ofdpa_port,
2302 			       struct switchdev_trans *trans,
2303 			       u16 vid, u16 flags)
2304 {
2305 	int err;
2306 
2307 	/* XXX deal with flags for PVID and untagged */
2308 
2309 	err = ofdpa_port_vlan(ofdpa_port, trans, 0, vid);
2310 	if (err)
2311 		return err;
2312 
2313 	err = ofdpa_port_router_mac(ofdpa_port, trans, 0, htons(vid));
2314 	if (err)
2315 		ofdpa_port_vlan(ofdpa_port, trans,
2316 				OFDPA_OP_FLAG_REMOVE, vid);
2317 
2318 	return err;
2319 }
2320 
2321 static int ofdpa_port_vlan_del(struct ofdpa_port *ofdpa_port,
2322 			       u16 vid, u16 flags)
2323 {
2324 	int err;
2325 
2326 	err = ofdpa_port_router_mac(ofdpa_port, NULL,
2327 				    OFDPA_OP_FLAG_REMOVE, htons(vid));
2328 	if (err)
2329 		return err;
2330 
2331 	return ofdpa_port_vlan(ofdpa_port, NULL,
2332 			       OFDPA_OP_FLAG_REMOVE, vid);
2333 }
2334 
2335 static struct ofdpa_internal_vlan_tbl_entry *
2336 ofdpa_internal_vlan_tbl_find(const struct ofdpa *ofdpa, int ifindex)
2337 {
2338 	struct ofdpa_internal_vlan_tbl_entry *found;
2339 
2340 	hash_for_each_possible(ofdpa->internal_vlan_tbl, found,
2341 			       entry, ifindex) {
2342 		if (found->ifindex == ifindex)
2343 			return found;
2344 	}
2345 
2346 	return NULL;
2347 }
2348 
/* Get (creating if necessary) the internal VLAN id for the netdev
 * identified by @ifindex and take a reference on the table entry.
 * Paired with ofdpa_port_internal_vlan_id_put().
 *
 * Returns the internal VLAN id in network byte order, or 0 if the
 * entry could not be allocated.
 *
 * NOTE(review): when the internal-VLAN pool is exhausted, the new
 * entry stays in the table with vlan_id 0 (from kzalloc), a reference
 * is still taken, and 0 is returned -- confirm callers tolerate an
 * all-zero id in that case.
 */
static __be16 ofdpa_port_internal_vlan_id_get(struct ofdpa_port *ofdpa_port,
					      int ifindex)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_internal_vlan_tbl_entry *entry;
	struct ofdpa_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	int i;

	/* Allocate before taking the spinlock (GFP_KERNEL may sleep) */
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return 0;

	entry->ifindex = ifindex;

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex);
	if (found) {
		/* Lost the race (or entry pre-existed): reuse it */
		kfree(entry);
		goto found;
	}

	found = entry;
	hash_add(ofdpa->internal_vlan_tbl, &found->entry, found->ifindex);

	/* Claim the first free id from the internal VLAN pool */
	for (i = 0; i < OFDPA_N_INTERNAL_VLANS; i++) {
		if (test_and_set_bit(i, ofdpa->internal_vlan_bitmap))
			continue;
		found->vlan_id = htons(OFDPA_INTERNAL_VLAN_ID_BASE + i);
		goto found;
	}

	netdev_err(ofdpa_port->dev, "Out of internal VLAN IDs\n");

found:
	found->ref_count++;
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	return found->vlan_id;
}
2390 
/* Install or remove (per @flags) a unicast IPv4 routing flow for
 * @dst/@dst_len taken from @fi.  A route whose (single) nexthop has a
 * gateway on this port goes out via an L3 unicast group; anything
 * else is sent to the CPU through the internal-VLAN L2 interface
 * group.  @tb_id is accepted but currently unused.
 *
 * Returns 0 on success or a negative errno.
 */
static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port,
			       struct switchdev_trans *trans, __be32 dst,
			       int dst_len, const struct fib_info *fi,
			       u32 tb_id, int flags)
{
	const struct fib_nh *nh;
	__be16 eth_type = htons(ETH_P_IP);
	__be32 dst_mask = inet_make_mask(dst_len);
	__be16 internal_vlan_id = ofdpa_port->internal_vlan_id;
	u32 priority = fi->fib_priority;
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	bool nh_on_port;
	bool has_gw;
	u32 index;
	int err;

	/* XXX support ECMP */

	/* Only the first nexthop is considered */
	nh = fi->fib_nh;
	nh_on_port = (fi->fib_dev == ofdpa_port->dev);
	has_gw = !!nh->nh_gw;

	if (has_gw && nh_on_port) {
		/* Resolve (or create) the neighbour entry and use its
		 * L3 unicast group for egress.
		 */
		err = ofdpa_port_ipv4_nh(ofdpa_port, trans, flags,
					 nh->nh_gw, &index);
		if (err)
			return err;

		group_id = ROCKER_GROUP_L3_UNICAST(index);
	} else {
		/* Send to CPU for processing */
		group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
	}

	err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, trans, eth_type, dst,
					    dst_mask, priority, goto_tbl,
					    group_id, flags);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) IPv4 route %pI4\n",
			   err, &dst);

	return err;
}
2436 
2437 static void
2438 ofdpa_port_internal_vlan_id_put(const struct ofdpa_port *ofdpa_port,
2439 				int ifindex)
2440 {
2441 	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
2442 	struct ofdpa_internal_vlan_tbl_entry *found;
2443 	unsigned long lock_flags;
2444 	unsigned long bit;
2445 
2446 	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags);
2447 
2448 	found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex);
2449 	if (!found) {
2450 		netdev_err(ofdpa_port->dev,
2451 			   "ifindex (%d) not found in internal VLAN tbl\n",
2452 			   ifindex);
2453 		goto not_found;
2454 	}
2455 
2456 	if (--found->ref_count <= 0) {
2457 		bit = ntohs(found->vlan_id) - OFDPA_INTERNAL_VLAN_ID_BASE;
2458 		clear_bit(bit, ofdpa->internal_vlan_bitmap);
2459 		hash_del(&found->entry);
2460 		kfree(found);
2461 	}
2462 
2463 not_found:
2464 	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags);
2465 }
2466 
2467 /**********************************
2468  * Rocker world ops implementation
2469  **********************************/
2470 
/* World-level init: set up the flow/group/FDB/internal-VLAN/neighbour
 * hash tables with their locks, and start the FDB aging timer.
 * Always returns 0.
 */
static int ofdpa_init(struct rocker *rocker)
{
	struct ofdpa *ofdpa = rocker->wpriv;

	ofdpa->rocker = rocker;

	hash_init(ofdpa->flow_tbl);
	spin_lock_init(&ofdpa->flow_tbl_lock);

	hash_init(ofdpa->group_tbl);
	spin_lock_init(&ofdpa->group_tbl_lock);

	hash_init(ofdpa->fdb_tbl);
	spin_lock_init(&ofdpa->fdb_tbl_lock);

	hash_init(ofdpa->internal_vlan_tbl);
	spin_lock_init(&ofdpa->internal_vlan_tbl_lock);

	hash_init(ofdpa->neigh_tbl);
	spin_lock_init(&ofdpa->neigh_tbl_lock);

	/* Kick the aging timer immediately; it re-arms itself */
	setup_timer(&ofdpa->fdb_cleanup_timer, ofdpa_fdb_cleanup,
		    (unsigned long) ofdpa);
	mod_timer(&ofdpa->fdb_cleanup_timer, jiffies);

	ofdpa->ageing_time = BR_DEFAULT_AGEING_TIME;

	return 0;
}
2500 
/* World-level teardown: stop the FDB aging timer, then unlink every
 * entry from each hash table under its respective lock.
 *
 * NOTE(review): entries are only unlinked here, not freed -- confirm
 * their memory is reclaimed elsewhere on driver removal.
 */
static void ofdpa_fini(struct rocker *rocker)
{
	struct ofdpa *ofdpa = rocker->wpriv;

	unsigned long flags;
	struct ofdpa_flow_tbl_entry *flow_entry;
	struct ofdpa_group_tbl_entry *group_entry;
	struct ofdpa_fdb_tbl_entry *fdb_entry;
	struct ofdpa_internal_vlan_tbl_entry *internal_vlan_entry;
	struct ofdpa_neigh_tbl_entry *neigh_entry;
	struct hlist_node *tmp;
	int bkt;

	/* Stop aging first so the timer can't race the table walks */
	del_timer_sync(&ofdpa->fdb_cleanup_timer);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
	hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry)
		hash_del(&flow_entry->entry);
	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->group_tbl_lock, flags);
	hash_for_each_safe(ofdpa->group_tbl, bkt, tmp, group_entry, entry)
		hash_del(&group_entry->entry);
	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, flags);
	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, fdb_entry, entry)
		hash_del(&fdb_entry->entry);
	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, flags);
	hash_for_each_safe(ofdpa->internal_vlan_tbl, bkt,
			   tmp, internal_vlan_entry, entry)
		hash_del(&internal_vlan_entry->entry);
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, flags);
	hash_for_each_safe(ofdpa->neigh_tbl, bkt, tmp, neigh_entry, entry)
		hash_del(&neigh_entry->entry);
	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, flags);
}
2542 
2543 static int ofdpa_port_pre_init(struct rocker_port *rocker_port)
2544 {
2545 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2546 
2547 	ofdpa_port->ofdpa = rocker_port->rocker->wpriv;
2548 	ofdpa_port->rocker_port = rocker_port;
2549 	ofdpa_port->dev = rocker_port->dev;
2550 	ofdpa_port->pport = rocker_port->pport;
2551 	ofdpa_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
2552 	ofdpa_port->ageing_time = BR_DEFAULT_AGEING_TIME;
2553 	return 0;
2554 }
2555 
/* Per-port init: program hardware learning, install the ingress port
 * table entry, allocate the port's internal VLAN and add the untagged
 * VLAN.  On untagged-VLAN failure the ingress table entry is rolled
 * back.
 *
 * Returns 0 on success or a negative errno.
 */
static int ofdpa_port_init(struct rocker_port *rocker_port)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	int err;

	switchdev_port_fwd_mark_set(ofdpa_port->dev, NULL, false);
	rocker_port_set_learning(rocker_port,
				 !!(ofdpa_port->brport_flags & BR_LEARNING));

	err = ofdpa_port_ig_tbl(ofdpa_port, NULL, 0);
	if (err) {
		netdev_err(ofdpa_port->dev, "install ig port table failed\n");
		return err;
	}

	/* Internal VLAN is keyed by the port's own ifindex until the
	 * port joins a bridge.
	 */
	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port,
						ofdpa_port->dev->ifindex);

	err = ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0);
	if (err) {
		netdev_err(ofdpa_port->dev, "install untagged VLAN failed\n");
		goto err_untagged_vlan;
	}
	return 0;

err_untagged_vlan:
	ofdpa_port_ig_tbl(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE);
	return err;
}
2586 
2587 static void ofdpa_port_fini(struct rocker_port *rocker_port)
2588 {
2589 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2590 
2591 	ofdpa_port_ig_tbl(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE);
2592 }
2593 
2594 static int ofdpa_port_open(struct rocker_port *rocker_port)
2595 {
2596 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2597 
2598 	return ofdpa_port_fwd_enable(ofdpa_port, 0);
2599 }
2600 
2601 static void ofdpa_port_stop(struct rocker_port *rocker_port)
2602 {
2603 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2604 
2605 	ofdpa_port_fwd_disable(ofdpa_port, OFDPA_OP_FLAG_NOWAIT);
2606 }
2607 
2608 static int ofdpa_port_attr_stp_state_set(struct rocker_port *rocker_port,
2609 					 u8 state,
2610 					 struct switchdev_trans *trans)
2611 {
2612 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2613 
2614 	return ofdpa_port_stp_update(ofdpa_port, trans, 0, state);
2615 }
2616 
/* switchdev attr hook: apply new bridge-port flags.  The hardware
 * learning bit is programmed only in the commit phase; in the prepare
 * phase the cached flags are restored at the end so prepare remains
 * side-effect free.
 *
 * Returns 0 on success or a negative errno.
 */
static int ofdpa_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
					    unsigned long brport_flags,
					    struct switchdev_trans *trans)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	unsigned long orig_flags;
	int err = 0;

	orig_flags = ofdpa_port->brport_flags;
	ofdpa_port->brport_flags = brport_flags;
	/* Only touch hardware when BR_LEARNING actually changed */
	if ((orig_flags ^ ofdpa_port->brport_flags) & BR_LEARNING &&
	    !switchdev_trans_ph_prepare(trans))
		err = rocker_port_set_learning(ofdpa_port->rocker_port,
					       !!(ofdpa_port->brport_flags & BR_LEARNING));

	/* Prepare phase: revert the cached flags */
	if (switchdev_trans_ph_prepare(trans))
		ofdpa_port->brport_flags = orig_flags;

	return err;
}
2637 
2638 static int
2639 ofdpa_port_attr_bridge_flags_get(const struct rocker_port *rocker_port,
2640 				 unsigned long *p_brport_flags)
2641 {
2642 	const struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2643 
2644 	*p_brport_flags = ofdpa_port->brport_flags;
2645 	return 0;
2646 }
2647 
2648 static int
2649 ofdpa_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
2650 				       u32 ageing_time,
2651 				       struct switchdev_trans *trans)
2652 {
2653 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2654 	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
2655 
2656 	if (!switchdev_trans_ph_prepare(trans)) {
2657 		ofdpa_port->ageing_time = clock_t_to_jiffies(ageing_time);
2658 		if (ofdpa_port->ageing_time < ofdpa->ageing_time)
2659 			ofdpa->ageing_time = ofdpa_port->ageing_time;
2660 		mod_timer(&ofdpa_port->ofdpa->fdb_cleanup_timer, jiffies);
2661 	}
2662 
2663 	return 0;
2664 }
2665 
2666 static int ofdpa_port_obj_vlan_add(struct rocker_port *rocker_port,
2667 				   const struct switchdev_obj_port_vlan *vlan,
2668 				   struct switchdev_trans *trans)
2669 {
2670 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2671 	u16 vid;
2672 	int err;
2673 
2674 	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
2675 		err = ofdpa_port_vlan_add(ofdpa_port, trans, vid, vlan->flags);
2676 		if (err)
2677 			return err;
2678 	}
2679 
2680 	return 0;
2681 }
2682 
2683 static int ofdpa_port_obj_vlan_del(struct rocker_port *rocker_port,
2684 				   const struct switchdev_obj_port_vlan *vlan)
2685 {
2686 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2687 	u16 vid;
2688 	int err;
2689 
2690 	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
2691 		err = ofdpa_port_vlan_del(ofdpa_port, vid, vlan->flags);
2692 		if (err)
2693 			return err;
2694 	}
2695 
2696 	return 0;
2697 }
2698 
2699 static int ofdpa_port_obj_vlan_dump(const struct rocker_port *rocker_port,
2700 				    struct switchdev_obj_port_vlan *vlan,
2701 				    switchdev_obj_dump_cb_t *cb)
2702 {
2703 	const struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2704 	u16 vid;
2705 	int err = 0;
2706 
2707 	for (vid = 1; vid < VLAN_N_VID; vid++) {
2708 		if (!test_bit(vid, ofdpa_port->vlan_bitmap))
2709 			continue;
2710 		vlan->flags = 0;
2711 		if (ofdpa_vlan_id_is_internal(htons(vid)))
2712 			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
2713 		vlan->vid_begin = vlan->vid_end = vid;
2714 		err = cb(&vlan->obj);
2715 		if (err)
2716 			break;
2717 	}
2718 
2719 	return err;
2720 }
2721 
2722 static int ofdpa_port_obj_fib4_add(struct rocker_port *rocker_port,
2723 				   const struct switchdev_obj_ipv4_fib *fib4,
2724 				   struct switchdev_trans *trans)
2725 {
2726 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2727 
2728 	return ofdpa_port_fib_ipv4(ofdpa_port, trans,
2729 				   htonl(fib4->dst), fib4->dst_len,
2730 				   fib4->fi, fib4->tb_id, 0);
2731 }
2732 
2733 static int ofdpa_port_obj_fib4_del(struct rocker_port *rocker_port,
2734 				   const struct switchdev_obj_ipv4_fib *fib4)
2735 {
2736 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2737 
2738 	return ofdpa_port_fib_ipv4(ofdpa_port, NULL,
2739 				   htonl(fib4->dst), fib4->dst_len,
2740 				   fib4->fi, fib4->tb_id,
2741 				   OFDPA_OP_FLAG_REMOVE);
2742 }
2743 
2744 static int ofdpa_port_obj_fdb_add(struct rocker_port *rocker_port,
2745 				  const struct switchdev_obj_port_fdb *fdb,
2746 				  struct switchdev_trans *trans)
2747 {
2748 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2749 	__be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, fdb->vid, NULL);
2750 
2751 	if (!ofdpa_port_is_bridged(ofdpa_port))
2752 		return -EINVAL;
2753 
2754 	return ofdpa_port_fdb(ofdpa_port, trans, fdb->addr, vlan_id, 0);
2755 }
2756 
2757 static int ofdpa_port_obj_fdb_del(struct rocker_port *rocker_port,
2758 				  const struct switchdev_obj_port_fdb *fdb)
2759 {
2760 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2761 	__be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, fdb->vid, NULL);
2762 	int flags = OFDPA_OP_FLAG_REMOVE;
2763 
2764 	if (!ofdpa_port_is_bridged(ofdpa_port))
2765 		return -EINVAL;
2766 
2767 	return ofdpa_port_fdb(ofdpa_port, NULL, fdb->addr, vlan_id, flags);
2768 }
2769 
/* switchdev obj hook: report each software FDB entry belonging to
 * this port via @cb.
 *
 * NOTE(review): @cb is invoked while fdb_tbl_lock is held with
 * interrupts disabled -- callbacks must not sleep; confirm all dump
 * callbacks satisfy this.
 */
static int ofdpa_port_obj_fdb_dump(const struct rocker_port *rocker_port,
				   struct switchdev_obj_port_fdb *fdb,
				   switchdev_obj_dump_cb_t *cb)
{
	const struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_fdb_tbl_entry *found;
	struct hlist_node *tmp;
	unsigned long lock_flags;
	int bkt;
	int err = 0;

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);
	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, found, entry) {
		/* Skip entries owned by other ports */
		if (found->key.ofdpa_port != ofdpa_port)
			continue;
		ether_addr_copy(fdb->addr, found->key.addr);
		fdb->ndm_state = NUD_REACHABLE;
		fdb->vid = ofdpa_port_vlan_to_vid(ofdpa_port,
						  found->key.vlan_id);
		err = cb(&fdb->obj);
		if (err)
			break;
	}
	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	return err;
}
2798 
/* Enslave the port to @bridge: swap the port's internal VLAN (keyed
 * by its own ifindex) for the bridge's (keyed by the bridge ifindex),
 * re-installing the untagged VLAN around the swap, and mark the port
 * for forwarding offload.
 *
 * Returns 0 on success or a negative errno.
 */
static int ofdpa_port_bridge_join(struct ofdpa_port *ofdpa_port,
				  struct net_device *bridge)
{
	int err;

	/* Port is joining bridge, so the internal VLAN for the
	 * port is going to change to the bridge internal VLAN.
	 * Let's remove untagged VLAN (vid=0) from port and
	 * re-add once internal VLAN has changed.
	 */

	err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	ofdpa_port_internal_vlan_id_put(ofdpa_port,
					ofdpa_port->dev->ifindex);
	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port, bridge->ifindex);

	ofdpa_port->bridge_dev = bridge;
	switchdev_port_fwd_mark_set(ofdpa_port->dev, bridge, true);

	return ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0);
}
2824 
/* Unenslave the port from its bridge: swap back from the bridge's
 * internal VLAN to the port's own, clear the forwarding mark, and
 * (if the netdev is up) re-enable standalone forwarding.
 *
 * Returns 0 on success or a negative errno.
 */
static int ofdpa_port_bridge_leave(struct ofdpa_port *ofdpa_port)
{
	int err;

	/* Drop the untagged VLAN before the internal VLAN changes */
	err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	ofdpa_port_internal_vlan_id_put(ofdpa_port,
					ofdpa_port->bridge_dev->ifindex);
	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port,
						ofdpa_port->dev->ifindex);

	switchdev_port_fwd_mark_set(ofdpa_port->dev, ofdpa_port->bridge_dev,
				    false);
	ofdpa_port->bridge_dev = NULL;

	err = ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	/* Standalone now: resume forwarding if the netdev is up */
	if (ofdpa_port->dev->flags & IFF_UP)
		err = ofdpa_port_fwd_enable(ofdpa_port, 0);

	return err;
}
2852 
2853 static int ofdpa_port_ovs_changed(struct ofdpa_port *ofdpa_port,
2854 				  struct net_device *master)
2855 {
2856 	int err;
2857 
2858 	ofdpa_port->bridge_dev = master;
2859 
2860 	err = ofdpa_port_fwd_disable(ofdpa_port, 0);
2861 	if (err)
2862 		return err;
2863 	err = ofdpa_port_fwd_enable(ofdpa_port, 0);
2864 
2865 	return err;
2866 }
2867 
2868 static int ofdpa_port_master_linked(struct rocker_port *rocker_port,
2869 				    struct net_device *master)
2870 {
2871 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2872 	int err = 0;
2873 
2874 	if (netif_is_bridge_master(master))
2875 		err = ofdpa_port_bridge_join(ofdpa_port, master);
2876 	else if (netif_is_ovs_master(master))
2877 		err = ofdpa_port_ovs_changed(ofdpa_port, master);
2878 	return err;
2879 }
2880 
2881 static int ofdpa_port_master_unlinked(struct rocker_port *rocker_port,
2882 				      struct net_device *master)
2883 {
2884 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2885 	int err = 0;
2886 
2887 	if (ofdpa_port_is_bridged(ofdpa_port))
2888 		err = ofdpa_port_bridge_leave(ofdpa_port);
2889 	else if (ofdpa_port_is_ovsed(ofdpa_port))
2890 		err = ofdpa_port_ovs_changed(ofdpa_port, NULL);
2891 	return err;
2892 }
2893 
2894 static int ofdpa_port_neigh_update(struct rocker_port *rocker_port,
2895 				   struct neighbour *n)
2896 {
2897 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2898 	int flags = (n->nud_state & NUD_VALID ? 0 : OFDPA_OP_FLAG_REMOVE) |
2899 						    OFDPA_OP_FLAG_NOWAIT;
2900 	__be32 ip_addr = *(__be32 *) n->primary_key;
2901 
2902 	return ofdpa_port_ipv4_neigh(ofdpa_port, NULL, flags, ip_addr, n->ha);
2903 }
2904 
2905 static int ofdpa_port_neigh_destroy(struct rocker_port *rocker_port,
2906 				    struct neighbour *n)
2907 {
2908 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2909 	int flags = OFDPA_OP_FLAG_REMOVE | OFDPA_OP_FLAG_NOWAIT;
2910 	__be32 ip_addr = *(__be32 *) n->primary_key;
2911 
2912 	return ofdpa_port_ipv4_neigh(ofdpa_port, NULL, flags, ip_addr, n->ha);
2913 }
2914 
2915 static int ofdpa_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
2916 				       const unsigned char *addr,
2917 				       __be16 vlan_id)
2918 {
2919 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2920 	int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_LEARNED;
2921 
2922 	if (ofdpa_port->stp_state != BR_STATE_LEARNING &&
2923 	    ofdpa_port->stp_state != BR_STATE_FORWARDING)
2924 		return 0;
2925 
2926 	return ofdpa_port_fdb(ofdpa_port, NULL, addr, vlan_id, flags);
2927 }
2928 
/* OF-DPA world: wires the rocker core's world hooks to the
 * implementations above.
 */
struct rocker_world_ops rocker_ofdpa_ops = {
	.kind = "ofdpa",
	.priv_size = sizeof(struct ofdpa),
	.port_priv_size = sizeof(struct ofdpa_port),
	.mode = ROCKER_PORT_MODE_OF_DPA,
	.init = ofdpa_init,
	.fini = ofdpa_fini,
	.port_pre_init = ofdpa_port_pre_init,
	.port_init = ofdpa_port_init,
	.port_fini = ofdpa_port_fini,
	.port_open = ofdpa_port_open,
	.port_stop = ofdpa_port_stop,
	.port_attr_stp_state_set = ofdpa_port_attr_stp_state_set,
	.port_attr_bridge_flags_set = ofdpa_port_attr_bridge_flags_set,
	.port_attr_bridge_flags_get = ofdpa_port_attr_bridge_flags_get,
	.port_attr_bridge_ageing_time_set = ofdpa_port_attr_bridge_ageing_time_set,
	.port_obj_vlan_add = ofdpa_port_obj_vlan_add,
	.port_obj_vlan_del = ofdpa_port_obj_vlan_del,
	.port_obj_vlan_dump = ofdpa_port_obj_vlan_dump,
	.port_obj_fib4_add = ofdpa_port_obj_fib4_add,
	.port_obj_fib4_del = ofdpa_port_obj_fib4_del,
	.port_obj_fdb_add = ofdpa_port_obj_fdb_add,
	.port_obj_fdb_del = ofdpa_port_obj_fdb_del,
	.port_obj_fdb_dump = ofdpa_port_obj_fdb_dump,
	.port_master_linked = ofdpa_port_master_linked,
	.port_master_unlinked = ofdpa_port_master_unlinked,
	.port_neigh_update = ofdpa_port_neigh_update,
	.port_neigh_destroy = ofdpa_port_neigh_destroy,
	.port_ev_mac_vlan_seen = ofdpa_port_ev_mac_vlan_seen,
};
2959