xref: /openbmc/linux/drivers/net/ethernet/intel/ice/ice_eswitch_br.c (revision 3d40aed862874db14e1dd41fd6f12636dcfdcc3e)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2023, Intel Corporation. */
3 
4 #include "ice.h"
5 #include "ice_eswitch_br.h"
6 #include "ice_repr.h"
7 #include "ice_switch.h"
8 #include "ice_vlan.h"
9 #include "ice_vf_vsi_vlan_ops.h"
10 #include "ice_trace.h"
11 
12 #define ICE_ESW_BRIDGE_UPDATE_INTERVAL msecs_to_jiffies(1000)
13 
/* Hash table layout for offloaded FDB entries. The lookup key is the
 * {MAC address, VLAN ID} pair embedded in struct ice_esw_br_fdb_entry::data.
 */
static const struct rhashtable_params ice_fdb_ht_params = {
	.key_offset = offsetof(struct ice_esw_br_fdb_entry, data),
	.key_len = sizeof(struct ice_esw_br_fdb_data),
	.head_offset = offsetof(struct ice_esw_br_fdb_entry, ht_node),
	.automatic_shrinking = true,
};
20 
21 static bool ice_eswitch_br_is_dev_valid(const struct net_device *dev)
22 {
23 	/* Accept only PF netdev and PRs */
24 	return ice_is_port_repr_netdev(dev) || netif_is_ice(dev);
25 }
26 
27 static struct ice_esw_br_port *
28 ice_eswitch_br_netdev_to_port(struct net_device *dev)
29 {
30 	if (ice_is_port_repr_netdev(dev)) {
31 		struct ice_repr *repr = ice_netdev_to_repr(dev);
32 
33 		return repr->br_port;
34 	} else if (netif_is_ice(dev)) {
35 		struct ice_pf *pf = ice_netdev_to_pf(dev);
36 
37 		return pf->br_port;
38 	}
39 
40 	return NULL;
41 }
42 
/* Configure @rule_info for the ingress (RX) direction: traffic received
 * from the wire on PF @pf_id is forwarded to the VF VSI @vf_vsi_idx.
 */
static void
ice_eswitch_br_ingress_rule_setup(struct ice_adv_rule_info *rule_info,
				  u8 pf_id, u16 vf_vsi_idx)
{
	rule_info->sw_act.vsi_handle = vf_vsi_idx;
	rule_info->sw_act.flag |= ICE_FLTR_RX;
	rule_info->sw_act.src = pf_id;
	/* same fixed priority as the egress/guard rules below */
	rule_info->priority = 5;
}
52 
/* Configure @rule_info for the egress (TX) direction: traffic is forwarded
 * to the uplink VSI @pf_vsi_idx with the LAN-enable action so it leaves
 * the port towards the wire.
 */
static void
ice_eswitch_br_egress_rule_setup(struct ice_adv_rule_info *rule_info,
				 u16 pf_vsi_idx)
{
	rule_info->sw_act.vsi_handle = pf_vsi_idx;
	rule_info->sw_act.flag |= ICE_FLTR_TX;
	rule_info->flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
	rule_info->flags_info.act_valid = true;
	/* same fixed priority as the ingress/guard rules */
	rule_info->priority = 5;
}
63 
64 static int
65 ice_eswitch_br_rule_delete(struct ice_hw *hw, struct ice_rule_query_data *rule)
66 {
67 	int err;
68 
69 	if (!rule)
70 		return -EINVAL;
71 
72 	err = ice_rem_adv_rule_by_id(hw, rule);
73 	kfree(rule);
74 
75 	return err;
76 }
77 
78 static u16
79 ice_eswitch_br_get_lkups_cnt(u16 vid)
80 {
81 	return ice_eswitch_br_is_vid_valid(vid) ? 2 : 1;
82 }
83 
84 static void
85 ice_eswitch_br_add_vlan_lkup(struct ice_adv_lkup_elem *list, u16 vid)
86 {
87 	if (ice_eswitch_br_is_vid_valid(vid)) {
88 		list[1].type = ICE_VLAN_OFOS;
89 		list[1].h_u.vlan_hdr.vlan = cpu_to_be16(vid & VLAN_VID_MASK);
90 		list[1].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
91 	}
92 }
93 
94 static struct ice_rule_query_data *
95 ice_eswitch_br_fwd_rule_create(struct ice_hw *hw, int vsi_idx, int port_type,
96 			       const unsigned char *mac, u16 vid)
97 {
98 	struct ice_adv_rule_info rule_info = { 0 };
99 	struct ice_rule_query_data *rule;
100 	struct ice_adv_lkup_elem *list;
101 	u16 lkups_cnt;
102 	int err;
103 
104 	lkups_cnt = ice_eswitch_br_get_lkups_cnt(vid);
105 
106 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
107 	if (!rule)
108 		return ERR_PTR(-ENOMEM);
109 
110 	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
111 	if (!list) {
112 		err = -ENOMEM;
113 		goto err_list_alloc;
114 	}
115 
116 	switch (port_type) {
117 	case ICE_ESWITCH_BR_UPLINK_PORT:
118 		ice_eswitch_br_egress_rule_setup(&rule_info, vsi_idx);
119 		break;
120 	case ICE_ESWITCH_BR_VF_REPR_PORT:
121 		ice_eswitch_br_ingress_rule_setup(&rule_info, hw->pf_id,
122 						  vsi_idx);
123 		break;
124 	default:
125 		err = -EINVAL;
126 		goto err_add_rule;
127 	}
128 
129 	list[0].type = ICE_MAC_OFOS;
130 	ether_addr_copy(list[0].h_u.eth_hdr.dst_addr, mac);
131 	eth_broadcast_addr(list[0].m_u.eth_hdr.dst_addr);
132 
133 	ice_eswitch_br_add_vlan_lkup(list, vid);
134 
135 	rule_info.need_pass_l2 = true;
136 
137 	rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
138 
139 	err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, rule);
140 	if (err)
141 		goto err_add_rule;
142 
143 	kfree(list);
144 
145 	return rule;
146 
147 err_add_rule:
148 	kfree(list);
149 err_list_alloc:
150 	kfree(rule);
151 
152 	return ERR_PTR(err);
153 }
154 
155 static struct ice_rule_query_data *
156 ice_eswitch_br_guard_rule_create(struct ice_hw *hw, u16 vsi_idx,
157 				 const unsigned char *mac, u16 vid)
158 {
159 	struct ice_adv_rule_info rule_info = { 0 };
160 	struct ice_rule_query_data *rule;
161 	struct ice_adv_lkup_elem *list;
162 	int err = -ENOMEM;
163 	u16 lkups_cnt;
164 
165 	lkups_cnt = ice_eswitch_br_get_lkups_cnt(vid);
166 
167 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
168 	if (!rule)
169 		goto err_exit;
170 
171 	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
172 	if (!list)
173 		goto err_list_alloc;
174 
175 	list[0].type = ICE_MAC_OFOS;
176 	ether_addr_copy(list[0].h_u.eth_hdr.src_addr, mac);
177 	eth_broadcast_addr(list[0].m_u.eth_hdr.src_addr);
178 
179 	ice_eswitch_br_add_vlan_lkup(list, vid);
180 
181 	rule_info.allow_pass_l2 = true;
182 	rule_info.sw_act.vsi_handle = vsi_idx;
183 	rule_info.sw_act.fltr_act = ICE_NOP;
184 	rule_info.priority = 5;
185 
186 	err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, rule);
187 	if (err)
188 		goto err_add_rule;
189 
190 	kfree(list);
191 
192 	return rule;
193 
194 err_add_rule:
195 	kfree(list);
196 err_list_alloc:
197 	kfree(rule);
198 err_exit:
199 	return ERR_PTR(err);
200 }
201 
/* Create the forward + guard HW rule pair that offloads one FDB entry.
 * On failure every partially created resource is unwound (guard failure
 * also deletes the already-programmed forward rule) and ERR_PTR() is
 * returned; on success the caller owns the returned flow and must free
 * it with ice_eswitch_br_flow_delete().
 */
static struct ice_esw_br_flow *
ice_eswitch_br_flow_create(struct device *dev, struct ice_hw *hw, int vsi_idx,
			   int port_type, const unsigned char *mac, u16 vid)
{
	struct ice_rule_query_data *fwd_rule, *guard_rule;
	struct ice_esw_br_flow *flow;
	int err;

	flow = kzalloc(sizeof(*flow), GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	fwd_rule = ice_eswitch_br_fwd_rule_create(hw, vsi_idx, port_type, mac,
						  vid);
	err = PTR_ERR_OR_ZERO(fwd_rule);
	if (err) {
		dev_err(dev, "Failed to create eswitch bridge %sgress forward rule, err: %d\n",
			port_type == ICE_ESWITCH_BR_UPLINK_PORT ? "e" : "in",
			err);
		goto err_fwd_rule;
	}

	guard_rule = ice_eswitch_br_guard_rule_create(hw, vsi_idx, mac, vid);
	err = PTR_ERR_OR_ZERO(guard_rule);
	if (err) {
		dev_err(dev, "Failed to create eswitch bridge %sgress guard rule, err: %d\n",
			port_type == ICE_ESWITCH_BR_UPLINK_PORT ? "e" : "in",
			err);
		goto err_guard_rule;
	}

	flow->fwd_rule = fwd_rule;
	flow->guard_rule = guard_rule;

	return flow;

err_guard_rule:
	ice_eswitch_br_rule_delete(hw, fwd_rule);
err_fwd_rule:
	kfree(flow);

	return ERR_PTR(err);
}
245 
/* Look up an offloaded FDB entry by its {MAC, VID} key.
 * Returns NULL when no entry exists for that key.
 */
static struct ice_esw_br_fdb_entry *
ice_eswitch_br_fdb_find(struct ice_esw_br *bridge, const unsigned char *mac,
			u16 vid)
{
	struct ice_esw_br_fdb_data data = {
		.vid = vid,
	};

	ether_addr_copy(data.addr, mac);
	return rhashtable_lookup_fast(&bridge->fdb_ht, &data,
				      ice_fdb_ht_params);
}
258 
/* Remove both HW rules of @flow and free it. Rule-removal failures are
 * only logged; teardown continues regardless so the flow memory is
 * always released.
 */
static void
ice_eswitch_br_flow_delete(struct ice_pf *pf, struct ice_esw_br_flow *flow)
{
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	err = ice_eswitch_br_rule_delete(&pf->hw, flow->fwd_rule);
	if (err)
		dev_err(dev, "Failed to delete FDB forward rule, err: %d\n",
			err);

	err = ice_eswitch_br_rule_delete(&pf->hw, flow->guard_rule);
	if (err)
		dev_err(dev, "Failed to delete FDB guard rule, err: %d\n",
			err);

	kfree(flow);
}
277 
/* Find the per-port VLAN metadata for {@vsi_idx, @vid}.
 * Returns ERR_PTR(-EINVAL) when either the port or its VLAN entry is
 * missing, after logging which lookup failed.
 */
static struct ice_esw_br_vlan *
ice_esw_br_port_vlan_lookup(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid)
{
	struct ice_pf *pf = bridge->br_offloads->pf;
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_esw_br_port *port;
	struct ice_esw_br_vlan *vlan;

	port = xa_load(&bridge->ports, vsi_idx);
	if (!port) {
		dev_info(dev, "Bridge port lookup failed (vsi=%u)\n", vsi_idx);
		return ERR_PTR(-EINVAL);
	}

	vlan = xa_load(&port->vlans, vid);
	if (!vlan) {
		dev_info(dev, "Bridge port vlan metadata lookup failed (vsi=%u)\n",
			 vsi_idx);
		return ERR_PTR(-EINVAL);
	}

	return vlan;
}
301 
/* Tear down one FDB entry: unlink it from the hash table and the bridge
 * list first, then remove its HW rules, then free it.
 */
static void
ice_eswitch_br_fdb_entry_delete(struct ice_esw_br *bridge,
				struct ice_esw_br_fdb_entry *fdb_entry)
{
	struct ice_pf *pf = bridge->br_offloads->pf;

	rhashtable_remove_fast(&bridge->fdb_ht, &fdb_entry->ht_node,
			       ice_fdb_ht_params);
	list_del(&fdb_entry->list);

	ice_eswitch_br_flow_delete(pf, fdb_entry->flow);

	kfree(fdb_entry);
}
316 
/* Send a switchdev FDB notification of type @val for {@mac, @vid} on
 * @dev, marking the entry as offloaded.
 */
static void
ice_eswitch_br_fdb_offload_notify(struct net_device *dev,
				  const unsigned char *mac, u16 vid,
				  unsigned long val)
{
	struct switchdev_notifier_fdb_info fdb_info = {
		.addr = mac,
		.vid = vid,
		.offloaded = true,
	};

	call_switchdev_notifiers(val, dev, &fdb_info.info, NULL);
}
330 
/* Delete an FDB entry. For entries the driver learned itself (i.e. not
 * added by the user) the bridge is first told to drop its copy via
 * SWITCHDEV_FDB_DEL_TO_BRIDGE.
 */
static void
ice_eswitch_br_fdb_entry_notify_and_cleanup(struct ice_esw_br *bridge,
					    struct ice_esw_br_fdb_entry *entry)
{
	if (!(entry->flags & ICE_ESWITCH_BR_FDB_ADDED_BY_USER))
		ice_eswitch_br_fdb_offload_notify(entry->dev, entry->data.addr,
						  entry->data.vid,
						  SWITCHDEV_FDB_DEL_TO_BRIDGE);
	ice_eswitch_br_fdb_entry_delete(bridge, entry);
}
341 
/* Locate the FDB entry for {@mac, @vid} and remove it (with bridge
 * notification when applicable). A missing entry is only logged.
 */
static void
ice_eswitch_br_fdb_entry_find_and_delete(struct ice_esw_br *bridge,
					 const unsigned char *mac, u16 vid)
{
	struct ice_pf *pf = bridge->br_offloads->pf;
	struct ice_esw_br_fdb_entry *fdb_entry;
	struct device *dev = ice_pf_to_dev(pf);

	fdb_entry = ice_eswitch_br_fdb_find(bridge, mac, vid);
	if (!fdb_entry) {
		dev_err(dev, "FDB entry with mac: %pM and vid: %u not found\n",
			mac, vid);
		return;
	}

	trace_ice_eswitch_br_fdb_entry_find_and_delete(fdb_entry);
	ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, fdb_entry);
}
360 
/* Create and offload an FDB entry for {@mac, @vid} on @br_port, replacing
 * any stale entry with the same key. On success the bridge (or, for
 * user-added entries, the FDB listeners) is notified; on failure the
 * error is only logged since there is no caller to propagate it to.
 */
static void
ice_eswitch_br_fdb_entry_create(struct net_device *netdev,
				struct ice_esw_br_port *br_port,
				bool added_by_user,
				const unsigned char *mac, u16 vid)
{
	struct ice_esw_br *bridge = br_port->bridge;
	struct ice_pf *pf = bridge->br_offloads->pf;
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_esw_br_fdb_entry *fdb_entry;
	struct ice_esw_br_flow *flow;
	struct ice_esw_br_vlan *vlan;
	struct ice_hw *hw = &pf->hw;
	unsigned long event;
	int err;

	/* untagged filtering is not yet supported */
	if (!(bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING) && vid)
		return;

	/* With VLAN filtering on, only offload for VIDs configured on the
	 * port; the vlan metadata itself is not used beyond this check.
	 */
	if ((bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING)) {
		vlan = ice_esw_br_port_vlan_lookup(bridge, br_port->vsi_idx,
						   vid);
		if (IS_ERR(vlan)) {
			dev_err(dev, "Failed to find vlan lookup, err: %ld\n",
				PTR_ERR(vlan));
			return;
		}
	}

	/* Replace a pre-existing entry with the same {mac, vid} key */
	fdb_entry = ice_eswitch_br_fdb_find(bridge, mac, vid);
	if (fdb_entry)
		ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, fdb_entry);

	fdb_entry = kzalloc(sizeof(*fdb_entry), GFP_KERNEL);
	if (!fdb_entry) {
		err = -ENOMEM;
		goto err_exit;
	}

	flow = ice_eswitch_br_flow_create(dev, hw, br_port->vsi_idx,
					  br_port->type, mac, vid);
	if (IS_ERR(flow)) {
		err = PTR_ERR(flow);
		goto err_add_flow;
	}

	ether_addr_copy(fdb_entry->data.addr, mac);
	fdb_entry->data.vid = vid;
	fdb_entry->br_port = br_port;
	fdb_entry->flow = flow;
	fdb_entry->dev = netdev;
	fdb_entry->last_use = jiffies;
	event = SWITCHDEV_FDB_ADD_TO_BRIDGE;

	/* User-added entries only get the "offloaded" notification and are
	 * never aged out / re-announced to the bridge.
	 */
	if (added_by_user) {
		fdb_entry->flags |= ICE_ESWITCH_BR_FDB_ADDED_BY_USER;
		event = SWITCHDEV_FDB_OFFLOADED;
	}

	err = rhashtable_insert_fast(&bridge->fdb_ht, &fdb_entry->ht_node,
				     ice_fdb_ht_params);
	if (err)
		goto err_fdb_insert;

	list_add(&fdb_entry->list, &bridge->fdb_list);
	trace_ice_eswitch_br_fdb_entry_create(fdb_entry);

	ice_eswitch_br_fdb_offload_notify(netdev, mac, vid, event);

	return;

err_fdb_insert:
	ice_eswitch_br_flow_delete(pf, flow);
err_add_flow:
	kfree(fdb_entry);
err_exit:
	dev_err(dev, "Failed to create fdb entry, err: %d\n", err);
}
440 
/* Free an FDB work item together with the MAC copy it owns (allocated in
 * ice_eswitch_br_fdb_work_alloc()).
 */
static void
ice_eswitch_br_fdb_work_dealloc(struct ice_esw_br_fdb_work *fdb_work)
{
	kfree(fdb_work->fdb_info.addr);
	kfree(fdb_work);
}
447 
/* Workqueue handler for deferred switchdev FDB add/del events. Runs under
 * rtnl; drops the netdev reference taken at scheduling time and frees the
 * work item on all paths.
 */
static void
ice_eswitch_br_fdb_event_work(struct work_struct *work)
{
	struct ice_esw_br_fdb_work *fdb_work = ice_work_to_fdb_work(work);
	bool added_by_user = fdb_work->fdb_info.added_by_user;
	const unsigned char *mac = fdb_work->fdb_info.addr;
	u16 vid = fdb_work->fdb_info.vid;
	struct ice_esw_br_port *br_port;

	rtnl_lock();

	/* The port may have been unlinked between scheduling and execution */
	br_port = ice_eswitch_br_netdev_to_port(fdb_work->dev);
	if (!br_port)
		goto err_exit;

	switch (fdb_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		ice_eswitch_br_fdb_entry_create(fdb_work->dev, br_port,
						added_by_user, mac, vid);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		ice_eswitch_br_fdb_entry_find_and_delete(br_port->bridge,
							 mac, vid);
		break;
	default:
		goto err_exit;
	}

err_exit:
	rtnl_unlock();
	dev_put(fdb_work->dev);
	ice_eswitch_br_fdb_work_dealloc(fdb_work);
}
481 
482 static struct ice_esw_br_fdb_work *
483 ice_eswitch_br_fdb_work_alloc(struct switchdev_notifier_fdb_info *fdb_info,
484 			      struct net_device *dev,
485 			      unsigned long event)
486 {
487 	struct ice_esw_br_fdb_work *work;
488 	unsigned char *mac;
489 
490 	work = kzalloc(sizeof(*work), GFP_ATOMIC);
491 	if (!work)
492 		return ERR_PTR(-ENOMEM);
493 
494 	INIT_WORK(&work->work, ice_eswitch_br_fdb_event_work);
495 	memcpy(&work->fdb_info, fdb_info, sizeof(work->fdb_info));
496 
497 	mac = kzalloc(ETH_ALEN, GFP_ATOMIC);
498 	if (!mac) {
499 		kfree(work);
500 		return ERR_PTR(-ENOMEM);
501 	}
502 
503 	ether_addr_copy(mac, fdb_info->addr);
504 	work->fdb_info.addr = mac;
505 	work->event = event;
506 	work->dev = dev;
507 
508 	return work;
509 }
510 
/* Atomic switchdev notifier. Filters out devices that are not valid
 * members of this eswitch bridge, then defers FDB add/del handling to the
 * workqueue (a reference on @dev is held until the work runs).
 */
static int
ice_eswitch_br_switchdev_event(struct notifier_block *nb,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct switchdev_notifier_fdb_info *fdb_info;
	struct switchdev_notifier_info *info = ptr;
	struct ice_esw_br_offloads *br_offloads;
	struct ice_esw_br_fdb_work *work;
	struct netlink_ext_ack *extack;
	struct net_device *upper;

	br_offloads = ice_nb_to_br_offloads(nb, switchdev_nb);
	extack = switchdev_notifier_info_to_extack(ptr);

	/* Only react when @dev is enslaved to a bridge master */
	upper = netdev_master_upper_dev_get_rcu(dev);
	if (!upper)
		return NOTIFY_DONE;

	if (!netif_is_bridge_master(upper))
		return NOTIFY_DONE;

	if (!ice_eswitch_br_is_dev_valid(dev))
		return NOTIFY_DONE;

	if (!ice_eswitch_br_netdev_to_port(dev))
		return NOTIFY_DONE;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = container_of(info, typeof(*fdb_info), info);

		work = ice_eswitch_br_fdb_work_alloc(fdb_info, dev, event);
		if (IS_ERR(work)) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to init switchdev fdb work");
			return notifier_from_errno(PTR_ERR(work));
		}
		/* released in ice_eswitch_br_fdb_event_work() */
		dev_hold(dev);

		queue_work(br_offloads->wq, &work->work);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}
558 
/* Remove every FDB entry of @bridge, notifying the bridge for entries the
 * driver learned itself.
 */
static void ice_eswitch_br_fdb_flush(struct ice_esw_br *bridge)
{
	struct ice_esw_br_fdb_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list)
		ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, entry);
}
566 
/* Toggle the bridge VLAN-filtering flag. Existing FDB entries are flushed
 * first, since they were offloaded under the previous filtering mode.
 */
static void
ice_eswitch_br_vlan_filtering_set(struct ice_esw_br *bridge, bool enable)
{
	if (enable == !!(bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING))
		return;

	ice_eswitch_br_fdb_flush(bridge);
	if (enable)
		bridge->flags |= ICE_ESWITCH_BR_VLAN_FILTERING;
	else
		bridge->flags &= ~ICE_ESWITCH_BR_VLAN_FILTERING;
}
579 
/* Undo ice_eswitch_br_set_pvid(): remove the port VLAN from the VSI,
 * disable port-VLAN mode and clear the cached pvid.
 */
static void
ice_eswitch_br_clear_pvid(struct ice_esw_br_port *port)
{
	struct ice_vlan port_vlan = ICE_VLAN(ETH_P_8021Q, port->pvid, 0);
	struct ice_vsi_vlan_ops *vlan_ops;

	vlan_ops = ice_get_compat_vsi_vlan_ops(port->vsi);

	vlan_ops->del_vlan(port->vsi, &port_vlan);
	vlan_ops->clear_port_vlan(port->vsi);

	ice_vf_vsi_disable_port_vlan(port->vsi);

	port->pvid = 0;
}
595 
/* Remove a VLAN from @port: delete all FDB entries offloaded for that VID,
 * drop the VLAN from the port's xarray, clear the pvid if this VLAN was it,
 * and free the metadata.
 */
static void
ice_eswitch_br_vlan_cleanup(struct ice_esw_br_port *port,
			    struct ice_esw_br_vlan *vlan)
{
	struct ice_esw_br_fdb_entry *fdb_entry, *tmp;
	struct ice_esw_br *bridge = port->bridge;

	trace_ice_eswitch_br_vlan_cleanup(vlan);

	list_for_each_entry_safe(fdb_entry, tmp, &bridge->fdb_list, list) {
		if (vlan->vid == fdb_entry->data.vid)
			ice_eswitch_br_fdb_entry_delete(bridge, fdb_entry);
	}

	xa_erase(&port->vlans, vlan->vid);
	if (port->pvid == vlan->vid)
		ice_eswitch_br_clear_pvid(port);
	kfree(vlan);
}
615 
/* Remove every VLAN configured on @port */
static void ice_eswitch_br_port_vlans_flush(struct ice_esw_br_port *port)
{
	struct ice_esw_br_vlan *vlan;
	unsigned long index;

	xa_for_each(&port->vlans, index, vlan)
		ice_eswitch_br_vlan_cleanup(port, vlan);
}
624 
/* Program @vlan->vid as the port VLAN (pvid/untagged) of @port.
 *
 * VID 1 is skipped without error, as is a pvid that is already set to the
 * same value. Only one pvid may exist at a time, and the uplink port
 * cannot take one at all. All other VLANs on the port are flushed once
 * the port VLAN is programmed.
 *
 * NOTE(review): if add_vlan() fails after set_port_vlan() succeeded, the
 * port-VLAN HW state is not rolled back here — confirm callers tolerate
 * this partial state.
 */
static int
ice_eswitch_br_set_pvid(struct ice_esw_br_port *port,
			struct ice_esw_br_vlan *vlan)
{
	struct ice_vlan port_vlan = ICE_VLAN(ETH_P_8021Q, vlan->vid, 0);
	struct device *dev = ice_pf_to_dev(port->vsi->back);
	struct ice_vsi_vlan_ops *vlan_ops;
	int err;

	if (port->pvid == vlan->vid || vlan->vid == 1)
		return 0;

	/* Setting port vlan on uplink isn't supported by hw */
	if (port->type == ICE_ESWITCH_BR_UPLINK_PORT)
		return -EOPNOTSUPP;

	if (port->pvid) {
		dev_info(dev,
			 "Port VLAN (vsi=%u, vid=%u) already exists on the port, remove it before adding new one\n",
			 port->vsi_idx, port->pvid);
		return -EEXIST;
	}

	ice_vf_vsi_enable_port_vlan(port->vsi);

	vlan_ops = ice_get_compat_vsi_vlan_ops(port->vsi);
	err = vlan_ops->set_port_vlan(port->vsi, &port_vlan);
	if (err)
		return err;

	err = vlan_ops->add_vlan(port->vsi, &port_vlan);
	if (err)
		return err;

	ice_eswitch_br_port_vlans_flush(port);
	port->pvid = vlan->vid;

	return 0;
}
664 
/* Allocate VLAN metadata for @vid on @port and insert it into the port's
 * xarray. PVID+UNTAGGED together configure a port VLAN; either flag alone
 * is rejected with -EOPNOTSUPP. Returns the new entry or ERR_PTR().
 */
static struct ice_esw_br_vlan *
ice_eswitch_br_vlan_create(u16 vid, u16 flags, struct ice_esw_br_port *port)
{
	struct device *dev = ice_pf_to_dev(port->vsi->back);
	struct ice_esw_br_vlan *vlan;
	int err;

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return ERR_PTR(-ENOMEM);

	vlan->vid = vid;
	vlan->flags = flags;
	if ((flags & BRIDGE_VLAN_INFO_PVID) &&
	    (flags & BRIDGE_VLAN_INFO_UNTAGGED)) {
		err = ice_eswitch_br_set_pvid(port, vlan);
		if (err)
			goto err_set_pvid;
	} else if ((flags & BRIDGE_VLAN_INFO_PVID) ||
		   (flags & BRIDGE_VLAN_INFO_UNTAGGED)) {
		dev_info(dev, "VLAN push and pop are supported only simultaneously\n");
		err = -EOPNOTSUPP;
		goto err_set_pvid;
	}

	err = xa_insert(&port->vlans, vlan->vid, vlan, GFP_KERNEL);
	if (err)
		goto err_insert;

	trace_ice_eswitch_br_vlan_create(vlan);

	return vlan;

err_insert:
	/* roll back a pvid this call may have just programmed */
	if (port->pvid)
		ice_eswitch_br_clear_pvid(port);
err_set_pvid:
	kfree(vlan);
	return ERR_PTR(err);
}
705 
/* switchdev PORT_VLAN add: create (or re-create with new flags) the VLAN
 * @vid on the port identified by @vsi_idx. Trunk VLANs cannot be added
 * while a port VLAN (pvid) is configured.
 */
static int
ice_eswitch_br_port_vlan_add(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid,
			     u16 flags, struct netlink_ext_ack *extack)
{
	struct ice_esw_br_port *port;
	struct ice_esw_br_vlan *vlan;

	port = xa_load(&bridge->ports, vsi_idx);
	if (!port)
		return -EINVAL;

	if (port->pvid) {
		dev_info(ice_pf_to_dev(port->vsi->back),
			 "Port VLAN (vsi=%u, vid=%d) exists on the port, remove it to add trunk VLANs\n",
			 port->vsi_idx, port->pvid);
		return -EEXIST;
	}

	vlan = xa_load(&port->vlans, vid);
	if (vlan) {
		/* identical re-add is a no-op; flag change is delete+create */
		if (vlan->flags == flags)
			return 0;

		ice_eswitch_br_vlan_cleanup(port, vlan);
	}

	vlan = ice_eswitch_br_vlan_create(vid, flags, port);
	if (IS_ERR(vlan)) {
		NL_SET_ERR_MSG_FMT_MOD(extack, "Failed to create VLAN entry, vid: %u, vsi: %u",
				       vid, vsi_idx);
		return PTR_ERR(vlan);
	}

	return 0;
}
741 
742 static void
743 ice_eswitch_br_port_vlan_del(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid)
744 {
745 	struct ice_esw_br_port *port;
746 	struct ice_esw_br_vlan *vlan;
747 
748 	port = xa_load(&bridge->ports, vsi_idx);
749 	if (!port)
750 		return;
751 
752 	vlan = xa_load(&port->vlans, vid);
753 	if (!vlan)
754 		return;
755 
756 	ice_eswitch_br_vlan_cleanup(port, vlan);
757 }
758 
759 static int
760 ice_eswitch_br_port_obj_add(struct net_device *netdev, const void *ctx,
761 			    const struct switchdev_obj *obj,
762 			    struct netlink_ext_ack *extack)
763 {
764 	struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev);
765 	struct switchdev_obj_port_vlan *vlan;
766 	int err;
767 
768 	if (!br_port)
769 		return -EINVAL;
770 
771 	switch (obj->id) {
772 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
773 		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
774 		err = ice_eswitch_br_port_vlan_add(br_port->bridge,
775 						   br_port->vsi_idx, vlan->vid,
776 						   vlan->flags, extack);
777 		return err;
778 	default:
779 		return -EOPNOTSUPP;
780 	}
781 }
782 
/* switchdev object-delete handler: only PORT_VLAN objects are supported */
static int
ice_eswitch_br_port_obj_del(struct net_device *netdev, const void *ctx,
			    const struct switchdev_obj *obj)
{
	struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev);
	struct switchdev_obj_port_vlan *vlan;

	if (!br_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
		ice_eswitch_br_port_vlan_del(br_port->bridge, br_port->vsi_idx,
					     vlan->vid);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
803 
/* switchdev attribute handler: supports bridge VLAN-filtering toggling and
 * ageing-time updates; everything else is -EOPNOTSUPP.
 */
static int
ice_eswitch_br_port_obj_attr_set(struct net_device *netdev, const void *ctx,
				 const struct switchdev_attr *attr,
				 struct netlink_ext_ack *extack)
{
	struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev);

	if (!br_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		ice_eswitch_br_vlan_filtering_set(br_port->bridge,
						  attr->u.vlan_filtering);
		return 0;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		br_port->bridge->ageing_time =
			clock_t_to_jiffies(attr->u.ageing_time);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
827 
/* Blocking switchdev notifier: dispatch object add/del and attribute-set
 * events to the per-port handlers, restricted to valid bridge members.
 */
static int
ice_eswitch_br_event_blocking(struct notifier_block *nb, unsigned long event,
			      void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = switchdev_handle_port_obj_add(dev, ptr,
						    ice_eswitch_br_is_dev_valid,
						    ice_eswitch_br_port_obj_add);
		break;
	case SWITCHDEV_PORT_OBJ_DEL:
		err = switchdev_handle_port_obj_del(dev, ptr,
						    ice_eswitch_br_is_dev_valid,
						    ice_eswitch_br_port_obj_del);
		break;
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     ice_eswitch_br_is_dev_valid,
						     ice_eswitch_br_port_obj_attr_set);
		break;
	default:
		err = 0;
	}

	return notifier_from_errno(err);
}
857 
/* Detach @br_port from @bridge and free it: drop the port's FDB entries,
 * clear the back-pointer on the owning PF or representor, remove it from
 * the bridge's port xarray and flush its VLANs.
 */
static void
ice_eswitch_br_port_deinit(struct ice_esw_br *bridge,
			   struct ice_esw_br_port *br_port)
{
	struct ice_esw_br_fdb_entry *fdb_entry, *tmp;
	struct ice_vsi *vsi = br_port->vsi;

	list_for_each_entry_safe(fdb_entry, tmp, &bridge->fdb_list, list) {
		if (br_port == fdb_entry->br_port)
			ice_eswitch_br_fdb_entry_delete(bridge, fdb_entry);
	}

	if (br_port->type == ICE_ESWITCH_BR_UPLINK_PORT && vsi->back)
		vsi->back->br_port = NULL;
	else if (vsi->vf && vsi->vf->repr)
		vsi->vf->repr->br_port = NULL;

	xa_erase(&bridge->ports, br_port->vsi_idx);
	ice_eswitch_br_port_vlans_flush(br_port);
	kfree(br_port);
}
879 
880 static struct ice_esw_br_port *
881 ice_eswitch_br_port_init(struct ice_esw_br *bridge)
882 {
883 	struct ice_esw_br_port *br_port;
884 
885 	br_port = kzalloc(sizeof(*br_port), GFP_KERNEL);
886 	if (!br_port)
887 		return ERR_PTR(-ENOMEM);
888 
889 	xa_init(&br_port->vlans);
890 
891 	br_port->bridge = bridge;
892 
893 	return br_port;
894 }
895 
/* Create and register a bridge port for VF representor @repr.
 * On xa_insert() failure port_deinit() also clears repr->br_port again.
 */
static int
ice_eswitch_br_vf_repr_port_init(struct ice_esw_br *bridge,
				 struct ice_repr *repr)
{
	struct ice_esw_br_port *br_port;
	int err;

	br_port = ice_eswitch_br_port_init(bridge);
	if (IS_ERR(br_port))
		return PTR_ERR(br_port);

	br_port->vsi = repr->src_vsi;
	br_port->vsi_idx = br_port->vsi->idx;
	br_port->type = ICE_ESWITCH_BR_VF_REPR_PORT;
	repr->br_port = br_port;

	err = xa_insert(&bridge->ports, br_port->vsi_idx, br_port, GFP_KERNEL);
	if (err) {
		ice_eswitch_br_port_deinit(bridge, br_port);
		return err;
	}

	return 0;
}
920 
/* Create and register the bridge port for the PF's uplink VSI.
 * On xa_insert() failure port_deinit() also clears pf->br_port again.
 */
static int
ice_eswitch_br_uplink_port_init(struct ice_esw_br *bridge, struct ice_pf *pf)
{
	struct ice_vsi *vsi = pf->switchdev.uplink_vsi;
	struct ice_esw_br_port *br_port;
	int err;

	br_port = ice_eswitch_br_port_init(bridge);
	if (IS_ERR(br_port))
		return PTR_ERR(br_port);

	br_port->vsi = vsi;
	br_port->vsi_idx = br_port->vsi->idx;
	br_port->type = ICE_ESWITCH_BR_UPLINK_PORT;
	pf->br_port = br_port;

	err = xa_insert(&bridge->ports, br_port->vsi_idx, br_port, GFP_KERNEL);
	if (err) {
		ice_eswitch_br_port_deinit(bridge, br_port);
		return err;
	}

	return 0;
}
945 
/* Deinitialize every port still attached to @bridge */
static void
ice_eswitch_br_ports_flush(struct ice_esw_br *bridge)
{
	struct ice_esw_br_port *port;
	unsigned long i;

	xa_for_each(&bridge->ports, i, port)
		ice_eswitch_br_port_deinit(bridge, port);
}
955 
/* Destroy @bridge: flush remaining ports, release the port xarray and the
 * FDB hash table, detach it from @br_offloads and free it. NULL-safe.
 */
static void
ice_eswitch_br_deinit(struct ice_esw_br_offloads *br_offloads,
		      struct ice_esw_br *bridge)
{
	if (!bridge)
		return;

	/* Cleanup all the ports that were added asynchronously
	 * through NETDEV_CHANGEUPPER event.
	 */
	ice_eswitch_br_ports_flush(bridge);
	WARN_ON(!xa_empty(&bridge->ports));
	xa_destroy(&bridge->ports);
	rhashtable_destroy(&bridge->fdb_ht);

	br_offloads->bridge = NULL;
	kfree(bridge);
}
974 
/* Allocate and initialize the bridge object tracking the Linux bridge
 * with @ifindex, and attach it to @br_offloads. Returns the bridge or
 * ERR_PTR() on allocation/hashtable failure.
 */
static struct ice_esw_br *
ice_eswitch_br_init(struct ice_esw_br_offloads *br_offloads, int ifindex)
{
	struct ice_esw_br *bridge;
	int err;

	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
	if (!bridge)
		return ERR_PTR(-ENOMEM);

	err = rhashtable_init(&bridge->fdb_ht, &ice_fdb_ht_params);
	if (err) {
		kfree(bridge);
		return ERR_PTR(err);
	}

	INIT_LIST_HEAD(&bridge->fdb_list);
	bridge->br_offloads = br_offloads;
	bridge->ifindex = ifindex;
	bridge->ageing_time = clock_t_to_jiffies(BR_DEFAULT_AGEING_TIME);
	xa_init(&bridge->ports);
	br_offloads->bridge = bridge;

	return bridge;
}
1000 
/* Return the eswitch's bridge object for @ifindex, creating it on first
 * use. Only a single bridge per eswitch is supported; a second ifindex is
 * rejected with -EOPNOTSUPP.
 */
static struct ice_esw_br *
ice_eswitch_br_get(struct ice_esw_br_offloads *br_offloads, int ifindex,
		   struct netlink_ext_ack *extack)
{
	struct ice_esw_br *bridge = br_offloads->bridge;

	if (bridge) {
		if (bridge->ifindex != ifindex) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only one bridge is supported per eswitch");
			return ERR_PTR(-EOPNOTSUPP);
		}
		return bridge;
	}

	/* Create the bridge if it doesn't exist yet */
	bridge = ice_eswitch_br_init(br_offloads, ifindex);
	if (IS_ERR(bridge))
		NL_SET_ERR_MSG_MOD(extack, "Failed to init the bridge");

	return bridge;
}
1023 
1024 static void
1025 ice_eswitch_br_verify_deinit(struct ice_esw_br_offloads *br_offloads,
1026 			     struct ice_esw_br *bridge)
1027 {
1028 	/* Remove the bridge if it exists and there are no ports left */
1029 	if (!bridge || !xa_empty(&bridge->ports))
1030 		return;
1031 
1032 	ice_eswitch_br_deinit(br_offloads, bridge);
1033 }
1034 
/* Handle NETDEV_CHANGEUPPER unlink: detach @dev's bridge port, verifying
 * it is actually attached and belongs to the bridge with @ifindex, then
 * destroy the bridge object if this was its last port.
 */
static int
ice_eswitch_br_port_unlink(struct ice_esw_br_offloads *br_offloads,
			   struct net_device *dev, int ifindex,
			   struct netlink_ext_ack *extack)
{
	struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(dev);
	struct ice_esw_br *bridge;

	if (!br_port) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port representor is not attached to any bridge");
		return -EINVAL;
	}

	if (br_port->bridge->ifindex != ifindex) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port representor is attached to another bridge");
		return -EINVAL;
	}

	/* keep the bridge pointer: br_port is freed by port_deinit() */
	bridge = br_port->bridge;

	trace_ice_eswitch_br_port_unlink(br_port);
	ice_eswitch_br_port_deinit(br_port->bridge, br_port);
	ice_eswitch_br_verify_deinit(br_offloads, bridge);

	return 0;
}
1063 
/* Handle NETDEV_CHANGEUPPER link: attach @dev (uplink PF netdev or VF
 * representor) as a port of the bridge with @ifindex, creating the bridge
 * object on first use and destroying it again if port init fails on a
 * freshly created, otherwise empty bridge.
 */
static int
ice_eswitch_br_port_link(struct ice_esw_br_offloads *br_offloads,
			 struct net_device *dev, int ifindex,
			 struct netlink_ext_ack *extack)
{
	struct ice_esw_br *bridge;
	int err;

	if (ice_eswitch_br_netdev_to_port(dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port is already attached to the bridge");
		return -EINVAL;
	}

	bridge = ice_eswitch_br_get(br_offloads, ifindex, extack);
	if (IS_ERR(bridge))
		return PTR_ERR(bridge);

	if (ice_is_port_repr_netdev(dev)) {
		struct ice_repr *repr = ice_netdev_to_repr(dev);

		err = ice_eswitch_br_vf_repr_port_init(bridge, repr);
		trace_ice_eswitch_br_port_link(repr->br_port);
	} else {
		struct ice_pf *pf = ice_netdev_to_pf(dev);

		err = ice_eswitch_br_uplink_port_init(bridge, pf);
		trace_ice_eswitch_br_port_link(pf->br_port);
	}
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to init bridge port");
		goto err_port_init;
	}

	return 0;

err_port_init:
	ice_eswitch_br_verify_deinit(br_offloads, bridge);
	return err;
}
1104 
/* NETDEV_CHANGEUPPER handler: route bridge enslave/release of a valid
 * eswitch device to the link/unlink paths. Other devices and non-bridge
 * uppers are ignored.
 */
static int
ice_eswitch_br_port_changeupper(struct notifier_block *nb, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct ice_esw_br_offloads *br_offloads;
	struct netlink_ext_ack *extack;
	struct net_device *upper;

	br_offloads = ice_nb_to_br_offloads(nb, netdev_nb);

	if (!ice_eswitch_br_is_dev_valid(dev))
		return 0;

	upper = info->upper_dev;
	if (!netif_is_bridge_master(upper))
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	if (info->linking)
		return ice_eswitch_br_port_link(br_offloads, dev,
						upper->ifindex, extack);
	else
		return ice_eswitch_br_port_unlink(br_offloads, dev,
						  upper->ifindex, extack);
}
1132 
1133 static int
1134 ice_eswitch_br_port_event(struct notifier_block *nb,
1135 			  unsigned long event, void *ptr)
1136 {
1137 	int err = 0;
1138 
1139 	switch (event) {
1140 	case NETDEV_CHANGEUPPER:
1141 		err = ice_eswitch_br_port_changeupper(nb, ptr);
1142 		break;
1143 	}
1144 
1145 	return notifier_from_errno(err);
1146 }
1147 
/* Free the bridge offloads context attached to @pf.
 * Caller must hold rtnl (enforced by ASSERT_RTNL). No-op when no
 * context is allocated. Any still-offloaded bridge is deinitialized
 * before the context itself is freed.
 */
static void
ice_eswitch_br_offloads_dealloc(struct ice_pf *pf)
{
	struct ice_esw_br_offloads *br_offloads = pf->switchdev.br_offloads;

	ASSERT_RTNL();

	if (!br_offloads)
		return;

	/* Tear down the bridge (if any) before freeing its owner. */
	ice_eswitch_br_deinit(br_offloads, br_offloads->bridge);

	/* Clear the back-pointer first so nobody sees a freed context. */
	pf->switchdev.br_offloads = NULL;
	kfree(br_offloads);
}
1163 
1164 static struct ice_esw_br_offloads *
1165 ice_eswitch_br_offloads_alloc(struct ice_pf *pf)
1166 {
1167 	struct ice_esw_br_offloads *br_offloads;
1168 
1169 	ASSERT_RTNL();
1170 
1171 	if (pf->switchdev.br_offloads)
1172 		return ERR_PTR(-EEXIST);
1173 
1174 	br_offloads = kzalloc(sizeof(*br_offloads), GFP_KERNEL);
1175 	if (!br_offloads)
1176 		return ERR_PTR(-ENOMEM);
1177 
1178 	pf->switchdev.br_offloads = br_offloads;
1179 	br_offloads->pf = pf;
1180 
1181 	return br_offloads;
1182 }
1183 
/* Tear down eswitch bridge offload support for @pf.
 * Safe to call when offloads were never initialized. Teardown runs in
 * reverse order of ice_eswitch_br_offloads_init(): stop the aging work,
 * unregister the notifiers, destroy the workqueue, then free the
 * context under rtnl.
 */
void
ice_eswitch_br_offloads_deinit(struct ice_pf *pf)
{
	struct ice_esw_br_offloads *br_offloads;

	br_offloads = pf->switchdev.br_offloads;
	if (!br_offloads)
		return;

	/* Stop the periodic FDB aging scan before its workqueue goes away. */
	cancel_delayed_work_sync(&br_offloads->update_work);
	unregister_netdevice_notifier(&br_offloads->netdev_nb);
	unregister_switchdev_blocking_notifier(&br_offloads->switchdev_blk);
	unregister_switchdev_notifier(&br_offloads->switchdev_nb);
	destroy_workqueue(br_offloads->wq);
	/* Although the notifier blocks were unregistered just above,
	 * so we don't get any new events, some events might be
	 * already in progress. Hold the rtnl lock and wait for
	 * them to finish.
	 */
	rtnl_lock();
	ice_eswitch_br_offloads_dealloc(pf);
	rtnl_unlock();
}
1207 
/* Age out dynamically learned FDB entries on the offloaded bridge.
 * An entry is removed once its last_use timestamp is older than the
 * bridge ageing time; user-added entries are never aged out.
 */
static void ice_eswitch_br_update(struct ice_esw_br_offloads *br_offloads)
{
	struct ice_esw_br *bridge = br_offloads->bridge;
	struct ice_esw_br_fdb_entry *entry, *tmp;

	/* No bridge currently offloaded — nothing to age. */
	if (!bridge)
		return;

	/* rtnl serializes against notifier-driven FDB add/delete. */
	rtnl_lock();
	list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) {
		/* Static (user-added) entries are exempt from ageing. */
		if (entry->flags & ICE_ESWITCH_BR_FDB_ADDED_BY_USER)
			continue;

		/* Entry was used within the ageing window — keep it. */
		if (time_is_after_eq_jiffies(entry->last_use +
					     bridge->ageing_time))
			continue;

		ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, entry);
	}
	rtnl_unlock();
}
1229 
1230 static void ice_eswitch_br_update_work(struct work_struct *work)
1231 {
1232 	struct ice_esw_br_offloads *br_offloads;
1233 
1234 	br_offloads = ice_work_to_br_offloads(work);
1235 
1236 	ice_eswitch_br_update(br_offloads);
1237 
1238 	queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
1239 			   ICE_ESW_BRIDGE_UPDATE_INTERVAL);
1240 }
1241 
/* Set up eswitch bridge offload support for @pf: allocate the offloads
 * context, create an ordered workqueue, register the switchdev,
 * blocking-switchdev and netdevice notifiers, and start the periodic
 * FDB aging work. Returns 0 on success or a negative errno; on failure
 * everything acquired so far is unwound via the goto chain (reverse
 * order of acquisition).
 */
int
ice_eswitch_br_offloads_init(struct ice_pf *pf)
{
	struct ice_esw_br_offloads *br_offloads;
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	/* Allocation attaches the context to pf under rtnl. */
	rtnl_lock();
	br_offloads = ice_eswitch_br_offloads_alloc(pf);
	rtnl_unlock();
	if (IS_ERR(br_offloads)) {
		dev_err(dev, "Failed to init eswitch bridge\n");
		return PTR_ERR(br_offloads);
	}

	/* Ordered workqueue: aging work items never run concurrently. */
	br_offloads->wq = alloc_ordered_workqueue("ice_bridge_wq", 0);
	if (!br_offloads->wq) {
		err = -ENOMEM;
		dev_err(dev, "Failed to allocate bridge workqueue\n");
		goto err_alloc_wq;
	}

	br_offloads->switchdev_nb.notifier_call =
		ice_eswitch_br_switchdev_event;
	err = register_switchdev_notifier(&br_offloads->switchdev_nb);
	if (err) {
		dev_err(dev,
			"Failed to register switchdev notifier\n");
		goto err_reg_switchdev_nb;
	}

	br_offloads->switchdev_blk.notifier_call =
		ice_eswitch_br_event_blocking;
	err = register_switchdev_blocking_notifier(&br_offloads->switchdev_blk);
	if (err) {
		dev_err(dev,
			"Failed to register bridge blocking switchdev notifier\n");
		goto err_reg_switchdev_blk;
	}

	br_offloads->netdev_nb.notifier_call = ice_eswitch_br_port_event;
	err = register_netdevice_notifier(&br_offloads->netdev_nb);
	if (err) {
		dev_err(dev,
			"Failed to register bridge port event notifier\n");
		goto err_reg_netdev_nb;
	}

	/* Kick off the periodic FDB aging scan. */
	INIT_DELAYED_WORK(&br_offloads->update_work,
			  ice_eswitch_br_update_work);
	queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
			   ICE_ESW_BRIDGE_UPDATE_INTERVAL);

	return 0;

err_reg_netdev_nb:
	unregister_switchdev_blocking_notifier(&br_offloads->switchdev_blk);
err_reg_switchdev_blk:
	unregister_switchdev_notifier(&br_offloads->switchdev_nb);
err_reg_switchdev_nb:
	destroy_workqueue(br_offloads->wq);
err_alloc_wq:
	rtnl_lock();
	ice_eswitch_br_offloads_dealloc(pf);
	rtnl_unlock();

	return err;
}
1310