// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023, Intel Corporation. */

#include "ice.h"
#include "ice_eswitch_br.h"
#include "ice_repr.h"
#include "ice_switch.h"
#include "ice_vlan.h"
#include "ice_vf_vsi_vlan_ops.h"
#include "ice_trace.h"

#define ICE_ESW_BRIDGE_UPDATE_INTERVAL msecs_to_jiffies(1000)

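/* FDB entries are hashed on the { MAC address, VID } pair carried in
 * struct ice_esw_br_fdb_data.
 */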
static const struct rhashtable_params ice_fdb_ht_params = {
	.key_offset = offsetof(struct ice_esw_br_fdb_entry, data),
	.key_len = sizeof(struct ice_esw_br_fdb_data),
	.head_offset = offsetof(struct ice_esw_br_fdb_entry, ht_node),
	.automatic_shrinking = true,
};

static bool ice_eswitch_br_is_dev_valid(const struct net_device *dev)
{
	/* Accept only PF netdev, PRs and LAG */
	return ice_is_port_repr_netdev(dev) || netif_is_ice(dev) ||
	       netif_is_lag_master(dev);
}

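/* Walk the lower devices of a LAG master and return the first ice PF
 * netdev found, i.e. the physical uplink backing the bond.
 */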
static struct net_device *
ice_eswitch_br_get_uplink_from_lag(struct net_device *lag_dev)
{
	struct net_device *lower;
	struct list_head *iter;

	netdev_for_each_lower_dev(lag_dev, lower, iter) {
		if (netif_is_ice(lower))
			return lower;
	}

	return NULL;
}

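/* Map a bridge member netdev (port representor, PF uplink or LAG over
 * the uplink) to its ice_esw_br_port, or NULL if it isn't attached.
 */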
static struct ice_esw_br_port *
ice_eswitch_br_netdev_to_port(struct net_device *dev)
{
	if (ice_is_port_repr_netdev(dev)) {
		struct ice_repr *repr = ice_netdev_to_repr(dev);

		return repr->br_port;
	} else if (netif_is_ice(dev) || netif_is_lag_master(dev)) {
		struct net_device *ice_dev;
		struct ice_pf *pf;

		if (netif_is_lag_master(dev))
			ice_dev = ice_eswitch_br_get_uplink_from_lag(dev);
		else
			ice_dev = dev;

		if (!ice_dev)
			return NULL;

		pf = ice_netdev_to_pf(ice_dev);

		return pf->br_port;
	}

	return NULL;
}

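/* Ingress (Rx) rules match traffic received from the wire on the given
 * PF and direct it to the VF's VSI.
 */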
static void
ice_eswitch_br_ingress_rule_setup(struct ice_adv_rule_info *rule_info,
				  u8 pf_id, u16 vf_vsi_idx)
{
	rule_info->sw_act.vsi_handle = vf_vsi_idx;
	rule_info->sw_act.flag |= ICE_FLTR_RX;
	rule_info->sw_act.src = pf_id;
	rule_info->priority = 5;
}

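/* Egress (Tx) rules steer traffic to the uplink VSI and set the
 * ICE_SINGLE_ACT_LAN_ENABLE action so the frame is sent out on the
 * wire.
 */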
static void
ice_eswitch_br_egress_rule_setup(struct ice_adv_rule_info *rule_info,
				 u16 pf_vsi_idx)
{
	rule_info->sw_act.vsi_handle = pf_vsi_idx;
	rule_info->sw_act.flag |= ICE_FLTR_TX;
	rule_info->flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
	rule_info->flags_info.act_valid = true;
	rule_info->priority = 5;
}

static int
ice_eswitch_br_rule_delete(struct ice_hw *hw, struct ice_rule_query_data *rule)
{
	int err;

	if (!rule)
		return -EINVAL;

	err = ice_rem_adv_rule_by_id(hw, rule);
	kfree(rule);

	return err;
}

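/* A rule always carries a MAC lookup; a second, VLAN lookup is added
 * when the VID is valid (i.e. VLAN filtering applies).
 */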
static u16
ice_eswitch_br_get_lkups_cnt(u16 vid)
{
	return ice_eswitch_br_is_vid_valid(vid) ? 2 : 1;
}

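/* Fill list[1] with an outer-VLAN (ICE_VLAN_OFOS) lookup keyed on the
 * VID.
 */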
static void
ice_eswitch_br_add_vlan_lkup(struct ice_adv_lkup_elem *list, u16 vid)
{
	if (ice_eswitch_br_is_vid_valid(vid)) {
		list[1].type = ICE_VLAN_OFOS;
		list[1].h_u.vlan_hdr.vlan = cpu_to_be16(vid & VLAN_VID_MASK);
		list[1].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
	}
}

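/* Create the forwarding half of an FDB flow: match the destination MAC
 * (plus VLAN, when filtering) and forward to the given VSI. Uplink
 * ports get an egress (Tx) rule, VF representor ports an ingress (Rx)
 * rule.
 */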
static struct ice_rule_query_data *
ice_eswitch_br_fwd_rule_create(struct ice_hw *hw, int vsi_idx, int port_type,
			       const unsigned char *mac, u16 vid)
{
	struct ice_adv_rule_info rule_info = { 0 };
	struct ice_rule_query_data *rule;
	struct ice_adv_lkup_elem *list;
	u16 lkups_cnt;
	int err;

	lkups_cnt = ice_eswitch_br_get_lkups_cnt(vid);

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return ERR_PTR(-ENOMEM);

	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
	if (!list) {
		err = -ENOMEM;
		goto err_list_alloc;
	}

	switch (port_type) {
	case ICE_ESWITCH_BR_UPLINK_PORT:
		ice_eswitch_br_egress_rule_setup(&rule_info, vsi_idx);
		break;
	case ICE_ESWITCH_BR_VF_REPR_PORT:
		ice_eswitch_br_ingress_rule_setup(&rule_info, hw->pf_id,
						  vsi_idx);
		break;
	default:
		err = -EINVAL;
		goto err_add_rule;
	}

	list[0].type = ICE_MAC_OFOS;
	ether_addr_copy(list[0].h_u.eth_hdr.dst_addr, mac);
	eth_broadcast_addr(list[0].m_u.eth_hdr.dst_addr);

	ice_eswitch_br_add_vlan_lkup(list, vid);

	rule_info.need_pass_l2 = true;

	rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;

	err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, rule);
	if (err)
		goto err_add_rule;

	kfree(list);

	return rule;

err_add_rule:
	kfree(list);
err_list_alloc:
	kfree(rule);

	return ERR_PTR(err);
}

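/* Create the guard half of an FDB flow: match the source MAC (plus
 * VLAN, when filtering) on the port's VSI with an ICE_NOP action and
 * allow_pass_l2 set, letting frames from a learned address pass the L2
 * stage unmodified.
 */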
static struct ice_rule_query_data *
ice_eswitch_br_guard_rule_create(struct ice_hw *hw, u16 vsi_idx,
				 const unsigned char *mac, u16 vid)
{
	struct ice_adv_rule_info rule_info = { 0 };
	struct ice_rule_query_data *rule;
	struct ice_adv_lkup_elem *list;
	int err = -ENOMEM;
	u16 lkups_cnt;

	lkups_cnt = ice_eswitch_br_get_lkups_cnt(vid);

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		goto err_exit;

	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
	if (!list)
		goto err_list_alloc;

	list[0].type = ICE_MAC_OFOS;
	ether_addr_copy(list[0].h_u.eth_hdr.src_addr, mac);
	eth_broadcast_addr(list[0].m_u.eth_hdr.src_addr);

	ice_eswitch_br_add_vlan_lkup(list, vid);

	rule_info.allow_pass_l2 = true;
	rule_info.sw_act.vsi_handle = vsi_idx;
	rule_info.sw_act.fltr_act = ICE_NOP;
	rule_info.priority = 5;

	err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, rule);
	if (err)
		goto err_add_rule;

	kfree(list);

	return rule;

err_add_rule:
	kfree(list);
err_list_alloc:
	kfree(rule);
err_exit:
	return ERR_PTR(err);
}

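/* An offloaded FDB entry is backed by a pair of HW rules: a forwarding
 * rule and a guard rule. Create both, unwinding the first if the second
 * fails.
 */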
static struct ice_esw_br_flow *
ice_eswitch_br_flow_create(struct device *dev, struct ice_hw *hw, int vsi_idx,
			   int port_type, const unsigned char *mac, u16 vid)
{
	struct ice_rule_query_data *fwd_rule, *guard_rule;
	struct ice_esw_br_flow *flow;
	int err;

	flow = kzalloc(sizeof(*flow), GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	fwd_rule = ice_eswitch_br_fwd_rule_create(hw, vsi_idx, port_type, mac,
						  vid);
	err = PTR_ERR_OR_ZERO(fwd_rule);
	if (err) {
		dev_err(dev, "Failed to create eswitch bridge %sgress forward rule, err: %d\n",
			port_type == ICE_ESWITCH_BR_UPLINK_PORT ? "e" : "in",
			err);
		goto err_fwd_rule;
	}

	guard_rule = ice_eswitch_br_guard_rule_create(hw, vsi_idx, mac, vid);
	err = PTR_ERR_OR_ZERO(guard_rule);
	if (err) {
		dev_err(dev, "Failed to create eswitch bridge %sgress guard rule, err: %d\n",
			port_type == ICE_ESWITCH_BR_UPLINK_PORT ? "e" : "in",
			err);
		goto err_guard_rule;
	}

	flow->fwd_rule = fwd_rule;
	flow->guard_rule = guard_rule;

	return flow;

err_guard_rule:
	ice_eswitch_br_rule_delete(hw, fwd_rule);
err_fwd_rule:
	kfree(flow);

	return ERR_PTR(err);
}

static struct ice_esw_br_fdb_entry *
ice_eswitch_br_fdb_find(struct ice_esw_br *bridge, const unsigned char *mac,
			u16 vid)
{
	struct ice_esw_br_fdb_data data = {
		.vid = vid,
	};

	ether_addr_copy(data.addr, mac);
	return rhashtable_lookup_fast(&bridge->fdb_ht, &data,
				      ice_fdb_ht_params);
}

static void
ice_eswitch_br_flow_delete(struct ice_pf *pf, struct ice_esw_br_flow *flow)
{
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	err = ice_eswitch_br_rule_delete(&pf->hw, flow->fwd_rule);
	if (err)
		dev_err(dev, "Failed to delete FDB forward rule, err: %d\n",
			err);

	err = ice_eswitch_br_rule_delete(&pf->hw, flow->guard_rule);
	if (err)
		dev_err(dev, "Failed to delete FDB guard rule, err: %d\n",
			err);

	kfree(flow);
}

static struct ice_esw_br_vlan *
ice_esw_br_port_vlan_lookup(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid)
{
	struct ice_pf *pf = bridge->br_offloads->pf;
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_esw_br_port *port;
	struct ice_esw_br_vlan *vlan;

	port = xa_load(&bridge->ports, vsi_idx);
	if (!port) {
		dev_info(dev, "Bridge port lookup failed (vsi=%u)\n", vsi_idx);
		return ERR_PTR(-EINVAL);
	}

	vlan = xa_load(&port->vlans, vid);
	if (!vlan) {
		dev_info(dev, "Bridge port vlan metadata lookup failed (vsi=%u)\n",
			 vsi_idx);
		return ERR_PTR(-EINVAL);
	}

	return vlan;
}

static void
ice_eswitch_br_fdb_entry_delete(struct ice_esw_br *bridge,
				struct ice_esw_br_fdb_entry *fdb_entry)
{
	struct ice_pf *pf = bridge->br_offloads->pf;

	rhashtable_remove_fast(&bridge->fdb_ht, &fdb_entry->ht_node,
			       ice_fdb_ht_params);
	list_del(&fdb_entry->list);

	ice_eswitch_br_flow_delete(pf, fdb_entry->flow);

	kfree(fdb_entry);
}

static void
ice_eswitch_br_fdb_offload_notify(struct net_device *dev,
				  const unsigned char *mac, u16 vid,
				  unsigned long val)
{
	struct switchdev_notifier_fdb_info fdb_info = {
		.addr = mac,
		.vid = vid,
		.offloaded = true,
	};

	call_switchdev_notifiers(val, dev, &fdb_info.info, NULL);
}

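/* Notify the bridge that a driver-learned entry is gone before tearing
 * it down; entries added by the user are deleted silently.
 */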
static void
ice_eswitch_br_fdb_entry_notify_and_cleanup(struct ice_esw_br *bridge,
					    struct ice_esw_br_fdb_entry *entry)
{
	if (!(entry->flags & ICE_ESWITCH_BR_FDB_ADDED_BY_USER))
		ice_eswitch_br_fdb_offload_notify(entry->dev, entry->data.addr,
						  entry->data.vid,
						  SWITCHDEV_FDB_DEL_TO_BRIDGE);
	ice_eswitch_br_fdb_entry_delete(bridge, entry);
}

static void
ice_eswitch_br_fdb_entry_find_and_delete(struct ice_esw_br *bridge,
					 const unsigned char *mac, u16 vid)
{
	struct ice_pf *pf = bridge->br_offloads->pf;
	struct ice_esw_br_fdb_entry *fdb_entry;
	struct device *dev = ice_pf_to_dev(pf);

	fdb_entry = ice_eswitch_br_fdb_find(bridge, mac, vid);
	if (!fdb_entry) {
		dev_err(dev, "FDB entry with mac: %pM and vid: %u not found\n",
			mac, vid);
		return;
	}

	trace_ice_eswitch_br_fdb_entry_find_and_delete(fdb_entry);
	ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, fdb_entry);
}

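/* Offload a single FDB entry: validate the VLAN state, replace any
 * stale entry for the same { MAC, VID }, program the HW flow, insert
 * the entry into the hashtable, then notify the bridge of the result.
 */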
static void
ice_eswitch_br_fdb_entry_create(struct net_device *netdev,
				struct ice_esw_br_port *br_port,
				bool added_by_user,
				const unsigned char *mac, u16 vid)
{
	struct ice_esw_br *bridge = br_port->bridge;
	struct ice_pf *pf = bridge->br_offloads->pf;
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_esw_br_fdb_entry *fdb_entry;
	struct ice_esw_br_flow *flow;
	struct ice_esw_br_vlan *vlan;
	struct ice_hw *hw = &pf->hw;
	unsigned long event;
	int err;

	/* untagged filtering is not yet supported */
	if (!(bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING) && vid)
		return;

	if ((bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING)) {
		vlan = ice_esw_br_port_vlan_lookup(bridge, br_port->vsi_idx,
						   vid);
		if (IS_ERR(vlan)) {
			dev_err(dev, "VLAN lookup failed, err: %ld\n",
				PTR_ERR(vlan));
			return;
		}
	}

	fdb_entry = ice_eswitch_br_fdb_find(bridge, mac, vid);
	if (fdb_entry)
		ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, fdb_entry);

	fdb_entry = kzalloc(sizeof(*fdb_entry), GFP_KERNEL);
	if (!fdb_entry) {
		err = -ENOMEM;
		goto err_exit;
	}

	flow = ice_eswitch_br_flow_create(dev, hw, br_port->vsi_idx,
					  br_port->type, mac, vid);
	if (IS_ERR(flow)) {
		err = PTR_ERR(flow);
		goto err_add_flow;
	}

	ether_addr_copy(fdb_entry->data.addr, mac);
	fdb_entry->data.vid = vid;
	fdb_entry->br_port = br_port;
	fdb_entry->flow = flow;
	fdb_entry->dev = netdev;
	fdb_entry->last_use = jiffies;
	event = SWITCHDEV_FDB_ADD_TO_BRIDGE;

	if (added_by_user) {
		fdb_entry->flags |= ICE_ESWITCH_BR_FDB_ADDED_BY_USER;
		event = SWITCHDEV_FDB_OFFLOADED;
	}

	err = rhashtable_insert_fast(&bridge->fdb_ht, &fdb_entry->ht_node,
				     ice_fdb_ht_params);
	if (err)
		goto err_fdb_insert;

	list_add(&fdb_entry->list, &bridge->fdb_list);
	trace_ice_eswitch_br_fdb_entry_create(fdb_entry);

	ice_eswitch_br_fdb_offload_notify(netdev, mac, vid, event);

	return;

err_fdb_insert:
	ice_eswitch_br_flow_delete(pf, flow);
err_add_flow:
	kfree(fdb_entry);
err_exit:
	dev_err(dev, "Failed to create fdb entry, err: %d\n", err);
}

static void
ice_eswitch_br_fdb_work_dealloc(struct ice_esw_br_fdb_work *fdb_work)
{
	kfree(fdb_work->fdb_info.addr);
	kfree(fdb_work);
}

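/* Deferred handler for switchdev FDB events: runs in process context
 * under rtnl, resolves the bridge port and applies the add/del.
 */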
static void
ice_eswitch_br_fdb_event_work(struct work_struct *work)
{
	struct ice_esw_br_fdb_work *fdb_work = ice_work_to_fdb_work(work);
	bool added_by_user = fdb_work->fdb_info.added_by_user;
	const unsigned char *mac = fdb_work->fdb_info.addr;
	u16 vid = fdb_work->fdb_info.vid;
	struct ice_esw_br_port *br_port;

	rtnl_lock();

	br_port = ice_eswitch_br_netdev_to_port(fdb_work->dev);
	if (!br_port)
		goto err_exit;

	switch (fdb_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		ice_eswitch_br_fdb_entry_create(fdb_work->dev, br_port,
						added_by_user, mac, vid);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		ice_eswitch_br_fdb_entry_find_and_delete(br_port->bridge,
							 mac, vid);
		break;
	default:
		goto err_exit;
	}

err_exit:
	rtnl_unlock();
	dev_put(fdb_work->dev);
	ice_eswitch_br_fdb_work_dealloc(fdb_work);
}

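/* Allocate and populate a work item for a deferred FDB event. Called
 * from the (atomic) switchdev notifier, hence GFP_ATOMIC; the MAC is
 * deep-copied because the notifier's fdb_info does not outlive the
 * callback.
 */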
static struct ice_esw_br_fdb_work *
ice_eswitch_br_fdb_work_alloc(struct switchdev_notifier_fdb_info *fdb_info,
			      struct net_device *dev,
			      unsigned long event)
{
	struct ice_esw_br_fdb_work *work;
	unsigned char *mac;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&work->work, ice_eswitch_br_fdb_event_work);
	memcpy(&work->fdb_info, fdb_info, sizeof(work->fdb_info));

	mac = kzalloc(ETH_ALEN, GFP_ATOMIC);
	if (!mac) {
		kfree(work);
		return ERR_PTR(-ENOMEM);
	}

	ether_addr_copy(mac, fdb_info->addr);
	work->fdb_info.addr = mac;
	work->event = event;
	work->dev = dev;

	return work;
}

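/* Atomic switchdev notifier: filter for FDB events on valid bridge
 * member ports and queue the actual processing on the ordered
 * workqueue. A reference on the netdev is held until the work runs.
 */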
static int
ice_eswitch_br_switchdev_event(struct notifier_block *nb,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct switchdev_notifier_fdb_info *fdb_info;
	struct switchdev_notifier_info *info = ptr;
	struct ice_esw_br_offloads *br_offloads;
	struct ice_esw_br_fdb_work *work;
	struct netlink_ext_ack *extack;
	struct net_device *upper;

	br_offloads = ice_nb_to_br_offloads(nb, switchdev_nb);
	extack = switchdev_notifier_info_to_extack(ptr);

	upper = netdev_master_upper_dev_get_rcu(dev);
	if (!upper)
		return NOTIFY_DONE;

	if (!netif_is_bridge_master(upper))
		return NOTIFY_DONE;

	if (!ice_eswitch_br_is_dev_valid(dev))
		return NOTIFY_DONE;

	if (!ice_eswitch_br_netdev_to_port(dev))
		return NOTIFY_DONE;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = container_of(info, typeof(*fdb_info), info);

		work = ice_eswitch_br_fdb_work_alloc(fdb_info, dev, event);
		if (IS_ERR(work)) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to init switchdev fdb work");
			return notifier_from_errno(PTR_ERR(work));
		}
		dev_hold(dev);

		queue_work(br_offloads->wq, &work->work);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

void ice_eswitch_br_fdb_flush(struct ice_esw_br *bridge)
{
	struct ice_esw_br_fdb_entry *entry, *tmp;

	if (!bridge)
		return;

	list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list)
		ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, entry);
}

static void
ice_eswitch_br_vlan_filtering_set(struct ice_esw_br *bridge, bool enable)
{
	if (enable == !!(bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING))
		return;

	ice_eswitch_br_fdb_flush(bridge);
	if (enable)
		bridge->flags |= ICE_ESWITCH_BR_VLAN_FILTERING;
	else
		bridge->flags &= ~ICE_ESWITCH_BR_VLAN_FILTERING;
}

static void
ice_eswitch_br_clear_pvid(struct ice_esw_br_port *port)
{
	struct ice_vlan port_vlan = ICE_VLAN(ETH_P_8021Q, port->pvid, 0);
	struct ice_vsi_vlan_ops *vlan_ops;

	vlan_ops = ice_get_compat_vsi_vlan_ops(port->vsi);

	vlan_ops->del_vlan(port->vsi, &port_vlan);
	vlan_ops->clear_port_vlan(port->vsi);

	ice_vf_vsi_disable_port_vlan(port->vsi);

	port->pvid = 0;
}

static void
ice_eswitch_br_vlan_cleanup(struct ice_esw_br_port *port,
			    struct ice_esw_br_vlan *vlan)
{
	struct ice_esw_br_fdb_entry *fdb_entry, *tmp;
	struct ice_esw_br *bridge = port->bridge;

	trace_ice_eswitch_br_vlan_cleanup(vlan);

	list_for_each_entry_safe(fdb_entry, tmp, &bridge->fdb_list, list) {
		if (vlan->vid == fdb_entry->data.vid)
			ice_eswitch_br_fdb_entry_delete(bridge, fdb_entry);
	}

	xa_erase(&port->vlans, vlan->vid);
	if (port->pvid == vlan->vid)
		ice_eswitch_br_clear_pvid(port);
	kfree(vlan);
}

static void ice_eswitch_br_port_vlans_flush(struct ice_esw_br_port *port)
{
	struct ice_esw_br_vlan *vlan;
	unsigned long index;

	xa_for_each(&port->vlans, index, vlan)
		ice_eswitch_br_vlan_cleanup(port, vlan);
}

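/* Program a port VLAN (PVID) on a VF port. VID 1 is the implicit
 * default and is left alone; the uplink cannot carry a port VLAN, and
 * only one PVID may exist at a time. Trunk VLANs configured previously
 * are flushed once the port VLAN is set.
 */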
static int
ice_eswitch_br_set_pvid(struct ice_esw_br_port *port,
			struct ice_esw_br_vlan *vlan)
{
	struct ice_vlan port_vlan = ICE_VLAN(ETH_P_8021Q, vlan->vid, 0);
	struct device *dev = ice_pf_to_dev(port->vsi->back);
	struct ice_vsi_vlan_ops *vlan_ops;
	int err;

	if (port->pvid == vlan->vid || vlan->vid == 1)
		return 0;

	/* Setting port vlan on uplink isn't supported by hw */
	if (port->type == ICE_ESWITCH_BR_UPLINK_PORT)
		return -EOPNOTSUPP;

	if (port->pvid) {
		dev_info(dev,
			 "Port VLAN (vsi=%u, vid=%u) already exists on the port, remove it before adding new one\n",
			 port->vsi_idx, port->pvid);
		return -EEXIST;
	}

	ice_vf_vsi_enable_port_vlan(port->vsi);

	vlan_ops = ice_get_compat_vsi_vlan_ops(port->vsi);
	err = vlan_ops->set_port_vlan(port->vsi, &port_vlan);
	if (err)
		return err;

	err = vlan_ops->add_vlan(port->vsi, &port_vlan);
	if (err)
		return err;

	ice_eswitch_br_port_vlans_flush(port);
	port->pvid = vlan->vid;

	return 0;
}

static struct ice_esw_br_vlan *
ice_eswitch_br_vlan_create(u16 vid, u16 flags, struct ice_esw_br_port *port)
{
	struct device *dev = ice_pf_to_dev(port->vsi->back);
	struct ice_esw_br_vlan *vlan;
	int err;

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return ERR_PTR(-ENOMEM);

	vlan->vid = vid;
	vlan->flags = flags;
	if ((flags & BRIDGE_VLAN_INFO_PVID) &&
	    (flags & BRIDGE_VLAN_INFO_UNTAGGED)) {
		err = ice_eswitch_br_set_pvid(port, vlan);
		if (err)
			goto err_set_pvid;
	} else if ((flags & BRIDGE_VLAN_INFO_PVID) ||
		   (flags & BRIDGE_VLAN_INFO_UNTAGGED)) {
		dev_info(dev, "VLAN push and pop are supported only simultaneously\n");
		err = -EOPNOTSUPP;
		goto err_set_pvid;
	}

	err = xa_insert(&port->vlans, vlan->vid, vlan, GFP_KERNEL);
	if (err)
		goto err_insert;

	trace_ice_eswitch_br_vlan_create(vlan);

	return vlan;

err_insert:
	if (port->pvid)
		ice_eswitch_br_clear_pvid(port);
err_set_pvid:
	kfree(vlan);
	return ERR_PTR(err);
}

static int
ice_eswitch_br_port_vlan_add(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid,
			     u16 flags, struct netlink_ext_ack *extack)
{
	struct ice_esw_br_port *port;
	struct ice_esw_br_vlan *vlan;

	port = xa_load(&bridge->ports, vsi_idx);
	if (!port)
		return -EINVAL;

	if (port->pvid) {
		dev_info(ice_pf_to_dev(port->vsi->back),
			 "Port VLAN (vsi=%u, vid=%d) exists on the port, remove it to add trunk VLANs\n",
			 port->vsi_idx, port->pvid);
		return -EEXIST;
	}

	vlan = xa_load(&port->vlans, vid);
	if (vlan) {
		if (vlan->flags == flags)
			return 0;

		ice_eswitch_br_vlan_cleanup(port, vlan);
	}

	vlan = ice_eswitch_br_vlan_create(vid, flags, port);
	if (IS_ERR(vlan)) {
		NL_SET_ERR_MSG_FMT_MOD(extack, "Failed to create VLAN entry, vid: %u, vsi: %u",
				       vid, vsi_idx);
		return PTR_ERR(vlan);
	}

	return 0;
}

static void
ice_eswitch_br_port_vlan_del(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid)
{
	struct ice_esw_br_port *port;
	struct ice_esw_br_vlan *vlan;

	port = xa_load(&bridge->ports, vsi_idx);
	if (!port)
		return;

	vlan = xa_load(&port->vlans, vid);
	if (!vlan)
		return;

	ice_eswitch_br_vlan_cleanup(port, vlan);
}

static int
ice_eswitch_br_port_obj_add(struct net_device *netdev, const void *ctx,
			    const struct switchdev_obj *obj,
			    struct netlink_ext_ack *extack)
{
	struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev);
	struct switchdev_obj_port_vlan *vlan;
	int err;

	if (!br_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
		err = ice_eswitch_br_port_vlan_add(br_port->bridge,
						   br_port->vsi_idx, vlan->vid,
						   vlan->flags, extack);
		return err;
	default:
		return -EOPNOTSUPP;
	}
}

static int
ice_eswitch_br_port_obj_del(struct net_device *netdev, const void *ctx,
			    const struct switchdev_obj *obj)
{
	struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev);
	struct switchdev_obj_port_vlan *vlan;

	if (!br_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
		ice_eswitch_br_port_vlan_del(br_port->bridge, br_port->vsi_idx,
					     vlan->vid);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int
ice_eswitch_br_port_obj_attr_set(struct net_device *netdev, const void *ctx,
				 const struct switchdev_attr *attr,
				 struct netlink_ext_ack *extack)
{
	struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev);

	if (!br_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		ice_eswitch_br_vlan_filtering_set(br_port->bridge,
						  attr->u.vlan_filtering);
		return 0;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		br_port->bridge->ageing_time =
			clock_t_to_jiffies(attr->u.ageing_time);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int
ice_eswitch_br_event_blocking(struct notifier_block *nb, unsigned long event,
			      void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = switchdev_handle_port_obj_add(dev, ptr,
						    ice_eswitch_br_is_dev_valid,
						    ice_eswitch_br_port_obj_add);
		break;
	case SWITCHDEV_PORT_OBJ_DEL:
		err = switchdev_handle_port_obj_del(dev, ptr,
						    ice_eswitch_br_is_dev_valid,
						    ice_eswitch_br_port_obj_del);
		break;
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     ice_eswitch_br_is_dev_valid,
						     ice_eswitch_br_port_obj_attr_set);
		break;
	default:
		err = 0;
	}

	return notifier_from_errno(err);
}

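/* Tear down a bridge port: drop its FDB entries, detach the back
 * pointer from the PF or VF representor, remove it from the bridge's
 * port xarray and flush its VLANs.
 */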
static void
ice_eswitch_br_port_deinit(struct ice_esw_br *bridge,
			   struct ice_esw_br_port *br_port)
{
	struct ice_esw_br_fdb_entry *fdb_entry, *tmp;
	struct ice_vsi *vsi = br_port->vsi;

	list_for_each_entry_safe(fdb_entry, tmp, &bridge->fdb_list, list) {
		if (br_port == fdb_entry->br_port)
			ice_eswitch_br_fdb_entry_delete(bridge, fdb_entry);
	}

	if (br_port->type == ICE_ESWITCH_BR_UPLINK_PORT && vsi->back)
		vsi->back->br_port = NULL;
	else if (vsi->vf && vsi->vf->repr)
		vsi->vf->repr->br_port = NULL;

	xa_erase(&bridge->ports, br_port->vsi_idx);
	ice_eswitch_br_port_vlans_flush(br_port);
	kfree(br_port);
}

static struct ice_esw_br_port *
ice_eswitch_br_port_init(struct ice_esw_br *bridge)
{
	struct ice_esw_br_port *br_port;

	br_port = kzalloc(sizeof(*br_port), GFP_KERNEL);
	if (!br_port)
		return ERR_PTR(-ENOMEM);

	xa_init(&br_port->vlans);

	br_port->bridge = bridge;

	return br_port;
}

static int
ice_eswitch_br_vf_repr_port_init(struct ice_esw_br *bridge,
				 struct ice_repr *repr)
{
	struct ice_esw_br_port *br_port;
	int err;

	br_port = ice_eswitch_br_port_init(bridge);
	if (IS_ERR(br_port))
		return PTR_ERR(br_port);

	br_port->vsi = repr->src_vsi;
	br_port->vsi_idx = br_port->vsi->idx;
	br_port->type = ICE_ESWITCH_BR_VF_REPR_PORT;
	repr->br_port = br_port;

	err = xa_insert(&bridge->ports, br_port->vsi_idx, br_port, GFP_KERNEL);
	if (err) {
		ice_eswitch_br_port_deinit(bridge, br_port);
		return err;
	}

	return 0;
}

static int
ice_eswitch_br_uplink_port_init(struct ice_esw_br *bridge, struct ice_pf *pf)
{
	struct ice_vsi *vsi = pf->eswitch.uplink_vsi;
	struct ice_esw_br_port *br_port;
	int err;

	br_port = ice_eswitch_br_port_init(bridge);
	if (IS_ERR(br_port))
		return PTR_ERR(br_port);

	br_port->vsi = vsi;
	br_port->vsi_idx = br_port->vsi->idx;
	br_port->type = ICE_ESWITCH_BR_UPLINK_PORT;
	pf->br_port = br_port;

	err = xa_insert(&bridge->ports, br_port->vsi_idx, br_port, GFP_KERNEL);
	if (err) {
		ice_eswitch_br_port_deinit(bridge, br_port);
		return err;
	}

	return 0;
}

static void
ice_eswitch_br_ports_flush(struct ice_esw_br *bridge)
{
	struct ice_esw_br_port *port;
	unsigned long i;

	xa_for_each(&bridge->ports, i, port)
		ice_eswitch_br_port_deinit(bridge, port);
}

static void
ice_eswitch_br_deinit(struct ice_esw_br_offloads *br_offloads,
		      struct ice_esw_br *bridge)
{
	if (!bridge)
		return;

	/* Cleanup all the ports that were added asynchronously
	 * through NETDEV_CHANGEUPPER event.
	 */
	ice_eswitch_br_ports_flush(bridge);
	WARN_ON(!xa_empty(&bridge->ports));
	xa_destroy(&bridge->ports);
	rhashtable_destroy(&bridge->fdb_ht);

	br_offloads->bridge = NULL;
	kfree(bridge);
}

static struct ice_esw_br *
ice_eswitch_br_init(struct ice_esw_br_offloads *br_offloads, int ifindex)
{
	struct ice_esw_br *bridge;
	int err;

	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
	if (!bridge)
		return ERR_PTR(-ENOMEM);

	err = rhashtable_init(&bridge->fdb_ht, &ice_fdb_ht_params);
	if (err) {
		kfree(bridge);
		return ERR_PTR(err);
	}

	INIT_LIST_HEAD(&bridge->fdb_list);
	bridge->br_offloads = br_offloads;
	bridge->ifindex = ifindex;
	bridge->ageing_time = clock_t_to_jiffies(BR_DEFAULT_AGEING_TIME);
	xa_init(&bridge->ports);
	br_offloads->bridge = bridge;

	return bridge;
}

static struct ice_esw_br *
ice_eswitch_br_get(struct ice_esw_br_offloads *br_offloads, int ifindex,
		   struct netlink_ext_ack *extack)
{
	struct ice_esw_br *bridge = br_offloads->bridge;

	if (bridge) {
		if (bridge->ifindex != ifindex) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only one bridge is supported per eswitch");
			return ERR_PTR(-EOPNOTSUPP);
		}
		return bridge;
	}

	/* Create the bridge if it doesn't exist yet */
	bridge = ice_eswitch_br_init(br_offloads, ifindex);
	if (IS_ERR(bridge))
		NL_SET_ERR_MSG_MOD(extack, "Failed to init the bridge");

	return bridge;
}

static void
ice_eswitch_br_verify_deinit(struct ice_esw_br_offloads *br_offloads,
			     struct ice_esw_br *bridge)
{
	/* Remove the bridge if it exists and there are no ports left */
	if (!bridge || !xa_empty(&bridge->ports))
		return;

	ice_eswitch_br_deinit(br_offloads, bridge);
}

static int
ice_eswitch_br_port_unlink(struct ice_esw_br_offloads *br_offloads,
			   struct net_device *dev, int ifindex,
			   struct netlink_ext_ack *extack)
{
	struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(dev);
	struct ice_esw_br *bridge;

	if (!br_port) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port representor is not attached to any bridge");
		return -EINVAL;
	}

	if (br_port->bridge->ifindex != ifindex) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port representor is attached to another bridge");
		return -EINVAL;
	}

	bridge = br_port->bridge;

	trace_ice_eswitch_br_port_unlink(br_port);
	ice_eswitch_br_port_deinit(br_port->bridge, br_port);
	ice_eswitch_br_verify_deinit(br_offloads, bridge);

	return 0;
}

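/* Attach a netdev to the offloaded bridge: look up (or create) the
 * bridge instance for this ifindex, then initialize either a VF
 * representor port or the uplink port depending on the device type.
 */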
static int
ice_eswitch_br_port_link(struct ice_esw_br_offloads *br_offloads,
			 struct net_device *dev, int ifindex,
			 struct netlink_ext_ack *extack)
{
	struct ice_esw_br *bridge;
	int err;

	if (ice_eswitch_br_netdev_to_port(dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port is already attached to the bridge");
		return -EINVAL;
	}

	bridge = ice_eswitch_br_get(br_offloads, ifindex, extack);
	if (IS_ERR(bridge))
		return PTR_ERR(bridge);

	if (ice_is_port_repr_netdev(dev)) {
		struct ice_repr *repr = ice_netdev_to_repr(dev);

		err = ice_eswitch_br_vf_repr_port_init(bridge, repr);
		trace_ice_eswitch_br_port_link(repr->br_port);
	} else {
		struct net_device *ice_dev;
		struct ice_pf *pf;

		if (netif_is_lag_master(dev))
			ice_dev = ice_eswitch_br_get_uplink_from_lag(dev);
		else
			ice_dev = dev;

		if (!ice_dev)
			return 0;

		pf = ice_netdev_to_pf(ice_dev);

		err = ice_eswitch_br_uplink_port_init(bridge, pf);
		trace_ice_eswitch_br_port_link(pf->br_port);
	}
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to init bridge port");
		goto err_port_init;
	}

	return 0;

err_port_init:
	ice_eswitch_br_verify_deinit(br_offloads, bridge);
	return err;
}

static int
ice_eswitch_br_port_changeupper(struct notifier_block *nb, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct ice_esw_br_offloads *br_offloads;
	struct netlink_ext_ack *extack;
	struct net_device *upper;

	br_offloads = ice_nb_to_br_offloads(nb, netdev_nb);

	if (!ice_eswitch_br_is_dev_valid(dev))
		return 0;

	upper = info->upper_dev;
	if (!netif_is_bridge_master(upper))
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	if (info->linking)
		return ice_eswitch_br_port_link(br_offloads, dev,
						upper->ifindex, extack);
	else
		return ice_eswitch_br_port_unlink(br_offloads, dev,
						  upper->ifindex, extack);
}

static int
ice_eswitch_br_port_event(struct notifier_block *nb,
			  unsigned long event, void *ptr)
{
	int err = 0;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		err = ice_eswitch_br_port_changeupper(nb, ptr);
		break;
	}

	return notifier_from_errno(err);
}

static void
ice_eswitch_br_offloads_dealloc(struct ice_pf *pf)
{
	struct ice_esw_br_offloads *br_offloads = pf->eswitch.br_offloads;

	ASSERT_RTNL();

	if (!br_offloads)
		return;

	ice_eswitch_br_deinit(br_offloads, br_offloads->bridge);

	pf->eswitch.br_offloads = NULL;
	kfree(br_offloads);
}

static struct ice_esw_br_offloads *
ice_eswitch_br_offloads_alloc(struct ice_pf *pf)
{
	struct ice_esw_br_offloads *br_offloads;

	ASSERT_RTNL();

	if (pf->eswitch.br_offloads)
		return ERR_PTR(-EEXIST);

	br_offloads = kzalloc(sizeof(*br_offloads), GFP_KERNEL);
	if (!br_offloads)
		return ERR_PTR(-ENOMEM);

	pf->eswitch.br_offloads = br_offloads;
	br_offloads->pf = pf;

	return br_offloads;
}

void
ice_eswitch_br_offloads_deinit(struct ice_pf *pf)
{
	struct ice_esw_br_offloads *br_offloads;

	br_offloads = pf->eswitch.br_offloads;
	if (!br_offloads)
		return;

	cancel_delayed_work_sync(&br_offloads->update_work);
	unregister_netdevice_notifier(&br_offloads->netdev_nb);
	unregister_switchdev_blocking_notifier(&br_offloads->switchdev_blk);
	unregister_switchdev_notifier(&br_offloads->switchdev_nb);
	destroy_workqueue(br_offloads->wq);
	/* The notifier blocks were just unregistered, so no new events
	 * will arrive, but some may still be in progress. Hold the rtnl
	 * lock and wait for them to finish.
	 */
	rtnl_lock();
	ice_eswitch_br_offloads_dealloc(pf);
	rtnl_unlock();
}

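/* Age out driver-learned FDB entries that have not been used within the
 * bridge's ageing time; user-added entries are exempt. Runs from the
 * periodic update worker below.
 */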
static void ice_eswitch_br_update(struct ice_esw_br_offloads *br_offloads)
{
	struct ice_esw_br *bridge = br_offloads->bridge;
	struct ice_esw_br_fdb_entry *entry, *tmp;

	if (!bridge)
		return;

	rtnl_lock();
	list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) {
		if (entry->flags & ICE_ESWITCH_BR_FDB_ADDED_BY_USER)
			continue;

		if (time_is_after_eq_jiffies(entry->last_use +
					     bridge->ageing_time))
			continue;

		ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, entry);
	}
	rtnl_unlock();
}

static void ice_eswitch_br_update_work(struct work_struct *work)
{
	struct ice_esw_br_offloads *br_offloads;

	br_offloads = ice_work_to_br_offloads(work);

	ice_eswitch_br_update(br_offloads);

	queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
			   ICE_ESW_BRIDGE_UPDATE_INTERVAL);
}

int
ice_eswitch_br_offloads_init(struct ice_pf *pf)
{
	struct ice_esw_br_offloads *br_offloads;
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	rtnl_lock();
	br_offloads = ice_eswitch_br_offloads_alloc(pf);
	rtnl_unlock();
	if (IS_ERR(br_offloads)) {
		dev_err(dev, "Failed to init eswitch bridge\n");
		return PTR_ERR(br_offloads);
	}

	br_offloads->wq = alloc_ordered_workqueue("ice_bridge_wq", 0);
	if (!br_offloads->wq) {
		err = -ENOMEM;
		dev_err(dev, "Failed to allocate bridge workqueue\n");
		goto err_alloc_wq;
	}

	br_offloads->switchdev_nb.notifier_call =
		ice_eswitch_br_switchdev_event;
	err = register_switchdev_notifier(&br_offloads->switchdev_nb);
	if (err) {
		dev_err(dev,
			"Failed to register switchdev notifier\n");
		goto err_reg_switchdev_nb;
	}

	br_offloads->switchdev_blk.notifier_call =
		ice_eswitch_br_event_blocking;
	err = register_switchdev_blocking_notifier(&br_offloads->switchdev_blk);
	if (err) {
		dev_err(dev,
			"Failed to register bridge blocking switchdev notifier\n");
		goto err_reg_switchdev_blk;
	}

	br_offloads->netdev_nb.notifier_call = ice_eswitch_br_port_event;
	err = register_netdevice_notifier(&br_offloads->netdev_nb);
	if (err) {
		dev_err(dev,
			"Failed to register bridge port event notifier\n");
		goto err_reg_netdev_nb;
	}

	INIT_DELAYED_WORK(&br_offloads->update_work,
			  ice_eswitch_br_update_work);
	queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
			   ICE_ESW_BRIDGE_UPDATE_INTERVAL);

	return 0;

err_reg_netdev_nb:
	unregister_switchdev_blocking_notifier(&br_offloads->switchdev_blk);
err_reg_switchdev_blk:
	unregister_switchdev_notifier(&br_offloads->switchdev_nb);
err_reg_switchdev_nb:
	destroy_workqueue(br_offloads->wq);
err_alloc_wq:
	rtnl_lock();
	ice_eswitch_br_offloads_dealloc(pf);
	rtnl_unlock();

	return err;
}