// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023, Intel Corporation. */

#include "ice.h"
#include "ice_eswitch_br.h"
#include "ice_repr.h"
#include "ice_switch.h"
#include "ice_vlan.h"
#include "ice_vf_vsi_vlan_ops.h"
#include "ice_trace.h"

#define ICE_ESW_BRIDGE_UPDATE_INTERVAL msecs_to_jiffies(1000)

static const struct rhashtable_params ice_fdb_ht_params = {
	.key_offset = offsetof(struct ice_esw_br_fdb_entry, data),
	.key_len = sizeof(struct ice_esw_br_fdb_data),
	.head_offset = offsetof(struct ice_esw_br_fdb_entry, ht_node),
	.automatic_shrinking = true,
};

static bool ice_eswitch_br_is_dev_valid(const struct net_device *dev)
{
	/* Accept only PF netdev, PRs and LAG */
	return ice_is_port_repr_netdev(dev) || netif_is_ice(dev) ||
		netif_is_lag_master(dev);
}

static struct net_device *
ice_eswitch_br_get_uplink_from_lag(struct net_device *lag_dev)
{
	struct net_device *lower;
	struct list_head *iter;

	netdev_for_each_lower_dev(lag_dev, lower, iter) {
		if (netif_is_ice(lower))
			return lower;
	}

	return NULL;
}

static struct ice_esw_br_port *
ice_eswitch_br_netdev_to_port(struct net_device *dev)
{
	if (ice_is_port_repr_netdev(dev)) {
		struct ice_repr *repr = ice_netdev_to_repr(dev);

		return repr->br_port;
	} else if (netif_is_ice(dev) || netif_is_lag_master(dev)) {
		struct net_device *ice_dev;
		struct ice_pf *pf;

		if (netif_is_lag_master(dev))
			ice_dev = ice_eswitch_br_get_uplink_from_lag(dev);
		else
			ice_dev = dev;

		if (!ice_dev)
			return NULL;

		pf = ice_netdev_to_pf(ice_dev);

		return pf->br_port;
	}

	return NULL;
}

static void
ice_eswitch_br_ingress_rule_setup(struct ice_adv_rule_info *rule_info,
				  u8 pf_id, u16 vf_vsi_idx)
{
	rule_info->sw_act.vsi_handle = vf_vsi_idx;
	rule_info->sw_act.flag |= ICE_FLTR_RX;
	rule_info->sw_act.src = pf_id;
	rule_info->priority = 5;
}

static void
ice_eswitch_br_egress_rule_setup(struct ice_adv_rule_info *rule_info,
				 u16 pf_vsi_idx)
{
	rule_info->sw_act.vsi_handle = pf_vsi_idx;
	rule_info->sw_act.flag |= ICE_FLTR_TX;
	rule_info->flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
	rule_info->flags_info.act_valid = true;
	rule_info->priority = 5;
}

static int
ice_eswitch_br_rule_delete(struct ice_hw *hw, struct ice_rule_query_data *rule)
{
	int err;

	if (!rule)
		return -EINVAL;

	err = ice_rem_adv_rule_by_id(hw, rule);
	kfree(rule);

	return err;
}

static u16
ice_eswitch_br_get_lkups_cnt(u16 vid)
{
	return ice_eswitch_br_is_vid_valid(vid) ? 2 : 1;
}
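/* When the VID is valid (as reported by ice_eswitch_br_is_vid_valid()), a
 * second lookup element matching the outer VLAN tag is appended at index 1;
 * callers size the lookup list with ice_eswitch_br_get_lkups_cnt() above.
 */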
static void
ice_eswitch_br_add_vlan_lkup(struct ice_adv_lkup_elem *list, u16 vid)
{
	if (ice_eswitch_br_is_vid_valid(vid)) {
		list[1].type = ICE_VLAN_OFOS;
		list[1].h_u.vlan_hdr.vlan = cpu_to_be16(vid & VLAN_VID_MASK);
		list[1].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
	}
}

static struct ice_rule_query_data *
ice_eswitch_br_fwd_rule_create(struct ice_hw *hw, int vsi_idx, int port_type,
			       const unsigned char *mac, u16 vid)
{
	struct ice_adv_rule_info rule_info = { 0 };
	struct ice_rule_query_data *rule;
	struct ice_adv_lkup_elem *list;
	u16 lkups_cnt;
	int err;

	lkups_cnt = ice_eswitch_br_get_lkups_cnt(vid);

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return ERR_PTR(-ENOMEM);

	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
	if (!list) {
		err = -ENOMEM;
		goto err_list_alloc;
	}

	switch (port_type) {
	case ICE_ESWITCH_BR_UPLINK_PORT:
		ice_eswitch_br_egress_rule_setup(&rule_info, vsi_idx);
		break;
	case ICE_ESWITCH_BR_VF_REPR_PORT:
		ice_eswitch_br_ingress_rule_setup(&rule_info, hw->pf_id,
						  vsi_idx);
		break;
	default:
		err = -EINVAL;
		goto err_add_rule;
	}

	list[0].type = ICE_MAC_OFOS;
	ether_addr_copy(list[0].h_u.eth_hdr.dst_addr, mac);
	eth_broadcast_addr(list[0].m_u.eth_hdr.dst_addr);

	ice_eswitch_br_add_vlan_lkup(list, vid);

	rule_info.need_pass_l2 = true;

	rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;

	err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, rule);
	if (err)
		goto err_add_rule;

	kfree(list);

	return rule;

err_add_rule:
	kfree(list);
err_list_alloc:
	kfree(rule);

	return ERR_PTR(err);
}

static struct ice_rule_query_data *
ice_eswitch_br_guard_rule_create(struct ice_hw *hw, u16 vsi_idx,
				 const unsigned char *mac, u16 vid)
{
	struct ice_adv_rule_info rule_info = { 0 };
	struct ice_rule_query_data *rule;
	struct ice_adv_lkup_elem *list;
	int err = -ENOMEM;
	u16 lkups_cnt;

	lkups_cnt = ice_eswitch_br_get_lkups_cnt(vid);

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		goto err_exit;

	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
	if (!list)
		goto err_list_alloc;

	list[0].type = ICE_MAC_OFOS;
	ether_addr_copy(list[0].h_u.eth_hdr.src_addr, mac);
	eth_broadcast_addr(list[0].m_u.eth_hdr.src_addr);

	ice_eswitch_br_add_vlan_lkup(list, vid);

	rule_info.allow_pass_l2 = true;
	rule_info.sw_act.vsi_handle = vsi_idx;
	rule_info.sw_act.fltr_act = ICE_NOP;
	rule_info.priority = 5;

	err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, rule);
	if (err)
		goto err_add_rule;

	kfree(list);

	return rule;

err_add_rule:
	kfree(list);
err_list_alloc:
	kfree(rule);
err_exit:
	return ERR_PTR(err);
}
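/* Each offloaded FDB entry is backed by a pair of HW rules created below: a
 * forward rule that matches the destination MAC (plus VLAN, when filtering
 * is enabled) and redirects to the destination VSI, and a guard rule that
 * matches the source MAC with no forwarding action (ICE_NOP) but allows the
 * frame to pass the L2 stage.
 */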
static struct ice_esw_br_flow *
ice_eswitch_br_flow_create(struct device *dev, struct ice_hw *hw, int vsi_idx,
			   int port_type, const unsigned char *mac, u16 vid)
{
	struct ice_rule_query_data *fwd_rule, *guard_rule;
	struct ice_esw_br_flow *flow;
	int err;

	flow = kzalloc(sizeof(*flow), GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	fwd_rule = ice_eswitch_br_fwd_rule_create(hw, vsi_idx, port_type, mac,
						  vid);
	err = PTR_ERR_OR_ZERO(fwd_rule);
	if (err) {
		dev_err(dev, "Failed to create eswitch bridge %sgress forward rule, err: %d\n",
			port_type == ICE_ESWITCH_BR_UPLINK_PORT ? "e" : "in",
			err);
		goto err_fwd_rule;
	}

	guard_rule = ice_eswitch_br_guard_rule_create(hw, vsi_idx, mac, vid);
	err = PTR_ERR_OR_ZERO(guard_rule);
	if (err) {
		dev_err(dev, "Failed to create eswitch bridge %sgress guard rule, err: %d\n",
			port_type == ICE_ESWITCH_BR_UPLINK_PORT ? "e" : "in",
			err);
		goto err_guard_rule;
	}

	flow->fwd_rule = fwd_rule;
	flow->guard_rule = guard_rule;

	return flow;

err_guard_rule:
	ice_eswitch_br_rule_delete(hw, fwd_rule);
err_fwd_rule:
	kfree(flow);

	return ERR_PTR(err);
}

static struct ice_esw_br_fdb_entry *
ice_eswitch_br_fdb_find(struct ice_esw_br *bridge, const unsigned char *mac,
			u16 vid)
{
	struct ice_esw_br_fdb_data data = {
		.vid = vid,
	};

	ether_addr_copy(data.addr, mac);
	return rhashtable_lookup_fast(&bridge->fdb_ht, &data,
				      ice_fdb_ht_params);
}

static void
ice_eswitch_br_flow_delete(struct ice_pf *pf, struct ice_esw_br_flow *flow)
{
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	err = ice_eswitch_br_rule_delete(&pf->hw, flow->fwd_rule);
	if (err)
		dev_err(dev, "Failed to delete FDB forward rule, err: %d\n",
			err);

	err = ice_eswitch_br_rule_delete(&pf->hw, flow->guard_rule);
	if (err)
		dev_err(dev, "Failed to delete FDB guard rule, err: %d\n",
			err);

	kfree(flow);
}

static struct ice_esw_br_vlan *
ice_esw_br_port_vlan_lookup(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid)
{
	struct ice_pf *pf = bridge->br_offloads->pf;
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_esw_br_port *port;
	struct ice_esw_br_vlan *vlan;

	port = xa_load(&bridge->ports, vsi_idx);
	if (!port) {
		dev_info(dev, "Bridge port lookup failed (vsi=%u)\n", vsi_idx);
		return ERR_PTR(-EINVAL);
	}

	vlan = xa_load(&port->vlans, vid);
	if (!vlan) {
		dev_info(dev, "Bridge port vlan metadata lookup failed (vsi=%u)\n",
			 vsi_idx);
		return ERR_PTR(-EINVAL);
	}

	return vlan;
}

static void
ice_eswitch_br_fdb_entry_delete(struct ice_esw_br *bridge,
				struct ice_esw_br_fdb_entry *fdb_entry)
{
	struct ice_pf *pf = bridge->br_offloads->pf;

	rhashtable_remove_fast(&bridge->fdb_ht, &fdb_entry->ht_node,
			       ice_fdb_ht_params);
	list_del(&fdb_entry->list);

	ice_eswitch_br_flow_delete(pf, fdb_entry->flow);

	kfree(fdb_entry);
}

static void
ice_eswitch_br_fdb_offload_notify(struct net_device *dev,
				  const unsigned char *mac, u16 vid,
				  unsigned long val)
{
	struct switchdev_notifier_fdb_info fdb_info = {
		.addr = mac,
		.vid = vid,
		.offloaded = true,
	};

	call_switchdev_notifiers(val, dev, &fdb_info.info, NULL);
}

static void
ice_eswitch_br_fdb_entry_notify_and_cleanup(struct ice_esw_br *bridge,
					    struct ice_esw_br_fdb_entry *entry)
{
	if (!(entry->flags & ICE_ESWITCH_BR_FDB_ADDED_BY_USER))
		ice_eswitch_br_fdb_offload_notify(entry->dev, entry->data.addr,
						  entry->data.vid,
						  SWITCHDEV_FDB_DEL_TO_BRIDGE);
	ice_eswitch_br_fdb_entry_delete(bridge, entry);
}
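/* Handle SWITCHDEV_FDB_DEL_TO_DEVICE: look the entry up by MAC/VID and tear
 * it down, notifying the bridge (SWITCHDEV_FDB_DEL_TO_BRIDGE) first unless
 * the entry was added by the user.
 */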
static void
ice_eswitch_br_fdb_entry_find_and_delete(struct ice_esw_br *bridge,
					 const unsigned char *mac, u16 vid)
{
	struct ice_pf *pf = bridge->br_offloads->pf;
	struct ice_esw_br_fdb_entry *fdb_entry;
	struct device *dev = ice_pf_to_dev(pf);

	fdb_entry = ice_eswitch_br_fdb_find(bridge, mac, vid);
	if (!fdb_entry) {
		dev_err(dev, "FDB entry with mac: %pM and vid: %u not found\n",
			mac, vid);
		return;
	}

	trace_ice_eswitch_br_fdb_entry_find_and_delete(fdb_entry);
	ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, fdb_entry);
}

static void
ice_eswitch_br_fdb_entry_create(struct net_device *netdev,
				struct ice_esw_br_port *br_port,
				bool added_by_user,
				const unsigned char *mac, u16 vid)
{
	struct ice_esw_br *bridge = br_port->bridge;
	struct ice_pf *pf = bridge->br_offloads->pf;
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_esw_br_fdb_entry *fdb_entry;
	struct ice_esw_br_flow *flow;
	struct ice_esw_br_vlan *vlan;
	struct ice_hw *hw = &pf->hw;
	unsigned long event;
	int err;

	/* untagged filtering is not yet supported */
	if (!(bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING) && vid)
		return;

	if (bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING) {
		vlan = ice_esw_br_port_vlan_lookup(bridge, br_port->vsi_idx,
						   vid);
		if (IS_ERR(vlan)) {
			dev_err(dev, "VLAN lookup failed, err: %ld\n",
				PTR_ERR(vlan));
			return;
		}
	}

	fdb_entry = ice_eswitch_br_fdb_find(bridge, mac, vid);
	if (fdb_entry)
		ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, fdb_entry);

	fdb_entry = kzalloc(sizeof(*fdb_entry), GFP_KERNEL);
	if (!fdb_entry) {
		err = -ENOMEM;
		goto err_exit;
	}

	flow = ice_eswitch_br_flow_create(dev, hw, br_port->vsi_idx,
					  br_port->type, mac, vid);
	if (IS_ERR(flow)) {
		err = PTR_ERR(flow);
		goto err_add_flow;
	}

	ether_addr_copy(fdb_entry->data.addr, mac);
	fdb_entry->data.vid = vid;
	fdb_entry->br_port = br_port;
	fdb_entry->flow = flow;
	fdb_entry->dev = netdev;
	fdb_entry->last_use = jiffies;
	event = SWITCHDEV_FDB_ADD_TO_BRIDGE;

	if (added_by_user) {
		fdb_entry->flags |= ICE_ESWITCH_BR_FDB_ADDED_BY_USER;
		event = SWITCHDEV_FDB_OFFLOADED;
	}

	err = rhashtable_insert_fast(&bridge->fdb_ht, &fdb_entry->ht_node,
				     ice_fdb_ht_params);
	if (err)
		goto err_fdb_insert;

	list_add(&fdb_entry->list, &bridge->fdb_list);
	trace_ice_eswitch_br_fdb_entry_create(fdb_entry);

	ice_eswitch_br_fdb_offload_notify(netdev, mac, vid, event);

	return;

err_fdb_insert:
	ice_eswitch_br_flow_delete(pf, flow);
err_add_flow:
	kfree(fdb_entry);
err_exit:
	dev_err(dev, "Failed to create FDB entry, err: %d\n", err);
}

static void
ice_eswitch_br_fdb_work_dealloc(struct ice_esw_br_fdb_work *fdb_work)
{
	kfree(fdb_work->fdb_info.addr);
	kfree(fdb_work);
}
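/* FDB notifications arrive in atomic context, so they are deferred to the
 * ordered bridge workqueue and handled here. The worker takes the rtnl lock
 * to serialize against port link/unlink and drops the device reference taken
 * when the work item was queued.
 */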
static void
ice_eswitch_br_fdb_event_work(struct work_struct *work)
{
	struct ice_esw_br_fdb_work *fdb_work = ice_work_to_fdb_work(work);
	bool added_by_user = fdb_work->fdb_info.added_by_user;
	const unsigned char *mac = fdb_work->fdb_info.addr;
	u16 vid = fdb_work->fdb_info.vid;
	struct ice_esw_br_port *br_port;

	rtnl_lock();

	br_port = ice_eswitch_br_netdev_to_port(fdb_work->dev);
	if (!br_port)
		goto err_exit;

	switch (fdb_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		ice_eswitch_br_fdb_entry_create(fdb_work->dev, br_port,
						added_by_user, mac, vid);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		ice_eswitch_br_fdb_entry_find_and_delete(br_port->bridge,
							 mac, vid);
		break;
	default:
		goto err_exit;
	}

err_exit:
	rtnl_unlock();
	dev_put(fdb_work->dev);
	ice_eswitch_br_fdb_work_dealloc(fdb_work);
}

static struct ice_esw_br_fdb_work *
ice_eswitch_br_fdb_work_alloc(struct switchdev_notifier_fdb_info *fdb_info,
			      struct net_device *dev,
			      unsigned long event)
{
	struct ice_esw_br_fdb_work *work;
	unsigned char *mac;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&work->work, ice_eswitch_br_fdb_event_work);
	memcpy(&work->fdb_info, fdb_info, sizeof(work->fdb_info));

	mac = kzalloc(ETH_ALEN, GFP_ATOMIC);
	if (!mac) {
		kfree(work);
		return ERR_PTR(-ENOMEM);
	}

	ether_addr_copy(mac, fdb_info->addr);
	work->fdb_info.addr = mac;
	work->event = event;
	work->dev = dev;

	return work;
}

static int
ice_eswitch_br_switchdev_event(struct notifier_block *nb,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct switchdev_notifier_fdb_info *fdb_info;
	struct switchdev_notifier_info *info = ptr;
	struct ice_esw_br_offloads *br_offloads;
	struct ice_esw_br_fdb_work *work;
	struct netlink_ext_ack *extack;
	struct net_device *upper;

	br_offloads = ice_nb_to_br_offloads(nb, switchdev_nb);
	extack = switchdev_notifier_info_to_extack(ptr);

	upper = netdev_master_upper_dev_get_rcu(dev);
	if (!upper)
		return NOTIFY_DONE;

	if (!netif_is_bridge_master(upper))
		return NOTIFY_DONE;

	if (!ice_eswitch_br_is_dev_valid(dev))
		return NOTIFY_DONE;

	if (!ice_eswitch_br_netdev_to_port(dev))
		return NOTIFY_DONE;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = container_of(info, typeof(*fdb_info), info);

		work = ice_eswitch_br_fdb_work_alloc(fdb_info, dev, event);
		if (IS_ERR(work)) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to init switchdev fdb work");
			return notifier_from_errno(PTR_ERR(work));
		}
		dev_hold(dev);

		queue_work(br_offloads->wq, &work->work);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

static void ice_eswitch_br_fdb_flush(struct ice_esw_br *bridge)
{
	struct ice_esw_br_fdb_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list)
		ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, entry);
}

static void
ice_eswitch_br_vlan_filtering_set(struct ice_esw_br *bridge, bool enable)
{
	if (enable == !!(bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING))
		return;

	ice_eswitch_br_fdb_flush(bridge);
	if (enable)
		bridge->flags |= ICE_ESWITCH_BR_VLAN_FILTERING;
	else
		bridge->flags &= ~ICE_ESWITCH_BR_VLAN_FILTERING;
}

static void
ice_eswitch_br_clear_pvid(struct ice_esw_br_port *port)
{
	struct ice_vlan port_vlan = ICE_VLAN(ETH_P_8021Q, port->pvid, 0);
	struct ice_vsi_vlan_ops *vlan_ops;

	vlan_ops = ice_get_compat_vsi_vlan_ops(port->vsi);

	vlan_ops->del_vlan(port->vsi, &port_vlan);
	vlan_ops->clear_port_vlan(port->vsi);

	ice_vf_vsi_disable_port_vlan(port->vsi);

	port->pvid = 0;
}
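/* Removing a VLAN invalidates any FDB entries learned in it, so they are
 * flushed first; if the VLAN is also the port's pvid, the port-VLAN HW
 * configuration is cleared as well.
 */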
static void
ice_eswitch_br_vlan_cleanup(struct ice_esw_br_port *port,
			    struct ice_esw_br_vlan *vlan)
{
	struct ice_esw_br_fdb_entry *fdb_entry, *tmp;
	struct ice_esw_br *bridge = port->bridge;

	trace_ice_eswitch_br_vlan_cleanup(vlan);

	list_for_each_entry_safe(fdb_entry, tmp, &bridge->fdb_list, list) {
		if (vlan->vid == fdb_entry->data.vid)
			ice_eswitch_br_fdb_entry_delete(bridge, fdb_entry);
	}

	xa_erase(&port->vlans, vlan->vid);
	if (port->pvid == vlan->vid)
		ice_eswitch_br_clear_pvid(port);
	kfree(vlan);
}

static void ice_eswitch_br_port_vlans_flush(struct ice_esw_br_port *port)
{
	struct ice_esw_br_vlan *vlan;
	unsigned long index;

	xa_for_each(&port->vlans, index, vlan)
		ice_eswitch_br_vlan_cleanup(port, vlan);
}

static int
ice_eswitch_br_set_pvid(struct ice_esw_br_port *port,
			struct ice_esw_br_vlan *vlan)
{
	struct ice_vlan port_vlan = ICE_VLAN(ETH_P_8021Q, vlan->vid, 0);
	struct device *dev = ice_pf_to_dev(port->vsi->back);
	struct ice_vsi_vlan_ops *vlan_ops;
	int err;

	if (port->pvid == vlan->vid || vlan->vid == 1)
		return 0;

	/* Setting port vlan on uplink isn't supported by hw */
	if (port->type == ICE_ESWITCH_BR_UPLINK_PORT)
		return -EOPNOTSUPP;

	if (port->pvid) {
		dev_info(dev,
			 "Port VLAN (vsi=%u, vid=%u) already exists on the port, remove it before adding new one\n",
			 port->vsi_idx, port->pvid);
		return -EEXIST;
	}

	ice_vf_vsi_enable_port_vlan(port->vsi);

	vlan_ops = ice_get_compat_vsi_vlan_ops(port->vsi);
	err = vlan_ops->set_port_vlan(port->vsi, &port_vlan);
	if (err)
		return err;

	err = vlan_ops->add_vlan(port->vsi, &port_vlan);
	if (err)
		return err;

	ice_eswitch_br_port_vlans_flush(port);
	port->pvid = vlan->vid;

	return 0;
}

static struct ice_esw_br_vlan *
ice_eswitch_br_vlan_create(u16 vid, u16 flags, struct ice_esw_br_port *port)
{
	struct device *dev = ice_pf_to_dev(port->vsi->back);
	struct ice_esw_br_vlan *vlan;
	int err;

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return ERR_PTR(-ENOMEM);

	vlan->vid = vid;
	vlan->flags = flags;
	if ((flags & BRIDGE_VLAN_INFO_PVID) &&
	    (flags & BRIDGE_VLAN_INFO_UNTAGGED)) {
		err = ice_eswitch_br_set_pvid(port, vlan);
		if (err)
			goto err_set_pvid;
	} else if ((flags & BRIDGE_VLAN_INFO_PVID) ||
		   (flags & BRIDGE_VLAN_INFO_UNTAGGED)) {
		dev_info(dev, "VLAN push and pop are only supported together\n");
		err = -EOPNOTSUPP;
		goto err_set_pvid;
	}

	err = xa_insert(&port->vlans, vlan->vid, vlan, GFP_KERNEL);
	if (err)
		goto err_insert;

	trace_ice_eswitch_br_vlan_create(vlan);

	return vlan;

err_insert:
	if (port->pvid)
		ice_eswitch_br_clear_pvid(port);
err_set_pvid:
	kfree(vlan);
	return ERR_PTR(err);
}
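/* A bridge port carries either a single port VLAN (pvid + untagged) or a
 * set of trunk VLANs, never both. Re-adding an existing VID with different
 * flags recreates the VLAN entry with the new flags.
 */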
static int
ice_eswitch_br_port_vlan_add(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid,
			     u16 flags, struct netlink_ext_ack *extack)
{
	struct ice_esw_br_port *port;
	struct ice_esw_br_vlan *vlan;

	port = xa_load(&bridge->ports, vsi_idx);
	if (!port)
		return -EINVAL;

	if (port->pvid) {
		dev_info(ice_pf_to_dev(port->vsi->back),
			 "Port VLAN (vsi=%u, vid=%u) exists on the port, remove it to add trunk VLANs\n",
			 port->vsi_idx, port->pvid);
		return -EEXIST;
	}

	vlan = xa_load(&port->vlans, vid);
	if (vlan) {
		if (vlan->flags == flags)
			return 0;

		ice_eswitch_br_vlan_cleanup(port, vlan);
	}

	vlan = ice_eswitch_br_vlan_create(vid, flags, port);
	if (IS_ERR(vlan)) {
		NL_SET_ERR_MSG_FMT_MOD(extack, "Failed to create VLAN entry, vid: %u, vsi: %u",
				       vid, vsi_idx);
		return PTR_ERR(vlan);
	}

	return 0;
}

static void
ice_eswitch_br_port_vlan_del(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid)
{
	struct ice_esw_br_port *port;
	struct ice_esw_br_vlan *vlan;

	port = xa_load(&bridge->ports, vsi_idx);
	if (!port)
		return;

	vlan = xa_load(&port->vlans, vid);
	if (!vlan)
		return;

	ice_eswitch_br_vlan_cleanup(port, vlan);
}

static int
ice_eswitch_br_port_obj_add(struct net_device *netdev, const void *ctx,
			    const struct switchdev_obj *obj,
			    struct netlink_ext_ack *extack)
{
	struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev);
	struct switchdev_obj_port_vlan *vlan;
	int err;

	if (!br_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
		err = ice_eswitch_br_port_vlan_add(br_port->bridge,
						   br_port->vsi_idx, vlan->vid,
						   vlan->flags, extack);
		return err;
	default:
		return -EOPNOTSUPP;
	}
}

static int
ice_eswitch_br_port_obj_del(struct net_device *netdev, const void *ctx,
			    const struct switchdev_obj *obj)
{
	struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev);
	struct switchdev_obj_port_vlan *vlan;

	if (!br_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
		ice_eswitch_br_port_vlan_del(br_port->bridge, br_port->vsi_idx,
					     vlan->vid);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int
ice_eswitch_br_port_obj_attr_set(struct net_device *netdev, const void *ctx,
				 const struct switchdev_attr *attr,
				 struct netlink_ext_ack *extack)
{
	struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev);

	if (!br_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		ice_eswitch_br_vlan_filtering_set(br_port->bridge,
						  attr->u.vlan_filtering);
		return 0;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		br_port->bridge->ageing_time =
			clock_t_to_jiffies(attr->u.ageing_time);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int
ice_eswitch_br_event_blocking(struct notifier_block *nb, unsigned long event,
			      void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = switchdev_handle_port_obj_add(dev, ptr,
						    ice_eswitch_br_is_dev_valid,
						    ice_eswitch_br_port_obj_add);
		break;
	case SWITCHDEV_PORT_OBJ_DEL:
		err = switchdev_handle_port_obj_del(dev, ptr,
						    ice_eswitch_br_is_dev_valid,
						    ice_eswitch_br_port_obj_del);
		break;
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     ice_eswitch_br_is_dev_valid,
						     ice_eswitch_br_port_obj_attr_set);
		break;
	default:
		err = 0;
	}

	return notifier_from_errno(err);
}
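/* Tear down everything hanging off a bridge port: FDB entries that point at
 * it, the back-pointer from the PF or VF representor, the port's slot in the
 * bridge's xarray and its VLAN metadata.
 */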
static void
ice_eswitch_br_port_deinit(struct ice_esw_br *bridge,
			   struct ice_esw_br_port *br_port)
{
	struct ice_esw_br_fdb_entry *fdb_entry, *tmp;
	struct ice_vsi *vsi = br_port->vsi;

	list_for_each_entry_safe(fdb_entry, tmp, &bridge->fdb_list, list) {
		if (br_port == fdb_entry->br_port)
			ice_eswitch_br_fdb_entry_delete(bridge, fdb_entry);
	}

	if (br_port->type == ICE_ESWITCH_BR_UPLINK_PORT && vsi->back)
		vsi->back->br_port = NULL;
	else if (vsi->vf && vsi->vf->repr)
		vsi->vf->repr->br_port = NULL;

	xa_erase(&bridge->ports, br_port->vsi_idx);
	ice_eswitch_br_port_vlans_flush(br_port);
	kfree(br_port);
}

static struct ice_esw_br_port *
ice_eswitch_br_port_init(struct ice_esw_br *bridge)
{
	struct ice_esw_br_port *br_port;

	br_port = kzalloc(sizeof(*br_port), GFP_KERNEL);
	if (!br_port)
		return ERR_PTR(-ENOMEM);

	xa_init(&br_port->vlans);

	br_port->bridge = bridge;

	return br_port;
}

static int
ice_eswitch_br_vf_repr_port_init(struct ice_esw_br *bridge,
				 struct ice_repr *repr)
{
	struct ice_esw_br_port *br_port;
	int err;

	br_port = ice_eswitch_br_port_init(bridge);
	if (IS_ERR(br_port))
		return PTR_ERR(br_port);

	br_port->vsi = repr->src_vsi;
	br_port->vsi_idx = br_port->vsi->idx;
	br_port->type = ICE_ESWITCH_BR_VF_REPR_PORT;
	repr->br_port = br_port;

	err = xa_insert(&bridge->ports, br_port->vsi_idx, br_port, GFP_KERNEL);
	if (err) {
		ice_eswitch_br_port_deinit(bridge, br_port);
		return err;
	}

	return 0;
}

static int
ice_eswitch_br_uplink_port_init(struct ice_esw_br *bridge, struct ice_pf *pf)
{
	struct ice_vsi *vsi = pf->switchdev.uplink_vsi;
	struct ice_esw_br_port *br_port;
	int err;

	br_port = ice_eswitch_br_port_init(bridge);
	if (IS_ERR(br_port))
		return PTR_ERR(br_port);

	br_port->vsi = vsi;
	br_port->vsi_idx = br_port->vsi->idx;
	br_port->type = ICE_ESWITCH_BR_UPLINK_PORT;
	pf->br_port = br_port;

	err = xa_insert(&bridge->ports, br_port->vsi_idx, br_port, GFP_KERNEL);
	if (err) {
		ice_eswitch_br_port_deinit(bridge, br_port);
		return err;
	}

	return 0;
}

static void
ice_eswitch_br_ports_flush(struct ice_esw_br *bridge)
{
	struct ice_esw_br_port *port;
	unsigned long i;

	xa_for_each(&bridge->ports, i, port)
		ice_eswitch_br_port_deinit(bridge, port);
}
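/* Destroy the SW bridge representation once it has no users left. Always
 * runs under rtnl: both the NETDEV_CHANGEUPPER path and
 * ice_eswitch_br_offloads_dealloc() hold it.
 */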
static void
ice_eswitch_br_deinit(struct ice_esw_br_offloads *br_offloads,
		      struct ice_esw_br *bridge)
{
	if (!bridge)
		return;

	/* Cleanup all the ports that were added asynchronously
	 * through NETDEV_CHANGEUPPER event.
	 */
	ice_eswitch_br_ports_flush(bridge);
	WARN_ON(!xa_empty(&bridge->ports));
	xa_destroy(&bridge->ports);
	rhashtable_destroy(&bridge->fdb_ht);

	br_offloads->bridge = NULL;
	kfree(bridge);
}

static struct ice_esw_br *
ice_eswitch_br_init(struct ice_esw_br_offloads *br_offloads, int ifindex)
{
	struct ice_esw_br *bridge;
	int err;

	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
	if (!bridge)
		return ERR_PTR(-ENOMEM);

	err = rhashtable_init(&bridge->fdb_ht, &ice_fdb_ht_params);
	if (err) {
		kfree(bridge);
		return ERR_PTR(err);
	}

	INIT_LIST_HEAD(&bridge->fdb_list);
	bridge->br_offloads = br_offloads;
	bridge->ifindex = ifindex;
	bridge->ageing_time = clock_t_to_jiffies(BR_DEFAULT_AGEING_TIME);
	xa_init(&bridge->ports);
	br_offloads->bridge = bridge;

	return bridge;
}

static struct ice_esw_br *
ice_eswitch_br_get(struct ice_esw_br_offloads *br_offloads, int ifindex,
		   struct netlink_ext_ack *extack)
{
	struct ice_esw_br *bridge = br_offloads->bridge;

	if (bridge) {
		if (bridge->ifindex != ifindex) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only one bridge is supported per eswitch");
			return ERR_PTR(-EOPNOTSUPP);
		}
		return bridge;
	}

	/* Create the bridge if it doesn't exist yet */
	bridge = ice_eswitch_br_init(br_offloads, ifindex);
	if (IS_ERR(bridge))
		NL_SET_ERR_MSG_MOD(extack, "Failed to init the bridge");

	return bridge;
}

static void
ice_eswitch_br_verify_deinit(struct ice_esw_br_offloads *br_offloads,
			     struct ice_esw_br *bridge)
{
	/* Remove the bridge if it exists and there are no ports left */
	if (!bridge || !xa_empty(&bridge->ports))
		return;

	ice_eswitch_br_deinit(br_offloads, bridge);
}

static int
ice_eswitch_br_port_unlink(struct ice_esw_br_offloads *br_offloads,
			   struct net_device *dev, int ifindex,
			   struct netlink_ext_ack *extack)
{
	struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(dev);
	struct ice_esw_br *bridge;

	if (!br_port) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port representor is not attached to any bridge");
		return -EINVAL;
	}

	if (br_port->bridge->ifindex != ifindex) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port representor is attached to another bridge");
		return -EINVAL;
	}

	bridge = br_port->bridge;

	trace_ice_eswitch_br_port_unlink(br_port);
	ice_eswitch_br_port_deinit(br_port->bridge, br_port);
	ice_eswitch_br_verify_deinit(br_offloads, bridge);

	return 0;
}
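/* Attach a device to the offloaded bridge. The bridge object is created
 * lazily on the first link (and destroyed again if port init fails); a PF
 * netdev or its LAG master becomes the uplink port, while a VF representor
 * becomes a VF port.
 */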
static int
ice_eswitch_br_port_link(struct ice_esw_br_offloads *br_offloads,
			 struct net_device *dev, int ifindex,
			 struct netlink_ext_ack *extack)
{
	struct ice_esw_br *bridge;
	int err;

	if (ice_eswitch_br_netdev_to_port(dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port is already attached to the bridge");
		return -EINVAL;
	}

	bridge = ice_eswitch_br_get(br_offloads, ifindex, extack);
	if (IS_ERR(bridge))
		return PTR_ERR(bridge);

	if (ice_is_port_repr_netdev(dev)) {
		struct ice_repr *repr = ice_netdev_to_repr(dev);

		err = ice_eswitch_br_vf_repr_port_init(bridge, repr);
		trace_ice_eswitch_br_port_link(repr->br_port);
	} else {
		struct net_device *ice_dev;
		struct ice_pf *pf;

		if (netif_is_lag_master(dev))
			ice_dev = ice_eswitch_br_get_uplink_from_lag(dev);
		else
			ice_dev = dev;

		if (!ice_dev)
			return 0;

		pf = ice_netdev_to_pf(ice_dev);

		err = ice_eswitch_br_uplink_port_init(bridge, pf);
		trace_ice_eswitch_br_port_link(pf->br_port);
	}
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to init bridge port");
		goto err_port_init;
	}

	return 0;

err_port_init:
	ice_eswitch_br_verify_deinit(br_offloads, bridge);
	return err;
}

static int
ice_eswitch_br_port_changeupper(struct notifier_block *nb, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct ice_esw_br_offloads *br_offloads;
	struct netlink_ext_ack *extack;
	struct net_device *upper;

	br_offloads = ice_nb_to_br_offloads(nb, netdev_nb);

	if (!ice_eswitch_br_is_dev_valid(dev))
		return 0;

	upper = info->upper_dev;
	if (!netif_is_bridge_master(upper))
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	if (info->linking)
		return ice_eswitch_br_port_link(br_offloads, dev,
						upper->ifindex, extack);
	else
		return ice_eswitch_br_port_unlink(br_offloads, dev,
						  upper->ifindex, extack);
}

static int
ice_eswitch_br_port_event(struct notifier_block *nb,
			  unsigned long event, void *ptr)
{
	int err = 0;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		err = ice_eswitch_br_port_changeupper(nb, ptr);
		break;
	}

	return notifier_from_errno(err);
}

static void
ice_eswitch_br_offloads_dealloc(struct ice_pf *pf)
{
	struct ice_esw_br_offloads *br_offloads = pf->switchdev.br_offloads;

	ASSERT_RTNL();

	if (!br_offloads)
		return;

	ice_eswitch_br_deinit(br_offloads, br_offloads->bridge);

	pf->switchdev.br_offloads = NULL;
	kfree(br_offloads);
}

static struct ice_esw_br_offloads *
ice_eswitch_br_offloads_alloc(struct ice_pf *pf)
{
	struct ice_esw_br_offloads *br_offloads;

	ASSERT_RTNL();

	if (pf->switchdev.br_offloads)
		return ERR_PTR(-EEXIST);

	br_offloads = kzalloc(sizeof(*br_offloads), GFP_KERNEL);
	if (!br_offloads)
		return ERR_PTR(-ENOMEM);

	pf->switchdev.br_offloads = br_offloads;
	br_offloads->pf = pf;

	return br_offloads;
}
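/* Teardown mirrors ice_eswitch_br_offloads_init() in reverse: stop the aging
 * work, unregister the notifiers, destroy the workqueue, and only then free
 * the offloads structure under the rtnl lock.
 */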
void
ice_eswitch_br_offloads_deinit(struct ice_pf *pf)
{
	struct ice_esw_br_offloads *br_offloads;

	br_offloads = pf->switchdev.br_offloads;
	if (!br_offloads)
		return;

	cancel_delayed_work_sync(&br_offloads->update_work);
	unregister_netdevice_notifier(&br_offloads->netdev_nb);
	unregister_switchdev_blocking_notifier(&br_offloads->switchdev_blk);
	unregister_switchdev_notifier(&br_offloads->switchdev_nb);
	destroy_workqueue(br_offloads->wq);
	/* The notifier blocks were just unregistered, so no new events will
	 * arrive, but some might still be in progress. Hold the rtnl lock
	 * and wait for them to finish.
	 */
	rtnl_lock();
	ice_eswitch_br_offloads_dealloc(pf);
	rtnl_unlock();
}

static void ice_eswitch_br_update(struct ice_esw_br_offloads *br_offloads)
{
	struct ice_esw_br *bridge = br_offloads->bridge;
	struct ice_esw_br_fdb_entry *entry, *tmp;

	if (!bridge)
		return;

	rtnl_lock();
	list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) {
		if (entry->flags & ICE_ESWITCH_BR_FDB_ADDED_BY_USER)
			continue;

		if (time_is_after_eq_jiffies(entry->last_use +
					     bridge->ageing_time))
			continue;

		ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, entry);
	}
	rtnl_unlock();
}

static void ice_eswitch_br_update_work(struct work_struct *work)
{
	struct ice_esw_br_offloads *br_offloads;

	br_offloads = ice_work_to_br_offloads(work);

	ice_eswitch_br_update(br_offloads);

	queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
			   ICE_ESW_BRIDGE_UPDATE_INTERVAL);
}

int
ice_eswitch_br_offloads_init(struct ice_pf *pf)
{
	struct ice_esw_br_offloads *br_offloads;
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	rtnl_lock();
	br_offloads = ice_eswitch_br_offloads_alloc(pf);
	rtnl_unlock();
	if (IS_ERR(br_offloads)) {
		dev_err(dev, "Failed to init eswitch bridge\n");
		return PTR_ERR(br_offloads);
	}

	br_offloads->wq = alloc_ordered_workqueue("ice_bridge_wq", 0);
	if (!br_offloads->wq) {
		err = -ENOMEM;
		dev_err(dev, "Failed to allocate bridge workqueue\n");
		goto err_alloc_wq;
	}

	br_offloads->switchdev_nb.notifier_call =
		ice_eswitch_br_switchdev_event;
	err = register_switchdev_notifier(&br_offloads->switchdev_nb);
	if (err) {
		dev_err(dev,
			"Failed to register switchdev notifier\n");
		goto err_reg_switchdev_nb;
	}

	br_offloads->switchdev_blk.notifier_call =
		ice_eswitch_br_event_blocking;
	err = register_switchdev_blocking_notifier(&br_offloads->switchdev_blk);
	if (err) {
		dev_err(dev,
			"Failed to register bridge blocking switchdev notifier\n");
		goto err_reg_switchdev_blk;
	}

	br_offloads->netdev_nb.notifier_call = ice_eswitch_br_port_event;
	err = register_netdevice_notifier(&br_offloads->netdev_nb);
	if (err) {
		dev_err(dev,
			"Failed to register bridge port event notifier\n");
		goto err_reg_netdev_nb;
	}

	INIT_DELAYED_WORK(&br_offloads->update_work,
			  ice_eswitch_br_update_work);
	queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
			   ICE_ESW_BRIDGE_UPDATE_INTERVAL);

	return 0;

err_reg_netdev_nb:
	unregister_switchdev_blocking_notifier(&br_offloads->switchdev_blk);
err_reg_switchdev_blk:
	unregister_switchdev_notifier(&br_offloads->switchdev_nb);
err_reg_switchdev_nb:
	destroy_workqueue(br_offloads->wq);
err_alloc_wq:
	rtnl_lock();
	ice_eswitch_br_offloads_dealloc(pf);
	rtnl_unlock();

	return err;
}