// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Spanning tree protocol; generic parts
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 */
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <net/switchdev.h>

#include "br_private.h"
#include "br_private_stp.h"

/* Time values in BPDUs are kept in jiffies and scaled (1/256) before
 * sending, so make sure the increment is at least one STP tick.
 */
#define MESSAGE_AGE_INCR	((HZ / 256) + 1)

static const char *const br_port_state_names[] = {
	[BR_STATE_DISABLED] = "disabled",
	[BR_STATE_LISTENING] = "listening",
	[BR_STATE_LEARNING] = "learning",
	[BR_STATE_FORWARDING] = "forwarding",
	[BR_STATE_BLOCKING] = "blocking",
};

void br_set_state(struct net_bridge_port *p, unsigned int state)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
		.flags = SWITCHDEV_F_DEFER,
		.u.stp_state = state,
	};
	int err;

	p->state = state;
	err = switchdev_port_attr_set(p->dev, &attr);
	if (err && err != -EOPNOTSUPP)
		br_warn(p->br, "error setting offload STP state on port %u(%s)\n",
			(unsigned int) p->port_no, p->dev->name);
	else
		br_info(p->br, "port %u(%s) entered %s state\n",
			(unsigned int) p->port_no, p->dev->name,
			br_port_state_names[p->state]);

	if (p->br->stp_enabled == BR_KERNEL_STP) {
		switch (p->state) {
		case BR_STATE_BLOCKING:
			p->stp_xstats.transition_blk++;
			break;
		case BR_STATE_FORWARDING:
			p->stp_xstats.transition_fwd++;
			break;
		}
	}
}

/* called under bridge lock */
struct net_bridge_port *br_get_port(struct net_bridge *br, u16 port_no)
{
	struct net_bridge_port *p;

	list_for_each_entry_rcu(p, &br->port_list, list) {
		if (p->port_no == port_no)
			return p;
	}

	return NULL;
}

/* called under bridge lock */
static int br_should_become_root_port(const struct net_bridge_port *p,
				      u16 root_port)
{
	struct net_bridge *br;
	struct net_bridge_port *rp;
	int t;

	br = p->br;
	if (p->state == BR_STATE_DISABLED ||
	    br_is_designated_port(p))
		return 0;

	if (memcmp(&br->bridge_id, &p->designated_root, 8) <= 0)
		return 0;

	if (!root_port)
		return 1;

	rp = br_get_port(br, root_port);

	t = memcmp(&p->designated_root, &rp->designated_root, 8);
	if (t < 0)
		return 1;
	else if (t > 0)
		return 0;

	if (p->designated_cost + p->path_cost <
	    rp->designated_cost + rp->path_cost)
		return 1;
	else if (p->designated_cost + p->path_cost >
		 rp->designated_cost + rp->path_cost)
		return 0;

	t = memcmp(&p->designated_bridge, &rp->designated_bridge, 8);
	if (t < 0)
		return 1;
	else if (t > 0)
		return 0;

	if (p->designated_port < rp->designated_port)
		return 1;
	else if (p->designated_port > rp->designated_port)
		return 0;

	if (p->port_id < rp->port_id)
		return 1;

	return 0;
}

static void br_root_port_block(const struct net_bridge *br,
			       struct net_bridge_port *p)
{
	br_notice(br, "port %u(%s) tried to become root port (blocked)\n",
		  (unsigned int) p->port_no, p->dev->name);

	br_set_state(p, BR_STATE_LISTENING);
	br_ifinfo_notify(RTM_NEWLINK, NULL, p);

	if (br->forward_delay > 0)
		mod_timer(&p->forward_delay_timer, jiffies + br->forward_delay);
}

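/* Root port selection: candidate ports are compared by
 * br_should_become_root_port() in priority order (designated root, then
 * accumulated path cost, designated bridge, designated port and finally our
 * own port_id as the tie-breaker). Ports carrying BR_ROOT_BLOCK are never
 * allowed to take the root role and are parked in the listening state
 * instead; if no port qualifies, the bridge considers itself the root.
 */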
/* called under bridge lock */
static void br_root_selection(struct net_bridge *br)
{
	struct net_bridge_port *p;
	u16 root_port = 0;

	list_for_each_entry(p, &br->port_list, list) {
		if (!br_should_become_root_port(p, root_port))
			continue;

		if (p->flags & BR_ROOT_BLOCK)
			br_root_port_block(br, p);
		else
			root_port = p->port_no;
	}

	br->root_port = root_port;

	if (!root_port) {
		br->designated_root = br->bridge_id;
		br->root_path_cost = 0;
	} else {
		p = br_get_port(br, root_port);
		br->designated_root = p->designated_root;
		br->root_path_cost = p->designated_cost + p->path_cost;
	}
}

/* called under bridge lock */
void br_become_root_bridge(struct net_bridge *br)
{
	br->max_age = br->bridge_max_age;
	br->hello_time = br->bridge_hello_time;
	br->forward_delay = br->bridge_forward_delay;
	br_topology_change_detection(br);
	del_timer(&br->tcn_timer);

	if (br->dev->flags & IFF_UP) {
		br_config_bpdu_generation(br);
		mod_timer(&br->hello_timer, jiffies + br->hello_time);
	}
}

/* called under bridge lock */
void br_transmit_config(struct net_bridge_port *p)
{
	struct br_config_bpdu bpdu;
	struct net_bridge *br;

	if (timer_pending(&p->hold_timer)) {
		p->config_pending = 1;
		return;
	}

	br = p->br;

	bpdu.topology_change = br->topology_change;
	bpdu.topology_change_ack = p->topology_change_ack;
	bpdu.root = br->designated_root;
	bpdu.root_path_cost = br->root_path_cost;
	bpdu.bridge_id = br->bridge_id;
	bpdu.port_id = p->port_id;
	if (br_is_root_bridge(br))
		bpdu.message_age = 0;
	else {
		struct net_bridge_port *root
			= br_get_port(br, br->root_port);
		bpdu.message_age = (jiffies - root->designated_age)
			+ MESSAGE_AGE_INCR;
	}
	bpdu.max_age = br->max_age;
	bpdu.hello_time = br->hello_time;
	bpdu.forward_delay = br->forward_delay;

	if (bpdu.message_age < br->max_age) {
		br_send_config_bpdu(p, &bpdu);
		p->topology_change_ack = 0;
		p->config_pending = 0;
		if (p->br->stp_enabled == BR_KERNEL_STP)
			mod_timer(&p->hold_timer,
				  round_jiffies(jiffies + BR_HOLD_TIME));
	}
}

/* called under bridge lock */
static void br_record_config_information(struct net_bridge_port *p,
					 const struct br_config_bpdu *bpdu)
{
	p->designated_root = bpdu->root;
	p->designated_cost = bpdu->root_path_cost;
	p->designated_bridge = bpdu->bridge_id;
	p->designated_port = bpdu->port_id;
	p->designated_age = jiffies - bpdu->message_age;

	mod_timer(&p->message_age_timer, jiffies
		  + (bpdu->max_age - bpdu->message_age));
}

/* called under bridge lock */
static void br_record_config_timeout_values(struct net_bridge *br,
					    const struct br_config_bpdu *bpdu)
{
	br->max_age = bpdu->max_age;
	br->hello_time = bpdu->hello_time;
	br->forward_delay = bpdu->forward_delay;
	__br_set_topology_change(br, bpdu->topology_change);
}

/* called under bridge lock */
void br_transmit_tcn(struct net_bridge *br)
{
	struct net_bridge_port *p;

	p = br_get_port(br, br->root_port);
	if (p)
		br_send_tcn_bpdu(p);
	else
		br_notice(br, "root port %u not found for topology notice\n",
			  br->root_port);
}

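/* A port should take the designated role for its segment if it already is
 * designated, if the root recorded on the port differs from the bridge's
 * designated root, or if the bridge's own root path cost, bridge id and
 * port id compare at least as favourably as the values recorded on the
 * port.
 */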
/* called under bridge lock */
static int br_should_become_designated_port(const struct net_bridge_port *p)
{
	struct net_bridge *br;
	int t;

	br = p->br;
	if (br_is_designated_port(p))
		return 1;

	if (memcmp(&p->designated_root, &br->designated_root, 8))
		return 1;

	if (br->root_path_cost < p->designated_cost)
		return 1;
	else if (br->root_path_cost > p->designated_cost)
		return 0;

	t = memcmp(&br->bridge_id, &p->designated_bridge, 8);
	if (t < 0)
		return 1;
	else if (t > 0)
		return 0;

	if (p->port_id < p->designated_port)
		return 1;

	return 0;
}

/* called under bridge lock */
static void br_designated_port_selection(struct net_bridge *br)
{
	struct net_bridge_port *p;

	list_for_each_entry(p, &br->port_list, list) {
		if (p->state != BR_STATE_DISABLED &&
		    br_should_become_designated_port(p))
			br_become_designated_port(p);
	}
}

/* called under bridge lock */
static int br_supersedes_port_info(const struct net_bridge_port *p,
				   const struct br_config_bpdu *bpdu)
{
	int t;

	t = memcmp(&bpdu->root, &p->designated_root, 8);
	if (t < 0)
		return 1;
	else if (t > 0)
		return 0;

	if (bpdu->root_path_cost < p->designated_cost)
		return 1;
	else if (bpdu->root_path_cost > p->designated_cost)
		return 0;

	t = memcmp(&bpdu->bridge_id, &p->designated_bridge, 8);
	if (t < 0)
		return 1;
	else if (t > 0)
		return 0;

	if (memcmp(&bpdu->bridge_id, &p->br->bridge_id, 8))
		return 1;

	if (bpdu->port_id <= p->designated_port)
		return 1;

	return 0;
}

/* called under bridge lock */
static void br_topology_change_acknowledged(struct net_bridge *br)
{
	br->topology_change_detected = 0;
	del_timer(&br->tcn_timer);
}

/* called under bridge lock */
void br_topology_change_detection(struct net_bridge *br)
{
	int isroot = br_is_root_bridge(br);

	if (br->stp_enabled != BR_KERNEL_STP)
		return;

	br_info(br, "topology change detected, %s\n",
		isroot ? "propagating" : "sending tcn bpdu");

	if (isroot) {
		__br_set_topology_change(br, 1);
		mod_timer(&br->topology_change_timer, jiffies
			  + br->bridge_forward_delay + br->bridge_max_age);
	} else if (!br->topology_change_detected) {
		br_transmit_tcn(br);
		mod_timer(&br->tcn_timer, jiffies + br->bridge_hello_time);
	}

	br->topology_change_detected = 1;
}

/* called under bridge lock */
void br_config_bpdu_generation(struct net_bridge *br)
{
	struct net_bridge_port *p;

	list_for_each_entry(p, &br->port_list, list) {
		if (p->state != BR_STATE_DISABLED &&
		    br_is_designated_port(p))
			br_transmit_config(p);
	}
}

/* called under bridge lock */
static void br_reply(struct net_bridge_port *p)
{
	br_transmit_config(p);
}

/* called under bridge lock */
void br_configuration_update(struct net_bridge *br)
{
	br_root_selection(br);
	br_designated_port_selection(br);
}

/* called under bridge lock */
void br_become_designated_port(struct net_bridge_port *p)
{
	struct net_bridge *br;

	br = p->br;
	p->designated_root = br->designated_root;
	p->designated_cost = br->root_path_cost;
	p->designated_bridge = br->bridge_id;
	p->designated_port = p->port_id;
}

/* called under bridge lock */
static void br_make_blocking(struct net_bridge_port *p)
{
	if (p->state != BR_STATE_DISABLED &&
	    p->state != BR_STATE_BLOCKING) {
		if (p->state == BR_STATE_FORWARDING ||
		    p->state == BR_STATE_LEARNING)
			br_topology_change_detection(p->br);

		br_set_state(p, BR_STATE_BLOCKING);
		br_ifinfo_notify(RTM_NEWLINK, NULL, p);

		del_timer(&p->forward_delay_timer);
	}
}

/* called under bridge lock */
static void br_make_forwarding(struct net_bridge_port *p)
{
	struct net_bridge *br = p->br;

	if (p->state != BR_STATE_BLOCKING)
		return;

	if (br->stp_enabled == BR_NO_STP || br->forward_delay == 0) {
		br_set_state(p, BR_STATE_FORWARDING);
		br_topology_change_detection(br);
		del_timer(&p->forward_delay_timer);
	} else if (br->stp_enabled == BR_KERNEL_STP)
		br_set_state(p, BR_STATE_LISTENING);
	else
		br_set_state(p, BR_STATE_LEARNING);

	br_ifinfo_notify(RTM_NEWLINK, NULL, p);

	if (br->forward_delay != 0)
		mod_timer(&p->forward_delay_timer, jiffies + br->forward_delay);
}

/* called under bridge lock */
void br_port_state_selection(struct net_bridge *br)
{
	struct net_bridge_port *p;
	unsigned int liveports = 0;

	list_for_each_entry(p, &br->port_list, list) {
		if (p->state == BR_STATE_DISABLED)
			continue;

		/* Don't change port states if userspace is handling STP */
		if (br->stp_enabled != BR_USER_STP) {
			if (p->port_no == br->root_port) {
				p->config_pending = 0;
				p->topology_change_ack = 0;
				br_make_forwarding(p);
			} else if (br_is_designated_port(p)) {
				del_timer(&p->message_age_timer);
				br_make_forwarding(p);
			} else {
				p->config_pending = 0;
				p->topology_change_ack = 0;
				br_make_blocking(p);
			}
		}

		if (p->state != BR_STATE_BLOCKING)
			br_multicast_enable_port(p);
		/* Multicast is not disabled for the port when it goes into
		 * the blocking state because the timers will expire and stop
		 * by themselves without sending more queries.
		 */
		if (p->state == BR_STATE_FORWARDING)
			++liveports;
	}

	if (liveports == 0)
		netif_carrier_off(br->dev);
	else
		netif_carrier_on(br->dev);
}

/* called under bridge lock */
static void br_topology_change_acknowledge(struct net_bridge_port *p)
{
	p->topology_change_ack = 1;
	br_transmit_config(p);
}

/* called under bridge lock */
void br_received_config_bpdu(struct net_bridge_port *p,
			     const struct br_config_bpdu *bpdu)
{
	struct net_bridge *br;
	int was_root;

	p->stp_xstats.rx_bpdu++;

	br = p->br;
	was_root = br_is_root_bridge(br);

	if (br_supersedes_port_info(p, bpdu)) {
		br_record_config_information(p, bpdu);
		br_configuration_update(br);
		br_port_state_selection(br);

		if (!br_is_root_bridge(br) && was_root) {
			del_timer(&br->hello_timer);
			if (br->topology_change_detected) {
				del_timer(&br->topology_change_timer);
				br_transmit_tcn(br);

				mod_timer(&br->tcn_timer,
					  jiffies + br->bridge_hello_time);
			}
		}

		if (p->port_no == br->root_port) {
			br_record_config_timeout_values(br, bpdu);
			br_config_bpdu_generation(br);
			if (bpdu->topology_change_ack)
				br_topology_change_acknowledged(br);
		}
	} else if (br_is_designated_port(p)) {
		br_reply(p);
	}
}

/* called under bridge lock */
void br_received_tcn_bpdu(struct net_bridge_port *p)
{
	p->stp_xstats.rx_tcn++;

	if (br_is_designated_port(p)) {
		br_info(p->br, "port %u(%s) received tcn bpdu\n",
			(unsigned int) p->port_no, p->dev->name);

		br_topology_change_detection(p->br);
		br_topology_change_acknowledge(p);
	}
}

/* Change bridge STP parameter */
int br_set_hello_time(struct net_bridge *br, unsigned long val)
{
	unsigned long t = clock_t_to_jiffies(val);

	if (t < BR_MIN_HELLO_TIME || t > BR_MAX_HELLO_TIME)
		return -ERANGE;

	spin_lock_bh(&br->lock);
	br->bridge_hello_time = t;
	if (br_is_root_bridge(br))
		br->hello_time = br->bridge_hello_time;
	spin_unlock_bh(&br->lock);
	return 0;
}

int br_set_max_age(struct net_bridge *br, unsigned long val)
{
	unsigned long t = clock_t_to_jiffies(val);

	if (t < BR_MIN_MAX_AGE || t > BR_MAX_MAX_AGE)
		return -ERANGE;

	spin_lock_bh(&br->lock);
	br->bridge_max_age = t;
	if (br_is_root_bridge(br))
		br->max_age = br->bridge_max_age;
	spin_unlock_bh(&br->lock);
	return 0;
}

/* called under bridge lock */
int __set_ageing_time(struct net_device *dev, unsigned long t)
{
	struct switchdev_attr attr = {
		.orig_dev = dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP | SWITCHDEV_F_DEFER,
		.u.ageing_time = jiffies_to_clock_t(t),
	};
	int err;

	err = switchdev_port_attr_set(dev, &attr);
	if (err && err != -EOPNOTSUPP)
		return err;

	return 0;
}

/* Set the time interval that dynamically learned forwarding entries live.
 * For a pure software bridge, allow values outside the 802.1
 * standard specification for special cases:
 *  0 - entry never ages (all permanent)
 *  1 - entry disappears (no persistence)
 *
 * Offloaded switch entries may be more restrictive.
 */
int br_set_ageing_time(struct net_bridge *br, clock_t ageing_time)
{
	unsigned long t = clock_t_to_jiffies(ageing_time);
	int err;

	err = __set_ageing_time(br->dev, t);
	if (err)
		return err;

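	/* The device accepted the new ageing time (or does not offload it at
	 * all), so commit it to the software bridge and kick the FDB garbage
	 * collection work.
	 */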
	spin_lock_bh(&br->lock);
	br->bridge_ageing_time = t;
	br->ageing_time = t;
	spin_unlock_bh(&br->lock);

	mod_delayed_work(system_long_wq, &br->gc_work, 0);

	return 0;
}

/* called under bridge lock */
void __br_set_topology_change(struct net_bridge *br, unsigned char val)
{
	unsigned long t;
	int err;

	if (br->stp_enabled == BR_KERNEL_STP && br->topology_change != val) {
		/* On topology change, set the bridge ageing time to twice the
		 * forward delay. Otherwise, restore its default ageing time.
		 */

		if (val) {
			t = 2 * br->forward_delay;
			br_debug(br, "decreasing ageing time to %lu\n", t);
		} else {
			t = br->bridge_ageing_time;
			br_debug(br, "restoring ageing time to %lu\n", t);
		}

		err = __set_ageing_time(br->dev, t);
		if (err)
			br_warn(br, "error offloading ageing time\n");
		else
			br->ageing_time = t;
	}

	br->topology_change = val;
}

void __br_set_forward_delay(struct net_bridge *br, unsigned long t)
{
	br->bridge_forward_delay = t;
	if (br_is_root_bridge(br))
		br->forward_delay = br->bridge_forward_delay;
}

int br_set_forward_delay(struct net_bridge *br, unsigned long val)
{
	unsigned long t = clock_t_to_jiffies(val);
	int err = -ERANGE;

	spin_lock_bh(&br->lock);
	if (br->stp_enabled != BR_NO_STP &&
	    (t < BR_MIN_FORWARD_DELAY || t > BR_MAX_FORWARD_DELAY))
		goto unlock;

	__br_set_forward_delay(br, t);
	err = 0;

unlock:
	spin_unlock_bh(&br->lock);
	return err;
}