// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Spanning tree protocol; generic parts
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 */
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <net/switchdev.h>

#include "br_private.h"
#include "br_private_stp.h"

/* Time values in BPDUs are kept in jiffies and scaled (1/256) before
 * sending, so make sure the increment is at least one STP tick.
 */
#define MESSAGE_AGE_INCR	((HZ / 256) + 1)

static const char *const br_port_state_names[] = {
	[BR_STATE_DISABLED] = "disabled",
	[BR_STATE_LISTENING] = "listening",
	[BR_STATE_LEARNING] = "learning",
	[BR_STATE_FORWARDING] = "forwarding",
	[BR_STATE_BLOCKING] = "blocking",
};

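/* Update the software STP state of a port and, via switchdev, let any
 * offloading driver program the same state into hardware.  Ports whose
 * state is driven by another protocol (MRP) are left untouched.
 */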
void br_set_state(struct net_bridge_port *p, unsigned int state)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
		.flags = SWITCHDEV_F_DEFER,
		.u.stp_state = state,
	};
	int err;

	/* Don't change the port's state if it is driven by a different
	 * protocol.
	 */
	if (p->flags & BR_MRP_AWARE)
		return;

	p->state = state;
	if (br_opt_get(p->br, BROPT_MST_ENABLED)) {
		err = br_mst_set_state(p, 0, state, NULL);
		if (err)
			br_warn(p->br, "error setting MST state on port %u(%s)\n",
				p->port_no, netdev_name(p->dev));
	}
	err = switchdev_port_attr_set(p->dev, &attr, NULL);
	if (err && err != -EOPNOTSUPP)
		br_warn(p->br, "error setting offload STP state on port %u(%s)\n",
			(unsigned int) p->port_no, p->dev->name);
	else
		br_info(p->br, "port %u(%s) entered %s state\n",
			(unsigned int) p->port_no, p->dev->name,
			br_port_state_names[p->state]);

	if (p->br->stp_enabled == BR_KERNEL_STP) {
		switch (p->state) {
		case BR_STATE_BLOCKING:
			p->stp_xstats.transition_blk++;
			break;
		case BR_STATE_FORWARDING:
			p->stp_xstats.transition_fwd++;
			break;
		}
	}
}

u8 br_port_get_stp_state(const struct net_device *dev)
{
	struct net_bridge_port *p;

	ASSERT_RTNL();

	p = br_port_get_rtnl(dev);
	if (!p)
		return BR_STATE_DISABLED;

	return p->state;
}
EXPORT_SYMBOL_GPL(br_port_get_stp_state);

/* called under bridge lock */
struct net_bridge_port *br_get_port(struct net_bridge *br, u16 port_no)
{
	struct net_bridge_port *p;

	list_for_each_entry_rcu(p, &br->port_list, list,
				lockdep_is_held(&br->lock)) {
		if (p->port_no == port_no)
			return p;
	}

	return NULL;
}

/* called under bridge lock */
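/* Decide whether @p is a better root port candidate than the current best
 * @root_port.  Ties are broken in 802.1D order: lowest designated root,
 * then lowest root path cost, then lowest designated bridge ID, then
 * lowest designated port ID, and finally lowest port ID.
 */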
static int br_should_become_root_port(const struct net_bridge_port *p,
				      u16 root_port)
{
	struct net_bridge *br;
	struct net_bridge_port *rp;
	int t;

	br = p->br;
	if (p->state == BR_STATE_DISABLED ||
	    br_is_designated_port(p))
		return 0;

	if (memcmp(&br->bridge_id, &p->designated_root, 8) <= 0)
		return 0;

	if (!root_port)
		return 1;

	rp = br_get_port(br, root_port);

	t = memcmp(&p->designated_root, &rp->designated_root, 8);
	if (t < 0)
		return 1;
	else if (t > 0)
		return 0;

	if (p->designated_cost + p->path_cost <
	    rp->designated_cost + rp->path_cost)
		return 1;
	else if (p->designated_cost + p->path_cost >
		 rp->designated_cost + rp->path_cost)
		return 0;

	t = memcmp(&p->designated_bridge, &rp->designated_bridge, 8);
	if (t < 0)
		return 1;
	else if (t > 0)
		return 0;

	if (p->designated_port < rp->designated_port)
		return 1;
	else if (p->designated_port > rp->designated_port)
		return 0;

	if (p->port_id < rp->port_id)
		return 1;

	return 0;
}

static void br_root_port_block(const struct net_bridge *br,
			       struct net_bridge_port *p)
{
	br_notice(br, "port %u(%s) tried to become root port (blocked)\n",
		  (unsigned int) p->port_no, p->dev->name);

	br_set_state(p, BR_STATE_LISTENING);
	br_ifinfo_notify(RTM_NEWLINK, NULL, p);

	if (br->forward_delay > 0)
		mod_timer(&p->forward_delay_timer, jiffies + br->forward_delay);
}

/* called under bridge lock */
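/* Walk all ports and pick the root port for this bridge.  If no port
 * qualifies (or the only candidates carry BR_ROOT_BLOCK), the bridge
 * considers itself the root.
 */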
static void br_root_selection(struct net_bridge *br)
{
	struct net_bridge_port *p;
	u16 root_port = 0;

	list_for_each_entry(p, &br->port_list, list) {
		if (!br_should_become_root_port(p, root_port))
			continue;

		if (p->flags & BR_ROOT_BLOCK)
			br_root_port_block(br, p);
		else
			root_port = p->port_no;
	}

	br->root_port = root_port;

	if (!root_port) {
		br->designated_root = br->bridge_id;
		br->root_path_cost = 0;
	} else {
		p = br_get_port(br, root_port);
		br->designated_root = p->designated_root;
		br->root_path_cost = p->designated_cost + p->path_cost;
	}
}

/* called under bridge lock */
void br_become_root_bridge(struct net_bridge *br)
{
	br->max_age = br->bridge_max_age;
	br->hello_time = br->bridge_hello_time;
	br->forward_delay = br->bridge_forward_delay;
	br_topology_change_detection(br);
	del_timer(&br->tcn_timer);

	if (br->dev->flags & IFF_UP) {
		br_config_bpdu_generation(br);
		mod_timer(&br->hello_timer, jiffies + br->hello_time);
	}
}

/* called under bridge lock */
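/* Build and send a config BPDU on @p.  Transmission is rate limited by the
 * hold timer: while it is still running, only mark the port as having a
 * pending config so the timer callback can send it later.
 */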
void br_transmit_config(struct net_bridge_port *p)
{
	struct br_config_bpdu bpdu;
	struct net_bridge *br;

	if (timer_pending(&p->hold_timer)) {
		p->config_pending = 1;
		return;
	}

	br = p->br;

	bpdu.topology_change = br->topology_change;
	bpdu.topology_change_ack = p->topology_change_ack;
	bpdu.root = br->designated_root;
	bpdu.root_path_cost = br->root_path_cost;
	bpdu.bridge_id = br->bridge_id;
	bpdu.port_id = p->port_id;
	if (br_is_root_bridge(br))
		bpdu.message_age = 0;
	else {
		struct net_bridge_port *root
			= br_get_port(br, br->root_port);
		bpdu.message_age = (jiffies - root->designated_age)
			+ MESSAGE_AGE_INCR;
	}
	bpdu.max_age = br->max_age;
	bpdu.hello_time = br->hello_time;
	bpdu.forward_delay = br->forward_delay;

	if (bpdu.message_age < br->max_age) {
		br_send_config_bpdu(p, &bpdu);
		p->topology_change_ack = 0;
		p->config_pending = 0;
		if (p->br->stp_enabled == BR_KERNEL_STP)
			mod_timer(&p->hold_timer,
				  round_jiffies(jiffies + BR_HOLD_TIME));
	}
}

/* called under bridge lock */
static void br_record_config_information(struct net_bridge_port *p,
					 const struct br_config_bpdu *bpdu)
{
	p->designated_root = bpdu->root;
	p->designated_cost = bpdu->root_path_cost;
	p->designated_bridge = bpdu->bridge_id;
	p->designated_port = bpdu->port_id;
	p->designated_age = jiffies - bpdu->message_age;

	mod_timer(&p->message_age_timer, jiffies
		  + (bpdu->max_age - bpdu->message_age));
}

/* called under bridge lock */
static void br_record_config_timeout_values(struct net_bridge *br,
					    const struct br_config_bpdu *bpdu)
{
	br->max_age = bpdu->max_age;
	br->hello_time = bpdu->hello_time;
	br->forward_delay = bpdu->forward_delay;
	__br_set_topology_change(br, bpdu->topology_change);
}

/* called under bridge lock */
void br_transmit_tcn(struct net_bridge *br)
{
	struct net_bridge_port *p;

	p = br_get_port(br, br->root_port);
	if (p)
		br_send_tcn_bpdu(p);
	else
		br_notice(br, "root port %u not found for topology notice\n",
			  br->root_port);
}

/* called under bridge lock */
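/* Return nonzero if @p should become the designated port on its segment,
 * i.e. if the information this bridge would advertise is at least as good
 * as what is currently recorded for the segment's designated port.
 */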
static int br_should_become_designated_port(const struct net_bridge_port *p)
{
	struct net_bridge *br;
	int t;

	br = p->br;
	if (br_is_designated_port(p))
		return 1;

	if (memcmp(&p->designated_root, &br->designated_root, 8))
		return 1;

	if (br->root_path_cost < p->designated_cost)
		return 1;
	else if (br->root_path_cost > p->designated_cost)
		return 0;

	t = memcmp(&br->bridge_id, &p->designated_bridge, 8);
	if (t < 0)
		return 1;
	else if (t > 0)
		return 0;

	if (p->port_id < p->designated_port)
		return 1;

	return 0;
}

/* called under bridge lock */
static void br_designated_port_selection(struct net_bridge *br)
{
	struct net_bridge_port *p;

	list_for_each_entry(p, &br->port_list, list) {
		if (p->state != BR_STATE_DISABLED &&
		    br_should_become_designated_port(p))
			br_become_designated_port(p);
	}
}

/* called under bridge lock */
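/* Return nonzero if the information carried by the received config BPDU is
 * better than (or sent by the same bridge as) the information currently
 * recorded for this port, using the usual root/cost/bridge/port ordering.
 */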
static int br_supersedes_port_info(const struct net_bridge_port *p,
				   const struct br_config_bpdu *bpdu)
{
	int t;

	t = memcmp(&bpdu->root, &p->designated_root, 8);
	if (t < 0)
		return 1;
	else if (t > 0)
		return 0;

	if (bpdu->root_path_cost < p->designated_cost)
		return 1;
	else if (bpdu->root_path_cost > p->designated_cost)
		return 0;

	t = memcmp(&bpdu->bridge_id, &p->designated_bridge, 8);
	if (t < 0)
		return 1;
	else if (t > 0)
		return 0;

	if (memcmp(&bpdu->bridge_id, &p->br->bridge_id, 8))
		return 1;

	if (bpdu->port_id <= p->designated_port)
		return 1;

	return 0;
}

/* called under bridge lock */
static void br_topology_change_acknowledged(struct net_bridge *br)
{
	br->topology_change_detected = 0;
	del_timer(&br->tcn_timer);
}

/* called under bridge lock */
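/* Handle a detected topology change.  The root bridge starts propagating
 * the change flag in its config BPDUs; a non-root bridge notifies the root
 * by sending TCN BPDUs towards it until the change is acknowledged.
 */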
void br_topology_change_detection(struct net_bridge *br)
{
	int isroot = br_is_root_bridge(br);

	if (br->stp_enabled != BR_KERNEL_STP)
		return;

	br_info(br, "topology change detected, %s\n",
		isroot ? "propagating" : "sending tcn bpdu");

	if (isroot) {
		__br_set_topology_change(br, 1);
		mod_timer(&br->topology_change_timer, jiffies
			  + br->bridge_forward_delay + br->bridge_max_age);
	} else if (!br->topology_change_detected) {
		br_transmit_tcn(br);
		mod_timer(&br->tcn_timer, jiffies + br->bridge_hello_time);
	}

	br->topology_change_detected = 1;
}

/* called under bridge lock */
void br_config_bpdu_generation(struct net_bridge *br)
{
	struct net_bridge_port *p;

	list_for_each_entry(p, &br->port_list, list) {
		if (p->state != BR_STATE_DISABLED &&
		    br_is_designated_port(p))
			br_transmit_config(p);
	}
}

/* called under bridge lock */
static void br_reply(struct net_bridge_port *p)
{
	br_transmit_config(p);
}

/* called under bridge lock */
void br_configuration_update(struct net_bridge *br)
{
	br_root_selection(br);
	br_designated_port_selection(br);
}

/* called under bridge lock */
void br_become_designated_port(struct net_bridge_port *p)
{
	struct net_bridge *br;

	br = p->br;
	p->designated_root = br->designated_root;
	p->designated_cost = br->root_path_cost;
	p->designated_bridge = br->bridge_id;
	p->designated_port = p->port_id;
}

/* called under bridge lock */
static void br_make_blocking(struct net_bridge_port *p)
{
	if (p->state != BR_STATE_DISABLED &&
	    p->state != BR_STATE_BLOCKING) {
		if (p->state == BR_STATE_FORWARDING ||
		    p->state == BR_STATE_LEARNING)
			br_topology_change_detection(p->br);

		br_set_state(p, BR_STATE_BLOCKING);
		br_ifinfo_notify(RTM_NEWLINK, NULL, p);

		del_timer(&p->forward_delay_timer);
	}
}

/* called under bridge lock */
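/* Move a blocking port towards forwarding.  With STP disabled or a zero
 * forward delay the port goes straight to forwarding; with kernel STP it
 * first enters listening, otherwise learning, and the forward delay timer
 * drives the remaining transitions.
 */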
static void br_make_forwarding(struct net_bridge_port *p)
{
	struct net_bridge *br = p->br;

	if (p->state != BR_STATE_BLOCKING)
		return;

	if (br->stp_enabled == BR_NO_STP || br->forward_delay == 0) {
		br_set_state(p, BR_STATE_FORWARDING);
		br_topology_change_detection(br);
		del_timer(&p->forward_delay_timer);
	} else if (br->stp_enabled == BR_KERNEL_STP)
		br_set_state(p, BR_STATE_LISTENING);
	else
		br_set_state(p, BR_STATE_LEARNING);

	br_ifinfo_notify(RTM_NEWLINK, NULL, p);

	if (br->forward_delay != 0)
		mod_timer(&p->forward_delay_timer, jiffies + br->forward_delay);
}

/* called under bridge lock */
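/* Recompute the state of every port after a configuration update: the root
 * port and designated ports head towards forwarding, everything else is
 * blocked.  The bridge device's carrier reflects whether any port forwards.
 */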
void br_port_state_selection(struct net_bridge *br)
{
	struct net_bridge_port *p;
	unsigned int liveports = 0;

	list_for_each_entry(p, &br->port_list, list) {
		if (p->state == BR_STATE_DISABLED)
			continue;

		/* Don't change port states if userspace is handling STP */
		if (br->stp_enabled != BR_USER_STP) {
			if (p->port_no == br->root_port) {
				p->config_pending = 0;
				p->topology_change_ack = 0;
				br_make_forwarding(p);
			} else if (br_is_designated_port(p)) {
				del_timer(&p->message_age_timer);
				br_make_forwarding(p);
			} else {
				p->config_pending = 0;
				p->topology_change_ack = 0;
				br_make_blocking(p);
			}
		}

		if (p->state != BR_STATE_BLOCKING)
			br_multicast_enable_port(p);
		/* Multicast is not disabled for the port when it goes in
		 * blocking state because the timers will expire and stop by
		 * themselves without sending more queries.
		 */
		if (p->state == BR_STATE_FORWARDING)
			++liveports;
	}

	if (liveports == 0)
		netif_carrier_off(br->dev);
	else
		netif_carrier_on(br->dev);
}

/* called under bridge lock */
static void br_topology_change_acknowledge(struct net_bridge_port *p)
{
	p->topology_change_ack = 1;
	br_transmit_config(p);
}

/* called under bridge lock */
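/* Process a received config BPDU.  If it supersedes the port's recorded
 * information, re-run root and designated port selection and update port
 * states; if this bridge remains designated on that segment, answer the
 * inferior BPDU with its own configuration instead.
 */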
void br_received_config_bpdu(struct net_bridge_port *p,
			     const struct br_config_bpdu *bpdu)
{
	struct net_bridge *br;
	int was_root;

	p->stp_xstats.rx_bpdu++;

	br = p->br;
	was_root = br_is_root_bridge(br);

	if (br_supersedes_port_info(p, bpdu)) {
		br_record_config_information(p, bpdu);
		br_configuration_update(br);
		br_port_state_selection(br);

		if (!br_is_root_bridge(br) && was_root) {
			del_timer(&br->hello_timer);
			if (br->topology_change_detected) {
				del_timer(&br->topology_change_timer);
				br_transmit_tcn(br);

				mod_timer(&br->tcn_timer,
					  jiffies + br->bridge_hello_time);
			}
		}

		if (p->port_no == br->root_port) {
			br_record_config_timeout_values(br, bpdu);
			br_config_bpdu_generation(br);
			if (bpdu->topology_change_ack)
				br_topology_change_acknowledged(br);
		}
	} else if (br_is_designated_port(p)) {
		br_reply(p);
	}
}

/* called under bridge lock */
void br_received_tcn_bpdu(struct net_bridge_port *p)
{
	p->stp_xstats.rx_tcn++;

	if (br_is_designated_port(p)) {
		br_info(p->br, "port %u(%s) received tcn bpdu\n",
			(unsigned int) p->port_no, p->dev->name);

		br_topology_change_detection(p->br);
		br_topology_change_acknowledge(p);
	}
}

/* Change bridge STP parameter */
int br_set_hello_time(struct net_bridge *br, unsigned long val)
{
	unsigned long t = clock_t_to_jiffies(val);

	if (t < BR_MIN_HELLO_TIME || t > BR_MAX_HELLO_TIME)
		return -ERANGE;

	spin_lock_bh(&br->lock);
	br->bridge_hello_time = t;
	if (br_is_root_bridge(br))
		br->hello_time = br->bridge_hello_time;
	spin_unlock_bh(&br->lock);
	return 0;
}

int br_set_max_age(struct net_bridge *br, unsigned long val)
{
	unsigned long t = clock_t_to_jiffies(val);

	if (t < BR_MIN_MAX_AGE || t > BR_MAX_MAX_AGE)
		return -ERANGE;

	spin_lock_bh(&br->lock);
	br->bridge_max_age = t;
	if (br_is_root_bridge(br))
		br->max_age = br->bridge_max_age;
	spin_unlock_bh(&br->lock);
	return 0;
}

/* called under bridge lock */
int __set_ageing_time(struct net_device *dev, unsigned long t)
{
	struct switchdev_attr attr = {
		.orig_dev = dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP | SWITCHDEV_F_DEFER,
		.u.ageing_time = jiffies_to_clock_t(t),
	};
	int err;

	err = switchdev_port_attr_set(dev, &attr, NULL);
	if (err && err != -EOPNOTSUPP)
		return err;

	return 0;
}

/* Set the time interval that dynamic forwarding entries live.
 * For a pure software bridge, allow values outside the 802.1
 * standard specification for special cases:
 * 0 - entry never ages (all permanent)
 * 1 - entry disappears (no persistence)
 *
 * Offloaded switch entries may be more restrictive.
 */
int br_set_ageing_time(struct net_bridge *br, clock_t ageing_time)
{
	unsigned long t = clock_t_to_jiffies(ageing_time);
	int err;

	err = __set_ageing_time(br->dev, t);
	if (err)
		return err;

	spin_lock_bh(&br->lock);
	br->bridge_ageing_time = t;
	br->ageing_time = t;
	spin_unlock_bh(&br->lock);

	mod_delayed_work(system_long_wq, &br->gc_work, 0);

	return 0;
}

clock_t br_get_ageing_time(const struct net_device *br_dev)
{
	const struct net_bridge *br;

	if (!netif_is_bridge_master(br_dev))
		return 0;

	br = netdev_priv(br_dev);

	return jiffies_to_clock_t(br->ageing_time);
}
EXPORT_SYMBOL_GPL(br_get_ageing_time);

/* called under bridge lock */
void __br_set_topology_change(struct net_bridge *br, unsigned char val)
{
	unsigned long t;
	int err;

	if (br->stp_enabled == BR_KERNEL_STP && br->topology_change != val) {
		/* On topology change, set the bridge ageing time to twice the
		 * forward delay. Otherwise, restore its default ageing time.
		 */

		if (val) {
			t = 2 * br->forward_delay;
			br_debug(br, "decreasing ageing time to %lu\n", t);
		} else {
			t = br->bridge_ageing_time;
			br_debug(br, "restoring ageing time to %lu\n", t);
		}

		err = __set_ageing_time(br->dev, t);
		if (err)
			br_warn(br, "error offloading ageing time\n");
		else
			br->ageing_time = t;
	}

	br->topology_change = val;
}

void __br_set_forward_delay(struct net_bridge *br, unsigned long t)
{
	br->bridge_forward_delay = t;
	if (br_is_root_bridge(br))
		br->forward_delay = br->bridge_forward_delay;
}

int br_set_forward_delay(struct net_bridge *br, unsigned long val)
{
	unsigned long t = clock_t_to_jiffies(val);
	int err = -ERANGE;

	spin_lock_bh(&br->lock);
	if (br->stp_enabled != BR_NO_STP &&
	    (t < BR_MIN_FORWARD_DELAY || t > BR_MAX_FORWARD_DELAY))
		goto unlock;

	__br_set_forward_delay(br, t);
	err = 0;

unlock:
	spin_unlock_bh(&br->lock);
	return err;
}