xref: /openbmc/linux/net/core/link_watch.c (revision d32fd6bb9f2bc8178cdd65ebec1ad670a8bfa241)
12874c5fdSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  * Linux network device link state notification
41da177e4SLinus Torvalds  *
51da177e4SLinus Torvalds  * Author:
61da177e4SLinus Torvalds  *     Stefan Rompf <sux@loplof.de>
71da177e4SLinus Torvalds  */
81da177e4SLinus Torvalds 
91da177e4SLinus Torvalds #include <linux/module.h>
101da177e4SLinus Torvalds #include <linux/netdevice.h>
111da177e4SLinus Torvalds #include <linux/if.h>
121da177e4SLinus Torvalds #include <net/sock.h>
13cacaddf5STommy S. Christensen #include <net/pkt_sched.h>
141da177e4SLinus Torvalds #include <linux/rtnetlink.h>
151da177e4SLinus Torvalds #include <linux/jiffies.h>
161da177e4SLinus Torvalds #include <linux/spinlock.h>
171da177e4SLinus Torvalds #include <linux/workqueue.h>
181da177e4SLinus Torvalds #include <linux/bitops.h>
19e56f7359SFabian Frederick #include <linux/types.h>
201da177e4SLinus Torvalds 
216264f58cSJakub Kicinski #include "dev.h"
221da177e4SLinus Torvalds 
231da177e4SLinus Torvalds enum lw_bits {
24d9568ba9SHerbert Xu 	LW_URGENT = 0,
251da177e4SLinus Torvalds };
261da177e4SLinus Torvalds 
271da177e4SLinus Torvalds static unsigned long linkwatch_flags;
281da177e4SLinus Torvalds static unsigned long linkwatch_nextevent;
291da177e4SLinus Torvalds 
3065f27f38SDavid Howells static void linkwatch_event(struct work_struct *dummy);
3165f27f38SDavid Howells static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event);
321da177e4SLinus Torvalds 
33e014debeSEric Dumazet static LIST_HEAD(lweventlist);
341da177e4SLinus Torvalds static DEFINE_SPINLOCK(lweventlist_lock);
351da177e4SLinus Torvalds 
/* Compute the RFC 2863 operational state implied by the device's current
 * carrier/dormant/testing flags.  Pure query: returns one of IF_OPER_*
 * and does not modify the device.
 */
static unsigned char default_operstate(const struct net_device *dev)
{
	/* Testing mode takes precedence over everything else. */
	if (netif_testing(dev))
		return IF_OPER_TESTING;

	/* Some uppers (DSA) have additional sources for being down, so
	 * first check whether lower is indeed the source of its down state.
	 */
	if (!netif_carrier_ok(dev)) {
		struct net_device *peer;
		int iflink;

		/* If called from netdev_run_todo()/linkwatch_sync_dev(),
		 * dev_net(dev) can be already freed, and RTNL is not held.
		 */
		if (dev->reg_state <= NETREG_REGISTERED)
			iflink = dev_get_iflink(dev);
		else
			iflink = dev->ifindex;

		/* No distinct lower device: this device is the source of
		 * its own down state.
		 */
		if (iflink == dev->ifindex)
			return IF_OPER_DOWN;

		/* Looking up the lower device requires RTNL (and a live
		 * netns), which the branch above guarantees here.
		 */
		ASSERT_RTNL();
		peer = __dev_get_by_index(dev_net(dev), iflink);
		if (!peer)
			return IF_OPER_DOWN;

		/* If the lower device has carrier, the down state comes
		 * from this device itself, not the lower layer.
		 */
		return netif_carrier_ok(peer) ? IF_OPER_DOWN :
						IF_OPER_LOWERLAYERDOWN;
	}

	if (netif_dormant(dev))
		return IF_OPER_DORMANT;

	return IF_OPER_UP;
}
73b00055aaSStefan Rompf 
74b00055aaSStefan Rompf 
/* Apply the RFC 2863 operstate policy for dev: recompute the operational
 * state and, if it changed, store it in dev->operstate under
 * dev_base_lock.  dev->link_mode can demote an UP result to TESTING or
 * DORMANT.
 */
static void rfc2863_policy(struct net_device *dev)
{
	unsigned char operstate = default_operstate(dev);

	/* READ_ONCE() pairs with the WRITE_ONCE() below — NOTE(review):
	 * presumably operstate has lockless readers elsewhere; confirm.
	 */
	if (operstate == READ_ONCE(dev->operstate))
		return;

	write_lock(&dev_base_lock);

	switch(dev->link_mode) {
	case IF_LINK_MODE_TESTING:
		if (operstate == IF_OPER_UP)
			operstate = IF_OPER_TESTING;
		break;

	case IF_LINK_MODE_DORMANT:
		if (operstate == IF_OPER_UP)
			operstate = IF_OPER_DORMANT;
		break;
	case IF_LINK_MODE_DEFAULT:
	default:
		break;
	}

	WRITE_ONCE(dev->operstate, operstate);

	write_unlock(&dev_base_lock);
}
103b00055aaSStefan Rompf 
104b00055aaSStefan Rompf 
/* Sync operstate with any link state changes that happened before the
 * device was registered (carrier off, dormant, or testing).
 */
void linkwatch_init_dev(struct net_device *dev)
{
	/* Nothing to reconcile when the link is plainly up. */
	if (netif_carrier_ok(dev) && !netif_dormant(dev) &&
	    !netif_testing(dev))
		return;

	rfc2863_policy(dev);
}
1128f4cccbbSBen Hutchings 
1138f4cccbbSBen Hutchings 
linkwatch_urgent_event(struct net_device * dev)1146fa9864bSDavid S. Miller static bool linkwatch_urgent_event(struct net_device *dev)
115294cc44bSHerbert Xu {
116c37e0c99SEric Dumazet 	if (!netif_running(dev))
117c37e0c99SEric Dumazet 		return false;
118c37e0c99SEric Dumazet 
119a54acb3aSNicolas Dichtel 	if (dev->ifindex != dev_get_iflink(dev))
120c37e0c99SEric Dumazet 		return true;
121c37e0c99SEric Dumazet 
122b76f4189SRoopa Prabhu 	if (netif_is_lag_port(dev) || netif_is_lag_master(dev))
123194f4a6dSFlavio Leitner 		return true;
124194f4a6dSFlavio Leitner 
125c37e0c99SEric Dumazet 	return netif_carrier_ok(dev) &&	qdisc_tx_changing(dev);
126294cc44bSHerbert Xu }
127294cc44bSHerbert Xu 
128294cc44bSHerbert Xu 
/* Queue dev on the global event list (lweventlist) if it is not already
 * there, taking a tracked reference that is released when the event is
 * processed (see netdev_tracker_free()/__dev_put() in the run path).
 */
static void linkwatch_add_event(struct net_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&lweventlist_lock, flags);
	/* An empty link_watch_list node means the device is not queued. */
	if (list_empty(&dev->link_watch_list)) {
		list_add_tail(&dev->link_watch_list, &lweventlist);
		/* Hold the device while it sits on lweventlist. */
		netdev_hold(dev, &dev->linkwatch_dev_tracker, GFP_ATOMIC);
	}
	spin_unlock_irqrestore(&lweventlist_lock, flags);
}
140294cc44bSHerbert Xu 
141294cc44bSHerbert Xu 
/* Arm the delayed work that drains lweventlist.  Non-urgent events run
 * no earlier than linkwatch_nextevent (rate limiting); an urgent event
 * pulls the work forward to run immediately.
 */
static void linkwatch_schedule_work(int urgent)
{
	unsigned long delay = linkwatch_nextevent - jiffies;

	/* An urgent run is already pending; nothing can be sooner. */
	if (test_bit(LW_URGENT, &linkwatch_flags))
		return;

	/* Minimise down-time: drop delay for up event. */
	if (urgent) {
		/* Lost the race with another urgent scheduler. */
		if (test_and_set_bit(LW_URGENT, &linkwatch_flags))
			return;
		delay = 0;
	}

	/* If we wrap around we'll delay it by at most HZ. */
	if (delay > HZ)
		delay = 0;

	/*
	 * If urgent, schedule immediate execution; otherwise, don't
	 * override the existing timer.
	 */
	if (test_bit(LW_URGENT, &linkwatch_flags))
		mod_delayed_work(system_unbound_wq, &linkwatch_work, 0);
	else
		queue_delayed_work(system_unbound_wq, &linkwatch_work, delay);
}
169294cc44bSHerbert Xu 
170294cc44bSHerbert Xu 
/* Process one queued linkwatch event for dev: re-evaluate operstate,
 * (de)activate the qdisc to match carrier state, and send a state-change
 * notification.  Drops the reference taken in linkwatch_add_event().
 */
static void linkwatch_do_dev(struct net_device *dev)
{
	/*
	 * Make sure the above read is complete since it can be
	 * rewritten as soon as we clear the bit below.
	 */
	smp_mb__before_atomic();

	/* We are about to handle this device,
	 * so new events can be accepted
	 */
	clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);

	rfc2863_policy(dev);
	if (dev->flags & IFF_UP) {
		/* Keep the qdisc running state in sync with carrier. */
		if (netif_carrier_ok(dev))
			dev_activate(dev);
		else
			dev_deactivate(dev);

		netdev_state_change(dev);
	}
	/* Note: our callers are responsible for calling netdev_tracker_free().
	 * This is the reason we use __dev_put() instead of dev_put().
	 */
	__dev_put(dev);
}
198294cc44bSHerbert Xu 
/* Drain lweventlist, processing at most a fixed per-call budget of
 * devices so a long list cannot monopolize the caller.  With
 * urgent_only set, only devices whose event is urgent (see
 * linkwatch_urgent_event()) are handled; the rest are requeued.
 */
static void __linkwatch_run_queue(int urgent_only)
{
#define MAX_DO_DEV_PER_LOOP	100

	int do_dev = MAX_DO_DEV_PER_LOOP;
	struct net_device *dev;
	LIST_HEAD(wrk);

	/* Give urgent case more budget */
	if (urgent_only)
		do_dev += MAX_DO_DEV_PER_LOOP;

	/*
	 * Limit the number of linkwatch events to one
	 * per second so that a runaway driver does not
	 * cause a storm of messages on the netlink
	 * socket.  This limit does not apply to up events
	 * while the device qdisc is down.
	 */
	if (!urgent_only)
		linkwatch_nextevent = jiffies + HZ;
	/* Limit wrap-around effect on delay. */
	else if (time_after(linkwatch_nextevent, jiffies + HZ))
		linkwatch_nextevent = jiffies;

	clear_bit(LW_URGENT, &linkwatch_flags);

	spin_lock_irq(&lweventlist_lock);
	/* Detach the whole list so new events can queue while we work. */
	list_splice_init(&lweventlist, &wrk);

	while (!list_empty(&wrk) && do_dev > 0) {

		dev = list_first_entry(&wrk, struct net_device, link_watch_list);
		list_del_init(&dev->link_watch_list);

		/* Defer absent devices, and non-urgent events when in
		 * urgent-only mode, back onto the main list.
		 */
		if (!netif_device_present(dev) ||
		    (urgent_only && !linkwatch_urgent_event(dev))) {
			list_add_tail(&dev->link_watch_list, &lweventlist);
			continue;
		}
		/* We must free netdev tracker under
		 * the spinlock protection.
		 */
		netdev_tracker_free(dev, &dev->linkwatch_dev_tracker);
		/* linkwatch_do_dev() may sleep/notify; drop the lock. */
		spin_unlock_irq(&lweventlist_lock);
		linkwatch_do_dev(dev);
		do_dev--;
		spin_lock_irq(&lweventlist_lock);
	}

	/* Add the remaining work back to lweventlist */
	list_splice_init(&wrk, &lweventlist);

	/* Leftovers (budget exhausted or deferred) get a new
	 * non-urgent pass scheduled.
	 */
	if (!list_empty(&lweventlist))
		linkwatch_schedule_work(0);
	spin_unlock_irq(&lweventlist_lock);
}
256e014debeSEric Dumazet 
/* Remove dev from the pending event list.  If an event was queued,
 * process it immediately so the device's state is settled before the
 * caller proceeds.  NOTE(review): presumably used on device teardown —
 * confirm against callers.
 */
void linkwatch_forget_dev(struct net_device *dev)
{
	unsigned long flags;
	int clean = 0;

	spin_lock_irqsave(&lweventlist_lock, flags);
	if (!list_empty(&dev->link_watch_list)) {
		list_del_init(&dev->link_watch_list);
		clean = 1;
		/* We must release netdev tracker under
		 * the spinlock protection.
		 */
		netdev_tracker_free(dev, &dev->linkwatch_dev_tracker);
	}
	spin_unlock_irqrestore(&lweventlist_lock, flags);
	/* Run the pending event outside the spinlock. */
	if (clean)
		linkwatch_do_dev(dev);
}
275294cc44bSHerbert Xu 
276294cc44bSHerbert Xu 
/* Must be called with the rtnl semaphore held */
void linkwatch_run_queue(void)
{
	/* Process all pending events, not just urgent ones. */
	__linkwatch_run_queue(0);
}
2821da177e4SLinus Torvalds 
2831da177e4SLinus Torvalds 
/* Delayed-work handler: drain the event list under RTNL.  Runs in
 * urgent-only mode when woken before linkwatch_nextevent, i.e. when an
 * urgent event pulled the work forward ahead of the rate limit.
 */
static void linkwatch_event(struct work_struct *dummy)
{
	rtnl_lock();
	__linkwatch_run_queue(time_after(linkwatch_nextevent, jiffies));
	rtnl_unlock();
}
2901da177e4SLinus Torvalds 
2911da177e4SLinus Torvalds 
/* Record a link state change event for dev and (re)schedule the
 * linkwatch work; the actual processing happens later in
 * linkwatch_event().  Uses irq-safe locking and GFP_ATOMIC internally —
 * NOTE(review): presumably callable from atomic context; confirm.
 */
void linkwatch_fire_event(struct net_device *dev)
{
	bool urgent = linkwatch_urgent_event(dev);

	/* Queue the device only once; PENDING is cleared when the event
	 * is processed in linkwatch_do_dev().  An already-pending,
	 * non-urgent event needs no rescheduling.
	 */
	if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) {
		linkwatch_add_event(dev);
	} else if (!urgent)
		return;

	linkwatch_schedule_work(urgent);
}
EXPORT_SYMBOL(linkwatch_fire_event);
304