xref: /openbmc/linux/net/core/link_watch.c (revision 62e7ca52)
/*
 * Linux network device link state notification
 *
 * Author:
 *     Stefan Rompf <sux@loplof.de>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <linux/rtnetlink.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <asm/types.h>


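/*
 * LW_URGENT is set while an immediate (non-rate-limited) run of the
 * linkwatch work has been requested; linkwatch_schedule_work() checks
 * it to avoid queueing a redundant delayed run.
 */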
enum lw_bits {
	LW_URGENT = 0,
};

static unsigned long linkwatch_flags;
static unsigned long linkwatch_nextevent;

static void linkwatch_event(struct work_struct *dummy);
static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event);

static LIST_HEAD(lweventlist);
static DEFINE_SPINLOCK(lweventlist_lock);

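/*
 * Derive the RFC 2863 operational state from the device's carrier and
 * dormant flags: no carrier maps to IF_OPER_LOWERLAYERDOWN for stacked
 * devices (ifindex != iflink) and IF_OPER_DOWN otherwise, a dormant
 * device maps to IF_OPER_DORMANT, anything else to IF_OPER_UP.
 */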
static unsigned char default_operstate(const struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		return (dev->ifindex != dev->iflink ?
			IF_OPER_LOWERLAYERDOWN : IF_OPER_DOWN);

	if (netif_dormant(dev))
		return IF_OPER_DORMANT;

	return IF_OPER_UP;
}


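/*
 * Apply the device's link mode policy to the default operstate and
 * store the result in dev->operstate under dev_base_lock.  In
 * IF_LINK_MODE_DORMANT mode a device that would otherwise be reported
 * as UP is reported as DORMANT instead.
 */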
static void rfc2863_policy(struct net_device *dev)
{
	unsigned char operstate = default_operstate(dev);

	if (operstate == dev->operstate)
		return;

	write_lock_bh(&dev_base_lock);

	switch (dev->link_mode) {
	case IF_LINK_MODE_DORMANT:
		if (operstate == IF_OPER_UP)
			operstate = IF_OPER_DORMANT;
		break;

	case IF_LINK_MODE_DEFAULT:
	default:
		break;
	}

	dev->operstate = operstate;

	write_unlock_bh(&dev_base_lock);
}


void linkwatch_init_dev(struct net_device *dev)
{
	/* Handle pre-registration link state changes */
	if (!netif_carrier_ok(dev) || netif_dormant(dev))
		rfc2863_policy(dev);
}


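/*
 * Decide whether an event for this device may bypass the one-per-second
 * rate limit: events on stacked devices (ifindex != iflink), on team
 * ports, and carrier-ok events while the qdisc is being changed are
 * handled without delay; devices that are not running never qualify.
 */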
static bool linkwatch_urgent_event(struct net_device *dev)
{
	if (!netif_running(dev))
		return false;

	if (dev->ifindex != dev->iflink)
		return true;

	if (dev->priv_flags & IFF_TEAM_PORT)
		return true;

	return netif_carrier_ok(dev) && qdisc_tx_changing(dev);
}


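/*
 * Put the device on the global event list, taking a reference that is
 * dropped again in linkwatch_do_dev().  A device that is already
 * queued is left alone.
 */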
static void linkwatch_add_event(struct net_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&lweventlist_lock, flags);
	if (list_empty(&dev->link_watch_list)) {
		list_add_tail(&dev->link_watch_list, &lweventlist);
		dev_hold(dev);
	}
	spin_unlock_irqrestore(&lweventlist_lock, flags);
}


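/*
 * Schedule the linkwatch work.  Urgent requests run immediately and set
 * LW_URGENT so concurrent callers do not re-queue; non-urgent requests
 * are delayed until linkwatch_nextevent, at most one second away.
 */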
static void linkwatch_schedule_work(int urgent)
{
	unsigned long delay = linkwatch_nextevent - jiffies;

	if (test_bit(LW_URGENT, &linkwatch_flags))
		return;

	/* Minimise down-time: drop delay for up event. */
	if (urgent) {
		if (test_and_set_bit(LW_URGENT, &linkwatch_flags))
			return;
		delay = 0;
	}

	/* If we wrap around we'll delay it by at most HZ. */
	if (delay > HZ)
		delay = 0;

	/*
	 * If urgent, schedule immediate execution; otherwise, don't
	 * override the existing timer.
	 */
	if (test_bit(LW_URGENT, &linkwatch_flags))
		mod_delayed_work(system_wq, &linkwatch_work, 0);
	else
		schedule_delayed_work(&linkwatch_work, delay);
}


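/*
 * Handle one device taken off the event list: clear its pending bit so
 * new events can be queued, re-evaluate the RFC 2863 operstate,
 * (de)activate the qdisc to match the carrier state, send a state
 * change notification, and drop the reference taken in
 * linkwatch_add_event().
 */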
static void linkwatch_do_dev(struct net_device *dev)
{
	/*
	 * Make sure the caller's list manipulation (list_del_init() on
	 * dev->link_watch_list) is complete, since the entry can be
	 * rewritten as soon as we clear the bit below.
	 */
	smp_mb__before_atomic();

	/* We are about to handle this device,
	 * so new events can be accepted
	 */
	clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);

	rfc2863_policy(dev);
	if (dev->flags & IFF_UP) {
		if (netif_carrier_ok(dev))
			dev_activate(dev);
		else
			dev_deactivate(dev);

		netdev_state_change(dev);
	}
	dev_put(dev);
}

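/*
 * Drain the event list.  With urgent_only set, only devices whose
 * events qualify as urgent are handled; the rest are put back on the
 * list for the next scheduled run.
 */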
static void __linkwatch_run_queue(int urgent_only)
{
	struct net_device *dev;
	LIST_HEAD(wrk);

	/*
	 * Limit the number of linkwatch events to one
	 * per second so that a runaway driver does not
	 * cause a storm of messages on the netlink
	 * socket.  This limit does not apply to up events
	 * while the device qdisc is down.
	 */
	if (!urgent_only)
		linkwatch_nextevent = jiffies + HZ;
	/* Limit wrap-around effect on delay. */
	else if (time_after(linkwatch_nextevent, jiffies + HZ))
		linkwatch_nextevent = jiffies;

	clear_bit(LW_URGENT, &linkwatch_flags);

	spin_lock_irq(&lweventlist_lock);
	list_splice_init(&lweventlist, &wrk);

	while (!list_empty(&wrk)) {

		dev = list_first_entry(&wrk, struct net_device, link_watch_list);
		list_del_init(&dev->link_watch_list);

		if (urgent_only && !linkwatch_urgent_event(dev)) {
			list_add_tail(&dev->link_watch_list, &lweventlist);
			continue;
		}
		spin_unlock_irq(&lweventlist_lock);
		linkwatch_do_dev(dev);
		spin_lock_irq(&lweventlist_lock);
	}

	if (!list_empty(&lweventlist))
		linkwatch_schedule_work(0);
	spin_unlock_irq(&lweventlist_lock);
}

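/*
 * Remove a device from the pending event list and, if an event was
 * queued, handle it immediately so that the pending state and the
 * reference taken in linkwatch_add_event() are released.
 */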
void linkwatch_forget_dev(struct net_device *dev)
{
	unsigned long flags;
	int clean = 0;

	spin_lock_irqsave(&lweventlist_lock, flags);
	if (!list_empty(&dev->link_watch_list)) {
		list_del_init(&dev->link_watch_list);
		clean = 1;
	}
	spin_unlock_irqrestore(&lweventlist_lock, flags);
	if (clean)
		linkwatch_do_dev(dev);
}


/* Must be called with the rtnl semaphore held */
void linkwatch_run_queue(void)
{
	__linkwatch_run_queue(0);
}


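/*
 * Delayed work handler.  If the rate-limit window set by the previous
 * full run has not expired yet, only urgent events are processed.
 */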
static void linkwatch_event(struct work_struct *dummy)
{
	rtnl_lock();
	__linkwatch_run_queue(time_after(linkwatch_nextevent, jiffies));
	rtnl_unlock();
}


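/*
 * Record a link state change for a device, typically on behalf of
 * helpers such as netif_carrier_on()/netif_carrier_off().  The first
 * event queues the device on the event list; subsequent events only
 * matter if they are urgent, in which case the work is rescheduled to
 * run immediately.
 */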
void linkwatch_fire_event(struct net_device *dev)
{
	bool urgent = linkwatch_urgent_event(dev);

	if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) {
		linkwatch_add_event(dev);
	} else if (!urgent)
		return;

	linkwatch_schedule_work(urgent);
}
EXPORT_SYMBOL(linkwatch_fire_event);
254